/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

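/* For example, with the ring at its maximum size of HTT_RX_RING_SIZE_MAX
 * buffers, the fill level works out to one less than half the ring, i.e. the
 * ring is deliberately kept less than half full. This matters for the Full
 * Rx Reorder firmware - see the BUILD_BUG_ON() in
 * __ath10k_htt_rx_ring_fill_n() below.
 */
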
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
        struct ath10k_skb_rxcb *rxcb;

        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
                if (rxcb->paddr == paddr)
                        return ATH10K_RXCB_SKB(rxcb);

        WARN_ON_ONCE(1);
        return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_rxcb *rxcb;
        struct hlist_node *n;
        int i;

        if (htt->rx_ring.in_ord_rx) {
                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
                        skb = ATH10K_RXCB_SKB(rxcb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        hash_del(&rxcb->hlist);
                        dev_kfree_skb_any(skb);
                }
        } else {
                for (i = 0; i < htt->rx_ring.size; i++) {
                        skb = htt->rx_ring.netbufs_ring[i];
                        if (!skb)
                                continue;

                        rxcb = ATH10K_SKB_RXCB(skb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        htt->rx_ring.fill_cnt = 0;
        hash_init(htt->rx_ring.skb_table);
        memset(htt->rx_ring.netbufs_ring, 0,
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        /* The Full Rx Reorder firmware has no way of telling the host
         * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
         * To keep things simple make sure ring is always half empty. This
         * guarantees there'll be no replenishment overruns possible.
         */
        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
                                 (u32)paddr);
                }

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /*
         * Make sure the rx buffer is updated before available buffer
         * index to avoid any potential rx ring corruption.
         */
        mb();
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}

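/* A note on the ring protocol implemented above: the host is the producer of
 * rx buffers and publishes its producer position through alloc_idx.vaddr,
 * which lives in DMA-coherent memory read by the firmware. The memory
 * barrier ensures the paddrs_ring[] slot contents are visible before the
 * index that advertises them. The consumer-side counterpart is the
 * sw_rd_idx handling in ath10k_htt_rx_netbuf_pop() below.
 */
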
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up significant amount of CPU cycles and starve
         * other tasks, e.g. TX on an ethernet device while acting as a bridge
         * with ath10k wlan interface. This ended up with very poor performance
         * once the host system's CPU was overwhelmed with RX on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact tasklets are
         * processed in FIFO order. This means actual RX processing can starve
         * out refilling. If there aren't enough buffers on the RX ring the FW
         * will not report RX until it is refilled with enough buffers. This
         * automatically balances load with respect to CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability.
         */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

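/* The two timeouts used above give the refill path two different paces: a
 * 50 ms back-off (HTT_RX_RING_REFILL_RETRY_MS) when an allocation failed
 * under memory pressure, and a quick 5 ms re-schedule
 * (HTT_RX_RING_REFILL_RESCHED_MS) when the refill was merely capped by
 * ATH10K_HTT_MAX_NUM_REFILL and more buffers are still owed to the ring.
 */
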
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
        struct ath10k_htt *htt = &ar->htt;
        int ret;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
        spin_unlock_bh(&htt->rx_ring.lock);

        if (ret)
                ath10k_htt_rx_ring_free(htt);

        return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);

        skb_queue_purge(&htt->rx_compl_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);

        ath10k_htt_rx_ring_free(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
        htt->rx_ring.paddrs_ring[idx] = 0;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_RXCB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

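/* A minimal sketch of how callers are expected to use the pop/replenish pair
 * (illustrative only; the real callers are ath10k_htt_rx_amsdu_pop() and the
 * rx handlers further down):
 *
 *      spin_lock_bh(&htt->rx_ring.lock);
 *      msdu = ath10k_htt_rx_netbuf_pop(htt);
 *      spin_unlock_bh(&htt->rx_ring.lock);
 *      if (msdu)
 *              ...process msdu...
 *      ath10k_htt_rx_msdu_buff_replenish(htt);    // top the ring back up
 */
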
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 * expects now
                 */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain rx descriptor */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
                                         sizeof(*rx_desc) - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u32 paddr)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
        if (!msdu)
                return NULL;

        rxcb = ATH10K_SKB_RXCB(msdu);
        hash_del(&rxcb->hlist);
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
                                        struct htt_rx_in_ord_ind *ev,
                                        struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
        bool is_offload;
        u32 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = (void *)msdu->data;

                        trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

                        skb_put(msdu, sizeof(*rxd));
                        skb_pull(msdu, sizeof(*rxd));
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd->attention.flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_confused = false;

        /* XXX: The fill level could be changed during runtime in response to
         * the host processing latency. Is this really worth it?
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
        htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

        vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);

        skb_queue_head_init(&htt->rx_compl_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);

err_netbuf:
        return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

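/* 8 bytes is the length of the TKIP Michael MIC appended to the plaintext;
 * the 8-byte trim in ath10k_htt_rx_h_undecap_raw() removes it for TKIP
 * frames that are delivered with RX_FLAG_MMIC_STRIPPED set.
 */
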
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
                                         enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

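/* The Group ID is carried in VHT-SIG-A1: values 0 and 63 denote an SU(-MIMO)
 * PPDU, anything else an MU-MIMO PPDU whose per-user MCS/Nss cannot be
 * recovered here (see the comment in ath10k_htt_rx_h_rates() below).
 */
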
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        struct ieee80211_supported_band *sband;
        u8 cck, rate, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u8 group_id;
        u32 info1, info2, info3;

        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                /* To get legacy rate index band is required. Since band can't
                 * be undefined check if freq is non-zero.
                 */
                if (!status->freq)
                        return;

                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate &= ~RX_PPDU_START_RATE_FLAG;

                sband = &ar->mac.sbands[status->band];
                status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->encoding = RX_ENC_HT;
                if (sgi)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
                if (bw)
                        status->bw = RATE_INFO_BW_40;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                 * TODO check this
                 */
                bw = info2 & 3;
                sgi = info3 & 1;
                group_id = (info2 >> 4) & 0x3F;

                if (GROUP_ID_IS_SU_MIMO(group_id)) {
                        mcs = (info3 >> 4) & 0x0F;
                        nss = ((info2 >> 10) & 0x07) + 1;
                } else {
                        /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
                         * so it's impossible to decode MCS. Also since
                         * firmware consumes Group Id Management frames host
                         * has no knowledge regarding group/user position
                         * mapping so it's impossible to pick the correct Nsts
                         * from VHT-SIG-A1.
                         *
                         * Bandwidth and SGI are valid so report the rateinfo
                         * on best-effort basis.
                         */
                        mcs = 0;
                        nss = 1;
                }

                if (mcs > 0x09) {
                        ath10k_warn(ar, "invalid MCS received %u\n", mcs);
                        ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
                                    __le32_to_cpu(rxd->attention.flags),
                                    __le32_to_cpu(rxd->mpdu_start.info0),
                                    __le32_to_cpu(rxd->mpdu_start.info1),
                                    __le32_to_cpu(rxd->msdu_start.common.info0),
                                    __le32_to_cpu(rxd->msdu_start.common.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info0),
                                    __le32_to_cpu(rxd->ppdu_start.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info2),
                                    __le32_to_cpu(rxd->ppdu_start.info3),
                                    __le32_to_cpu(rxd->ppdu_start.info4));

                        ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
                                    __le32_to_cpu(rxd->msdu_end.common.info0),
                                    __le32_to_cpu(rxd->mpdu_end.info0));

                        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
                                        "rx desc msdu payload: ",
                                        rxd->msdu_payload, 50);
                        return;
                }

                status->rate_idx = mcs;
                status->nss = nss;

                if (sgi)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->bw = RATE_INFO_BW_40;
                        break;
                /* 80MHZ */
                case 2:
                        status->bw = RATE_INFO_BW_80;
                        break;
                /* 160MHZ */
                case 3:
                        status->bw = RATE_INFO_BW_160;
                        break;
                }

                status->encoding = RX_ENC_VHT;
                break;
        default:
                break;
        }
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;
        u16 peer_id;

        lockdep_assert_held(&ar->data_lock);

        if (!rxd)
                return NULL;

        if (rxd->attention.flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;

        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;

        peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_PEER_IDX);

        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer)
                return NULL;

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (WARN_ON_ONCE(!arvif))
                return NULL;

        if (ath10k_mac_vif_chan(arvif->vif, &def))
                return NULL;

        return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_id == vdev_id &&
                    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
                        return def.chan;
        }

        return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf,
                              void *data)
{
        struct cfg80211_chan_def *def = data;

        *def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
        struct cfg80211_chan_def def = {};

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_htt_rx_h_any_chan_iter,
                                            &def);

        return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd,
                                    u32 vdev_id)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        if (!ch)
                ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
        if (!ch)
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
        if (!ch)
                ch = ar->tgt_oper_chan;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

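/* The lookup above is a best-effort fallback chain: the scan channel, then
 * the current rx channel, then whatever can be derived from the rx
 * descriptor's peer, the vdev, any active channel context, and finally the
 * target's operating channel. If all of these fail the frame is later
 * dropped by ath10k_htt_rx_h_filter() since rx_status->freq stays 0.
 */
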
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd->ppdu_start.rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
         * means all prior MSDUs in a PPDU are reported to mac80211 without the
         * TSF. Is it worth holding frames until end of PPDU is known?
         *
         * FIXME: Can we get/compute 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 u32 vdev_id)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_first_ppdu = !!(rxd->attention.flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd->attention.flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->nss = 0;
                status->encoding = RX_ENC_LEGACY;
                status->bw = RATE_INFO_BW_20;
                status->flag &= ~RX_FLAG_MACTIME_END;
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu)
                ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
        u8 *qc;
        int tid;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return "";

        qc = ieee80211_get_qos_ctl(hdr);
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        if (tid < 8)
                snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
        else
                snprintf(out, size, "tid %d", tid);

        return out;
}

static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
                   ath10k_get_tid(hdr, tid, sizeof(tid)),
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
                   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
                   (status->encoding == RX_ENC_HT) ? "ht" : "",
                   (status->encoding == RX_ENC_VHT) ? "vht" : "",
                   (status->bw == RATE_INFO_BW_40) ? "40" : "",
                   (status->bw == RATE_INFO_BW_80) ? "80" : "",
                   (status->bw == RATE_INFO_BW_160) ? "160" : "",
                   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR),
                   !!(status->flag & RX_FLAG_AMSDU_MORE));
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);

        ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
                                      struct ieee80211_hdr *hdr)
{
        int len = ieee80211_hdrlen(hdr->frame_control);

        if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
                      ar->running_fw->fw_file.fw_features))
                len = round_up(len, 4);

        return len;
}

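/* Worked example, assuming typical header sizes: a 3-addr nwifi header is
 * 24 bytes and already 4-byte aligned, but a 4-addr header is 30 bytes and
 * round_up(30, 4) yields 32 - matching the padding most firmware applies -
 * unless the firmware advertises
 * ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, in which case the header
 * length is taken as-is.
 */
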
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        enum htt_rx_mpdu_encrypt_type enctype,
                                        bool is_decrypted)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        size_t crypto_len;
        bool is_first;
        bool is_last;

        rxd = (void *)msdu->data - sizeof(*rxd);
        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

        /* Delivered decapped frame:
         * [802.11 header]
         * [crypto param] <-- can be trimmed if !fcs_err &&
         *                    !decrypt_err && !peer_idx_invalid
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         * [payload]
         * [FCS] <-- at end, needs to be trimmed
         */

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!is_first)))
                return;

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
                return;

        skb_trim(msdu, msdu->len - FCS_LEN);

        /* In most cases this will be true for sniffed frames. It makes sense
         * to deliver them as-is without stripping the crypto param. This is
         * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
         */
        if (!is_decrypted)
                return;

        /* The payload is decrypted so strip crypto params. Start from tail
         * since hdr is used to compute some stuff.
         */

        hdr = (void *)msdu->data;

        /* Tail */
        if (status->flag & RX_FLAG_IV_STRIPPED)
                skb_trim(msdu, msdu->len -
                         ath10k_htt_rx_crypto_tail_len(ar, enctype));

        /* MMIC (8 == MICHAEL_MIC_LEN) */
        if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - 8);

        /* Head */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                memmove((void *)msdu->data + crypto_len,
                        (void *)msdu->data, hdr_len);
                skb_pull(msdu, crypto_len);
        }
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          struct ieee80211_rx_status *status,
                                          const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
        int l3_pad_bytes;

        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         *
         * Note: The nwifi header doesn't have QoS Control and is
         * (always?) a 3addr frame.
         *
         * Note2: There's no A-MSDU subframe header. Even if it's part
         * of an A-MSDU.
         */

        /* pull decapped header and copy SA & DA */
        rxd = (void *)msdu->data - sizeof(*rxd);

        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
        skb_put(msdu, l3_pad_bytes);

        hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

        hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
                                           struct sk_buff *msdu,
                                           enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len, crypto_len;
        void *rfc1042;
        bool is_first, is_last, is_amsdu;
        int bytes_aligned = ar->hw_params.decap_align_bytes;

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;

        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);

        rfc1042 = hdr;

        if (is_first) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                rfc1042 += round_up(hdr_len, bytes_aligned) +
                           round_up(crypto_len, bytes_aligned);
        }

        if (is_amsdu)
                rfc1042 += sizeof(struct amsdu_subframe_hdr);

        return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        const u8 first_hdr[64],
                                        enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct ethhdr *eth;
        size_t hdr_len;
        void *rfc1042;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
        int l3_pad_bytes;
        struct htt_rx_desc *rxd;

        /* Delivered decapped frame:
         * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
         * [payload]
         */

        rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
        if (WARN_ON_ONCE(!rfc1042))
                return;

        rxd = (void *)msdu->data - sizeof(*rxd);
        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
        skb_put(msdu, l3_pad_bytes);
        skb_pull(msdu, l3_pad_bytes);

        /* pull decapped header and copy SA & DA */
        eth = (struct ethhdr *)msdu->data;
        ether_addr_copy(da, eth->h_dest);
        ether_addr_copy(sa, eth->h_source);
        skb_pull(msdu, sizeof(struct ethhdr));

        /* push rfc1042/llc/snap */
        memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
               sizeof(struct rfc1042_hdr));

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
                                         struct sk_buff *msdu,
                                         struct ieee80211_rx_status *status,
                                         const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
        int l3_pad_bytes;
        struct htt_rx_desc *rxd;

        /* Delivered decapped frame:
         * [amsdu header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         * [payload]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

        skb_put(msdu, l3_pad_bytes);
        skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
                                    struct sk_buff *msdu,
                                    struct ieee80211_rx_status *status,
                                    u8 first_hdr[64],
                                    enum htt_rx_mpdu_encrypt_type enctype,
                                    bool is_decrypted)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
         * [crypto param] <-- padded to 4 bytes long
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         *
         * Other (2nd, 3rd, ..) msdu's decapped header:
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        switch (decap) {
        case RX_MSDU_DECAP_RAW:
                ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
                                            is_decrypted);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
                break;
        }
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.common.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

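/* CHECKSUM_UNNECESSARY tells the network stack that the hardware already
 * verified the IP and TCP/UDP checksums, so software verification is
 * skipped; CHECKSUM_NONE makes the stack verify them itself. The result is
 * applied to every delivered MSDU via ath10k_htt_rx_h_csum_offload() below.
 */
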
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
        msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status)
{
        struct sk_buff *first;
        struct sk_buff *last;
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum htt_rx_mpdu_encrypt_type enctype;
        u8 first_hdr[64];
        u8 *qos;
        size_t hdr_len;
        bool has_fcs_err;
        bool has_crypto_err;
        bool has_tkip_err;
        bool has_peer_idx_invalid;
        bool is_decrypted;
        bool is_mgmt;
        u32 attention;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_mgmt = !!(rxd->attention.flags &
                     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
         * decapped header. It'll be used for undecapping of each MSDU.
         */
        hdr = (void *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(first_hdr, hdr, hdr_len);

        /* Each A-MSDU subframe will use the original header as the base and be
         * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
         */
        hdr = (void *)first_hdr;
        qos = ieee80211_get_qos_ctl(hdr);
        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

        /* Some attention flags are valid only in the last MSDU. */
        last = skb_peek_tail(amsdu);
        rxd = (void *)last->data - sizeof(*rxd);
        attention = __le32_to_cpu(rxd->attention.flags);

        has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
        has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
        has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

        /* Note: If hardware captures an encrypted frame that it can't decrypt,
         * e.g. due to fcs error, missing peer or invalid key data it will
         * report the frame as raw.
         */
        is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
                        !has_fcs_err &&
                        !has_crypto_err &&
                        !has_peer_idx_invalid);

        /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
        status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
                          RX_FLAG_MMIC_ERROR |
                          RX_FLAG_DECRYPTED |
                          RX_FLAG_IV_STRIPPED |
                          RX_FLAG_ONLY_MONITOR |
                          RX_FLAG_MMIC_STRIPPED);

        if (has_fcs_err)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;

        if (has_tkip_err)
                status->flag |= RX_FLAG_MMIC_ERROR;

        /* Firmware reports all necessary management frames via WMI already.
         * They are not reported to monitor interfaces at all so pass the ones
         * coming via HTT to monitor interfaces instead. This simplifies
         * matters a lot.
         */
        if (is_mgmt)
                status->flag |= RX_FLAG_ONLY_MONITOR;

        if (is_decrypted) {
                status->flag |= RX_FLAG_DECRYPTED;

                if (likely(!is_mgmt))
                        status->flag |= RX_FLAG_IV_STRIPPED |
                                        RX_FLAG_MMIC_STRIPPED;
        }

        skb_queue_walk(amsdu, msdu) {
                ath10k_htt_rx_h_csum_offload(msdu);
                ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
                                        is_decrypted);

                /* Undecapping involves copying the original 802.11 header back
                 * to sk_buff. If frame is protected and hardware has decrypted
                 * it then remove the protected bit.
                 */
                if (!is_decrypted)
                        continue;
                if (is_mgmt)
                        continue;

                hdr = (void *)msdu->data;
                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        }
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
                                    struct sk_buff_head *amsdu,
                                    struct ieee80211_rx_status *status)
{
        struct sk_buff *msdu;

        while ((msdu = __skb_dequeue(amsdu))) {
                /* Setup per-MSDU flags */
                if (skb_queue_empty(amsdu))
                        status->flag &= ~RX_FLAG_AMSDU_MORE;
                else
                        status->flag |= RX_FLAG_AMSDU_MORE;

                ath10k_process_rx(ar, status, msdu);
        }
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
        struct sk_buff *skb, *first;
        int space;
        int total_len = 0;

        /* TODO: Could optimize this by using
         * skb_try_coalesce or similar method to
         * decrease copying, or maybe get mac80211 to
         * provide a way to just receive a list of
         * skb?
         */

        first = __skb_dequeue(amsdu);

        /* Allocate total length all at once. */
        skb_queue_walk(amsdu, skb)
                total_len += skb->len;

        space = total_len - skb_tailroom(first);
        if ((space > 0) &&
            (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
                /* TODO: bump some rx-oom error stat */
                /* put it back together so we can free the
                 * whole list at once.
                 */
                __skb_queue_head(amsdu, first);
                return -1;
        }

        /* Walk list again, copying contents into
         * msdu.
         */
        while ((skb = __skb_dequeue(amsdu))) {
                skb_copy_from_linear_data(skb, skb_put(first, skb->len),
                                          skb->len);
                dev_kfree_skb_any(skb);
        }

        __skb_queue_head(amsdu, first);
        return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
                                    struct sk_buff_head *amsdu)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        /* FIXME: Current unchaining logic can only handle simple case of raw
         * msdu chaining. If decapping is other than raw the chaining may be
         * more complex and this isn't handled by the current code. Don't even
         * try re-constructing such frames - it'll be pretty much garbage.
         */
        if (decap != RX_MSDU_DECAP_RAW ||
            skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
                __skb_queue_purge(amsdu);
                return;
        }

        ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                                        struct sk_buff_head *amsdu,
                                        struct ieee80211_rx_status *rx_status)
{
        /* FIXME: It might be a good idea to do some fuzzy-testing to drop
         * invalid/dangerous frames.
         */

        if (!rx_status->freq) {
                ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
                return false;
        }

        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
                return false;
        }

        return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
                                   struct sk_buff_head *amsdu,
                                   struct ieee80211_rx_status *rx_status)
{
        if (skb_queue_empty(amsdu))
                return;

        if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
                return;

        __skb_queue_purge(amsdu);
}

static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
        int ret, num_msdus;

        __skb_queue_head_init(&amsdu);

        spin_lock_bh(&htt->rx_ring.lock);
        if (htt->rx_confused) {
                spin_unlock_bh(&htt->rx_ring.lock);
                return -EIO;
        }
        ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
        spin_unlock_bh(&htt->rx_ring.lock);

        if (ret < 0) {
                ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
                __skb_queue_purge(&amsdu);
                /* FIXME: It's probably a good idea to reboot the
                 * device instead of leaving it inoperable.
                 */
                htt->rx_confused = true;
                return ret;
        }

        num_msdus = skb_queue_len(&amsdu);
        ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

        /* ret == 1 indicates chained MSDUs */
        if (ret > 0)
                ath10k_htt_rx_h_unchain(ar, &amsdu);

        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

        return num_msdus;
}

static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
                                      struct htt_rx_indication *rx)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        int num_mpdu_ranges;
        int i, mpdu_count = 0;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++)
                mpdu_count += mpdu_ranges[i].mpdu_count;

        atomic_add(mpdu_count, &htt->num_mpdus_ready);
}

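/* Note that the indication is only counted here, not processed: the counter
 * is presumably drained later by the driver's NAPI poll path (outside this
 * excerpt), which pops the corresponding A-MPDUs off the rx ring, e.g. via
 * ath10k_htt_rx_handle_amsdu(), rather than doing the work in this context.
 */
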
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
                                       struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        struct htt_tx_done tx_done = {};
        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
        __le16 msdu_id;
        int i;

        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
                tx_done.status = HTT_TX_COMPL_STATE_NOACK;
                break;
        case HTT_DATA_TX_STATUS_OK:
                tx_done.status = HTT_TX_COMPL_STATE_ACK;
                break;
        case HTT_DATA_TX_STATUS_DISCARD:
        case HTT_DATA_TX_STATUS_POSTPONE:
        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
                tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
                break;
        default:
                ath10k_warn(ar, "unhandled tx completion status %d\n", status);
                tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
                break;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                   resp->data_tx_completion.num_msdus);

        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                msdu_id = resp->data_tx_completion.msdus[i];
                tx_done.msdu_id = __le16_to_cpu(msdu_id);

                /* kfifo_put: In practice firmware shouldn't fire off per-CE
                 * interrupt and main interrupt (MSI/-X range case) for the same
                 * HTC service so it should be safe to use kfifo_put w/o lock.
                 *
                 * From kfifo_put() documentation:
                 *  Note that with only one concurrent reader and one concurrent
                 *  writer, you don't need extra locking to use these macros.
                 */
                if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
                        ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
                                    tx_done.msdu_id, tx_done.status);
                        ath10k_txrx_tx_unref(htt, &tx_done);
                }
        }
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
        struct htt_rx_addba *ev = &resp->rx_addba;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        u16 info0, tid, peer_id;

        info0 = __le16_to_cpu(ev->info0);
        tid = MS(info0, HTT_RX_BA_INFO0_TID);
        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx addba tid %hu peer_id %hu size %hhu\n",
                   tid, peer_id, ev->window_size);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer) {
                ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
                            peer_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (!arvif) {
                ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
                            peer->vdev_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
                   peer->addr, tid, ev->window_size);

        ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
        spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
        struct htt_rx_delba *ev = &resp->rx_delba;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        u16 info0, tid, peer_id;

        info0 = __le16_to_cpu(ev->info0);
        tid = MS(info0, HTT_RX_BA_INFO0_TID);
        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx delba tid %hu peer_id %hu\n",
                   tid, peer_id);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer) {
                ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
                            peer_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (!arvif) {
                ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
                            peer->vdev_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx stop rx ba session sta %pM tid %hu\n",
                   peer->addr, tid);

        ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
        spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
                                       struct sk_buff_head *amsdu)
{
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;

        if (skb_queue_empty(list))
                return -ENOBUFS;

        if (WARN_ON(!skb_queue_empty(amsdu)))
                return -EINVAL;

        while ((msdu = __skb_dequeue(list))) {
                __skb_queue_tail(amsdu, msdu);

                rxd = (void *)msdu->data - sizeof(*rxd);
                if (rxd->msdu_end.common.info0 &
                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
                        break;
        }

        msdu = skb_peek_tail(amsdu);
        rxd = (void *)msdu->data - sizeof(*rxd);
        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
                skb_queue_splice_init(amsdu, list);
                return -EAGAIN;
        }

        return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
                                            struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_has_protected(hdr->frame_control))
                return;

        /* Offloaded frames are already decrypted but firmware insists they are
         * protected in the 802.11 header. Strip the flag. Otherwise mac80211
         * will drop the frame.
         */

        hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        status->flag |= RX_FLAG_DECRYPTED |
                        RX_FLAG_IV_STRIPPED |
                        RX_FLAG_MMIC_STRIPPED;
}

static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
                                      struct sk_buff_head *list)
{
        struct ath10k_htt *htt = &ar->htt;
        struct ieee80211_rx_status *status = &htt->rx_status;
        struct htt_rx_offload_msdu *rx;
        struct sk_buff *msdu;
        size_t offset;
        int num_msdu = 0;

        while ((msdu = __skb_dequeue(list))) {
                /* Offloaded frames don't have Rx descriptor. Instead they have
                 * a short meta information header.
                 */

                rx = (void *)msdu->data;

                skb_put(msdu, sizeof(*rx));
                skb_pull(msdu, sizeof(*rx));

                if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
                        ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
                        dev_kfree_skb_any(msdu);
                        continue;
                }

                skb_put(msdu, __le16_to_cpu(rx->msdu_len));

                /* Offloaded rx header length isn't multiple of 2 nor 4 so the
                 * actual payload is unaligned. Align the frame. Otherwise
                 * mac80211 complains. This shouldn't reduce performance much
                 * because these offloaded frames are rare.
                 */
                offset = 4 - ((unsigned long)msdu->data & 3);
                skb_put(msdu, offset);
                memmove(msdu->data + offset, msdu->data, msdu->len);
                skb_pull(msdu, offset);

                /* FIXME: The frame is NWifi. Re-construct QoS Control
                 * if possible later.
                 */

                memset(status, 0, sizeof(*status));
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
                ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
                ath10k_process_rx(ar, status, msdu);
                num_msdu++;
        }

        return num_msdu;
}

static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (void *)skb->data;
        struct ieee80211_rx_status *status = &htt->rx_status;
        struct sk_buff_head list;
        struct sk_buff_head amsdu;
        u16 peer_id;
        u16 msdu_count;
        u8 vdev_id;
        u8 tid;
        bool offload;
        bool frag;
        int ret, num_msdus = 0;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_confused)
                return -EIO;

        skb_pull(skb, sizeof(resp->hdr));
        skb_pull(skb, sizeof(resp->rx_in_ord_ind));

        peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
        msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
        vdev_id = resp->rx_in_ord_ind.vdev_id;
        tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
        offload = !!(resp->rx_in_ord_ind.info &
                     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
        frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
                   vdev_id, peer_id, tid, offload, frag, msdu_count);

        if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
                ath10k_warn(ar, "dropping invalid in order rx indication\n");
                return -EINVAL;
        }

        /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
         * extracted and processed.
         */
        __skb_queue_head_init(&list);
        ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
        if (ret < 0) {
                ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
                htt->rx_confused = true;
                return -EIO;
        }

        /* Offloaded frames are very different and need to be handled
         * separately.
         */
        if (offload)
                num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);

        while (!skb_queue_empty(&list)) {
                __skb_queue_head_init(&amsdu);
                ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
                switch (ret) {
                case 0:
                        /* Note: The in-order indication may report interleaved
                         * frames from different PPDUs meaning reported rx rate
                         * to mac80211 isn't accurate/reliable. It's still
                         * better to report something than nothing though. This
                         * should still give an idea about rx rate to the user.
                         */
                        num_msdus += skb_queue_len(&amsdu);
                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
                        ath10k_htt_rx_h_deliver(ar, &amsdu, status);
                        break;
                case -EAGAIN:
                        /* fall through */
                default:
                        /* Should not happen. */
                        ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
                        htt->rx_confused = true;
                        __skb_queue_purge(&list);
                        return -EIO;
                }
        }

        return num_msdus;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
                                                   const __le32 *resp_ids,
                                                   int num_resp_ids)
{
        int i;
        u32 resp_id;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
                   num_resp_ids);

        for (i = 0; i < num_resp_ids; i++) {
                resp_id = le32_to_cpu(resp_ids[i]);

                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
                           resp_id);

                /* TODO: free resp_id */
        }
}

static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_hw *hw = ar->hw;
        struct ieee80211_txq *txq;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        struct htt_tx_fetch_record *record;
        size_t len;
        size_t max_num_bytes;
        size_t max_num_msdus;
        size_t num_bytes;
        size_t num_msdus;
        const __le32 *resp_ids;
        u16 num_records;
        u16 num_resp_ids;
        u16 peer_id;
        u8 tid;
        int ret;
        int i;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

        len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
        if (unlikely(skb->len < len)) {
                ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
                return;
        }

        num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
        num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

        len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
        len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

        if (unlikely(skb->len < len)) {
                ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
                   num_records, num_resp_ids,
                   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

        if (!ar->htt.tx_q_state.enabled) {
                ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
                return;
        }

        if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
                ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
                return;
        }

        rcu_read_lock();

        for (i = 0; i < num_records; i++) {
                record = &resp->tx_fetch_ind.records[i];
                peer_id = MS(le16_to_cpu(record->info),
                             HTT_TX_FETCH_RECORD_INFO_PEER_ID);
                tid = MS(le16_to_cpu(record->info),
                         HTT_TX_FETCH_RECORD_INFO_TID);
                max_num_msdus = le16_to_cpu(record->num_msdus);
                max_num_bytes = le32_to_cpu(record->num_bytes);

                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
                           i, peer_id, tid, max_num_msdus, max_num_bytes);

                if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
                    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
                        ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
                                    peer_id, tid);
                        continue;
                }

                spin_lock_bh(&ar->data_lock);
                txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
                spin_unlock_bh(&ar->data_lock);

                /* It is okay to release the lock and use txq because RCU read
                 * lock is held.
                 */

                if (unlikely(!txq)) {
                        ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
                                    peer_id, tid);
                        continue;
                }

                num_msdus = 0;
                num_bytes = 0;

                while (num_msdus < max_num_msdus &&
                       num_bytes < max_num_bytes) {
                        ret = ath10k_mac_tx_push_txq(hw, txq);
                        if (ret < 0)
                                break;

                        num_msdus++;
                        num_bytes += ret;
                }

                record->num_msdus = cpu_to_le16(num_msdus);
                record->num_bytes = cpu_to_le32(num_bytes);

                ath10k_htt_tx_txq_recalc(hw, txq);
        }

        rcu_read_unlock();

        resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
        ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

        ret = ath10k_htt_tx_fetch_resp(ar,
                                       resp->tx_fetch_ind.token,
                                       resp->tx_fetch_ind.fetch_seq_num,
                                       resp->tx_fetch_ind.records,
                                       num_records);
        if (unlikely(ret)) {
                ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
                            le32_to_cpu(resp->tx_fetch_ind.token), ret);
                /* FIXME: request fw restart */
        }

        ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
                                           struct sk_buff *skb)
{
        const struct htt_resp *resp = (void *)skb->data;
        size_t len;
        int num_resp_ids;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

        len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
        if (unlikely(skb->len < len)) {
                ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
                return;
        }

        num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
        len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

        if (unlikely(skb->len < len)) {
                ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
                return;
        }

        ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
                                               resp->tx_fetch_confirm.resp_ids,
                                               num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
                                             struct sk_buff *skb)
{
        const struct htt_resp *resp = (void *)skb->data;
        const struct htt_tx_mode_switch_record *record;
        struct ieee80211_txq *txq;
        struct ath10k_txq *artxq;
        size_t len;
        size_t num_records;
        enum htt_tx_mode_switch_mode mode;
        bool enable;
        u16 info0;
        u16 info1;
        u16 threshold;
        u16 peer_id;
        u8 tid;
        int i;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

        len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
        if (unlikely(skb->len < len)) {
                ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
                return;
        }

        info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
        info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

        enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
        num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
        mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
        threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
                   info0, info1, enable, num_records, mode, threshold);

        len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

        if (unlikely(skb->len < len)) {
                ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
                return;
        }

        switch (mode) {
        case HTT_TX_MODE_SWITCH_PUSH:
        case HTT_TX_MODE_SWITCH_PUSH_PULL:
                break;
        default:
                ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
                            mode);
                return;
        }

        if (!enable)
                return;

        ar->htt.tx_q_state.enabled = enable;
        ar->htt.tx_q_state.mode = mode;
        ar->htt.tx_q_state.num_push_allowed = threshold;

        rcu_read_lock();

        for (i = 0; i < num_records; i++) {
                record = &resp->tx_mode_switch_ind.records[i];
                info0 = le16_to_cpu(record->info0);
                peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
                tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

                if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
                    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
                        ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
                                    peer_id, tid);
                        continue;
                }

                spin_lock_bh(&ar->data_lock);
                txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
                spin_unlock_bh(&ar->data_lock);

                /* It is okay to release the lock and use txq because RCU read
                 * lock is held.
                 */

                if (unlikely(!txq)) {
                        ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
                                    peer_id, tid);
                        continue;
                }

                spin_lock_bh(&ar->htt.tx_lock);
                artxq = (void *)txq->drv_priv;
                artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
                spin_unlock_bh(&ar->htt.tx_lock);
        }

        rcu_read_unlock();

        ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
        bool release;

        release = ath10k_htt_t2h_msg_handler(ar, skb);

        /* Free the indication buffer */
        if (release)
                dev_kfree_skb_any(skb);
}

static inline bool is_valid_legacy_rate(u8 rate)
{
        static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
                                          18, 24, 36, 48, 54};
        int i;

        for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
                if (rate == legacy_rates[i])
                        return true;
        }

        return false;
}

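/* The table above is in Mbps with 5 standing in for the 5.5 Mbps CCK rate;
 * the first four entries are CCK (1, 2, 5.5, 11) and the rest OFDM (6-54).
 * ath10k_update_per_peer_tx_stats() below converts these into the 100 kbps
 * units that struct rate_info expects (e.g. 54 becomes 540).
 */
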
2216 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
2217 struct ieee80211_sta *sta,
2218 struct ath10k_per_peer_tx_stats *peer_stats)
2220 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2222 struct rate_info txrate;
2224 lockdep_assert_held(&ar->data_lock);
2226 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
2227 txrate.bw = ATH10K_HW_BW(peer_stats->flags);
2228 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
2229 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
2230 sgi = ATH10K_HW_GI(peer_stats->flags);
2232 if (((txrate.flags == WMI_RATE_PREAMBLE_HT) ||
2233 (txrate.flags == WMI_RATE_PREAMBLE_VHT)) && txrate.mcs > 9) {
2234 ath10k_warn(ar, "Invalid mcs %hhd peer stats", txrate.mcs);
2238 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);

		if (!is_valid_legacy_rate(rate)) {
			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
				    rate);
			return;
		}

		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		rate *= 10;
		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = rate - 5;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs;
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
}
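/* Worked example of the legacy conversion above: cfg80211's struct
 * rate_info carries legacy rates in 100 kbps units, hence "rate *= 10".
 * OFDM 54 Mbps becomes 540; CCK 5.5 Mbps, reported by firmware as 6,
 * becomes 60 and is then corrected to 55.
 */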
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}
	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
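/* Locking note: the peer lookup and the per-sta txrate update both run
 * under ar->data_lock (which is why ath10k_update_per_peer_tx_stats()
 * asserts it via lockdep_assert_held()), while rcu_read_lock() keeps the
 * peer's station entry from disappearing for the duration of the loop.
 */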
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
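/* Ownership note: returning true tells the caller (see
 * ath10k_htt_htc_t2h_msg_handler() above) that it may free the skb;
 * returning false, as in the RX_IN_ORD_PADDR_IND case, means the skb was
 * queued for deferred processing and ownership has been transferred.
 */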
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, num_rx_msdus;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);
	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
	 * process it first to utilize full available quota.
	 */
	while (quota < budget) {
		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
			break;

		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
		if (!skb) {
			resched_napi = true;
			goto exit;
		}

		spin_lock_bh(&htt->rx_ring.lock);
		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		dev_kfree_skb_any(skb);
		if (num_rx_msdus > 0)
			quota += num_rx_msdus;

		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
			resched_napi = true;
			goto exit;
		}
	}
	while (quota < budget) {
		/* no more data to receive */
		if (!atomic_read(&htt->num_mpdus_ready))
			break;

		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		quota += num_rx_msdus;
		atomic_dec(&htt->num_mpdus_ready);
		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    atomic_read(&htt->num_mpdus_ready)) {
			resched_napi = true;
			goto exit;
		}
	}
	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);
	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);

	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
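/* Usage note: the bus transport's NAPI poll handler drives this function and
 * completes NAPI when the budget is not exhausted. A minimal sketch, assuming
 * a poll handler shaped like the one in the bus code (the real handlers, e.g.
 * in pci.c, also re-enable device interrupts on completion):
 *
 *	static int ath10k_napi_poll(struct napi_struct *ctx, int budget)
 *	{
 *		struct ath10k *ar = container_of(ctx, struct ath10k, napi);
 *		int done = ath10k_htt_txrx_compl_task(ar, budget);
 *
 *		if (done < budget)
 *			napi_complete(ctx);
 *
 *		return done;
 *	}
 *
 * Returning the full budget (the resched_napi path above) keeps the NAPI
 * instance scheduled so the leftover work is picked up on the next poll.
 */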