/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
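/*
 * A minimal sketch (compiled out, not part of the driver) of the
 * HBUS_TARG_WRPTR value built above, assuming TFD_QUEUE_SIZE_MAX == 256
 * so the write index fits in the low byte: the TFD write index travels
 * in bits 0-7 and the Tx queue number in the bits above it, letting a
 * single 32-bit register write tell the scheduler both which queue
 * moved and where its new write pointer is.  The helper name is
 * illustrative only.
 */
#if 0
static u32 example_targ_wrptr_val(u16 write_ptr, int txq_id)
{
	/* e.g. queue 4 with write index 0x2a yields 0x042a */
	return (write_ptr & 0xff) | ((u32)txq_id << 8);
}
#endif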
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);
/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes < high mark, the Tx queue is stopped.
 * When reclaiming packets (in the 'tx done' IRQ), if free space becomes
 * > low mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
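/*
 * Worked example of the arithmetic above, for a data queue with
 * n_bd = n_window = 256: with read_ptr = 10 and write_ptr = 30, 20 TFDs
 * are in flight; s = 10 - 30 = -20 (read_ptr <= write_ptr, so no n_bd
 * correction), then s += 256 gives 236, and the 2-entry reserve leaves
 * iwl_queue_space() returning 234.
 */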
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
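/*
 * For example, assuming the default data-queue window of 256 slots, this
 * yields low_mark = 64 and high_mark = 32: a queue is stopped once fewer
 * than high_mark entries remain free (see iwl_tx_skb) and is woken again
 * only when reclaiming frees more than low_mark entries, giving the
 * flow control some hysteresis.
 */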
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len += IWL_MAX_SCAN_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* aggregation TX queues will get their ID when aggregation begins */
	if (txq_id <= IWL_TX_FIFO_AC3)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq)
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
		     txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * Build basic fields of the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int is_hcca)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
			RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
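/*
 * A minimal sketch of the value composed above, assuming the
 * iwl_hw_set_rate_n_flags() helper simply ORs the 8-bit PLCP rate code
 * into the flag word: a CCK management frame sent on antenna A would
 * carry roughly
 *
 *	tx_cmd->rate_n_flags ==
 *		cpu_to_le32(RATE_MCS_CCK_MSK | RATE_MCS_ANT_A_MSK | rate_plcp);
 *
 * i.e. the rate lives in the low byte and the CCK/antenna flags above it.
 */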
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all non-injected data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	if (info->flags & IEEE80211_TX_CTL_INJECTED)
		sta_id = priv->hw_params.bcast_sta_id;
	else
		sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop_unlock;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
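/*
 * A compiled-out sketch of the sequence-field round trip used by
 * iwl_tx_skb() above (and by iwl_enqueue_hcmd() below), assuming the
 * SEQ_* macros keep the TFD index in the low byte and the queue number
 * in the bits above it:
 */
#if 0
static void example_seq_round_trip(void)
{
	/* queue 2, TFD index 31, packed on Tx ... */
	u16 seq = QUEUE_TO_SEQ(2) | INDEX_TO_SEQ(31);

	/* ... and recovered from the Tx response (see iwl_tx_cmd_complete) */
	WARN_ON(SEQ_TO_QUEUE(seq) != 2);
	WARN_ON(SEQ_TO_INDEX(seq) != 31);
}
#endif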
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device private data
 * @cmd: pointer to the uCode command structure
 *
 * Returns a negative value to indicate the operation failed.  On
 * success, it returns the index (> 0) of the command in the command
 * queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		if (iwl_within_ct_kill_margin(priv))
			iwl_tt_enter_ct_kill(priv);
		else {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
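/*
 * Note on the 'huge' slot, assuming get_cmd_index() maps CMD_SIZE_HUGE
 * commands to the single extra slot allocated in iwl_tx_queue_init(): a
 * huge command (e.g. scan) always lands in slot TFD_CMD_SLOTS regardless
 * of write_ptr, which is why the length computation above tests
 * (idx == TFD_CMD_SLOTS) to account for the IWL_MAX_SCAN_SIZE tail.
 */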
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed.  As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
		 __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
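/*
 * A compiled-out sketch of the swq_id encoding assumed above for
 * aggregation queues: iwl_virtual_agg_queue_num() is taken to pack the
 * mac80211 AC into the low two bits and the hardware queue number above
 * them, so that iwl_wake_queue()/iwl_stop_queue() can recover both.
 */
#if 0
static void example_decode_swq_id(u8 swq_id, u8 *ac, u8 *hwq)
{
	*ac = swq_id & 3;		/* mac80211 access class */
	*hwq = (swq_id >> 2) & 0x1f;	/* hardware Tx queue number */
}
#endif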
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
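/*
 * Worked example of the shift above: if the driver's Tx window starts at
 * start_idx = 8 but the BA's seq_ctl maps to index 4, then sh = 4 and
 * (ba_resp->bitmap >> 4) aligns bit 0 with the first frame of the
 * aggregate; frame i is then counted as ACKed only if bit i survives the
 * AND with agg->bitmap of frames actually transmitted.
 */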
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_ENTRY(SHORT_LIMIT);
	TX_STATUS_ENTRY(LONG_LIMIT);
	TX_STATUS_ENTRY(FIFO_UNDERRUN);
	TX_STATUS_ENTRY(MGMNT_ABORT);
	TX_STATUS_ENTRY(NEXT_FRAG);
	TX_STATUS_ENTRY(LIFE_EXPIRE);
	TX_STATUS_ENTRY(DEST_PS);
	TX_STATUS_ENTRY(ABORTED);
	TX_STATUS_ENTRY(BT_RETRY);
	TX_STATUS_ENTRY(STA_INVALID);
	TX_STATUS_ENTRY(FRAG_DROPPED);
	TX_STATUS_ENTRY(TID_DISABLE);
	TX_STATUS_ENTRY(FRAME_FLUSHED);
	TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
	TX_STATUS_ENTRY(TX_LOCKED);
	TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */