/*
 * Marvell Wireless LAN device driver: WMM
 *
 * Copyright (C) 2011-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"

/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX		512

#define WMM_QUEUED_PACKET_LOWER_LIMIT	180

#define WMM_QUEUED_PACKET_UPPER_LIMIT	200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5
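
/*
 * Illustrative note (an editor's sketch, not from the original sources):
 * the 802.1D user priority is the top three bits of the IP TOS byte, so
 * the driver derives it as "tos >> IPTOS_OFFSET". For example, a TOS
 * byte of 0xb8 (DSCP EF) shifted right by 5 yields user priority 5,
 * which the tos_to_tid table below resolves to TID 5 (AC_VI).
 */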
static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
				  0x00, 0x50, 0xf2, 0x02,
				  0x00, 0x01, 0x00 };

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
					  WMM_AC_BK,
					  WMM_AC_VI,
					  WMM_AC_VO };
static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
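
/*
 * Illustrative note (editor's sketch): ac_to_tid is the reverse view of
 * tos_to_tid -- indexed by AC in this driver's enum order (BK, BE, VI,
 * VO), it lists the two TIDs each AC carries, e.g. AC_BK owns TIDs 1
 * and 2 while AC_VO owns TIDs 6 and 7. tos_to_tid is re-sorted at
 * runtime by mwifiex_wmm_queue_priorities_tid() when the AP's WMM
 * parameters reprioritize the ACs.
 */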
/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}
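
/*
 * Illustrative note (editor's sketch): the masks above unpack the
 * one-byte ACI/AIFSN field of a WMM AC parameter record -- AIFSN in
 * bits 0-3, ACM in bit 4, ACI in bits 5-6 -- and the one-byte ECW
 * field, with ECWmin in the low nibble and ECWmax in the high nibble.
 * An ECW byte of 0xa4, for example, decodes to ECWmin=4 (CWmin=15) and
 * ECWmax=10 (CWmax=1023).
 */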
/*
 * This function allocates a route address list.
 *
 * The function also initializes the list with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}
/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is
 * initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u32 sec, usec;
	struct timeval ba_tstamp;
	u8 ba_threshold;

	/* set up ba_packet_threshold here, a random number in the range
	 * [BA_SETUP_PACKET_OFFSET,
	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
	 */

	do_gettimeofday(&ba_tstamp);
	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
						      + BA_SETUP_PACKET_OFFSET;

	return ba_threshold;
}
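
/*
 * Illustrative note (editor's sketch): the fold-and-modulo above simply
 * hashes the current time into the range [BA_SETUP_PACKET_OFFSET,
 * BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]. Assuming
 * both constants are 16, which matches the "between 16 and 32" comment,
 * each RA list gets its own threshold in [16, 31], so BA setup for
 * different peers is not triggered in lockstep.
 */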
/*
 * This function allocates and adds a RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;
	unsigned long flags;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		ra_list->ba_status = BA_SETUP_NONE;
		ra_list->amsdu_in_ampdu = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_get_tdls_link_status(priv, ra) ==
			    TDLS_SETUP_COMPLETE) {
				ra_list->tdls_link = true;
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			spin_lock_irqsave(&priv->sta_list_spinlock, flags);
			node = mwifiex_get_sta_entry(priv, ra);
			ra_list->is_11n_enabled =
				      mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
		}

		dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
			ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					      mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}
/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}
/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
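
/*
 * Illustrative walk-through (editor's sketch): with the default order
 * VO->VI->BE->BK, queue_priority[0] is WMM_AC_VO, so the first loop
 * iteration writes ac_to_tid[WMM_AC_VO] = {6, 7} into tos_to_tid[7]
 * and tos_to_tid[6]; the highest-priority AC always lands in the top
 * slots. The second loop then builds tos_to_tid_inv so a TID can be
 * mapped back to its queue level in O(1).
 */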
/*
 * This function initializes WMM priority queues.
 */
static void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
		"qos_info Parameter Set Count=%d, Reserved=%#x\n",
		wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
		IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		dev_dbg(priv->adapter->dev,
			"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble-sort the ACs by average back-off; on a tie, the AC with
	 * the higher index wins the earlier slot.
	 */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}
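
/*
 * Illustrative example (editor's sketch): for an AP advertising AC_BE
 * with AIFSN=3 and ECWmin=4, cw_min is (1 << 4) - 1 = 15 and
 * avg_back_off is (15 >> 1) + 3 = 10; an AC_VO entry with AIFSN=2 and
 * ECWmin=2 gives (3 >> 1) + 2 = 3, so the sort above places VO ahead
 * of BE.
 */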
/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * In case the AC is not enabled, the highest AC is returned that is
 * enabled and does not require admission control.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Set up a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC,
	 * which is enabled due to a completed admission control.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}
/*
 * This function downgrades the WMM priority queues.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities: "
		"BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			dev_dbg(priv->adapter->dev,
				"info: WMM: AC PRIO %d maps to %d\n",
				ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}
/*
 * This function converts the IP TOS field to a WMM AC
 * queue.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
						    WMM_AC_BK,
						    WMM_AC_BK,
						    WMM_AC_BE,
						    WMM_AC_VI,
						    WMM_AC_VI,
						    WMM_AC_VO,
						    WMM_AC_VO };

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}
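
/*
 * Illustrative note (editor's sketch, assuming the standard 802.1D map
 * filled in above): the index here is the 3-bit user priority rather
 * than the raw TOS byte, so UP 6 and UP 7 both resolve to WMM_AC_VO
 * while UP 1 and UP 2 fall back to WMM_AC_BK.
 */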
/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * AC is disabled (due to the call admission control ACM bit). The
 * mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index to the tid array; picking from the array will be
	 * taken care of by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}
/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}
/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}
/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callbacks are called with status failure,
 * after the packets are dequeued, to ensure proper cleanup. The RA
 * list node itself is freed at the end.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
		mwifiex_write_data_complete(adapter, skb, 0, -1);
}
/*
 * This function deletes all packets in an RA list.
 *
 * Each node in the RA list is freed individually first, and then
 * the RA list itself is freed.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}
/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
								       ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
/*
 * This function deletes all route addresses from all RA lists.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		dev_dbg(priv->adapter->dev,
			"info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}
static int mwifiex_free_ack_frame(int id, void *p, void *data)
{
	pr_warn("Have pending ack frames!\n");
	kfree_skb(p);
	return 0;
}
/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	unsigned long flags;
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !priv->adapter->surprise_removed)
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);

	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
	idr_destroy(&priv->ack_status_frames);
}
/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}
/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}
/*
 * This function deletes the RA list nodes for a given MAC address, for
 * all TIDs. It also decrements the Tx pending count accordingly.
 */
void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);

		if (!ra_list)
			continue;
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
		atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued);
		list_del(&ra_list->list);
		kfree(ra_list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}
/*
 * This function adds a packet to the WMM queue.
 *
 * In disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	unsigned long flags;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			dev_dbg(adapter->dev,
				"TDLS setup packet for %pM. Don't block\n", ra);
		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In case of infra, we already created the list during association,
	 * so we just don't have to call get_queue_raptr; we will have only
	 * one raptr for a tid in case of infra
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       flags);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			if (!list_empty(&list_head))
				ra_list = list_first_entry(
					&list_head, struct mwifiex_ra_list_tbl,
					list);
			else
				ra_list = NULL;
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			eth_broadcast_addr(ra);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
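
/*
 * Illustrative note (editor's sketch): highest_queued_prio is only ever
 * raised here and lowered again by the dequeue path, so it is a hint,
 * not an exact watermark -- the dequeuer may start scanning at a
 * priority level that turns out to be empty, but it never skips a level
 * that holds traffic.
 */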
/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *      - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" QSTATUS TLV: %d, %d, %d\n",
				tlv_wmm_qstatus->queue_index,
				tlv_wmm_qstatus->flow_required,
				tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and setup the IEEE IE type and length byte fields
			 */

			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr + 2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" WMM Parameter Set Count: %d\n",
				wmm_param_ie->qos_info_bitmap &
				IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}
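
/*
 * Illustrative note (editor's sketch): each TLV is a type/length header
 * followed by tlv_len bytes of value, so the parser advances curr by
 * tlv_len + sizeof(tlv_hdr->header) per iteration, and stops at the
 * first truncated TLV or, via "valid", after the first unknown type.
 */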
/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	dev_dbg(priv->adapter->dev,
		"info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}
/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
		" %d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}
/*
 * This function retrieves the highest priority RA list table pointer.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	unsigned long flags_ra;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {
			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_irqsave(&priv_tmp->wmm.
						  ra_list_spinlock, flags_ra);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
						       flags_ra);
			}
		}
	}

	return NULL;

found:
	/* holds ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}
/* This function rotates the RA and BSS lists so packets are picked
 * round robin.
 *
 * After a packet is successfully transmitted, rotate the RA list, so
 * that the RA next to the one just transmitted comes first in the list.
 * This way we pick the RAs in a round robin fashion. The same applies
 * to BSS nodes of equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * curr bss node. imagine list to stay fixed while head is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}
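
/*
 * Illustrative note (editor's sketch): aggregation is only attempted
 * when at least MIN_NUM_AMSDU queued frames fit inside one A-MSDU
 * buffer, so the walk above bails out either when the running size
 * would exceed max_amsdu_size (no aggregation) or as soon as enough
 * frames have been counted (aggregate).
 */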
/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			   unsigned long ra_list_flags)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back at the head */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}
/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}
/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			      unsigned long ra_list_flags)
				__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (adapter->data_sent || adapter->tx_lock_flag) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		skb_queue_tail(&adapter->tx_data_q, skb);
		atomic_inc(&adapter->tx_queued);
		return;
	}

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	if (adapter->iface_type == MWIFIEX_USB) {
		adapter->data_sent = true;
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		tx_param.next_pkt_len =
			((skb_next) ? skb_next->len +
			 sizeof(struct txpd) : 0);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		break;
	case -1:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}
/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;
	unsigned long flags;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	dev_dbg(adapter->dev, "data: tid=%d\n", tid);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
	    ptr->ba_status ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    ptr->ba_status &&
		    ptr->amsdu_in_ampdu &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}

	return 0;
}
/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		if (mwifiex_dequeue_tx_packet(adapter))
			break;
		if (adapter->iface_type != MWIFIEX_SDIO) {
			if (adapter->data_sent ||
			    adapter->tx_lock_flag)
				break;
		} else {
			if (atomic_read(&adapter->tx_queued) >=
			    MWIFIEX_MAX_PKTS_TXQ)
				break;
		}
	} while (!mwifiex_wmm_lists_empty(adapter));
}