/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have two channels (high priority and low priority) for TX and two
 * channels for RX. Data packets are transferred through the low-priority
 * channels; management packets are transferred through the high-priority
 * channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"

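/*
 * Return the CPU address of the buffer descriptor (BD) at the current
 * head of the chosen TX channel ring (low priority for data, high
 * priority for management frames).
 */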
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
        struct wcn36xx_dxe_ch *ch = is_low ?
                &wcn->dxe_tx_l_ch :
                &wcn->dxe_tx_h_ch;

        return ch->head_blk_ctl->bd_cpu_addr;
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
                    addr, data);

        writel(data, wcn->mmio + addr);
}

#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data)                 \
do {                                                                     \
        if (wcn->chip_version == WCN36XX_CHIP_3680)                      \
                wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
        else                                                             \
                wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
} while (0)

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
        *data = readl(wcn->mmio + addr);

        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
                    addr, *data);
}

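/*
 * Free the control-block list of a channel. The list is circular, so
 * iteration is bounded by desc_num rather than by a NULL terminator.
 */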
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
        int i;

        for (i = 0; i < ch->desc_num && ctl; i++) {
                next = ctl->next;
                kfree(ctl);
                ctl = next;
        }
}

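/*
 * Allocate desc_num control blocks for a channel and link them into a
 * circular singly-linked list; the last block points back to the head.
 */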
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *prev_ctl = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        int i;

        spin_lock_init(&ch->lock);
        for (i = 0; i < ch->desc_num; i++) {
                cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
                if (!cur_ctl)
                        goto out_fail;

                spin_lock_init(&cur_ctl->skb_lock);
                cur_ctl->ctl_blk_order = i;
                if (i == 0) {
                        ch->head_blk_ctl = cur_ctl;
                        ch->tail_blk_ctl = cur_ctl;
                } else if (ch->desc_num - 1 == i) {
                        prev_ctl->next = cur_ctl;
                        cur_ctl->next = ch->head_blk_ctl;
                } else {
                        prev_ctl->next = cur_ctl;
                }
                prev_ctl = cur_ctl;
        }

        return 0;

out_fail:
        wcn36xx_dxe_free_ctl_block(ch);
        return -ENOMEM;
}

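/*
 * Set up the static parameters of all four DXE channels and allocate
 * their control-block rings. Note: the return value of
 * smsm_change_state() is currently ignored.
 */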
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
        int ret;

        wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
        wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
        wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
        wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

        wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
        wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

        wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
        wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

        wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
        wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

        wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
        wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

        wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
        wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

        wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
        wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

        /* DXE control block allocation */
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
        if (ret)
                goto out_err;

        /* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
        ret = wcn->ctrl_ops->smsm_change_state(
                WCN36XX_SMSM_WLAN_TX_ENABLE,
                WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

        return 0;

out_err:
        wcn36xx_err("Failed to allocate DXE control blocks\n");
        wcn36xx_dxe_free_ctl_blks(wcn);
        return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

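/*
 * Allocate the hardware descriptor ring in DMA-coherent memory and
 * attach one descriptor to each control block. Descriptors are chained
 * through phy_next_l into a circular ring that mirrors the control-block
 * list.
 */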
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_desc *cur_dxe = NULL;
        struct wcn36xx_dxe_desc *prev_dxe = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        size_t size;
        int i;

        size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
        wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
                                              GFP_KERNEL);
        if (!wcn_ch->cpu_addr)
                return -ENOMEM;

        memset(wcn_ch->cpu_addr, 0, size);

        cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                cur_ctl->desc = cur_dxe;
                cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
                        i * sizeof(struct wcn36xx_dxe_desc);

                switch (wcn_ch->ch_type) {
                case WCN36XX_DXE_CH_TX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
                        break;
                case WCN36XX_DXE_CH_TX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
                        break;
                case WCN36XX_DXE_CH_RX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
                        break;
                case WCN36XX_DXE_CH_RX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
                        break;
                }
                if (i == 0) {
                        cur_dxe->phy_next_l = 0;
                } else if ((i > 0) && (i < wcn_ch->desc_num - 1)) {
                        prev_dxe->phy_next_l =
                                cur_ctl->desc_phy_addr;
                } else if (i == (wcn_ch->desc_num - 1)) {
                        prev_dxe->phy_next_l =
                                cur_ctl->desc_phy_addr;
                        cur_dxe->phy_next_l =
                                wcn_ch->head_blk_ctl->desc_phy_addr;
                }
                cur_ctl = cur_ctl->next;
                prev_dxe = cur_dxe;
                cur_dxe++;
        }

        return 0;
}

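/*
 * Distribute the preallocated BD chunks from @pool over a TX ring.
 * Descriptors are used in pairs: the even ones carry a BD, the odd ones
 * carry the skb payload (see wcn36xx_dxe_tx_frame()).
 */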
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
                                   struct wcn36xx_dxe_mem_pool *pool)
{
        int i, chunk_size = pool->chunk_size;
        dma_addr_t bd_phy_addr = pool->phy_addr;
        void *bd_cpu_addr = pool->virt_addr;
        struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

        for (i = 0; i < ch->desc_num; i++) {
                /*
                 * Only every second DXE descriptor needs a BD pointer;
                 * the others point at the skb data.
                 */
                if (!(i & 1)) {
                        cur->bd_phy_addr = bd_phy_addr;
                        cur->bd_cpu_addr = bd_cpu_addr;
                        bd_phy_addr += chunk_size;
                        bd_cpu_addr += chunk_size;
                } else {
                        cur->bd_phy_addr = 0;
                        cur->bd_cpu_addr = NULL;
                }
                cur = cur->next;
        }
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
        int reg_data = 0;

        wcn36xx_dxe_read_register(wcn,
                                  WCN36XX_DXE_INT_MASK_REG,
                                  &reg_data);

        reg_data |= wcn_ch;

        wcn36xx_dxe_write_register(wcn,
                                   WCN36XX_DXE_INT_MASK_REG,
                                   (int)reg_data);
        return 0;
}

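/*
 * Allocate a receive skb and map it for DMA so the hardware can fill it
 * in. Note: the result of dma_map_single() is not checked with
 * dma_mapping_error() here.
 */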
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        struct sk_buff *skb;

        skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        dxe->dst_addr_l = dma_map_single(dev,
                                         skb_tail_pointer(skb),
                                         WCN36XX_PKT_SIZE,
                                         DMA_FROM_DEVICE);
        ctl->skb = skb;

        return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
                                    struct wcn36xx_dxe_ch *wcn_ch)
{
        int i;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;

        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
                cur_ctl = cur_ctl->next;
        }

        return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
        int i;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                kfree_skb(cur->skb);
                cur = cur->next;
        }
}

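/*
 * Handle a TX-complete indication: report the status of the pending skb
 * (if any) to mac80211 and wake the queues.
 */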
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&wcn->dxe_lock, flags);
        skb = wcn->tx_ack_skb;
        wcn->tx_ack_skb = NULL;
        spin_unlock_irqrestore(&wcn->dxe_lock, flags);

        if (!skb) {
                wcn36xx_warn("Spurious TX complete indication\n");
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (status == 1)
                info->flags |= IEEE80211_TX_STAT_ACK;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

        ieee80211_tx_status_irqsafe(wcn->hw, skb);
        ieee80211_wake_queues(wcn->hw);
}

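/*
 * Walk the TX ring from the tail and release every descriptor that the
 * hardware has marked as no longer VALID, unmapping and (if no status
 * report was requested) freeing the attached skbs.
 */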
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl;
        struct ieee80211_tx_info *info;
        unsigned long flags;

        /*
         * Run the loop body at least once (hence do-while): when the ring
         * is completely full, head and tail point to the same element, so
         * a plain while loop would never enter the body.
         */
        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->tail_blk_ctl;
        do {
                if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
                        break;
                if (ctl->skb) {
                        dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
                                         ctl->skb->len, DMA_TO_DEVICE);
                        info = IEEE80211_SKB_CB(ctl->skb);
                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
                                /* No TX status was requested, so the frame
                                 * can be freed here; otherwise it is kept
                                 * until the TX status indication arrives.
                                 */
                                ieee80211_free_txskb(wcn->hw, ctl->skb);
                        }
                        spin_lock(&ctl->skb_lock);
                        if (wcn->queues_stopped) {
                                wcn->queues_stopped = false;
                                ieee80211_wake_queues(wcn->hw);
                        }
                        spin_unlock(&ctl->skb_lock);

                        ctl->skb = NULL;
                }
                ctl = ctl->next;
        } while (ctl != ch->head_blk_ctl &&
                 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

        ch->tail_blk_ctl = ctl;
        spin_unlock_irqrestore(&ch->lock, flags);
}

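/* TX interrupt handler: acknowledge the per-channel interrupts and reap
 * completed descriptors on the high- and low-priority TX channels.
 */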
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;
        int int_src, int_reason;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
                                          &int_reason);

                /* TODO: Check int_reason */

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);

                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);
                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
                reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
        }

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
                                          &int_reason);
                /* TODO: Check int_reason */

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);

                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);
                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
                reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
        }

        return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;

        disable_irq_nosync(wcn->rx_irq);
        wcn36xx_dxe_rx_frame(wcn);
        enable_irq(wcn->rx_irq);
        return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
        int ret;

        ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
                          IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc tx irq\n");
                goto out_err;
        }

        ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
                          "wcn36xx_rx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc rx irq\n");
                goto out_txirq;
        }

        enable_irq_wake(wcn->rx_irq);

        return 0;

out_txirq:
        free_irq(wcn->tx_irq, wcn);
out_err:
        return ret;
}

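/*
 * Hand every received frame on @ch to the RX path. Each reaped
 * descriptor is immediately re-armed with a freshly mapped skb before
 * the old buffer is passed up via wcn36xx_rx_skb().
 */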
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;

        while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
                skb = ctl->skb;
                dma_addr = dxe->dst_addr_l;
                wcn36xx_dxe_fill_skb(wcn->dev, ctl);

                switch (ch->ch_type) {
                case WCN36XX_DXE_CH_RX_L:
                        dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
                        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
                                                   WCN36XX_DXE_INT_CH1_MASK);
                        break;
                case WCN36XX_DXE_CH_RX_H:
                        dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
                        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
                                                   WCN36XX_DXE_INT_CH3_MASK);
                        break;
                default:
                        wcn36xx_warn("Unknown channel\n");
                }

                dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
                                 DMA_FROM_DEVICE);
                wcn36xx_rx_skb(wcn, skb);
                ctl = ctl->next;
                dxe = ctl->desc;
        }

        ch->head_blk_ctl = ctl;

        return 0;
}

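/*
 * Dispatch pending RX interrupts: check the raw interrupt source
 * register, clear the per-channel bits and process the corresponding
 * RX rings.
 */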
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
        int int_src;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        /* RX_LOW_PRI */
        if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_DXE_INT_CH1_MASK);
                wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
        }

        /* RX_HIGH_PRI */
        if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
                /* Clean up all the INT within this channel */
                wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_DXE_INT_CH3_MASK);
                wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
        }

        if (!int_src)
                wcn36xx_warn("No DXE interrupt pending\n");
}

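/*
 * Allocate the DMA-coherent pools that hold the TX buffer descriptors
 * (BDs): one pool for management frames (high-priority channel) and one
 * for data frames (low-priority channel).
 */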
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
        size_t s;
        void *cpu_addr;

        /* Allocate BD headers for MGMT frames */

        /* Where this formula comes from is unclear; ask QC */
        wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->mgmt_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        /* Allocate BD headers for DATA frames */

        /* Where this formula comes from is unclear; ask QC */
        wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->data_mem_pool.virt_addr = cpu_addr;
        memset(cpu_addr, 0, s);

        return 0;

out_err:
        wcn36xx_dxe_free_mem_pools(wcn);
        wcn36xx_err("Failed to allocate BD mempool\n");
        return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
        if (wcn->mgmt_mem_pool.virt_addr)
                dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
                                  wcn->mgmt_mem_pool.virt_addr,
                                  wcn->mgmt_mem_pool.phy_addr);

        if (wcn->data_mem_pool.virt_addr) {
                dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
                                  wcn->data_mem_pool.virt_addr,
                                  wcn->data_mem_pool.phy_addr);
        }
}

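/*
 * Queue one frame for transmission. Each frame occupies two ring
 * descriptors: the first points at the buffer descriptor (BD) from the
 * channel's memory pool, the second at the skb payload. In BMPS power
 * save the chip is kicked through SMSM instead of a register write,
 * since a sleeping chip ignores the latter.
 */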
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
                         struct wcn36xx_vif *vif_priv,
                         struct sk_buff *skb,
                         bool is_low)
{
        struct wcn36xx_dxe_ctl *ctl = NULL;
        struct wcn36xx_dxe_desc *desc = NULL;
        struct wcn36xx_dxe_ch *ch = NULL;
        unsigned long flags;
        int ret;

        ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->head_blk_ctl;

        spin_lock(&ctl->next->skb_lock);

        /*
         * If the next skb is not NULL, the head has caught up with the
         * tail and the ring is full. Stop the queues to let mac80211 back
         * off until the ring has an empty slot again.
         */
        if (ctl->next->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
                spin_unlock(&ctl->next->skb_lock);
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EBUSY;
        }
        spin_unlock(&ctl->next->skb_lock);

        ctl->skb = NULL;
        desc = ctl->desc;

        /* Set source address of the BD we send */
        desc->src_addr_l = ctl->bd_phy_addr;

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = sizeof(struct wcn36xx_tx_bd);
        desc->ctrl = ch->ctrl_bd;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
                         "BD   >>> ", (char *)ctl->bd_cpu_addr,
                         sizeof(struct wcn36xx_tx_bd));

        /* Set source address of the SKB we send */
        ctl = ctl->next;
        ctl->skb = skb;
        desc = ctl->desc;
        if (ctl->bd_cpu_addr) {
                wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
                ret = -EINVAL;
                goto unlock;
        }

        desc->src_addr_l = dma_map_single(wcn->dev,
                                          ctl->skb->data,
                                          ctl->skb->len,
                                          DMA_TO_DEVICE);

        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = ctl->skb->len;

        /* Set the DXE descriptor to VALID */
        desc->ctrl = ch->ctrl_skb;

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
                         (char *)desc, sizeof(*desc));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
                         (char *)ctl->skb->data, ctl->skb->len);

        /* Move the head of the ring to the next empty descriptor */
        ch->head_blk_ctl = ctl->next;

        /*
         * When connected and trying to send a data frame, the chip can be
         * in sleep mode and a register write will not wake it up. Instead,
         * notify the chip about the new frame through the SMSM bus.
         */
        if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
                wcn->ctrl_ops->smsm_change_state(
                                  0,
                                  WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* Indicate End Of Packet and generate an interrupt on
                 * descriptor done.
                 */
                wcn36xx_dxe_write_register(wcn,
                        ch->reg_ctrl, ch->def_ctrl);
        }

        ret = 0;
unlock:
        spin_unlock_irqrestore(&ch->lock, flags);
        return ret;
}

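/*
 * Bring up the DXE engine: reset it, route its interrupts, program the
 * descriptor rings for all four channels and request the TX/RX IRQs.
 * RX descriptors are armed with preallocated skbs before the channels
 * are enabled.
 */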
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
        int reg_data = 0, ret;

        reg_data = WCN36XX_DXE_REG_RESET;
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

        /* Set the interrupt path */
        reg_data = WCN36XX_DXE_CCU_INT;
        wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

        /***************************************/
        /* Init descriptors for TX LOW channel */
        /***************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

        /* Write channel head to the NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
                wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX LOW */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_L,
                WCN36XX_DXE_WQ_TX_L);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

        /****************************************/
        /* Init descriptors for TX HIGH channel */
        /****************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

        /* Write channel head to the NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
                wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX HIGH */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_H,
                WCN36XX_DXE_WQ_TX_H);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

        /***************************************/
        /* Init descriptors for RX LOW channel */
        /***************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

        /* Write channel head to the NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_L,
                WCN36XX_DXE_WQ_RX_L);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_L,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

        /****************************************/
        /* Init descriptors for RX HIGH channel */
        /****************************************/
        wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

        /* Write channel head to the NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_H,
                WCN36XX_DXE_WQ_RX_H);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_H,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

        ret = wcn36xx_dxe_request_irqs(wcn);
        if (ret < 0)
                goto out_err;

        return 0;

out_err:
        return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
        free_irq(wcn->tx_irq, wcn);
        free_irq(wcn->rx_irq, wcn);

        if (wcn->tx_ack_skb) {
                ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
                wcn->tx_ack_skb = NULL;
        }

        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}