/* drivers/net/wireless/ath/ath6kl/htc_mbox.c */
1 /*
2  * Copyright (c) 2007-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17
18 #include "core.h"
19 #include "hif.h"
20 #include "debug.h"
21 #include "hif-ops.h"
22 #include "trace.h"
23
24 #include <asm/unaligned.h>
25
26 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
27
28 static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
29 static void ath6kl_htc_mbox_stop(struct htc_target *target);
30 static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
31                                               struct list_head *pkt_queue);
32 static void ath6kl_htc_set_credit_dist(struct htc_target *target,
33                                        struct ath6kl_htc_credit_info *cred_info,
34                                        u16 svc_pri_order[], int len);
35
36 /* threshold to re-enable Tx bundling for an AC*/
37 #define TX_RESUME_BUNDLE_THRESHOLD      1500
38
39 /* Functions for Tx credit handling */
40 static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
41                                   struct htc_endpoint_credit_dist *ep_dist,
42                                   int credits)
43 {
44         ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
45                    ep_dist->endpoint, credits);
46
47         ep_dist->credits += credits;
48         ep_dist->cred_assngd += credits;
49         cred_info->cur_free_credits -= credits;
50 }
51
/*
 * Initialise credit distribution state for every endpoint.
 *
 * Pass 1 seeds the free pool, pre-funds the BE/BK data endpoints (when
 * enough total credits exist) and the WMI control endpoint, and records
 * each endpoint's minimum.  Pass 2 derives each data endpoint's "normal"
 * credit level from whatever remains in the free pool.
 */
static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		/* Endpoint 0 is not credit-managed here. */
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		/* Only pre-fund data endpoints when credits are plentiful. */
		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		}

		/*
		 * Streams have to be created (explicit | implicit) for all
		 * kinds of traffic. BE endpoints are also inactive in the
		 * beginning. When BE traffic starts it creates implicit
		 * streams that redistributes credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. credits will be distributed
		 * as traffic activity demands
		 */
	}

	/*
	 * For ath6kl_credit_seek function,
	 * it use list_for_each_entry_reverse to walk around the whole ep list.
	 * Therefore assign this lowestpri_ep_dist after walk around the ep_list
	 */
	cred_info->lowestpri_ep_dist = cur_ep_dist->list;

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg are the same. We use a simple
			 * calculation here, we take the remaining credits
			 * and determine how many max messages this can
			 * cover and then set each endpoint's normal value
			 * equal to 3/4 this amount.
			 */
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;

		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}
145
146 /* initialize and setup credit distribution */
147 static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
148                                struct ath6kl_htc_credit_info *cred_info)
149 {
150         u16 servicepriority[5];
151
152         memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
153
154         servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
155         servicepriority[1] = WMI_DATA_VO_SVC;
156         servicepriority[2] = WMI_DATA_VI_SVC;
157         servicepriority[3] = WMI_DATA_BE_SVC;
158         servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
159
160         /* set priority list */
161         ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
162
163         return 0;
164 }
165
166 /* reduce an ep's credits back to a set limit */
167 static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
168                                  struct htc_endpoint_credit_dist *ep_dist,
169                                  int limit)
170 {
171         int credits;
172
173         ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
174                    ep_dist->endpoint, limit);
175
176         ep_dist->cred_assngd = limit;
177
178         if (ep_dist->credits <= limit)
179                 return;
180
181         credits = ep_dist->credits - limit;
182         ep_dist->credits -= credits;
183         cred_info->cur_free_credits += credits;
184 }
185
186 static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
187                                  struct list_head *epdist_list)
188 {
189         struct htc_endpoint_credit_dist *cur_list;
190
191         list_for_each_entry(cur_list, epdist_list, list) {
192                 if (cur_list->endpoint == ENDPOINT_0)
193                         continue;
194
195                 if (cur_list->cred_to_dist > 0) {
196                         cur_list->credits += cur_list->cred_to_dist;
197                         cur_list->cred_to_dist = 0;
198
199                         if (cur_list->credits > cur_list->cred_assngd)
200                                 ath6kl_credit_reduce(cred_info,
201                                                      cur_list,
202                                                      cur_list->cred_assngd);
203
204                         if (cur_list->credits > cur_list->cred_norm)
205                                 ath6kl_credit_reduce(cred_info, cur_list,
206                                                      cur_list->cred_norm);
207
208                         if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
209                                 if (cur_list->txq_depth == 0)
210                                         ath6kl_credit_reduce(cred_info,
211                                                              cur_list, 0);
212                         }
213                 }
214         }
215 }
216
217 /*
218  * HTC has an endpoint that needs credits, ep_dist is the endpoint in
219  * question.
220  */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
				struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	/* The control service never borrows credits through this path. */
	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	/*
	 * VI/VO endpoints stop seeking once their assigned credits have
	 * reached the normal level.
	 */
	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, try taking away from
	 * lower priority services The rule for taking away credits:
	 *
	 *   1. Only take from lower priority endpoints
	 *   2. Only take what is allocated above the minimum (never
	 *      starve an endpoint completely)
	 *   3. Only take what you need.
	 */

	/* Walk from the lowest-priority endpoint back towards ep_dist. */
	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		/* Recompute the shortfall as the free pool grows. */
		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * it's minimum and it has enough credits assigned
			 * above it's minimum to fulfill our need try to
			 * take away just enough to fulfill our need.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	/* Take whatever is now available, possibly less than requested. */
	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	/* The seek request is consumed whether or not it was satisfied. */
	ep_dist->seek_cred = 0;
}
295
296 /* redistribute credits based on activity change */
297 static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
298                                        struct list_head *ep_dist_list)
299 {
300         struct htc_endpoint_credit_dist *curdist_list;
301
302         list_for_each_entry(curdist_list, ep_dist_list, list) {
303                 if (curdist_list->endpoint == ENDPOINT_0)
304                         continue;
305
306                 if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
307                     (curdist_list->svc_id == WMI_DATA_BE_SVC))
308                         curdist_list->dist_flags |= HTC_EP_ACTIVE;
309
310                 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
311                     !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
312                         if (curdist_list->txq_depth == 0)
313                                 ath6kl_credit_reduce(info, curdist_list, 0);
314                         else
315                                 ath6kl_credit_reduce(info,
316                                                      curdist_list,
317                                                      curdist_list->cred_min);
318                 }
319         }
320 }
321
322 /*
323  *
324  * This function is invoked whenever endpoints require credit
325  * distributions. A lock is held while this function is invoked, this
326  * function shall NOT block. The ep_dist_list is a list of distribution
327  * structures in prioritized order as defined by the call to the
328  * htc_set_credit_dist() api.
329  */
330 static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
331                                      struct list_head *ep_dist_list,
332                               enum htc_credit_dist_reason reason)
333 {
334         switch (reason) {
335         case HTC_CREDIT_DIST_SEND_COMPLETE:
336                 ath6kl_credit_update(cred_info, ep_dist_list);
337                 break;
338         case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
339                 ath6kl_credit_redistribute(cred_info, ep_dist_list);
340                 break;
341         default:
342                 break;
343         }
344
345         WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
346         WARN_ON(cred_info->cur_free_credits < 0);
347 }
348
349 static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
350 {
351         u8 *align_addr;
352
353         if (!IS_ALIGNED((unsigned long) *buf, 4)) {
354                 align_addr = PTR_ALIGN(*buf - 4, 4);
355                 memmove(align_addr, *buf, len);
356                 *buf = align_addr;
357         }
358 }
359
/*
 * Fill in the HTC frame header in the space reserved immediately
 * before packet->buf; on return packet->buf points at the header.
 */
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	/* Step back over the reserved header space. */
	packet->buf -= HTC_HDR_LENGTH;
	hdr =  (struct htc_frame_hdr *)packet->buf;

	/* Endianess? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}
375
376 static void htc_reclaim_txctrl_buf(struct htc_target *target,
377                                    struct htc_packet *pkt)
378 {
379         spin_lock_bh(&target->htc_lock);
380         list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
381         spin_unlock_bh(&target->htc_lock);
382 }
383
384 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
385                                               bool tx)
386 {
387         struct htc_packet *packet = NULL;
388         struct list_head *buf_list;
389
390         buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
391
392         spin_lock_bh(&target->htc_lock);
393
394         if (list_empty(buf_list)) {
395                 spin_unlock_bh(&target->htc_lock);
396                 return NULL;
397         }
398
399         packet = list_first_entry(buf_list, struct htc_packet, list);
400         list_del(&packet->list);
401         spin_unlock_bh(&target->htc_lock);
402
403         if (tx)
404                 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
405
406         return packet;
407 }
408
/*
 * Post-send bookkeeping for one Tx packet: strip the HTC header back
 * off the buffer and, on failure, refund the credits the packet
 * consumed and trigger a credit redistribution.
 */
static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	/* Undo the header reservation made when the packet was prepared. */
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
				packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	/* The distribution pass folds cred_to_dist back into the pools. */
	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}
438
439 static void htc_tx_complete(struct htc_endpoint *endpoint,
440                             struct list_head *txq)
441 {
442         if (list_empty(txq))
443                 return;
444
445         ath6kl_dbg(ATH6KL_DBG_HTC,
446                    "htc tx complete ep %d pkts %d\n",
447                    endpoint->eid, get_queue_depth(txq));
448
449         ath6kl_tx_complete(endpoint->target, txq);
450 }
451
452 static void htc_tx_comp_handler(struct htc_target *target,
453                                 struct htc_packet *packet)
454 {
455         struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
456         struct list_head container;
457
458         ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
459                    packet->info.tx.seqno);
460
461         htc_tx_comp_update(target, endpoint, packet);
462         INIT_LIST_HEAD(&container);
463         list_add_tail(&packet->list, &container);
464         /* do completion */
465         htc_tx_complete(endpoint, &container);
466 }
467
/*
 * Completion handler for an asynchronous scatter (bundled) Tx request:
 * updates per-packet state for every entry, frees the scatter request
 * and completes all packets in one batch.
 */
static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx scat complete len %d entries %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	/* All packets in a bundle belong to the same endpoint. */
	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			/*
			 * NOTE(review): returning here leaks scat_req and
			 * abandons already-processed packets; should be
			 * unreachable in practice — worth confirming.
			 */
			WARN_ON(1);
			return;
		}

		/* Propagate the bundle-wide status to each packet. */
		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}
507
508 static int ath6kl_htc_tx_issue(struct htc_target *target,
509                                struct htc_packet *packet)
510 {
511         int status;
512         bool sync = false;
513         u32 padded_len, send_len;
514
515         if (!packet->completion)
516                 sync = true;
517
518         send_len = packet->act_len + HTC_HDR_LENGTH;
519
520         padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
521
522         ath6kl_dbg(ATH6KL_DBG_HTC,
523                    "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
524                    send_len, packet->info.tx.seqno, padded_len,
525                    target->dev->ar->mbox_info.htc_addr,
526                    sync ? "sync" : "async");
527
528         if (sync) {
529                 status = hif_read_write_sync(target->dev->ar,
530                                 target->dev->ar->mbox_info.htc_addr,
531                                  packet->buf, padded_len,
532                                  HIF_WR_SYNC_BLOCK_INC);
533
534                 packet->status = status;
535                 packet->buf += HTC_HDR_LENGTH;
536         } else
537                 status = hif_write_async(target->dev->ar,
538                                 target->dev->ar->mbox_info.htc_addr,
539                                 packet->buf, padded_len,
540                                 HIF_WR_ASYNC_BLOCK_INC, packet);
541
542         trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
543
544         return status;
545 }
546
/*
 * Reserve the credits needed to send @len bytes on @eid.
 *
 * Computes the credit cost into *req_cred, seeks more credits when the
 * endpoint is short, and sets HTC_FLAGS_NEED_CREDIT_UPDATE in *flags
 * when the endpoint drops below one message's worth so the target
 * reports credits back promptly.  Returns 0 on success or -EINVAL when
 * the credits cannot be obtained.
 */
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{

	/* One credit per tgt_cred_sz bytes, rounded up. */
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		/* Endpoint 0 can never borrow credits. */
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	/* Consume the credits (cred_cosumd is the historic field name). */
	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	 /* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
		ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}
600
601 static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
602                                    struct htc_endpoint *endpoint,
603                                    struct list_head *queue)
604 {
605         int req_cred;
606         u8 flags;
607         struct htc_packet *packet;
608         unsigned int len;
609
610         while (true) {
611
612                 flags = 0;
613
614                 if (list_empty(&endpoint->txq))
615                         break;
616                 packet = list_first_entry(&endpoint->txq, struct htc_packet,
617                                           list);
618
619                 ath6kl_dbg(ATH6KL_DBG_HTC,
620                            "htc tx got packet 0x%p queue depth %d\n",
621                            packet, get_queue_depth(&endpoint->txq));
622
623                 len = CALC_TXRX_PADDED_LEN(target,
624                                            packet->act_len + HTC_HDR_LENGTH);
625
626                 if (htc_check_credits(target, endpoint, &flags,
627                                       packet->endpoint, len, &req_cred))
628                         break;
629
630                 /* now we can fully move onto caller's queue */
631                 packet = list_first_entry(&endpoint->txq, struct htc_packet,
632                                           list);
633                 list_move_tail(&packet->list, queue);
634
635                 /* save the number of credits this packet consumed */
636                 packet->info.tx.cred_used = req_cred;
637
638                 /* all TX packets are handled asynchronously */
639                 packet->completion = htc_tx_comp_handler;
640                 packet->context = target;
641                 endpoint->ep_st.tx_issued += 1;
642
643                 /* save send flags */
644                 packet->info.tx.flags = flags;
645                 packet->info.tx.seqno = endpoint->seqno;
646                 endpoint->seqno++;
647         }
648 }
649
650 /* See if the padded tx length falls on a credit boundary */
651 static int htc_get_credit_padding(unsigned int cred_sz, int *len,
652                                   struct htc_endpoint *ep)
653 {
654         int rem_cred, cred_pad;
655
656         rem_cred = *len % cred_sz;
657
658         /* No padding needed */
659         if  (!rem_cred)
660                 return 0;
661
662         if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
663                 return -1;
664
665         /*
666          * The transfer consumes a "partial" credit, this
667          * packet cannot be bundled unless we add
668          * additional "dummy" padding (max 255 bytes) to
669          * consume the entire credit.
670          */
671         cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
672
673         if ((cred_pad > 0) && (cred_pad <= 255))
674                 *len += cred_pad;
675         else
676                 /* The amount of padding is too large, send as non-bundled */
677                 return -1;
678
679         return cred_pad;
680 }
681
/*
 * Fill a scatter request with up to @n_scat packets pulled from @queue.
 *
 * Each packet gets its HTC header prepended (flagged as part of a send
 * bundle), is 4-byte aligned, and is accounted against the maximum
 * bundle size.  Returns 0 on success, -ENOSPC when a packet could not
 * be bundled, or -EAGAIN when fewer than the minimum bundle count was
 * gathered — in which case everything gathered is rolled back onto
 * @queue.
 */
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;
	u8 flags;

	/* Bytes still available in this bundle. */
	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		/* Pad to a whole credit, or give up on bundling this one. */
		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
		ath6kl_htc_tx_prep_pkt(packet, flags,
				       cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
			   i, packet, packet->info.tx.seqno, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				/* Undo the header prepend before requeueing. */
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}
748
/*
 * Drain a queue and send as bundles this function may return without fully
 * draining the queue when
 *
 *    1. scatter resources are exhausted
 *    2. a message that will consume a partial credit will stop the
 *    bundling process early
 *    3. we drop below the minimum number of messages for a bundle
 *
 * On return *sent_bundle holds the number of scatter requests submitted
 * and *n_bundle_pkts the total number of packets carried by them.
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
	struct htc_packet *packet;
	int status;
	u32 txb_mask;
	u8 ac = WMM_NUM_AC;

	/* data endpoints map to a WMM access category; control endpoints
	 * keep ac == WMM_NUM_AC and skip the per-AC mask logic below */
	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		status = 0;
		/* cap each bundle at what the target negotiated */
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources  */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
			if (WMM_AC_BE == ac)
				/*
				 * BE, BK have priorities and bit
				 * positions reversed
				 */
				txb_mask = (1 << WMM_AC_BK);
			else
				/*
				 * any AC with priority lower than
				 * itself
				 */
				txb_mask = ((1 << ac) - 1);

			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * AC's with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			/* setup rolled the packets back onto the queue;
			 * return the unused scatter request and stop */
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);

		for (i = 0; i < scat_req->scat_entries; i++) {
			packet = scat_req->scat_list[i].packet;
			trace_ath6kl_htc_tx(packet->status, packet->endpoint,
					    packet->buf, packet->act_len);
		}

		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		/* any non-EAGAIN error from scatter-list setup ends the
		 * bundling loop after the partially built request is sent */
		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}
860
/*
 * Pull packets off an endpoint's tx queue and transmit them, bundling
 * where enabled.  Re-entrancy is prevented with tx_proc_cnt; only one
 * context drains a given endpoint at a time.  tx_lock is dropped while
 * packets are actually issued to the HIF layer.
 */
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;
	int status;

	spin_lock_bh(&target->tx_lock);

	/* busy-guard: if another context is already draining this
	 * endpoint, back off and let it finish */
	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	/* data endpoints map to a WMM access category; control
	 * endpoints keep ac == WMM_NUM_AC */
	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		/* moves as many credit-backed packets as possible to txq */
		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		/* issue to the HIF layer without holding tx_lock */
		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			/* whatever could not be bundled goes out singly */
			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			status = ath6kl_htc_tx_issue(target, packet);

			if (status) {
				packet->status = status;
				packet->completion(packet->context, packet);
			}
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no tx bundling
		 * has occurred continuously for a certain number of TX,
		 * enable tx bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
					TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}
968
969 static bool ath6kl_htc_tx_try(struct htc_target *target,
970                               struct htc_endpoint *endpoint,
971                               struct htc_packet *tx_pkt)
972 {
973         struct htc_ep_callbacks ep_cb;
974         int txq_depth;
975         bool overflow = false;
976
977         ep_cb = endpoint->ep_cb;
978
979         spin_lock_bh(&target->tx_lock);
980         txq_depth = get_queue_depth(&endpoint->txq);
981         spin_unlock_bh(&target->tx_lock);
982
983         if (txq_depth >= endpoint->max_txq_depth)
984                 overflow = true;
985
986         if (overflow)
987                 ath6kl_dbg(ATH6KL_DBG_HTC,
988                            "htc tx overflow ep %d depth %d max %d\n",
989                            endpoint->eid, txq_depth,
990                            endpoint->max_txq_depth);
991
992         if (overflow && ep_cb.tx_full) {
993                 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
994                     HTC_SEND_FULL_DROP) {
995                         endpoint->ep_st.tx_dropped += 1;
996                         return false;
997                 }
998         }
999
1000         spin_lock_bh(&target->tx_lock);
1001         list_add_tail(&tx_pkt->list, &endpoint->txq);
1002         spin_unlock_bh(&target->tx_lock);
1003
1004         ath6kl_htc_tx_from_queue(target, endpoint);
1005
1006         return true;
1007 }
1008
/*
 * Walk the credit distribution list and restart any endpoint that has
 * packets waiting.  Used after credits may have been returned so that
 * previously stalled queues can make progress.
 */
static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			/* drop the lock: tx_from_queue takes it itself */
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}
1044
1045 static int htc_setup_tx_complete(struct htc_target *target)
1046 {
1047         struct htc_packet *send_pkt = NULL;
1048         int status;
1049
1050         send_pkt = htc_get_control_buf(target, true);
1051
1052         if (!send_pkt)
1053                 return -ENOMEM;
1054
1055         if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
1056                 struct htc_setup_comp_ext_msg *setup_comp_ext;
1057                 u32 flags = 0;
1058
1059                 setup_comp_ext =
1060                     (struct htc_setup_comp_ext_msg *)send_pkt->buf;
1061                 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
1062                 setup_comp_ext->msg_id =
1063                         cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1064
1065                 if (target->msg_per_bndl_max > 0) {
1066                         /* Indicate HTC bundling to the target */
1067                         flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
1068                         setup_comp_ext->msg_per_rxbndl =
1069                                                 target->msg_per_bndl_max;
1070                 }
1071
1072                 memcpy(&setup_comp_ext->flags, &flags,
1073                        sizeof(setup_comp_ext->flags));
1074                 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
1075                                  sizeof(struct htc_setup_comp_ext_msg),
1076                                  ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1077
1078         } else {
1079                 struct htc_setup_comp_msg *setup_comp;
1080                 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
1081                 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
1082                 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
1083                 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
1084                                  sizeof(struct htc_setup_comp_msg),
1085                                  ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1086         }
1087
1088         /* we want synchronous operation */
1089         send_pkt->completion = NULL;
1090         ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
1091         status = ath6kl_htc_tx_issue(target, send_pkt);
1092
1093         if (send_pkt != NULL)
1094                 htc_reclaim_txctrl_buf(target, send_pkt);
1095
1096         return status;
1097 }
1098
1099 static void ath6kl_htc_set_credit_dist(struct htc_target *target,
1100                                 struct ath6kl_htc_credit_info *credit_info,
1101                                 u16 srvc_pri_order[], int list_len)
1102 {
1103         struct htc_endpoint *endpoint;
1104         int i, ep;
1105
1106         target->credit_info = credit_info;
1107
1108         list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
1109                       &target->cred_dist_list);
1110
1111         for (i = 0; i < list_len; i++) {
1112                 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
1113                         endpoint = &target->endpoint[ep];
1114                         if (endpoint->svc_id == srvc_pri_order[i]) {
1115                                 list_add_tail(&endpoint->cred_dist.list,
1116                                               &target->cred_dist_list);
1117                                 break;
1118                         }
1119                 }
1120                 if (ep >= ENDPOINT_MAX) {
1121                         WARN_ON(1);
1122                         return;
1123                 }
1124         }
1125 }
1126
1127 static int ath6kl_htc_mbox_tx(struct htc_target *target,
1128                               struct htc_packet *packet)
1129 {
1130         struct htc_endpoint *endpoint;
1131         struct list_head queue;
1132
1133         ath6kl_dbg(ATH6KL_DBG_HTC,
1134                    "htc tx ep id %d buf 0x%p len %d\n",
1135                    packet->endpoint, packet->buf, packet->act_len);
1136
1137         if (packet->endpoint >= ENDPOINT_MAX) {
1138                 WARN_ON(1);
1139                 return -EINVAL;
1140         }
1141
1142         endpoint = &target->endpoint[packet->endpoint];
1143
1144         if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
1145                 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
1146                                  -ECANCELED : -ENOSPC;
1147                 INIT_LIST_HEAD(&queue);
1148                 list_add(&packet->list, &queue);
1149                 htc_tx_complete(endpoint, &queue);
1150         }
1151
1152         return 0;
1153 }
1154
1155 /* flush endpoint TX queue */
1156 static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
1157                            enum htc_endpoint_id eid, u16 tag)
1158 {
1159         struct htc_packet *packet, *tmp_pkt;
1160         struct list_head discard_q, container;
1161         struct htc_endpoint *endpoint = &target->endpoint[eid];
1162
1163         if (!endpoint->svc_id) {
1164                 WARN_ON(1);
1165                 return;
1166         }
1167
1168         /* initialize the discard queue */
1169         INIT_LIST_HEAD(&discard_q);
1170
1171         spin_lock_bh(&target->tx_lock);
1172
1173         list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
1174                 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
1175                     (tag == packet->info.tx.tag))
1176                         list_move_tail(&packet->list, &discard_q);
1177         }
1178
1179         spin_unlock_bh(&target->tx_lock);
1180
1181         list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
1182                 packet->status = -ECANCELED;
1183                 list_del(&packet->list);
1184                 ath6kl_dbg(ATH6KL_DBG_HTC,
1185                            "htc tx flushing pkt 0x%p len %d  ep %d tag 0x%x\n",
1186                            packet, packet->act_len,
1187                            packet->endpoint, packet->info.tx.tag);
1188
1189                 INIT_LIST_HEAD(&container);
1190                 list_add_tail(&packet->list, &container);
1191                 htc_tx_complete(endpoint, &container);
1192         }
1193
1194 }
1195
1196 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
1197 {
1198         struct htc_endpoint *endpoint;
1199         int i;
1200
1201         dump_cred_dist_stats(target);
1202
1203         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1204                 endpoint = &target->endpoint[i];
1205                 if (endpoint->svc_id == 0)
1206                         /* not in use.. */
1207                         continue;
1208                 ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
1209         }
1210 }
1211
/*
 * Flip an endpoint's HTC_EP_ACTIVE flag and, when the flag actually
 * changed, re-run credit distribution.  When an endpoint has gone
 * inactive the per-endpoint tx queues are re-examined afterwards so
 * other endpoints can use any credits that became available.
 */
static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id eid,
					     bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	/* endpoint must be connected (svc_id != 0) */
	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	/* only redistribute when the active state actually changes */
	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	/* after a deactivation, restart any queues that can now proceed */
	if (dist && !active)
		htc_chk_ep_txq(target);
}
1256
1257 /* HTC Rx */
1258
1259 static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
1260                                               int n_look_ahds)
1261 {
1262         endpoint->ep_st.rx_pkts++;
1263         if (n_look_ahds == 1)
1264                 endpoint->ep_st.rx_lkahds++;
1265         else if (n_look_ahds > 1)
1266                 endpoint->ep_st.rx_bundle_lkahd++;
1267 }
1268
1269 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
1270                                           enum htc_endpoint_id eid, int len)
1271 {
1272         return (eid == target->dev->ar->ctrl_ep) ?
1273                 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
1274 }
1275
1276 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1277 {
1278         struct list_head queue;
1279
1280         INIT_LIST_HEAD(&queue);
1281         list_add_tail(&packet->list, &queue);
1282         return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
1283 }
1284
1285 static void htc_reclaim_rxbuf(struct htc_target *target,
1286                               struct htc_packet *packet,
1287                               struct htc_endpoint *ep)
1288 {
1289         if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
1290                 htc_rxpkt_reset(packet);
1291                 packet->status = -ECANCELED;
1292                 ep->ep_cb.rx(ep->target, packet);
1293         } else {
1294                 htc_rxpkt_reset(packet);
1295                 htc_add_rxbuf((void *)(target), packet);
1296         }
1297 }
1298
/* Return a control-endpoint rx packet to the free list under htc_lock. */
static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}
1306
1307 static int ath6kl_htc_rx_packet(struct htc_target *target,
1308                                 struct htc_packet *packet,
1309                                 u32 rx_len)
1310 {
1311         struct ath6kl_device *dev = target->dev;
1312         u32 padded_len;
1313         int status;
1314
1315         padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
1316
1317         if (padded_len > packet->buf_len) {
1318                 ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
1319                            padded_len, rx_len, packet->buf_len);
1320                 return -ENOMEM;
1321         }
1322
1323         ath6kl_dbg(ATH6KL_DBG_HTC,
1324                    "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
1325                    packet, packet->info.rx.exp_hdr,
1326                    padded_len, dev->ar->mbox_info.htc_addr);
1327
1328         status = hif_read_write_sync(dev->ar,
1329                                      dev->ar->mbox_info.htc_addr,
1330                                      packet->buf, padded_len,
1331                                      HIF_RD_SYNC_BLOCK_FIX);
1332
1333         packet->status = status;
1334
1335         return status;
1336 }
1337
1338 /*
1339  * optimization for recv packets, we can indicate a
1340  * "hint" that there are more  single-packets to fetch
1341  * on this endpoint.
1342  */
1343 static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
1344                                        struct htc_endpoint *endpoint,
1345                                        struct htc_packet *packet)
1346 {
1347         struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
1348
1349         if (htc_hdr->eid == packet->endpoint) {
1350                 if (!list_empty(&endpoint->rx_bufq))
1351                         packet->info.rx.indicat_flags |=
1352                                         HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1353         }
1354 }
1355
/*
 * If the endpoint registered a refill threshold, invoke its rx_refill
 * callback when the number of queued rx buffers has fallen below that
 * threshold.  rx_lock is released before the callback runs.
 */
static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			/* drop the lock before calling out to the owner */
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}
1371
/*
 * Allocate/claim n_msg receive buffers for the frame(s) described by the
 * first look-ahead and append them to 'queue'.  Buffers come either from
 * the endpoint's rx_allocthresh callback (large frames, not recyclable)
 * or from its rx_bufq (optionally refilled via rx_refill).
 *
 * Returns 0 on success, -EINVAL for a bad frame length, -ENOSPC when no
 * buffer could be obtained (rx wait state is recorded on the target), or
 * -ECANCELED while HTC is stopping.  On any failure the caller must free
 * whatever was already placed on 'queue'.
 *
 * This function is called with rx_lock held (it may drop and re-take it
 * around the endpoint callbacks).
 */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	/* padded length of header + payload as it will arrive on the bus */
	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
			    htc_hdr->eid, htc_hdr->flags,
			    le16_to_cpu(htc_hdr->payld_len));
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup,they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			/* large frame: ask the endpoint owner for a buffer */
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			/* rx_lock must not be held across the callback */
			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			/* record that rx must wait for buffers on this ep */
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			/* only the first packet of a bundle has a known
			 * look-ahead; the rest refresh from their header */
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}
1476
/*
 * Validate each of 'msg' look-aheads and set up receive buffers (via
 * ath6kl_htc_rx_setup()) for the frames they describe, appending the
 * buffers to 'queue'.  A look-ahead with the bundle-count flag expands
 * to multiple messages.
 *
 * Returns 0 on success (including the buffers-exhausted case, which is
 * reported as success so a partial fetch can proceed) or -ENOMEM on any
 * validation failure; on failure all buffers already queued are
 * reclaimed before returning.
 */
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		/* the target-supplied endpoint id must be in range */
		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		/* and it must match the endpoint being serviced */
		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	/* on failure, give back everything that was already queued */
	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
1575
1576 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1577 {
1578         if (packets->endpoint != ENDPOINT_0) {
1579                 WARN_ON(1);
1580                 return;
1581         }
1582
1583         if (packets->status == -ECANCELED) {
1584                 reclaim_rx_ctrl_buf(context, packets);
1585                 return;
1586         }
1587
1588         if (packets->act_len > 0) {
1589                 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1590                            packets->act_len + HTC_HDR_LENGTH);
1591
1592                 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1593                                 "htc rx unexpected endpoint 0 message", "",
1594                                 packets->buf - HTC_HDR_LENGTH,
1595                                 packets->act_len + HTC_HDR_LENGTH);
1596         }
1597
1598         htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1599 }
1600
/*
 * Process an array of credit-report entries received from the target.
 * Per-endpoint credit statistics are updated and, for non-zero endpoints,
 * the returned credits are queued for redistribution.  The whole walk and
 * the redistribution are done under tx_lock; the TX queues are kicked
 * afterwards (without the lock) if any credits came back.
 *
 * @rpt: first of @n_entries credit report records (from an RX trailer)
 * @from_ep: endpoint the report arrived on (used only for statistics)
 */
static void htc_proc_cred_rpt(struct htc_target *target,
                              struct htc_credit_report *rpt,
                              int n_entries,
                              enum htc_endpoint_id from_ep)
{
        struct htc_endpoint *endpoint;
        int tot_credits = 0, i;
        bool dist = false;

        spin_lock_bh(&target->tx_lock);

        for (i = 0; i < n_entries; i++, rpt++) {
                /* eid comes from the target; a bad value aborts the walk */
                if (rpt->eid >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        spin_unlock_bh(&target->tx_lock);
                        return;
                }

                endpoint = &target->endpoint[rpt->eid];

                ath6kl_dbg(ATH6KL_DBG_CREDIT,
                           "credit report ep %d credits %d\n",
                           rpt->eid, rpt->credits);

                endpoint->ep_st.tx_cred_rpt += 1;
                endpoint->ep_st.cred_retnd += rpt->credits;

                if (from_ep == rpt->eid) {
                        /*
                         * This credit report arrived on the same endpoint
                         * indicating it arrived in an RX packet.
                         */
                        endpoint->ep_st.cred_from_rx += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_rx += 1;
                } else if (from_ep == ENDPOINT_0) {
                        /* credit arrived on endpoint 0 as a NULL message */
                        endpoint->ep_st.cred_from_ep0 += rpt->credits;
                        endpoint->ep_st.cred_rpt_ep0 += 1;
                } else {
                        endpoint->ep_st.cred_from_other += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_other += 1;
                }

                if (rpt->eid == ENDPOINT_0)
                        /* always give endpoint 0 credits back */
                        endpoint->cred_dist.credits += rpt->credits;
                else {
                        /* defer to the distribution function below */
                        endpoint->cred_dist.cred_to_dist += rpt->credits;
                        dist = true;
                }

                /*
                 * Refresh tx depth for distribution function that will
                 * recover these credits NOTE: this is only valid when
                 * there are credits to recover!
                 */
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                tot_credits += rpt->credits;
        }

        if (dist) {
                /*
                 * This was a credit return based on a completed send
                 * operations note, this is done with the lock held
                 */
                ath6kl_credit_distribute(target->credit_info,
                                         &target->cred_dist_list,
                                         HTC_CREDIT_DIST_SEND_COMPLETE);
        }

        spin_unlock_bh(&target->tx_lock);

        /* newly available credits may unblock queued TX traffic */
        if (tot_credits)
                htc_chk_ep_txq(target);
}
1678
/*
 * Parse a single trailer record from an RX packet.
 *
 * Dispatches on the record type: credit reports are fed to
 * htc_proc_cred_rpt(), lookahead (single or bundled) reports copy the
 * next lookahead value(s) into @next_lk_ahds when the caller supplied a
 * destination.  Unknown record types are logged and ignored.
 *
 * @record: byte-aligned record header already validated by the caller
 * @record_buf: the record payload (record->len bytes)
 * @next_lk_ahds: output array for lookahead values, may be NULL
 * @n_lk_ahds: output count of lookaheads written (only set on success)
 *
 * Returns 0 on success or if the record type is unknown; -EINVAL when a
 * record's length is inconsistent with its type.
 */
static int htc_parse_trailer(struct htc_target *target,
                             struct htc_record_hdr *record,
                             u8 *record_buf, u32 *next_lk_ahds,
                             enum htc_endpoint_id endpoint,
                             int *n_lk_ahds)
{
        struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
        struct htc_lookahead_report *lk_ahd;
        int len;

        switch (record->rec_id) {
        case HTC_RECORD_CREDITS:
                /* payload must hold a whole number of credit reports */
                len = record->len / sizeof(struct htc_credit_report);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                htc_proc_cred_rpt(target,
                                  (struct htc_credit_report *) record_buf,
                                  len, endpoint);
                break;
        case HTC_RECORD_LOOKAHEAD:
                len = record->len / sizeof(*lk_ahd);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                lk_ahd = (struct htc_lookahead_report *) record_buf;
                /*
                 * pre_valid/post_valid are one's complements of each other
                 * when the lookahead field is valid.
                 */
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
                    next_lk_ahds) {

                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);

                        /* look ahead bytes are valid, copy them over */
                        memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

                        ath6kl_dbg_dump(ATH6KL_DBG_HTC,
                                        "htc rx next look ahead",
                                        "", next_lk_ahds, 4);

                        *n_lk_ahds = 1;
                }
                break;
        case HTC_RECORD_LOOKAHEAD_BUNDLE:
                /* bounded by what a single bundle fetch can carry */
                len = record->len / sizeof(*bundle_lkahd_rpt);
                if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                if (next_lk_ahds) {
                        int i;

                        bundle_lkahd_rpt =
                                (struct htc_bundle_lkahd_rpt *) record_buf;

                        ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
                                        "", record_buf, record->len);

                        /* one lookahead per message in the coming bundle */
                        for (i = 0; i < len; i++) {
                                memcpy((u8 *)&next_lk_ahds[i],
                                       bundle_lkahd_rpt->lk_ahd, 4);
                                bundle_lkahd_rpt++;
                        }

                        *n_lk_ahds = i;
                }
                break;
        default:
                ath6kl_err("unhandled record: id:%d len:%d\n",
                           record->rec_id, record->len);
                break;
        }

        return 0;

}
1760
1761 static int htc_proc_trailer(struct htc_target *target,
1762                             u8 *buf, int len, u32 *next_lk_ahds,
1763                             int *n_lk_ahds, enum htc_endpoint_id endpoint)
1764 {
1765         struct htc_record_hdr *record;
1766         int orig_len;
1767         int status;
1768         u8 *record_buf;
1769         u8 *orig_buf;
1770
1771         ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
1772         ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
1773
1774         orig_buf = buf;
1775         orig_len = len;
1776         status = 0;
1777
1778         while (len > 0) {
1779
1780                 if (len < sizeof(struct htc_record_hdr)) {
1781                         status = -ENOMEM;
1782                         break;
1783                 }
1784                 /* these are byte aligned structs */
1785                 record = (struct htc_record_hdr *) buf;
1786                 len -= sizeof(struct htc_record_hdr);
1787                 buf += sizeof(struct htc_record_hdr);
1788
1789                 if (record->len > len) {
1790                         ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1791                                    record->len, record->rec_id, len);
1792                         status = -ENOMEM;
1793                         break;
1794                 }
1795                 record_buf = buf;
1796
1797                 status = htc_parse_trailer(target, record, record_buf,
1798                                            next_lk_ahds, endpoint, n_lk_ahds);
1799
1800                 if (status)
1801                         break;
1802
1803                 /* advance buffer past this record for next time around */
1804                 buf += record->len;
1805                 len -= record->len;
1806         }
1807
1808         if (status)
1809                 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
1810                                 "", orig_buf, orig_len);
1811
1812         return status;
1813 }
1814
/*
 * Validate and strip the HTC frame header from a received packet.
 *
 * Cross-checks the header against the lookahead that triggered the fetch
 * (or refreshes the expectation for packets grabbed blindly as part of a
 * bundle), processes any trailer records, then advances packet->buf past
 * the header and shrinks act_len to the bare payload.
 *
 * @next_lkahds/@n_lkahds: outputs for lookahead(s) found in the trailer;
 * both may be NULL when the caller does not want lookahead processing.
 *
 * Returns 0 on success; -ENOMEM on any validation failure (the packet is
 * then dumped for debugging).
 */
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
                                     struct htc_packet *packet,
                                     u32 *next_lkahds, int *n_lkahds)
{
        int status = 0;
        u16 payload_len;
        u32 lk_ahd;
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

        if (n_lkahds != NULL)
                *n_lkahds = 0;

        /*
         * NOTE: we cannot assume the alignment of buf, so we use the safe
         * macros to retrieve 16 bit fields.
         */
        payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

        /* first 4 bytes of the header double as the lookahead value */
        memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

        if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
                /*
                 * Refresh the expected header and the actual length as it
                 * was unknown when this packet was grabbed as part of the
                 * bundle.
                 */
                packet->info.rx.exp_hdr = lk_ahd;
                packet->act_len = payload_len + HTC_HDR_LENGTH;

                /* validate the actual header that was refreshed  */
                if (packet->act_len > packet->buf_len) {
                        ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
                                   payload_len, lk_ahd);
                        /*
                         * Limit this to max buffer just to print out some
                         * of the buffer.
                         */
                        packet->act_len = min(packet->act_len, packet->buf_len);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->endpoint != htc_hdr->eid) {
                        ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
                                   htc_hdr->eid, packet->endpoint);
                        status = -ENOMEM;
                        goto fail_rx;
                }
        }

        /* the on-wire header must match what the lookahead promised */
        if (lk_ahd != packet->info.rx.exp_hdr) {
                ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
                           __func__, packet, packet->info.rx.rx_flags);
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
                                "", &packet->info.rx.exp_hdr, 4);
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
                                "", (u8 *)&lk_ahd, sizeof(lk_ahd));
                status = -ENOMEM;
                goto fail_rx;
        }

        if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
                /* ctrl[0] holds the trailer length, at the payload's tail */
                if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
                    htc_hdr->ctrl[0] > payload_len) {
                        ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
                                   __func__, payload_len, htc_hdr->ctrl[0]);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
                        /* suppress lookahead extraction for this packet */
                        next_lkahds = NULL;
                        n_lkahds = NULL;
                }

                /* trailer occupies the last ctrl[0] bytes of the payload */
                status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
                                          + payload_len - htc_hdr->ctrl[0],
                                          htc_hdr->ctrl[0], next_lkahds,
                                           n_lkahds, packet->endpoint);

                if (status)
                        goto fail_rx;

                packet->act_len -= htc_hdr->ctrl[0];
        }

        /* strip the HTC header; buf/act_len now describe the payload only */
        packet->buf += HTC_HDR_LENGTH;
        packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
                                "", packet->buf, packet->act_len);

        return status;
}
1911
1912 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1913                                    struct htc_packet *packet)
1914 {
1915                 ath6kl_dbg(ATH6KL_DBG_HTC,
1916                            "htc rx complete ep %d packet 0x%p\n",
1917                            endpoint->eid, packet);
1918
1919                 endpoint->ep_cb.rx(endpoint->target, packet);
1920 }
1921
/*
 * Fetch several RX packets from the target in one scatter/gather request.
 *
 * Takes up to msg_per_bndl_max packets from @rxq, maps each packet's
 * buffer to one scatter-list entry (padded to the block size) and submits
 * a single synchronous scatter read.  Successfully queued packets are
 * moved onto @sync_compq for later header processing.
 *
 * @n_pkt_fetched: out, number of packets actually fetched on success.
 * @part_bundle: true when this is already a partial bundle, forcing
 * lookahead data in every packet to be ignored.
 *
 * Returns 0 on success or if no scatter request was available (in which
 * case *n_pkt_fetched stays 0 and the caller falls back to a synchronous
 * per-packet fetch); otherwise the scatter-request submit error.
 */
static int ath6kl_htc_rx_bundle(struct htc_target *target,
                                struct list_head *rxq,
                                struct list_head *sync_compq,
                                int *n_pkt_fetched, bool part_bundle)
{
        struct hif_scatter_req *scat_req;
        struct htc_packet *packet;
        int rem_space = target->max_rx_bndl_sz;
        int n_scat_pkt, status = 0, i, len;

        n_scat_pkt = get_queue_depth(rxq);
        n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

        if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
                /*
                 * We were forced to split this bundle receive operation
                 * all packets in this partial bundle must have their
                 * lookaheads ignored.
                 */
                part_bundle = true;

                /*
                 * This would only happen if the target ignored our max
                 * bundle limit.
                 */
                ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
                            __func__, get_queue_depth(rxq), n_scat_pkt);
        }

        len = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx bundle depth %d pkts %d\n",
                   get_queue_depth(rxq), n_scat_pkt);

        scat_req = hif_scatter_req_get(target->dev->ar);

        /* no free scatter request: report 0 fetched, caller goes sync */
        if (scat_req == NULL)
                goto fail_rx_pkt;

        for (i = 0; i < n_scat_pkt; i++) {
                int pad_len;

                packet = list_first_entry(rxq, struct htc_packet, list);
                list_del(&packet->list);

                /* pad each transfer up to the device block size */
                pad_len = CALC_TXRX_PADDED_LEN(target,
                                                   packet->act_len);

                if ((rem_space - pad_len) < 0) {
                        /* doesn't fit in this bundle, put it back */
                        list_add(&packet->list, rxq);
                        break;
                }

                rem_space -= pad_len;

                if (part_bundle || (i < (n_scat_pkt - 1)))
                        /*
                         * Packet 0..n-1 cannot be checked for look-aheads
                         * since we are fetching a bundle the last packet
                         * however can have it's lookahead used
                         */
                        packet->info.rx.rx_flags |=
                            HTC_RX_PKT_IGNORE_LOOKAHEAD;

                /* NOTE: 1 HTC packet per scatter entry */
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = pad_len;

                packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

                list_add_tail(&packet->list, sync_compq);

                WARN_ON(!scat_req->scat_list[i].len);
                len += scat_req->scat_list[i].len;
        }

        scat_req->len = len;
        scat_req->scat_entries = i;

        /* synchronous submit; packets are filled in when this returns */
        status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

        if (!status)
                *n_pkt_fetched = i;

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

        return status;
}
2014
/*
 * Post-process a list of fetched RX packets.
 *
 * For each packet: validate and strip its HTC header (which may refresh
 * @lk_ahds/@n_lk_ahd from trailer records), set the "more packets"
 * indication flags, update endpoint statistics and hand the packet to the
 * endpoint's receive callback.  Packets are removed from @comp_pktq as
 * they are consumed.
 *
 * Returns 0 on success; stops and returns the error from header
 * processing otherwise (remaining packets stay on @comp_pktq for the
 * caller to reclaim).
 */
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
                                         struct list_head *comp_pktq,
                                         u32 lk_ahds[],
                                         int *n_lk_ahd)
{
        struct htc_packet *packet, *tmp_pkt;
        struct htc_endpoint *ep;
        int status = 0;

        list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
                ep = &target->endpoint[packet->endpoint];

                trace_ath6kl_htc_rx(packet->status, packet->endpoint,
                                    packet->buf, packet->act_len);

                /* process header for each of the recv packet */
                status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
                                                   n_lk_ahd);
                if (status)
                        return status;

                list_del(&packet->list);

                if (list_empty(comp_pktq)) {
                        /*
                         * Last packet's more packet flag is set
                         * based on the lookahead.
                         */
                        if (*n_lk_ahd > 0)
                                ath6kl_htc_rx_set_indicate(lk_ahds[0],
                                                           ep, packet);
                } else
                        /*
                         * Packets in a bundle automatically have
                         * this flag set.
                         */
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;

                ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

                if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
                        ep->ep_st.rx_bundl += 1;

                /* hand the payload to the endpoint's rx callback */
                ath6kl_htc_rx_complete(ep, packet);
        }

        return status;
}
2064
/*
 * Fetch all packets on @rx_pktq from the target, bundling where possible.
 *
 * While packets remain, either issue a bundled scatter fetch (when RX
 * bundling is enabled and more than one packet is queued) or fall back to
 * a synchronous single-packet fetch.  Successfully fetched packets are
 * moved onto @comp_pktq; on failure every packet still on @rx_pktq or the
 * in-flight temporary queue is reclaimed.
 *
 * Returns 0 on success, the fetch error otherwise.
 */
static int ath6kl_htc_rx_fetch(struct htc_target *target,
                               struct list_head *rx_pktq,
                               struct list_head *comp_pktq)
{
        int fetched_pkts;
        bool part_bundle = false;
        int status = 0;
        struct list_head tmp_rxq;
        struct htc_packet *packet, *tmp_pkt;

        /* now go fetch the list of HTC packets */
        while (!list_empty(rx_pktq)) {
                fetched_pkts = 0;

                INIT_LIST_HEAD(&tmp_rxq);

                if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
                        /*
                         * There are enough packets to attempt a
                         * bundle transfer and recv bundling is
                         * allowed.
                         */
                        status = ath6kl_htc_rx_bundle(target, rx_pktq,
                                                      &tmp_rxq,
                                                      &fetched_pkts,
                                                      part_bundle);
                        if (status)
                                goto fail_rx;

                        /* leftovers mean the next round is a partial bundle */
                        if (!list_empty(rx_pktq))
                                part_bundle = true;

                        list_splice_tail_init(&tmp_rxq, comp_pktq);
                }

                /* bundle path fetched nothing (or was skipped): go sync */
                if (!fetched_pkts) {

                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                   list);

                        /* fully synchronous */
                        packet->completion = NULL;

                        if (!list_is_singular(rx_pktq))
                                /*
                                 * look_aheads in all packet
                                 * except the last one in the
                                 * bundle must be ignored
                                 */
                                packet->info.rx.rx_flags |=
                                        HTC_RX_PKT_IGNORE_LOOKAHEAD;

                        /* go fetch the packet */
                        status = ath6kl_htc_rx_packet(target, packet,
                                                      packet->act_len);

                        list_move_tail(&packet->list, &tmp_rxq);

                        if (status)
                                goto fail_rx;

                        list_splice_tail_init(&tmp_rxq, comp_pktq);
                }
        }

        return 0;

fail_rx:

        /*
         * Cleanup any packets we allocated but didn't use to
         * actually fetch any packets.
         */

        list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
                list_del(&packet->list);
                htc_reclaim_rxbuf(target, packet,
                                  &target->endpoint[packet->endpoint]);
        }

        /* also reclaim packets stranded on the temporary queue */
        list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
                list_del(&packet->list);
                htc_reclaim_rxbuf(target, packet,
                                  &target->endpoint[packet->endpoint]);
        }

        return status;
}
2153
2154 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2155                                      u32 msg_look_ahead, int *num_pkts)
2156 {
2157         struct htc_packet *packets, *tmp_pkt;
2158         struct htc_endpoint *endpoint;
2159         struct list_head rx_pktq, comp_pktq;
2160         int status = 0;
2161         u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2162         int num_look_ahead = 1;
2163         enum htc_endpoint_id id;
2164         int n_fetched = 0;
2165
2166         INIT_LIST_HEAD(&comp_pktq);
2167         *num_pkts = 0;
2168
2169         /*
2170          * On first entry copy the look_aheads into our temp array for
2171          * processing
2172          */
2173         look_aheads[0] = msg_look_ahead;
2174
2175         while (true) {
2176
2177                 /*
2178                  * First lookahead sets the expected endpoint IDs for all
2179                  * packets in a bundle.
2180                  */
2181                 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2182                 endpoint = &target->endpoint[id];
2183
2184                 if (id >= ENDPOINT_MAX) {
2185                         ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2186                                    id);
2187                         status = -ENOMEM;
2188                         break;
2189                 }
2190
2191                 INIT_LIST_HEAD(&rx_pktq);
2192                 INIT_LIST_HEAD(&comp_pktq);
2193
2194                 /*
2195                  * Try to allocate as many HTC RX packets indicated by the
2196                  * look_aheads.
2197                  */
2198                 status = ath6kl_htc_rx_alloc(target, look_aheads,
2199                                              num_look_ahead, endpoint,
2200                                              &rx_pktq);
2201                 if (status)
2202                         break;
2203
2204                 if (get_queue_depth(&rx_pktq) >= 2)
2205                         /*
2206                          * A recv bundle was detected, force IRQ status
2207                          * re-check again
2208                          */
2209                         target->chk_irq_status_cnt = 1;
2210
2211                 n_fetched += get_queue_depth(&rx_pktq);
2212
2213                 num_look_ahead = 0;
2214
2215                 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
2216
2217                 if (!status)
2218                         ath6kl_htc_rx_chk_water_mark(endpoint);
2219
2220                 /* Process fetched packets */
2221                 status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2222                                                        look_aheads,
2223                                                        &num_look_ahead);
2224
2225                 if (!num_look_ahead || status)
2226                         break;
2227
2228                 /*
2229                  * For SYNCH processing, if we get here, we are running
2230                  * through the loop again due to a detected lookahead. Set
2231                  * flag that we should re-check IRQ status registers again
2232                  * before leaving IRQ processing, this can net better
2233                  * performance in high throughput situations.
2234                  */
2235                 target->chk_irq_status_cnt = 1;
2236         }
2237
2238         if (status) {
2239                 ath6kl_err("failed to get pending recv messages: %d\n",
2240                            status);
2241
2242                 /* cleanup any packets in sync completion queue */
2243                 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2244                         list_del(&packets->list);
2245                         htc_reclaim_rxbuf(target, packets,
2246                                           &target->endpoint[packets->endpoint]);
2247                 }
2248
2249                 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2250                         ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
2251                         ath6kl_hif_rx_control(target->dev, false);
2252                 }
2253         }
2254
2255         /*
2256          * Before leaving, check to see if host ran out of buffers and
2257          * needs to stop the receiver.
2258          */
2259         if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2260                 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
2261                 ath6kl_hif_rx_control(target->dev, false);
2262         }
2263         *num_pkts = n_fetched;
2264
2265         return status;
2266 }
2267
/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY.  At init messages
 * on ENDPOINT 0 are expected.
 *
 * Polls the mailbox for a lookahead, allocates a control buffer sized
 * from the advertised payload length, performs a blocking fetch and then
 * validates/strips the HTC header.
 *
 * Returns the received control packet, or NULL on timeout or any
 * validation failure (the control buffer is returned to its pool).
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_frame_hdr *htc_hdr;
        u32 look_ahead;

        /* block until the target posts a lookahead, or time out */
        if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
                                       HTC_TARGET_RESPONSE_TIMEOUT))
                return NULL;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

        /* the lookahead doubles as the frame header */
        htc_hdr = (struct htc_frame_hdr *)&look_ahead;

        if (htc_hdr->eid != ENDPOINT_0)
                return NULL;

        packet = htc_get_control_buf(target, false);

        if (!packet)
                return NULL;

        packet->info.rx.rx_flags = 0;
        packet->info.rx.exp_hdr = look_ahead;
        packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

        /* advertised length must fit the control buffer */
        if (packet->act_len > packet->buf_len)
                goto fail_ctrl_rx;

        /* we want synchronous operation */
        packet->completion = NULL;

        /* get the message from the device, this will block */
        if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
                goto fail_ctrl_rx;

        trace_ath6kl_htc_rx(packet->status, packet->endpoint,
                            packet->buf, packet->act_len);

        /* process receive header */
        packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

        if (packet->status) {
                ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
                           packet->status);
                goto fail_ctrl_rx;
        }

        return packet;

fail_ctrl_rx:
        /* on any failure, reset and return the control buffer */
        if (packet != NULL) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return NULL;
}
2332
/*
 * Queue a list of receive buffers on an endpoint's rx_bufq.  All
 * packets in @pkt_queue are assumed to target the same endpoint (taken
 * from the first entry).  While HTC is stopping the packets are
 * completed with -ECANCELED instead of being queued.
 *
 * Returns 0 on success or -ENOMEM if @pkt_queue is empty.
 */
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
                                  struct list_head *pkt_queue)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *first_pkt;
        bool rx_unblock = false;
        int status = 0, depth;

        if (list_empty(pkt_queue))
                return -ENOMEM;

        first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

        /* NOTE(review): an out-of-range endpoint returns 0 (success)
         * here without queuing anything - callers see no error */
        if (first_pkt->endpoint >= ENDPOINT_MAX)
                return status;

        depth = get_queue_depth(pkt_queue);

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx add multiple ep id %d cnt %d len %d\n",
                first_pkt->endpoint, depth, first_pkt->buf_len);

        endpoint = &target->endpoint[first_pkt->endpoint];

        if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                struct htc_packet *packet, *tmp_pkt;

                /* walk through queue and mark each one canceled */
                list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
                        packet->status = -ECANCELED;
                        list_del(&packet->list);
                        ath6kl_htc_rx_complete(endpoint, packet);
                }

                return status;
        }

        spin_lock_bh(&target->rx_lock);

        /* move all buffers onto the endpoint queue in one operation */
        list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

        /* check if we are blocked waiting for a new buffer */
        if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
                if (target->ep_waiting == first_pkt->endpoint) {
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx blocked on ep %d, unblocking\n",
                                   target->ep_waiting);
                        target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ENDPOINT_MAX;
                        rx_unblock = true;
                }
        }

        spin_unlock_bh(&target->rx_lock);

        /* re-enable receive outside the lock */
        if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
                /* TODO : implement a buffer threshold count? */
                ath6kl_hif_rx_control(target->dev, true);

        return status;
}
2394
/*
 * Free every receive buffer queued on all in-use endpoints.  Called
 * from the stop path (see ath6kl_htc_mbox_stop) after interrupts have
 * been masked, so no new buffers are being queued concurrently.
 */
static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet, *tmp_pkt;
        int i;

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (!endpoint->svc_id)
                        /* not in use.. */
                        continue;

                spin_lock_bh(&target->rx_lock);
                list_for_each_entry_safe(packet, tmp_pkt,
                                         &endpoint->rx_bufq, list) {
                        list_del(&packet->list);
                        /* drop the lock while freeing; the packet is
                         * already unlinked and _safe iteration keeps the
                         * next entry in tmp_pkt */
                        spin_unlock_bh(&target->rx_lock);
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx flush pkt 0x%p  len %d  ep %d\n",
                                   packet, packet->buf_len,
                                   packet->endpoint);
                        /*
                         * packets in rx_bufq of endpoint 0 have originally
                         * been queued from target->free_ctrl_rxbuf where
                         * packet and packet->buf_start are allocated
                         * separately using kmalloc(). For other endpoint
                         * rx_bufq, it is allocated as skb where packet is
                         * skb->head. Take care of this difference while freeing
                         * the memory.
                         */
                        if (packet->endpoint == ENDPOINT_0) {
                                kfree(packet->buf_start);
                                kfree(packet);
                        } else {
                                dev_kfree_skb(packet->pkt_cntxt);
                        }
                        spin_lock_bh(&target->rx_lock);
                }
                spin_unlock_bh(&target->rx_lock);
        }
}
2436
/*
 * Connect a service on the target and set up the matching host
 * endpoint.  For the pseudo control service (HTC_CTRL_RSVD_SVC) no
 * message exchange takes place; otherwise a connect request is sent
 * on endpoint 0 and the response is awaited synchronously.
 *
 * On success the assigned endpoint and its max message size are
 * returned through @conn_resp.  Control buffers used for the exchange
 * are reclaimed on both success and failure.
 */
static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
                            struct htc_service_connect_req *conn_req,
                            struct htc_service_connect_resp *conn_resp)
{
        struct htc_packet *rx_pkt = NULL;
        struct htc_packet *tx_pkt = NULL;
        struct htc_conn_service_resp *resp_msg;
        struct htc_conn_service_msg *conn_msg;
        struct htc_endpoint *endpoint;
        enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
        unsigned int max_msg_sz = 0;
        int status = 0;
        u16 msg_id;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc connect service target 0x%p service id 0x%x\n",
                   target, conn_req->svc_id);

        if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
                /* special case for pseudo control service */
                assigned_ep = ENDPOINT_0;
                max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
        } else {
                /* allocate a packet to send to the target */
                tx_pkt = htc_get_control_buf(target, true);

                if (!tx_pkt)
                        return -ENOMEM;

                conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
                memset(conn_msg, 0, sizeof(*conn_msg));
                conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
                conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
                conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

                /* svc_meta_len is zero here (cleared by the memset above) */
                set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
                                 sizeof(*conn_msg) + conn_msg->svc_meta_len,
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

                /* we want synchronous operation */
                tx_pkt->completion = NULL;
                ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
                status = ath6kl_htc_tx_issue(target, tx_pkt);

                if (status)
                        goto fail_tx;

                /* wait for response */
                rx_pkt = htc_wait_for_ctrl_msg(target);

                if (!rx_pkt) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
                msg_id = le16_to_cpu(resp_msg->msg_id);

                /* sanity check the response type and length */
                if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
                    (rx_pkt->act_len < sizeof(*resp_msg))) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                conn_resp->resp_code = resp_msg->status;
                /* check response status */
                if (resp_msg->status != HTC_SERVICE_SUCCESS) {
                        ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
                                   resp_msg->svc_id, resp_msg->status);
                        status = -ENOMEM;
                        goto fail_tx;
                }

                assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
                max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
        }

        /* the target must have supplied a sane endpoint and message size */
        if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
                         assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
                status = -ENOMEM;
                goto fail_tx;
        }

        endpoint = &target->endpoint[assigned_ep];
        endpoint->eid = assigned_ep;
        /* a non-zero svc_id means the endpoint is already in use */
        if (endpoint->svc_id) {
                status = -ENOMEM;
                goto fail_tx;
        }

        /* return assigned endpoint to caller */
        conn_resp->endpoint = assigned_ep;
        conn_resp->len_max = max_msg_sz;

        /* setup the endpoint */

        /* this marks the endpoint in use */
        endpoint->svc_id = conn_req->svc_id;

        endpoint->max_txq_depth = conn_req->max_txq_depth;
        endpoint->len_max = max_msg_sz;
        endpoint->ep_cb = conn_req->ep_cb;
        endpoint->cred_dist.svc_id = conn_req->svc_id;
        endpoint->cred_dist.htc_ep = endpoint;
        endpoint->cred_dist.endpoint = assigned_ep;
        endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

        /* per-service tx drop threshold (in cookies) */
        switch (endpoint->svc_id) {
        case WMI_DATA_BK_SVC:
                endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
                break;
        default:
                endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
                break;
        }

        if (conn_req->max_rxmsg_sz) {
                /*
                 * Override cred_per_msg calculation, this optimizes
                 * the credit-low indications since the host will actually
                 * issue smaller messages in the Send path.
                 */
                if (conn_req->max_rxmsg_sz > max_msg_sz) {
                        status = -ENOMEM;
                        goto fail_tx;
                }
                endpoint->cred_dist.cred_per_msg =
                    conn_req->max_rxmsg_sz / target->tgt_cred_sz;
        } else
                endpoint->cred_dist.cred_per_msg =
                    max_msg_sz / target->tgt_cred_sz;

        /* a message always costs at least one credit */
        if (!endpoint->cred_dist.cred_per_msg)
                endpoint->cred_dist.cred_per_msg = 1;

        /* save local connection flags */
        endpoint->conn_flags = conn_req->flags;

fail_tx:
        /* control buffers are reclaimed on success and failure alike */
        if (tx_pkt)
                htc_reclaim_txctrl_buf(target, tx_pkt);

        if (rx_pkt) {
                htc_rxpkt_reset(rx_pkt);
                reclaim_rx_ctrl_buf(target, rx_pkt);
        }

        return status;
}
2586
2587 static void reset_ep_state(struct htc_target *target)
2588 {
2589         struct htc_endpoint *endpoint;
2590         int i;
2591
2592         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2593                 endpoint = &target->endpoint[i];
2594                 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2595                 endpoint->svc_id = 0;
2596                 endpoint->len_max = 0;
2597                 endpoint->max_txq_depth = 0;
2598                 memset(&endpoint->ep_st, 0,
2599                        sizeof(endpoint->ep_st));
2600                 INIT_LIST_HEAD(&endpoint->rx_bufq);
2601                 INIT_LIST_HEAD(&endpoint->txq);
2602                 endpoint->target = target;
2603         }
2604
2605         /* reset distribution list */
2606         /* FIXME: free existing entries */
2607         INIT_LIST_HEAD(&target->cred_dist_list);
2608 }
2609
2610 static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
2611                              enum htc_endpoint_id endpoint)
2612 {
2613         int num;
2614
2615         spin_lock_bh(&target->rx_lock);
2616         num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2617         spin_unlock_bh(&target->rx_lock);
2618         return num;
2619 }
2620
/*
 * Configure HTC message bundling (several HTC frames per bus transfer)
 * based on what the target advertised and what the HIF scatter support
 * can handle.  Bundling is disabled when scatter is unavailable or the
 * credit size is not block aligned.
 */
static void htc_setup_msg_bndl(struct htc_target *target)
{
        /* limit what HTC can handle */
        target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
                                       target->msg_per_bndl_max);

        /* no scatter support: disable bundling entirely */
        if (ath6kl_hif_enable_scatter(target->dev->ar)) {
                target->msg_per_bndl_max = 0;
                return;
        }

        /* limit bundle what the device layer can handle */
        target->msg_per_bndl_max = min(target->max_scat_entries,
                                       target->msg_per_bndl_max);

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "htc bundling allowed msg_per_bndl_max %d\n",
                   target->msg_per_bndl_max);

        /* Max rx bundle size is limited by the max tx bundle size */
        target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
        /* Max tx bundle size is limited by the extended mbox address range */
        target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
                                     target->max_xfer_szper_scatreq);

        ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
                   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

        if (target->max_tx_bndl_sz)
                /* tx_bndl_mask is enabled per AC, each has 1 bit */
                target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;

        if (target->max_rx_bndl_sz)
                target->rx_bndl_enable = true;

        if ((target->tgt_cred_sz % target->block_sz) != 0) {
                ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
                            target->tgt_cred_sz);

                /*
                 * Disallow send bundling since the credit size is
                 * not aligned to a block size the I/O block
                 * padding will spill into the next credit buffer
                 * which is fatal.
                 */
                target->tx_bndl_mask = 0;
        }
}
2669
/*
 * Wait for the initial "target ready" control message, record the
 * advertised credit count/size and HTC protocol version, then connect
 * the pseudo control service on endpoint 0.
 */
static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_ready_ext_msg *rdy_msg;
        struct htc_service_connect_req connect;
        struct htc_service_connect_resp resp;
        int status;

        /* we should be getting 1 control message that the target is ready */
        packet = htc_wait_for_ctrl_msg(target);

        if (!packet)
                return -ENOMEM;

        /* we controlled the buffer creation so it's properly aligned */
        rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

        /* validate message id and minimum (legacy) message length */
        if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
            (packet->act_len < sizeof(struct htc_ready_msg))) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        /* zero credits or zero credit size would make HTC unusable */
        if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
        target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "htc target ready credits %d size %d\n",
                   target->tgt_creds, target->tgt_cred_sz);

        /* check if this is an extended ready message */
        if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
                /* this is an extended message */
                target->htc_tgt_ver = rdy_msg->htc_ver;
                target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
        } else {
                /* legacy */
                target->htc_tgt_ver = HTC_VERSION_2P0;
                target->msg_per_bndl_max = 0;
        }

        ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
                   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
                   target->htc_tgt_ver);

        if (target->msg_per_bndl_max > 0)
                htc_setup_msg_bndl(target);

        /* setup our pseudo HTC control endpoint connection */
        memset(&connect, 0, sizeof(connect));
        memset(&resp, 0, sizeof(resp));
        connect.ep_cb.rx = htc_ctrl_rx;
        connect.ep_cb.rx_refill = NULL;
        connect.ep_cb.tx_full = NULL;
        connect.max_txq_depth = NUM_CONTROL_BUFFERS;
        connect.svc_id = HTC_CTRL_RSVD_SVC;

        /* connect fake service */
        status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);

        if (status)
                /*
                 * FIXME: this call doesn't make sense, the caller should
                 * call ath6kl_htc_mbox_cleanup() when it wants remove htc
                 */
                ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
        if (packet) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return status;
}
2750
/*
 * Start HTC, enable interrupts and let the target know
 * host has finished setup.
 *
 * Returns 0 on success; on interrupt-unmask failure HTC is rolled
 * back to the stopped state before the error is returned.
 */
static int ath6kl_htc_mbox_start(struct htc_target *target)
{
        struct htc_packet *packet;
        int status;

        memset(&target->dev->irq_proc_reg, 0,
               sizeof(target->dev->irq_proc_reg));

        /* Disable interrupts at the chip level */
        ath6kl_hif_disable_intrs(target->dev);

        target->htc_flags = 0;
        target->rx_st_flags = 0;

        /* Push control receive buffers into htc control endpoint */
        while ((packet = htc_get_control_buf(target, false)) != NULL) {
                status = htc_add_rxbuf(target, packet);
                if (status)
                        return status;
        }

        /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
        ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
                           target->tgt_creds);

        dump_cred_dist_stats(target);

        /* Indicate to the target of the setup completion */
        status = htc_setup_tx_complete(target);

        if (status)
                return status;

        /* unmask interrupts */
        status = ath6kl_hif_unmask_intrs(target->dev);

        /* roll back if interrupts could not be enabled */
        if (status)
                ath6kl_htc_mbox_stop(target);

        return status;
}
2796
/*
 * Reset all endpoint state and (re)populate the pools of control
 * tx/rx buffers.  Each control packet and its data buffer are
 * allocated separately with kzalloc().
 *
 * NOTE(review): on a mid-loop allocation failure the packets already
 * placed on the free lists are not unwound here; they appear to be
 * freed later by ath6kl_htc_mbox_cleanup() - confirm that all callers
 * reach it on the error path.
 */
static int ath6kl_htc_reset(struct htc_target *target)
{
        u32 block_size, ctrl_bufsz;
        struct htc_packet *packet;
        int i;

        reset_ep_state(target);

        block_size = target->dev->ar->mbox_info.block_size;

        /* buffer must fit a max-size control message plus the HTC
         * header, and be at least one I/O block plus the header */
        ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
                      (block_size + HTC_HDR_LENGTH) :
                      (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

        for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
                packet = kzalloc(sizeof(*packet), GFP_KERNEL);
                if (!packet)
                        return -ENOMEM;

                packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
                if (!packet->buf_start) {
                        kfree(packet);
                        return -ENOMEM;
                }

                packet->buf_len = ctrl_bufsz;
                /* first NUM_CONTROL_RX_BUFFERS go to the rx pool,
                 * the remainder to the tx pool */
                if (i < NUM_CONTROL_RX_BUFFERS) {
                        packet->act_len = 0;
                        packet->buf = packet->buf_start;
                        packet->endpoint = ENDPOINT_0;
                        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
                } else
                        list_add_tail(&packet->list, &target->free_ctrl_txbuf);
        }

        return 0;
}
2834
/* htc_stop: stop interrupt reception, and flush all queued buffers */
static void ath6kl_htc_mbox_stop(struct htc_target *target)
{
        /* set the stopping flag under the lock so in-flight tx/rx
         * paths observe it consistently */
        spin_lock_bh(&target->htc_lock);
        target->htc_flags |= HTC_OP_STATE_STOPPING;
        spin_unlock_bh(&target->htc_lock);

        /*
         * Masking interrupts is a synchronous operation, when this
         * function returns all pending HIF I/O has completed, we can
         * safely flush the queues.
         */
        ath6kl_hif_mask_intrs(target->dev);

        ath6kl_htc_flush_txep_all(target);

        ath6kl_htc_mbox_flush_rx_buf(target);

        /* re-create the control buffer pools for a possible restart */
        ath6kl_htc_reset(target);
}
2855
2856 static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
2857 {
2858         struct htc_target *target = NULL;
2859         int status = 0;
2860
2861         target = kzalloc(sizeof(*target), GFP_KERNEL);
2862         if (!target) {
2863                 ath6kl_err("unable to allocate memory\n");
2864                 return NULL;
2865         }
2866
2867         target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2868         if (!target->dev) {
2869                 ath6kl_err("unable to allocate memory\n");
2870                 status = -ENOMEM;
2871                 goto err_htc_cleanup;
2872         }
2873
2874         spin_lock_init(&target->htc_lock);
2875         spin_lock_init(&target->rx_lock);
2876         spin_lock_init(&target->tx_lock);
2877
2878         INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2879         INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2880         INIT_LIST_HEAD(&target->cred_dist_list);
2881
2882         target->dev->ar = ar;
2883         target->dev->htc_cnxt = target;
2884         target->ep_waiting = ENDPOINT_MAX;
2885
2886         status = ath6kl_hif_setup(target->dev);
2887         if (status)
2888                 goto err_htc_cleanup;
2889
2890         status = ath6kl_htc_reset(target);
2891         if (status)
2892                 goto err_htc_cleanup;
2893
2894         return target;
2895
2896 err_htc_cleanup:
2897         ath6kl_htc_mbox_cleanup(target);
2898
2899         return NULL;
2900 }
2901
2902 /* cleanup the HTC instance */
2903 static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
2904 {
2905         struct htc_packet *packet, *tmp_packet;
2906
2907         ath6kl_hif_cleanup_scatter(target->dev->ar);
2908
2909         list_for_each_entry_safe(packet, tmp_packet,
2910                                  &target->free_ctrl_txbuf, list) {
2911                 list_del(&packet->list);
2912                 kfree(packet->buf_start);
2913                 kfree(packet);
2914         }
2915
2916         list_for_each_entry_safe(packet, tmp_packet,
2917                                  &target->free_ctrl_rxbuf, list) {
2918                 list_del(&packet->list);
2919                 kfree(packet->buf_start);
2920                 kfree(packet);
2921         }
2922
2923         kfree(target->dev);
2924         kfree(target);
2925 }
2926
/* HTC transport ops for the mailbox (SDIO) based implementation */
static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
        .create = ath6kl_htc_mbox_create,
        .wait_target = ath6kl_htc_mbox_wait_target,
        .start = ath6kl_htc_mbox_start,
        .conn_service = ath6kl_htc_mbox_conn_service,
        .tx = ath6kl_htc_mbox_tx,
        .stop = ath6kl_htc_mbox_stop,
        .cleanup = ath6kl_htc_mbox_cleanup,
        .flush_txep = ath6kl_htc_mbox_flush_txep,
        .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
        .activity_changed = ath6kl_htc_mbox_activity_changed,
        .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
        .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
        .credit_setup = ath6kl_htc_mbox_credit_setup,
};
2942
/* Install the mailbox HTC implementation's ops into the core instance */
void ath6kl_htc_mbox_attach(struct ath6kl *ar)
{
        ar->htc_ops = &ath6kl_htc_mbox_ops;
}