[karo-tx-linux.git] / drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12   */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include "bna.h"
20 #include "bfi.h"
21
22 /* IB */
23 static void
24 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25 {
26         ib->coalescing_timeo = coalescing_timeo;
27         ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
28                                 (u32)ib->coalescing_timeo, 0);
29 }
30
31 /* RXF */
32
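/* Re-mark every VLAN filter block and the VLAN strip setting as pending so
 * that they are re-applied to the firmware on the next configuration pass.
 */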
33 #define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
34 do {                                                                    \
35         (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;           \
36         (rxf)->vlan_strip_pending = true;                               \
37 } while (0)
38
39 #define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
40 do {                                                                    \
41         if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                  \
42                 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
43                                 BNA_RSS_F_CFG_PENDING |                 \
44                                 BNA_RSS_F_STATUS_PENDING);              \
45 } while (0)
46
47 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
48 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54                                         enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56                                         enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58                                         enum bna_cleanup_type cleanup);
59
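/* RXF state machine: configuration is pushed to the firmware one request at
 * a time from cfg_wait; last_resp_wait absorbs the final firmware response
 * when a stop request arrives while a command is still outstanding.
 */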
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61                         enum bna_rxf_event);
62 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
63                         enum bna_rxf_event);
64 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
65                         enum bna_rxf_event);
66 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
67                         enum bna_rxf_event);
68
69 static void
70 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
71 {
72         call_rxf_stop_cbfn(rxf);
73 }
74
75 static void
76 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
77 {
78         switch (event) {
79         case RXF_E_START:
80                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
81                 break;
82
83         case RXF_E_STOP:
84                 call_rxf_stop_cbfn(rxf);
85                 break;
86
87         case RXF_E_FAIL:
88                 /* No-op */
89                 break;
90
91         case RXF_E_CONFIG:
92                 call_rxf_cam_fltr_cbfn(rxf);
93                 break;
94
95         default:
96                 bfa_sm_fault(event);
97         }
98 }
99
100 static void
101 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
102 {
103         if (!bna_rxf_cfg_apply(rxf)) {
104                 /* No more pending config updates */
105                 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
106         }
107 }
108
109 static void
110 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
111 {
112         switch (event) {
113         case RXF_E_STOP:
114                 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
115                 break;
116
117         case RXF_E_FAIL:
118                 bna_rxf_cfg_reset(rxf);
119                 call_rxf_start_cbfn(rxf);
120                 call_rxf_cam_fltr_cbfn(rxf);
121                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
122                 break;
123
124         case RXF_E_CONFIG:
125                 /* No-op */
126                 break;
127
128         case RXF_E_FW_RESP:
129                 if (!bna_rxf_cfg_apply(rxf)) {
130                         /* No more pending config updates */
131                         bfa_fsm_set_state(rxf, bna_rxf_sm_started);
132                 }
133                 break;
134
135         default:
136                 bfa_sm_fault(event);
137         }
138 }
139
140 static void
141 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
142 {
143         call_rxf_start_cbfn(rxf);
144         call_rxf_cam_fltr_cbfn(rxf);
145 }
146
147 static void
148 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
149 {
150         switch (event) {
151         case RXF_E_STOP:
152         case RXF_E_FAIL:
153                 bna_rxf_cfg_reset(rxf);
154                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
155                 break;
156
157         case RXF_E_CONFIG:
158                 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
159                 break;
160
161         default:
162                 bfa_sm_fault(event);
163         }
164 }
165
166 static void
167 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
168 {
169 }
170
171 static void
172 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
173 {
174         switch (event) {
175         case RXF_E_FAIL:
176         case RXF_E_FW_RESP:
177                 bna_rxf_cfg_reset(rxf);
178                 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
179                 break;
180
181         default:
182                 bfa_sm_fault(event);
183         }
184 }
185
186 static void
187 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
188                 enum bfi_enet_h2i_msgs req_type)
189 {
190         struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
191
192         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
193         req->mh.num_entries = htons(
194                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
195         ether_addr_copy(req->mac_addr, mac->addr);
196         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
197                 sizeof(struct bfi_enet_ucast_req), &req->mh);
198         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
199 }
200
201 static void
202 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
203 {
204         struct bfi_enet_mcast_add_req *req =
205                 &rxf->bfi_enet_cmd.mcast_add_req;
206
207         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
208                 0, rxf->rx->rid);
209         req->mh.num_entries = htons(
210                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
211         ether_addr_copy(req->mac_addr, mac->addr);
212         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
213                 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
214         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
215 }
216
217 static void
218 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
219 {
220         struct bfi_enet_mcast_del_req *req =
221                 &rxf->bfi_enet_cmd.mcast_del_req;
222
223         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
224                 0, rxf->rx->rid);
225         req->mh.num_entries = htons(
226                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
227         req->handle = htons(handle);
228         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
229                 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
230         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
231 }
232
233 static void
234 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
235 {
236         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
237
238         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
239                 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
240         req->mh.num_entries = htons(
241                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
242         req->enable = status;
243         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
244                 sizeof(struct bfi_enet_enable_req), &req->mh);
245         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
246 }
247
248 static void
249 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
250 {
251         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
252
253         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
254                 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
255         req->mh.num_entries = htons(
256                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
257         req->enable = status;
258         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
259                 sizeof(struct bfi_enet_enable_req), &req->mh);
260         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
261 }
262
263 static void
264 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
265 {
266         struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
267         int i;
268         int j;
269
270         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
271                 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
272         req->mh.num_entries = htons(
273                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
274         req->block_idx = block_idx;
275         for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
276                 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
277                 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
278                         req->bit_mask[i] =
279                                 htonl(rxf->vlan_filter_table[j]);
280                 else
281                         req->bit_mask[i] = 0xFFFFFFFF;
282         }
283         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
284                 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
285         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
286 }
287
288 static void
289 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
290 {
291         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
292
293         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
294                 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
295         req->mh.num_entries = htons(
296                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
297         req->enable = rxf->vlan_strip_status;
298         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
299                 sizeof(struct bfi_enet_enable_req), &req->mh);
300         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
301 }
302
303 static void
304 bna_bfi_rit_cfg(struct bna_rxf *rxf)
305 {
306         struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
307
308         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
309                 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
310         req->mh.num_entries = htons(
311                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
312         req->size = htons(rxf->rit_size);
313         memcpy(&req->table[0], rxf->rit, rxf->rit_size);
314         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
315                 sizeof(struct bfi_enet_rit_req), &req->mh);
316         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
317 }
318
319 static void
320 bna_bfi_rss_cfg(struct bna_rxf *rxf)
321 {
322         struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
323         int i;
324
325         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
326                 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
327         req->mh.num_entries = htons(
328                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
329         req->cfg.type = rxf->rss_cfg.hash_type;
330         req->cfg.mask = rxf->rss_cfg.hash_mask;
331         for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
332                 req->cfg.key[i] =
333                         htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
334         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
335                 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
336         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
337 }
338
339 static void
340 bna_bfi_rss_enable(struct bna_rxf *rxf)
341 {
342         struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
343
344         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
345                 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
346         req->mh.num_entries = htons(
347                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
348         req->enable = rxf->rss_status;
349         bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
350                 sizeof(struct bfi_enet_enable_req), &req->mh);
351         bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
352 }
353
354 /* This function gets the multicast MAC that has already been added to CAM */
355 static struct bna_mac *
356 bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr)
357 {
358         struct bna_mac *mac;
359
360         list_for_each_entry(mac, &rxf->mcast_active_q, qe)
361                 if (ether_addr_equal(mac->addr, mac_addr))
362                         return mac;
363
364         list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
365                 if (ether_addr_equal(mac->addr, mac_addr))
366                         return mac;
367
368         return NULL;
369 }
370
371 static struct bna_mcam_handle *
372 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
373 {
374         struct bna_mcam_handle *mchandle;
375
376         list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
377                 if (mchandle->handle == handle)
378                         return mchandle;
379
380         return NULL;
381 }
382
383 static void
384 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
385 {
386         struct bna_mac *mcmac;
387         struct bna_mcam_handle *mchandle;
388
389         mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
390         mchandle = bna_rxf_mchandle_get(rxf, handle);
391         if (mchandle == NULL) {
392                 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
393                 mchandle->handle = handle;
394                 mchandle->refcnt = 0;
395                 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
396         }
397         mchandle->refcnt++;
398         mcmac->handle = mchandle;
399 }
400
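/* Drop the multicast MAC's reference on its CAM handle. When the last
 * reference goes away, issue a delete request to the firmware for a hard
 * cleanup and free the handle. Returns 1 if a firmware command was posted.
 */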
401 static int
402 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
403                 enum bna_cleanup_type cleanup)
404 {
405         struct bna_mcam_handle *mchandle;
406         int ret = 0;
407
408         mchandle = mac->handle;
409         if (mchandle == NULL)
410                 return ret;
411
412         mchandle->refcnt--;
413         if (mchandle->refcnt == 0) {
414                 if (cleanup == BNA_HARD_CLEANUP) {
415                         bna_bfi_mcast_del_req(rxf, mchandle->handle);
416                         ret = 1;
417                 }
418                 list_del(&mchandle->qe);
419                 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
420         }
421         mac->handle = NULL;
422
423         return ret;
424 }
425
426 static int
427 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
428 {
429         struct bna_mac *mac = NULL;
430         int ret;
431
432         /* First delete multicast entries to maintain the count */
433         while (!list_empty(&rxf->mcast_pending_del_q)) {
434                 mac = list_first_entry(&rxf->mcast_pending_del_q,
435                                        struct bna_mac, qe);
436                 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
437                 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
438                 if (ret)
439                         return ret;
440         }
441
442         /* Add multicast entries */
443         if (!list_empty(&rxf->mcast_pending_add_q)) {
444                 mac = list_first_entry(&rxf->mcast_pending_add_q,
445                                        struct bna_mac, qe);
446                 list_move_tail(&mac->qe, &rxf->mcast_active_q);
447                 bna_bfi_mcast_add_req(rxf, mac);
448                 return 1;
449         }
450
451         return 0;
452 }
453
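/* Program the lowest-numbered VLAN filter block that is still pending;
 * returns 1 if a request was posted to the firmware, 0 otherwise.
 */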
454 static int
455 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
456 {
457         u8 vlan_pending_bitmask;
458         int block_idx = 0;
459
460         if (rxf->vlan_pending_bitmask) {
461                 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
462                 while (!(vlan_pending_bitmask & 0x1)) {
463                         block_idx++;
464                         vlan_pending_bitmask >>= 1;
465                 }
466                 rxf->vlan_pending_bitmask &= ~BIT(block_idx);
467                 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
468                 return 1;
469         }
470
471         return 0;
472 }
473
474 static int
475 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
476 {
477         struct bna_mac *mac;
478         int ret;
479
480         /* Throw away delete pending mcast entries */
481         while (!list_empty(&rxf->mcast_pending_del_q)) {
482                 mac = list_first_entry(&rxf->mcast_pending_del_q,
483                                        struct bna_mac, qe);
484                 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
485                 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
486                 if (ret)
487                         return ret;
488         }
489
490         /* Move active mcast entries to pending_add_q */
491         while (!list_empty(&rxf->mcast_active_q)) {
492                 mac = list_first_entry(&rxf->mcast_active_q,
493                                        struct bna_mac, qe);
494                 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
495                 if (bna_rxf_mcast_del(rxf, mac, cleanup))
496                         return 1;
497         }
498
499         return 0;
500 }
501
502 static int
503 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
504 {
505         if (rxf->rss_pending) {
506                 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
507                         rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
508                         bna_bfi_rit_cfg(rxf);
509                         return 1;
510                 }
511
512                 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
513                         rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
514                         bna_bfi_rss_cfg(rxf);
515                         return 1;
516                 }
517
518                 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
519                         rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
520                         bna_bfi_rss_enable(rxf);
521                         return 1;
522                 }
523         }
524
525         return 0;
526 }
527
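/* Apply pending RXF configuration one firmware request at a time, in a
 * fixed order: ucast, mcast, promisc, allmulti, VLAN filter, VLAN strip,
 * RSS. Returns 1 while a request is outstanding, 0 once nothing is pending.
 */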
528 static int
529 bna_rxf_cfg_apply(struct bna_rxf *rxf)
530 {
531         if (bna_rxf_ucast_cfg_apply(rxf))
532                 return 1;
533
534         if (bna_rxf_mcast_cfg_apply(rxf))
535                 return 1;
536
537         if (bna_rxf_promisc_cfg_apply(rxf))
538                 return 1;
539
540         if (bna_rxf_allmulti_cfg_apply(rxf))
541                 return 1;
542
543         if (bna_rxf_vlan_cfg_apply(rxf))
544                 return 1;
545
546         if (bna_rxf_vlan_strip_cfg_apply(rxf))
547                 return 1;
548
549         if (bna_rxf_rss_cfg_apply(rxf))
550                 return 1;
551
552         return 0;
553 }
554
555 static void
556 bna_rxf_cfg_reset(struct bna_rxf *rxf)
557 {
558         bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
559         bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
560         bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
561         bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
562         bna_rxf_vlan_cfg_soft_reset(rxf);
563         bna_rxf_rss_cfg_soft_reset(rxf);
564 }
565
566 static void
567 bna_rit_init(struct bna_rxf *rxf, int rit_size)
568 {
569         struct bna_rx *rx = rxf->rx;
570         struct bna_rxp *rxp;
571         int offset = 0;
572
573         rxf->rit_size = rit_size;
574         list_for_each_entry(rxp, &rx->rxp_q, qe) {
575                 rxf->rit[offset] = rxp->cq.ccb->id;
576                 offset++;
577         }
578 }
579
580 void
581 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
582 {
583         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
584 }
585
586 void
587 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
588                         struct bfi_msgq_mhdr *msghdr)
589 {
590         struct bfi_enet_rsp *rsp =
591                 container_of(msghdr, struct bfi_enet_rsp, mh);
592
593         if (rsp->error) {
594                 /* Clear ucast from cache */
595                 rxf->ucast_active_set = 0;
596         }
597
598         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
599 }
600
601 void
602 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
603                         struct bfi_msgq_mhdr *msghdr)
604 {
605         struct bfi_enet_mcast_add_req *req =
606                 &rxf->bfi_enet_cmd.mcast_add_req;
607         struct bfi_enet_mcast_add_rsp *rsp =
608                 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
609
610         bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
611                 ntohs(rsp->handle));
612         bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
613 }
614
615 static void
616 bna_rxf_init(struct bna_rxf *rxf,
617                 struct bna_rx *rx,
618                 struct bna_rx_config *q_config,
619                 struct bna_res_info *res_info)
620 {
621         rxf->rx = rx;
622
623         INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
624         INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
625         rxf->ucast_pending_set = 0;
626         rxf->ucast_active_set = 0;
627         INIT_LIST_HEAD(&rxf->ucast_active_q);
628         rxf->ucast_pending_mac = NULL;
629
630         INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
631         INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
632         INIT_LIST_HEAD(&rxf->mcast_active_q);
633         INIT_LIST_HEAD(&rxf->mcast_handle_q);
634
635         rxf->rit = (u8 *)
636                 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
637         bna_rit_init(rxf, q_config->num_paths);
638
639         rxf->rss_status = q_config->rss_status;
640         if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
641                 rxf->rss_cfg = q_config->rss_config;
642                 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
643                 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
644                 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
645         }
646
647         rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
648         memset(rxf->vlan_filter_table, 0,
649                         (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
650         rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
651         rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
652
653         rxf->vlan_strip_status = q_config->vlan_strip_status;
654
655         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
656 }
657
658 static void
659 bna_rxf_uninit(struct bna_rxf *rxf)
660 {
661         struct bna_mac *mac;
662
663         rxf->ucast_pending_set = 0;
664         rxf->ucast_active_set = 0;
665
666         while (!list_empty(&rxf->ucast_pending_add_q)) {
667                 mac = list_first_entry(&rxf->ucast_pending_add_q,
668                                        struct bna_mac, qe);
669                 list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
670         }
671
672         if (rxf->ucast_pending_mac) {
673                 list_add_tail(&rxf->ucast_pending_mac->qe,
674                               bna_ucam_mod_free_q(rxf->rx->bna));
675                 rxf->ucast_pending_mac = NULL;
676         }
677
678         while (!list_empty(&rxf->mcast_pending_add_q)) {
679                 mac = list_first_entry(&rxf->mcast_pending_add_q,
680                                        struct bna_mac, qe);
681                 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
682         }
683
684         rxf->rxmode_pending = 0;
685         rxf->rxmode_pending_bitmask = 0;
686         if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
687                 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
688         if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
689                 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
690
691         rxf->rss_pending = 0;
692         rxf->vlan_strip_pending = false;
693
694         rxf->rx = NULL;
695 }
696
697 static void
698 bna_rx_cb_rxf_started(struct bna_rx *rx)
699 {
700         bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
701 }
702
703 static void
704 bna_rxf_start(struct bna_rxf *rxf)
705 {
706         rxf->start_cbfn = bna_rx_cb_rxf_started;
707         rxf->start_cbarg = rxf->rx;
708         bfa_fsm_send_event(rxf, RXF_E_START);
709 }
710
711 static void
712 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
713 {
714         bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
715 }
716
717 static void
718 bna_rxf_stop(struct bna_rxf *rxf)
719 {
720         rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
721         rxf->stop_cbarg = rxf->rx;
722         bfa_fsm_send_event(rxf, RXF_E_STOP);
723 }
724
725 static void
726 bna_rxf_fail(struct bna_rxf *rxf)
727 {
728         bfa_fsm_send_event(rxf, RXF_E_FAIL);
729 }
730
731 enum bna_cb_status
732 bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac)
733 {
734         struct bna_rxf *rxf = &rx->rxf;
735
736         if (rxf->ucast_pending_mac == NULL) {
737                 rxf->ucast_pending_mac =
738                         bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
739                 if (rxf->ucast_pending_mac == NULL)
740                         return BNA_CB_UCAST_CAM_FULL;
741         }
742
743         ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
744         rxf->ucast_pending_set = 1;
745         rxf->cam_fltr_cbfn = NULL;
746         rxf->cam_fltr_cbarg = rx->bna->bnad;
747
748         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
749
750         return BNA_CB_SUCCESS;
751 }
752
753 enum bna_cb_status
754 bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr,
755                  void (*cbfn)(struct bnad *, struct bna_rx *))
756 {
757         struct bna_rxf *rxf = &rx->rxf;
758         struct bna_mac *mac;
759
760         /* Check if already added or pending addition */
761         if (bna_mac_find(&rxf->mcast_active_q, addr) ||
762                 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
763                 if (cbfn)
764                         cbfn(rx->bna->bnad, rx);
765                 return BNA_CB_SUCCESS;
766         }
767
768         mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
769         if (mac == NULL)
770                 return BNA_CB_MCAST_LIST_FULL;
771         ether_addr_copy(mac->addr, addr);
772         list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
773
774         rxf->cam_fltr_cbfn = cbfn;
775         rxf->cam_fltr_cbarg = rx->bna->bnad;
776
777         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
778
779         return BNA_CB_SUCCESS;
780 }
781
782 enum bna_cb_status
783 bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist)
784 {
785         struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
786         struct bna_rxf *rxf = &rx->rxf;
787         struct list_head list_head;
788         const u8 *mcaddr;
789         struct bna_mac *mac, *del_mac;
790         int i;
791
792         /* Purge the pending_add_q */
793         while (!list_empty(&rxf->ucast_pending_add_q)) {
794                 mac = list_first_entry(&rxf->ucast_pending_add_q,
795                                        struct bna_mac, qe);
796                 list_move_tail(&mac->qe, &ucam_mod->free_q);
797         }
798
799         /* Schedule active_q entries for deletion */
800         while (!list_empty(&rxf->ucast_active_q)) {
801                 mac = list_first_entry(&rxf->ucast_active_q,
802                                        struct bna_mac, qe);
803                 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
804                 ether_addr_copy(del_mac->addr, mac->addr);
805                 del_mac->handle = mac->handle;
806                 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
807                 list_move_tail(&mac->qe, &ucam_mod->free_q);
808         }
809
810         /* Allocate nodes */
811         INIT_LIST_HEAD(&list_head);
812         for (i = 0, mcaddr = uclist; i < count; i++) {
813                 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
814                 if (mac == NULL)
815                         goto err_return;
816                 ether_addr_copy(mac->addr, mcaddr);
817                 list_add_tail(&mac->qe, &list_head);
818                 mcaddr += ETH_ALEN;
819         }
820
821         /* Add the new entries */
822         while (!list_empty(&list_head)) {
823                 mac = list_first_entry(&list_head, struct bna_mac, qe);
824                 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
825         }
826
827         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
828
829         return BNA_CB_SUCCESS;
830
831 err_return:
832         while (!list_empty(&list_head)) {
833                 mac = list_first_entry(&list_head, struct bna_mac, qe);
834                 list_move_tail(&mac->qe, &ucam_mod->free_q);
835         }
836
837         return BNA_CB_UCAST_CAM_FULL;
838 }
839
840 enum bna_cb_status
841 bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist)
842 {
843         struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
844         struct bna_rxf *rxf = &rx->rxf;
845         struct list_head list_head;
846         const u8 *mcaddr;
847         struct bna_mac *mac, *del_mac;
848         int i;
849
850         /* Purge the pending_add_q */
851         while (!list_empty(&rxf->mcast_pending_add_q)) {
852                 mac = list_first_entry(&rxf->mcast_pending_add_q,
853                                        struct bna_mac, qe);
854                 list_move_tail(&mac->qe, &mcam_mod->free_q);
855         }
856
857         /* Schedule active_q entries for deletion */
858         while (!list_empty(&rxf->mcast_active_q)) {
859                 mac = list_first_entry(&rxf->mcast_active_q,
860                                        struct bna_mac, qe);
861                 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
862                 ether_addr_copy(del_mac->addr, mac->addr);
863                 del_mac->handle = mac->handle;
864                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
865                 mac->handle = NULL;
866                 list_move_tail(&mac->qe, &mcam_mod->free_q);
867         }
868
869         /* Allocate nodes */
870         INIT_LIST_HEAD(&list_head);
871         for (i = 0, mcaddr = mclist; i < count; i++) {
872                 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
873                 if (mac == NULL)
874                         goto err_return;
875                 ether_addr_copy(mac->addr, mcaddr);
876                 list_add_tail(&mac->qe, &list_head);
877
878                 mcaddr += ETH_ALEN;
879         }
880
881         /* Add the new entries */
882         while (!list_empty(&list_head)) {
883                 mac = list_first_entry(&list_head, struct bna_mac, qe);
884                 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
885         }
886
887         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
888
889         return BNA_CB_SUCCESS;
890
891 err_return:
892         while (!list_empty(&list_head)) {
893                 mac = list_first_entry(&list_head, struct bna_mac, qe);
894                 list_move_tail(&mac->qe, &mcam_mod->free_q);
895         }
896
897         return BNA_CB_MCAST_LIST_FULL;
898 }
899
900 void
901 bna_rx_mcast_delall(struct bna_rx *rx)
902 {
903         struct bna_rxf *rxf = &rx->rxf;
904         struct bna_mac *mac, *del_mac;
905         int need_hw_config = 0;
906
907         /* Purge all entries from pending_add_q */
908         while (!list_empty(&rxf->mcast_pending_add_q)) {
909                 mac = list_first_entry(&rxf->mcast_pending_add_q,
910                                        struct bna_mac, qe);
911                 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
912         }
913
914         /* Schedule all entries in active_q for deletion */
915         while (!list_empty(&rxf->mcast_active_q)) {
916                 mac = list_first_entry(&rxf->mcast_active_q,
917                                        struct bna_mac, qe);
918                 list_del(&mac->qe);
919                 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
920                 memcpy(del_mac, mac, sizeof(*del_mac));
921                 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
922                 mac->handle = NULL;
923                 list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
924                 need_hw_config = 1;
925         }
926
927         if (need_hw_config)
928                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
929 }
930
931 void
932 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
933 {
934         struct bna_rxf *rxf = &rx->rxf;
935         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
936         int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
937         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
938
939         rxf->vlan_filter_table[index] |= bit;
940         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
941                 rxf->vlan_pending_bitmask |= BIT(group_id);
942                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
943         }
944 }
945
946 void
947 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
948 {
949         struct bna_rxf *rxf = &rx->rxf;
950         int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
951         int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
952         int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
953
954         rxf->vlan_filter_table[index] &= ~bit;
955         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
956                 rxf->vlan_pending_bitmask |= BIT(group_id);
957                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
958         }
959 }
960
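/* Process at most one pending unicast change per call: deletions first,
 * then the default MAC, then additions. Returns 1 if a request was posted.
 */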
961 static int
962 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
963 {
964         struct bna_mac *mac = NULL;
965
966         /* Delete MAC addresses previously added */
967         if (!list_empty(&rxf->ucast_pending_del_q)) {
968                 mac = list_first_entry(&rxf->ucast_pending_del_q,
969                                        struct bna_mac, qe);
970                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
971                 list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
972                 return 1;
973         }
974
975         /* Set default unicast MAC */
976         if (rxf->ucast_pending_set) {
977                 rxf->ucast_pending_set = 0;
978                 ether_addr_copy(rxf->ucast_active_mac.addr,
979                                 rxf->ucast_pending_mac->addr);
980                 rxf->ucast_active_set = 1;
981                 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
982                         BFI_ENET_H2I_MAC_UCAST_SET_REQ);
983                 return 1;
984         }
985
986         /* Add additional MAC entries */
987         if (!list_empty(&rxf->ucast_pending_add_q)) {
988                 mac = list_first_entry(&rxf->ucast_pending_add_q,
989                                        struct bna_mac, qe);
990                 list_add_tail(&mac->qe, &rxf->ucast_active_q);
991                 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
992                 return 1;
993         }
994
995         return 0;
996 }
997
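/* Tear down unicast configuration. A hard cleanup posts delete/clear
 * requests to the firmware one at a time (returning 1 while busy); a soft
 * cleanup only rewinds driver state so the addresses are re-added later.
 */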
998 static int
999 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1000 {
1001         struct bna_mac *mac;
1002
1003         /* Throw away delete pending ucast entries */
1004         while (!list_empty(&rxf->ucast_pending_del_q)) {
1005                 mac = list_first_entry(&rxf->ucast_pending_del_q,
1006                                        struct bna_mac, qe);
1007                 if (cleanup == BNA_SOFT_CLEANUP)
1008                         list_move_tail(&mac->qe,
1009                                        bna_ucam_mod_del_q(rxf->rx->bna));
1010                 else {
1011                         bna_bfi_ucast_req(rxf, mac,
1012                                           BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1013                         list_move_tail(&mac->qe,
1014                                        bna_ucam_mod_del_q(rxf->rx->bna));
1015                         return 1;
1016                 }
1017         }
1018
1019         /* Move active ucast entries to pending_add_q */
1020         while (!list_empty(&rxf->ucast_active_q)) {
1021                 mac = list_first_entry(&rxf->ucast_active_q,
1022                                        struct bna_mac, qe);
1023                 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
1024                 if (cleanup == BNA_HARD_CLEANUP) {
1025                         bna_bfi_ucast_req(rxf, mac,
1026                                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1027                         return 1;
1028                 }
1029         }
1030
1031         if (rxf->ucast_active_set) {
1032                 rxf->ucast_pending_set = 1;
1033                 rxf->ucast_active_set = 0;
1034                 if (cleanup == BNA_HARD_CLEANUP) {
1035                         bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1036                                 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1037                         return 1;
1038                 }
1039         }
1040
1041         return 0;
1042 }
1043
1044 static int
1045 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1046 {
1047         struct bna *bna = rxf->rx->bna;
1048
1049         /* Enable/disable promiscuous mode */
1050         if (is_promisc_enable(rxf->rxmode_pending,
1051                                 rxf->rxmode_pending_bitmask)) {
1052                 /* move promisc configuration from pending -> active */
1053                 promisc_inactive(rxf->rxmode_pending,
1054                                 rxf->rxmode_pending_bitmask);
1055                 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1056                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1057                 return 1;
1058         } else if (is_promisc_disable(rxf->rxmode_pending,
1059                                 rxf->rxmode_pending_bitmask)) {
1060                 /* move promisc configuration from pending -> active */
1061                 promisc_inactive(rxf->rxmode_pending,
1062                                 rxf->rxmode_pending_bitmask);
1063                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1064                 bna->promisc_rid = BFI_INVALID_RID;
1065                 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1066                 return 1;
1067         }
1068
1069         return 0;
1070 }
1071
1072 static int
1073 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1074 {
1075         struct bna *bna = rxf->rx->bna;
1076
1077         /* Clear pending promisc mode disable */
1078         if (is_promisc_disable(rxf->rxmode_pending,
1079                                 rxf->rxmode_pending_bitmask)) {
1080                 promisc_inactive(rxf->rxmode_pending,
1081                                 rxf->rxmode_pending_bitmask);
1082                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1083                 bna->promisc_rid = BFI_INVALID_RID;
1084                 if (cleanup == BNA_HARD_CLEANUP) {
1085                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1086                         return 1;
1087                 }
1088         }
1089
1090         /* Move promisc mode config from active -> pending */
1091         if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1092                 promisc_enable(rxf->rxmode_pending,
1093                                 rxf->rxmode_pending_bitmask);
1094                 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1095                 if (cleanup == BNA_HARD_CLEANUP) {
1096                         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1097                         return 1;
1098                 }
1099         }
1100
1101         return 0;
1102 }
1103
1104 static int
1105 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1106 {
1107         /* Enable/disable allmulti mode */
1108         if (is_allmulti_enable(rxf->rxmode_pending,
1109                                 rxf->rxmode_pending_bitmask)) {
1110                 /* move allmulti configuration from pending -> active */
1111                 allmulti_inactive(rxf->rxmode_pending,
1112                                 rxf->rxmode_pending_bitmask);
1113                 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1114                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1115                 return 1;
1116         } else if (is_allmulti_disable(rxf->rxmode_pending,
1117                                         rxf->rxmode_pending_bitmask)) {
1118                 /* move allmulti configuration from pending -> active */
1119                 allmulti_inactive(rxf->rxmode_pending,
1120                                 rxf->rxmode_pending_bitmask);
1121                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1122                 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1123                 return 1;
1124         }
1125
1126         return 0;
1127 }
1128
1129 static int
1130 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1131 {
1132         /* Clear pending allmulti mode disable */
1133         if (is_allmulti_disable(rxf->rxmode_pending,
1134                                 rxf->rxmode_pending_bitmask)) {
1135                 allmulti_inactive(rxf->rxmode_pending,
1136                                 rxf->rxmode_pending_bitmask);
1137                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1138                 if (cleanup == BNA_HARD_CLEANUP) {
1139                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1140                         return 1;
1141                 }
1142         }
1143
1144         /* Move allmulti mode config from active -> pending */
1145         if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1146                 allmulti_enable(rxf->rxmode_pending,
1147                                 rxf->rxmode_pending_bitmask);
1148                 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1149                 if (cleanup == BNA_HARD_CLEANUP) {
1150                         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1151                         return 1;
1152                 }
1153         }
1154
1155         return 0;
1156 }
1157
1158 static int
1159 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1160 {
1161         struct bna *bna = rxf->rx->bna;
1162         int ret = 0;
1163
1164         if (is_promisc_enable(rxf->rxmode_pending,
1165                                 rxf->rxmode_pending_bitmask) ||
1166                 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1167                 /* Do nothing if pending enable or already enabled */
1168         } else if (is_promisc_disable(rxf->rxmode_pending,
1169                                         rxf->rxmode_pending_bitmask)) {
1170                 /* Turn off pending disable command */
1171                 promisc_inactive(rxf->rxmode_pending,
1172                         rxf->rxmode_pending_bitmask);
1173         } else {
1174                 /* Schedule enable */
1175                 promisc_enable(rxf->rxmode_pending,
1176                                 rxf->rxmode_pending_bitmask);
1177                 bna->promisc_rid = rxf->rx->rid;
1178                 ret = 1;
1179         }
1180
1181         return ret;
1182 }
1183
1184 static int
1185 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1186 {
1187         struct bna *bna = rxf->rx->bna;
1188         int ret = 0;
1189
1190         if (is_promisc_disable(rxf->rxmode_pending,
1191                                 rxf->rxmode_pending_bitmask) ||
1192                 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1193                 /* Do nothing if pending disable or already disabled */
1194         } else if (is_promisc_enable(rxf->rxmode_pending,
1195                                         rxf->rxmode_pending_bitmask)) {
1196                 /* Turn off pending enable command */
1197                 promisc_inactive(rxf->rxmode_pending,
1198                                 rxf->rxmode_pending_bitmask);
1199                 bna->promisc_rid = BFI_INVALID_RID;
1200         } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1201                 /* Schedule disable */
1202                 promisc_disable(rxf->rxmode_pending,
1203                                 rxf->rxmode_pending_bitmask);
1204                 ret = 1;
1205         }
1206
1207         return ret;
1208 }
1209
1210 static int
1211 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1212 {
1213         int ret = 0;
1214
1215         if (is_allmulti_enable(rxf->rxmode_pending,
1216                         rxf->rxmode_pending_bitmask) ||
1217                         (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1218                 /* Do nothing if pending enable or already enabled */
1219         } else if (is_allmulti_disable(rxf->rxmode_pending,
1220                                         rxf->rxmode_pending_bitmask)) {
1221                 /* Turn off pending disable command */
1222                 allmulti_inactive(rxf->rxmode_pending,
1223                         rxf->rxmode_pending_bitmask);
1224         } else {
1225                 /* Schedule enable */
1226                 allmulti_enable(rxf->rxmode_pending,
1227                                 rxf->rxmode_pending_bitmask);
1228                 ret = 1;
1229         }
1230
1231         return ret;
1232 }
1233
1234 static int
1235 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1236 {
1237         int ret = 0;
1238
1239         if (is_allmulti_disable(rxf->rxmode_pending,
1240                                 rxf->rxmode_pending_bitmask) ||
1241                 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1242                 /* Do nothing if pending disable or already disabled */
1243         } else if (is_allmulti_enable(rxf->rxmode_pending,
1244                                         rxf->rxmode_pending_bitmask)) {
1245                 /* Turn off pending enable command */
1246                 allmulti_inactive(rxf->rxmode_pending,
1247                                 rxf->rxmode_pending_bitmask);
1248         } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1249                 /* Schedule disable */
1250                 allmulti_disable(rxf->rxmode_pending,
1251                                 rxf->rxmode_pending_bitmask);
1252                 ret = 1;
1253         }
1254
1255         return ret;
1256 }
1257
1258 static int
1259 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1260 {
1261         if (rxf->vlan_strip_pending) {
1262                 rxf->vlan_strip_pending = false;
1263                 bna_bfi_vlan_strip_enable(rxf);
1264                 return 1;
1265         }
1266
1267         return 0;
1268 }
1269
1270 /* RX */
1271
1272 #define BNA_GET_RXQS(qcfg)      (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1273         (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1274
1275 #define SIZE_TO_PAGES(size)     (((size) >> PAGE_SHIFT) + ((((size) &\
1276         (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1277
1278 #define call_rx_stop_cbfn(rx)                                           \
1279 do {                                                                \
1280         if ((rx)->stop_cbfn) {                                          \
1281                 void (*cbfn)(void *, struct bna_rx *);    \
1282                 void *cbarg;                                        \
1283                 cbfn = (rx)->stop_cbfn;                          \
1284                 cbarg = (rx)->stop_cbarg;                              \
1285                 (rx)->stop_cbfn = NULL;                                 \
1286                 (rx)->stop_cbarg = NULL;                                \
1287                 cbfn(cbarg, rx);                                        \
1288         }                                                              \
1289 } while (0)
1290
1291 #define call_rx_stall_cbfn(rx)                                          \
1292 do {                                                                    \
1293         if ((rx)->rx_stall_cbfn)                                        \
1294                 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));             \
1295 } while (0)
1296
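/* Fill a BFI datapath queue descriptor from a BNA queue page table:
 * page-table DMA address, address of the first page, and the page
 * count/size in big-endian byte order.
 */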
1297 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
1298 do {                                                                    \
1299         struct bna_dma_addr cur_q_addr =                                \
1300                 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));      \
1301         (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;        \
1302         (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;        \
1303         (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;              \
1304         (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;              \
1305         (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);     \
1306         (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1307 } while (0)
1308
1309 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1310 static void bna_rx_enet_stop(struct bna_rx *rx);
1311 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1312
1313 bfa_fsm_state_decl(bna_rx, stopped,
1314         struct bna_rx, enum bna_rx_event);
1315 bfa_fsm_state_decl(bna_rx, start_wait,
1316         struct bna_rx, enum bna_rx_event);
1317 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1318         struct bna_rx, enum bna_rx_event);
1319 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1320         struct bna_rx, enum bna_rx_event);
1321 bfa_fsm_state_decl(bna_rx, started,
1322         struct bna_rx, enum bna_rx_event);
1323 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1324         struct bna_rx, enum bna_rx_event);
1325 bfa_fsm_state_decl(bna_rx, stop_wait,
1326         struct bna_rx, enum bna_rx_event);
1327 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1328         struct bna_rx, enum bna_rx_event);
1329 bfa_fsm_state_decl(bna_rx, failed,
1330         struct bna_rx, enum bna_rx_event);
1331 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1332         struct bna_rx, enum bna_rx_event);
1333
1334 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1335 {
1336         call_rx_stop_cbfn(rx);
1337 }
1338
1339 static void bna_rx_sm_stopped(struct bna_rx *rx,
1340                                 enum bna_rx_event event)
1341 {
1342         switch (event) {
1343         case RX_E_START:
1344                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1345                 break;
1346
1347         case RX_E_STOP:
1348                 call_rx_stop_cbfn(rx);
1349                 break;
1350
1351         case RX_E_FAIL:
1352                 /* no-op */
1353                 break;
1354
1355         default:
1356                 bfa_sm_fault(event);
1357                 break;
1358         }
1359 }
1360
1361 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1362 {
1363         bna_bfi_rx_enet_start(rx);
1364 }
1365
1366 static void
1367 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1368 {
1369 }
1370
1371 static void
1372 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1373 {
1374         switch (event) {
1375         case RX_E_FAIL:
1376         case RX_E_STOPPED:
1377                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1378                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1379                 break;
1380
1381         case RX_E_STARTED:
1382                 bna_rx_enet_stop(rx);
1383                 break;
1384
1385         default:
1386                 bfa_sm_fault(event);
1387                 break;
1388         }
1389 }
1390
1391 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1392                                 enum bna_rx_event event)
1393 {
1394         switch (event) {
1395         case RX_E_STOP:
1396                 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1397                 break;
1398
1399         case RX_E_FAIL:
1400                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1401                 break;
1402
1403         case RX_E_STARTED:
1404                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1405                 break;
1406
1407         default:
1408                 bfa_sm_fault(event);
1409                 break;
1410         }
1411 }
1412
1413 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1414 {
1415         rx->rx_post_cbfn(rx->bna->bnad, rx);
1416         bna_rxf_start(&rx->rxf);
1417 }
1418
1419 static void
1420 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1421 {
1422 }
1423
1424 static void
1425 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1426 {
1427         switch (event) {
1428         case RX_E_FAIL:
1429                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1430                 bna_rxf_fail(&rx->rxf);
1431                 call_rx_stall_cbfn(rx);
1432                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1433                 break;
1434
1435         case RX_E_RXF_STARTED:
1436                 bna_rxf_stop(&rx->rxf);
1437                 break;
1438
1439         case RX_E_RXF_STOPPED:
1440                 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1441                 call_rx_stall_cbfn(rx);
1442                 bna_rx_enet_stop(rx);
1443                 break;
1444
1445         default:
1446                 bfa_sm_fault(event);
1447                 break;
1448         }
1449
1450 }
1451
1452 static void
1453 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1454 {
1455 }
1456
1457 static void
1458 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1459 {
1460         switch (event) {
1461         case RX_E_FAIL:
1462         case RX_E_STOPPED:
1463                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1464                 break;
1465
1466         case RX_E_STARTED:
1467                 bna_rx_enet_stop(rx);
1468                 break;
1469
1470         default:
1471                 bfa_sm_fault(event);
1472         }
1473 }
1474
1475 static void
1476 bna_rx_sm_started_entry(struct bna_rx *rx)
1477 {
1478         struct bna_rxp *rxp;
1479         int is_regular = (rx->type == BNA_RX_T_REGULAR);
1480
1481         /* Start IB */
1482         list_for_each_entry(rxp, &rx->rxp_q, qe)
1483                 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1484
1485         bna_ethport_cb_rx_started(&rx->bna->ethport);
1486 }
1487
1488 static void
1489 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1490 {
1491         switch (event) {
1492         case RX_E_STOP:
1493                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1494                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1495                 bna_rxf_stop(&rx->rxf);
1496                 break;
1497
1498         case RX_E_FAIL:
1499                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1500                 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1501                 bna_rxf_fail(&rx->rxf);
1502                 call_rx_stall_cbfn(rx);
1503                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1504                 break;
1505
1506         default:
1507                 bfa_sm_fault(event);
1508                 break;
1509         }
1510 }
1511
1512 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1513                                 enum bna_rx_event event)
1514 {
1515         switch (event) {
1516         case RX_E_STOP:
1517                 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1518                 break;
1519
1520         case RX_E_FAIL:
1521                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1522                 bna_rxf_fail(&rx->rxf);
1523                 call_rx_stall_cbfn(rx);
1524                 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1525                 break;
1526
1527         case RX_E_RXF_STARTED:
1528                 bfa_fsm_set_state(rx, bna_rx_sm_started);
1529                 break;
1530
1531         default:
1532                 bfa_sm_fault(event);
1533                 break;
1534         }
1535 }
1536
1537 static void
1538 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1539 {
1540 }
1541
1542 static void
1543 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1544 {
1545         switch (event) {
1546         case RX_E_FAIL:
1547         case RX_E_RXF_STOPPED:
1548                 /* No-op */
1549                 break;
1550
1551         case RX_E_CLEANUP_DONE:
1552                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1553                 break;
1554
1555         default:
1556                 bfa_sm_fault(event);
1557                 break;
1558         }
1559 }
1560
1561 static void
1562 bna_rx_sm_failed_entry(struct bna_rx *rx)
1563 {
1564 }
1565
1566 static void
1567 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1568 {
1569         switch (event) {
1570         case RX_E_START:
1571                 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1572                 break;
1573
1574         case RX_E_STOP:
1575                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1576                 break;
1577
1578         case RX_E_FAIL:
1579         case RX_E_RXF_STARTED:
1580         case RX_E_RXF_STOPPED:
1581                 /* No-op */
1582                 break;
1583
1584         case RX_E_CLEANUP_DONE:
1585                 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1586                 break;
1587
1588         default:
1589                 bfa_sm_fault(event);
1590                 break;
1591         }
}
1592
1593 static void
1594 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1595 {
1596 }
1597
1598 static void
1599 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1600 {
1601         switch (event) {
1602         case RX_E_STOP:
1603                 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1604                 break;
1605
1606         case RX_E_FAIL:
1607                 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1608                 break;
1609
1610         case RX_E_CLEANUP_DONE:
1611                 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1612                 break;
1613
1614         default:
1615                 bfa_sm_fault(event);
1616                 break;
1617         }
1618 }
1619
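/*
 * Build the BFI_ENET_H2I_RX_CFG_SET_REQ for the firmware: one queue-set
 * entry per Rx path describing the large/single (and, for SLR/HDS, the
 * small) RxQ, the CQ and the IB index segment, followed by the shared
 * interrupt/coalescing settings, the RxQ type and the VLAN strip status.
 * The request is then posted on the message queue.
 */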
1620 static void
1621 bna_bfi_rx_enet_start(struct bna_rx *rx)
1622 {
1623         struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1624         struct bna_rxp *rxp = NULL;
1625         struct bna_rxq *q0 = NULL, *q1 = NULL;
1626         int i;
1627
1628         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1629                 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1630         cfg_req->mh.num_entries = htons(
1631                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1632
1633         cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1634         cfg_req->num_queue_sets = rx->num_paths;
1635         for (i = 0; i < rx->num_paths; i++) {
1636                 rxp = rxp ? list_next_entry(rxp, qe)
1637                         : list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
1638                 GET_RXQS(rxp, q0, q1);
1639                 switch (rxp->type) {
1640                 case BNA_RXP_SLR:
1641                 case BNA_RXP_HDS:
1642                         /* Small RxQ */
1643                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1644                                                 &q1->qpt);
1645                         cfg_req->q_cfg[i].qs.rx_buffer_size =
1646                                 htons((u16)q1->buffer_size);
1647                         /* Fall through */
1648
1649                 case BNA_RXP_SINGLE:
1650                         /* Large/Single RxQ */
1651                         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1652                                                 &q0->qpt);
1653                         if (q0->multi_buffer)
1654                         /* multi-buffer is enabled by allocating
1655                          * a new rx with a new set of resources;
1656                          * q0->buffer_size should be initialized
1657                          * to the fragment size.
1658                          */
1659                                 cfg_req->rx_cfg.multi_buffer =
1660                                         BNA_STATUS_T_ENABLED;
1661                         else
1662                                 q0->buffer_size =
1663                                         bna_enet_mtu_get(&rx->bna->enet);
1664                         cfg_req->q_cfg[i].ql.rx_buffer_size =
1665                                 htons((u16)q0->buffer_size);
1666                         break;
1667
1668                 default:
1669                         BUG_ON(1);
1670                 }
1671
1672                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1673                                         &rxp->cq.qpt);
1674
1675                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1676                         rxp->cq.ib.ib_seg_host_addr.lsb;
1677                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1678                         rxp->cq.ib.ib_seg_host_addr.msb;
1679                 cfg_req->q_cfg[i].ib.intr.msix_index =
1680                         htons((u16)rxp->cq.ib.intr_vector);
1681         }
1682
1683         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1684         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1685         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1686         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1687         cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1688                                 ? BNA_STATUS_T_ENABLED :
1689                                 BNA_STATUS_T_DISABLED;
1690         cfg_req->ib_cfg.coalescing_timeout =
1691                         htonl((u32)rxp->cq.ib.coalescing_timeo);
1692         cfg_req->ib_cfg.inter_pkt_timeout =
1693                         htonl((u32)rxp->cq.ib.interpkt_timeo);
1694         cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1695
1696         switch (rxp->type) {
1697         case BNA_RXP_SLR:
1698                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1699                 break;
1700
1701         case BNA_RXP_HDS:
1702                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1703                 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1704                 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1705                 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1706                 break;
1707
1708         case BNA_RXP_SINGLE:
1709                 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1710                 break;
1711
1712         default:
1713                 BUG_ON(1);
1714         }
1715         cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1716
1717         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1718                 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1719         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1720 }
1721
1722 static void
1723 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1724 {
1725         struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1726
1727         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1728                 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1729         req->mh.num_entries = htons(
1730                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1731         bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1732                 &req->mh);
1733         bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1734 }
1735
1736 static void
1737 bna_rx_enet_stop(struct bna_rx *rx)
1738 {
1739         struct bna_rxp *rxp;
1740
1741         /* Stop IB */
1742         list_for_each_entry(rxp, &rx->rxp_q, qe)
1743                 bna_ib_stop(rx->bna, &rxp->cq.ib);
1744
1745         bna_bfi_rx_enet_stop(rx);
1746 }
1747
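/*
 * Check that the Rx module still has enough free Rx, RxP and RxQ objects
 * for the requested configuration: one RxQ per path for BNA_RXP_SINGLE,
 * two RxQs per path otherwise. Returns 1 if the create can proceed,
 * 0 otherwise.
 */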
1748 static int
1749 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1750 {
1751         if ((rx_mod->rx_free_count == 0) ||
1752                 (rx_mod->rxp_free_count == 0) ||
1753                 (rx_mod->rxq_free_count == 0))
1754                 return 0;
1755
1756         if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1757                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1758                         (rx_mod->rxq_free_count < rx_cfg->num_paths))
1759                                 return 0;
1760         } else {
1761                 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1762                         (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1763                         return 0;
1764         }
1765
1766         return 1;
1767 }
1768
1769 static struct bna_rxq *
1770 bna_rxq_get(struct bna_rx_mod *rx_mod)
1771 {
1772         struct bna_rxq *rxq = NULL;
1773
1774         rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
1775         list_del(&rxq->qe);
1776         rx_mod->rxq_free_count--;
1777
1778         return rxq;
1779 }
1780
1781 static void
1782 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1783 {
1784         list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1785         rx_mod->rxq_free_count++;
1786 }
1787
1788 static struct bna_rxp *
1789 bna_rxp_get(struct bna_rx_mod *rx_mod)
1790 {
1791         struct bna_rxp *rxp = NULL;
1792
1793         rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
1794         list_del(&rxp->qe);
1795         rx_mod->rxp_free_count--;
1796
1797         return rxp;
1798 }
1799
1800 static void
1801 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1802 {
1803         list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1804         rx_mod->rxp_free_count++;
1805 }
1806
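/*
 * Regular Rx objects are taken from the head of the free list and
 * loopback Rx objects from the tail, presumably so the two types draw
 * from opposite ends of the rid space; bna_rx_put() below returns an Rx
 * to the free list in rid order.
 */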
1807 static struct bna_rx *
1808 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1809 {
1810         struct bna_rx *rx = NULL;
1811
1812         BUG_ON(list_empty(&rx_mod->rx_free_q));
1813         if (type == BNA_RX_T_REGULAR)
1814                 rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
1815         else
1816                 rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
1817
1818         rx_mod->rx_free_count--;
1819         list_move_tail(&rx->qe, &rx_mod->rx_active_q);
1820         rx->type = type;
1821
1822         return rx;
1823 }
1824
1825 static void
1826 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1827 {
1828         struct list_head *qe;
1829
1830         list_for_each_prev(qe, &rx_mod->rx_free_q)
1831                 if (((struct bna_rx *)qe)->rid < rx->rid)
1832                         break;
1833
1834         list_add(&rx->qe, qe);
1835         rx_mod->rx_free_count++;
1836 }
1837
1838 static void
1839 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1840                 struct bna_rxq *q1)
1841 {
1842         switch (rxp->type) {
1843         case BNA_RXP_SINGLE:
1844                 rxp->rxq.single.only = q0;
1845                 rxp->rxq.single.reserved = NULL;
1846                 break;
1847         case BNA_RXP_SLR:
1848                 rxp->rxq.slr.large = q0;
1849                 rxp->rxq.slr.small = q1;
1850                 break;
1851         case BNA_RXP_HDS:
1852                 rxp->rxq.hds.data = q0;
1853                 rxp->rxq.hds.hdr = q1;
1854                 break;
1855         default:
1856                 break;
1857         }
1858 }
1859
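/*
 * Populate the RxQ queue page table (QPT): each hardware QPT entry gets
 * the DMA address of one PAGE_SIZE page of queue memory, while the shadow
 * (software) QPT keeps the matching kernel virtual addresses for the
 * driver's own use. bna_rxp_cqpt_setup() below does the same for the CQ.
 */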
1860 static void
1861 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1862                 struct bna_rxp *rxp,
1863                 u32 page_count,
1864                 u32 page_size,
1865                 struct bna_mem_descr *qpt_mem,
1866                 struct bna_mem_descr *swqpt_mem,
1867                 struct bna_mem_descr *page_mem)
1868 {
1869         u8 *kva;
1870         u64 dma;
1871         struct bna_dma_addr bna_dma;
1872         int     i;
1873
1874         rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1875         rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1876         rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1877         rxq->qpt.page_count = page_count;
1878         rxq->qpt.page_size = page_size;
1879
1880         rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1881         rxq->rcb->sw_q = page_mem->kva;
1882
1883         kva = page_mem->kva;
1884         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1885
1886         for (i = 0; i < rxq->qpt.page_count; i++) {
1887                 rxq->rcb->sw_qpt[i] = kva;
1888                 kva += PAGE_SIZE;
1889
1890                 BNA_SET_DMA_ADDR(dma, &bna_dma);
1891                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1892                         bna_dma.lsb;
1893                 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1894                         bna_dma.msb;
1895                 dma += PAGE_SIZE;
1896         }
1897 }
1898
1899 static void
1900 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1901                 u32 page_count,
1902                 u32 page_size,
1903                 struct bna_mem_descr *qpt_mem,
1904                 struct bna_mem_descr *swqpt_mem,
1905                 struct bna_mem_descr *page_mem)
1906 {
1907         u8 *kva;
1908         u64 dma;
1909         struct bna_dma_addr bna_dma;
1910         int     i;
1911
1912         rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1913         rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1914         rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1915         rxp->cq.qpt.page_count = page_count;
1916         rxp->cq.qpt.page_size = page_size;
1917
1918         rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1919         rxp->cq.ccb->sw_q = page_mem->kva;
1920
1921         kva = page_mem->kva;
1922         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1923
1924         for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1925                 rxp->cq.ccb->sw_qpt[i] = kva;
1926                 kva += PAGE_SIZE;
1927
1928                 BNA_SET_DMA_ADDR(dma, &bna_dma);
1929                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1930                         bna_dma.lsb;
1931                 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1932                         bna_dma.msb;
1933                 dma += PAGE_SIZE;
1934         }
1935 }
1936
1937 static void
1938 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
1939 {
1940         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1941
1942         bfa_wc_down(&rx_mod->rx_stop_wc);
1943 }
1944
1945 static void
1946 bna_rx_mod_cb_rx_stopped_all(void *arg)
1947 {
1948         struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1949
1950         if (rx_mod->stop_cbfn)
1951                 rx_mod->stop_cbfn(&rx_mod->bna->enet);
1952         rx_mod->stop_cbfn = NULL;
1953 }
1954
1955 static void
1956 bna_rx_start(struct bna_rx *rx)
1957 {
1958         rx->rx_flags |= BNA_RX_F_ENET_STARTED;
1959         if (rx->rx_flags & BNA_RX_F_ENABLED)
1960                 bfa_fsm_send_event(rx, RX_E_START);
1961 }
1962
1963 static void
1964 bna_rx_stop(struct bna_rx *rx)
1965 {
1966         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1967         if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
1968                 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
1969         else {
1970                 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
1971                 rx->stop_cbarg = &rx->bna->rx_mod;
1972                 bfa_fsm_send_event(rx, RX_E_STOP);
1973         }
1974 }
1975
1976 static void
1977 bna_rx_fail(struct bna_rx *rx)
1978 {
1979         /* Indicate Enet is no longer started, and fail the Rx */
1980         rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1981         bfa_fsm_send_event(rx, RX_E_FAIL);
1982 }
1983
1984 void
1985 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1986 {
1987         struct bna_rx *rx;
1988
1989         rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
1990         if (type == BNA_RX_T_LOOPBACK)
1991                 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
1992
1993         list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
1994                 if (rx->type == type)
1995                         bna_rx_start(rx);
1996 }
1997
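/*
 * Stop every active Rx of the given type. The wait counter (bfa_wc) is
 * bumped once per Rx being stopped; when the last one reports back via
 * bna_rx_mod_cb_rx_stopped(), bna_rx_mod_cb_rx_stopped_all() invokes the
 * enet stop callback.
 */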
1998 void
1999 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2000 {
2001         struct bna_rx *rx;
2002
2003         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2004         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2005
2006         rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2007
2008         bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2009
2010         list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
2011                 if (rx->type == type) {
2012                         bfa_wc_up(&rx_mod->rx_stop_wc);
2013                         bna_rx_stop(rx);
2014                 }
2015
2016         bfa_wc_wait(&rx_mod->rx_stop_wc);
2017 }
2018
2019 void
2020 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2021 {
2022         struct bna_rx *rx;
2023
2024         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2025         rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2026
2027         list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
2028                 bna_rx_fail(rx);
2029 }
2030
2031 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2032                         struct bna_res_info *res_info)
2033 {
2034         int     index;
2035         struct bna_rx *rx_ptr;
2036         struct bna_rxp *rxp_ptr;
2037         struct bna_rxq *rxq_ptr;
2038
2039         rx_mod->bna = bna;
2040         rx_mod->flags = 0;
2041
2042         rx_mod->rx = (struct bna_rx *)
2043                 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2044         rx_mod->rxp = (struct bna_rxp *)
2045                 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2046         rx_mod->rxq = (struct bna_rxq *)
2047                 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2048
2049         /* Initialize the queues */
2050         INIT_LIST_HEAD(&rx_mod->rx_free_q);
2051         rx_mod->rx_free_count = 0;
2052         INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2053         rx_mod->rxq_free_count = 0;
2054         INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2055         rx_mod->rxp_free_count = 0;
2056         INIT_LIST_HEAD(&rx_mod->rx_active_q);
2057
2058         /* Build RX queues */
2059         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2060                 rx_ptr = &rx_mod->rx[index];
2061
2062                 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2063                 rx_ptr->bna = NULL;
2064                 rx_ptr->rid = index;
2065                 rx_ptr->stop_cbfn = NULL;
2066                 rx_ptr->stop_cbarg = NULL;
2067
2068                 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2069                 rx_mod->rx_free_count++;
2070         }
2071
2072         /* build RX-path queue */
2073         for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2074                 rxp_ptr = &rx_mod->rxp[index];
2075                 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2076                 rx_mod->rxp_free_count++;
2077         }
2078
2079         /* build RXQ queue */
2080         for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2081                 rxq_ptr = &rx_mod->rxq[index];
2082                 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2083                 rx_mod->rxq_free_count++;
2084         }
2085 }
2086
2087 void
2088 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2089 {
2090         rx_mod->bna = NULL;
2091 }
2092
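/*
 * Firmware response to the Rx configuration request: copy the response
 * out of the message queue, program the CQ and RxQ doorbell addresses
 * from the returned PCI BAR offsets, record the hardware queue ids,
 * reset the producer/consumer indices and advance the FSM with
 * RX_E_STARTED.
 */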
2093 void
2094 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2095 {
2096         struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2097         struct bna_rxp *rxp = NULL;
2098         struct bna_rxq *q0 = NULL, *q1 = NULL;
2099         int i;
2100
2101         bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2102                 sizeof(struct bfi_enet_rx_cfg_rsp));
2103
2104         rx->hw_id = cfg_rsp->hw_id;
2105
2106         for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
2107              i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
2108                 GET_RXQS(rxp, q0, q1);
2109
2110                 /* Setup doorbells */
2111                 rxp->cq.ccb->i_dbell->doorbell_addr =
2112                         rx->bna->pcidev.pci_bar_kva
2113                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
2114                 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2115                 q0->rcb->q_dbell =
2116                         rx->bna->pcidev.pci_bar_kva
2117                         + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2118                 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2119                 if (q1) {
2120                         q1->rcb->q_dbell =
2121                         rx->bna->pcidev.pci_bar_kva
2122                         + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2123                         q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2124                 }
2125
2126                 /* Initialize producer/consumer indexes */
2127                 (*rxp->cq.ccb->hw_producer_index) = 0;
2128                 rxp->cq.ccb->producer_index = 0;
2129                 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2130                 if (q1)
2131                         q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2132         }
2133
2134         bfa_fsm_send_event(rx, RX_E_STARTED);
2135 }
2136
2137 void
2138 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2139 {
2140         bfa_fsm_send_event(rx, RX_E_STOPPED);
2141 }
2142
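/*
 * Translate an Rx configuration into resource requirements. Queue depths
 * are rounded up to a power of two and queue sizes to whole pages; the CQ
 * must cover the data queue plus the header queue (zero for
 * BNA_RXP_SINGLE), header-queue memory is requested only when a header
 * queue is actually used, and one MSI-X vector is requested per path.
 */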
2143 void
2144 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2145 {
2146         u32 cq_size, hq_size, dq_size;
2147         u32 cpage_count, hpage_count, dpage_count;
2148         struct bna_mem_info *mem_info;
2149         u32 cq_depth;
2150         u32 hq_depth;
2151         u32 dq_depth;
2152
2153         dq_depth = q_cfg->q0_depth;
2154         hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2155         cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2156
2157         cq_size = cq_depth * BFI_CQ_WI_SIZE;
2158         cq_size = ALIGN(cq_size, PAGE_SIZE);
2159         cpage_count = SIZE_TO_PAGES(cq_size);
2160
2161         dq_depth = roundup_pow_of_two(dq_depth);
2162         dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2163         dq_size = ALIGN(dq_size, PAGE_SIZE);
2164         dpage_count = SIZE_TO_PAGES(dq_size);
2165
2166         if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2167                 hq_depth = roundup_pow_of_two(hq_depth);
2168                 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2169                 hq_size = ALIGN(hq_size, PAGE_SIZE);
2170                 hpage_count = SIZE_TO_PAGES(hq_size);
2171         } else
2172                 hpage_count = 0;
2173
2174         res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2175         mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2176         mem_info->mem_type = BNA_MEM_T_KVA;
2177         mem_info->len = sizeof(struct bna_ccb);
2178         mem_info->num = q_cfg->num_paths;
2179
2180         res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2181         mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2182         mem_info->mem_type = BNA_MEM_T_KVA;
2183         mem_info->len = sizeof(struct bna_rcb);
2184         mem_info->num = BNA_GET_RXQS(q_cfg);
2185
2186         res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2187         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2188         mem_info->mem_type = BNA_MEM_T_DMA;
2189         mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2190         mem_info->num = q_cfg->num_paths;
2191
2192         res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2193         mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2194         mem_info->mem_type = BNA_MEM_T_KVA;
2195         mem_info->len = cpage_count * sizeof(void *);
2196         mem_info->num = q_cfg->num_paths;
2197
2198         res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2199         mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2200         mem_info->mem_type = BNA_MEM_T_DMA;
2201         mem_info->len = PAGE_SIZE * cpage_count;
2202         mem_info->num = q_cfg->num_paths;
2203
2204         res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2205         mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2206         mem_info->mem_type = BNA_MEM_T_DMA;
2207         mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2208         mem_info->num = q_cfg->num_paths;
2209
2210         res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2211         mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2212         mem_info->mem_type = BNA_MEM_T_KVA;
2213         mem_info->len = dpage_count * sizeof(void *);
2214         mem_info->num = q_cfg->num_paths;
2215
2216         res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2217         mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2218         mem_info->mem_type = BNA_MEM_T_DMA;
2219         mem_info->len = PAGE_SIZE * dpage_count;
2220         mem_info->num = q_cfg->num_paths;
2221
2222         res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2223         mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2224         mem_info->mem_type = BNA_MEM_T_DMA;
2225         mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2226         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2227
2228         res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2229         mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2230         mem_info->mem_type = BNA_MEM_T_KVA;
2231         mem_info->len = hpage_count * sizeof(void *);
2232         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2233
2234         res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2235         mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2236         mem_info->mem_type = BNA_MEM_T_DMA;
2237         mem_info->len = PAGE_SIZE * hpage_count;
2238         mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2239
2240         res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2241         mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2242         mem_info->mem_type = BNA_MEM_T_DMA;
2243         mem_info->len = BFI_IBIDX_SIZE;
2244         mem_info->num = q_cfg->num_paths;
2245
2246         res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2247         mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2248         mem_info->mem_type = BNA_MEM_T_KVA;
2249         mem_info->len = BFI_ENET_RSS_RIT_MAX;
2250         mem_info->num = 1;
2251
2252         res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2253         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2254         res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2255 }
2256
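/*
 * Assemble an Rx from the pre-allocated resources: one RxP (with its CQ,
 * CCB and IB) per path, one or two RxQs per path depending on the path
 * type, and RCB/unmap/QPT memory carved out of res_info. The optional
 * rcb/ccb setup callbacks are invoked as the queues are wired up, and the
 * new Rx is left in the stopped state.
 */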
2257 struct bna_rx *
2258 bna_rx_create(struct bna *bna, struct bnad *bnad,
2259                 struct bna_rx_config *rx_cfg,
2260                 const struct bna_rx_event_cbfn *rx_cbfn,
2261                 struct bna_res_info *res_info,
2262                 void *priv)
2263 {
2264         struct bna_rx_mod *rx_mod = &bna->rx_mod;
2265         struct bna_rx *rx;
2266         struct bna_rxp *rxp;
2267         struct bna_rxq *q0;
2268         struct bna_rxq *q1;
2269         struct bna_intr_info *intr_info;
2270         struct bna_mem_descr *hqunmap_mem;
2271         struct bna_mem_descr *dqunmap_mem;
2272         struct bna_mem_descr *ccb_mem;
2273         struct bna_mem_descr *rcb_mem;
2274         struct bna_mem_descr *cqpt_mem;
2275         struct bna_mem_descr *cswqpt_mem;
2276         struct bna_mem_descr *cpage_mem;
2277         struct bna_mem_descr *hqpt_mem;
2278         struct bna_mem_descr *dqpt_mem;
2279         struct bna_mem_descr *hsqpt_mem;
2280         struct bna_mem_descr *dsqpt_mem;
2281         struct bna_mem_descr *hpage_mem;
2282         struct bna_mem_descr *dpage_mem;
2283         u32 dpage_count, hpage_count;
2284         u32 hq_idx, dq_idx, rcb_idx;
2285         u32 cq_depth, i;
2286         u32 page_count;
2287
2288         if (!bna_rx_res_check(rx_mod, rx_cfg))
2289                 return NULL;
2290
2291         intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2292         ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2293         rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2294         dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2295         hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2296         cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2297         cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2298         cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2299         hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2300         dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2301         hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2302         dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2303         hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2304         dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2305
2306         page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2307                         PAGE_SIZE;
2308
2309         dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2310                         PAGE_SIZE;
2311
2312         hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2313                         PAGE_SIZE;
2314
2315         rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2316         rx->bna = bna;
2317         rx->rx_flags = 0;
2318         INIT_LIST_HEAD(&rx->rxp_q);
2319         rx->stop_cbfn = NULL;
2320         rx->stop_cbarg = NULL;
2321         rx->priv = priv;
2322
2323         rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2324         rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2325         rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2326         rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2327         rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2328         /* The following callbacks are mandatory */
2329         rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2330         rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2331
2332         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2333                 switch (rx->type) {
2334                 case BNA_RX_T_REGULAR:
2335                         if (!(rx->bna->rx_mod.flags &
2336                                 BNA_RX_MOD_F_ENET_LOOPBACK))
2337                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2338                         break;
2339                 case BNA_RX_T_LOOPBACK:
2340                         if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2341                                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2342                         break;
2343                 }
2344         }
2345
2346         rx->num_paths = rx_cfg->num_paths;
2347         for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2348                         i < rx->num_paths; i++) {
2349                 rxp = bna_rxp_get(rx_mod);
2350                 list_add_tail(&rxp->qe, &rx->rxp_q);
2351                 rxp->type = rx_cfg->rxp_type;
2352                 rxp->rx = rx;
2353                 rxp->cq.rx = rx;
2354
2355                 q0 = bna_rxq_get(rx_mod);
2356                 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2357                         q1 = NULL;
2358                 else
2359                         q1 = bna_rxq_get(rx_mod);
2360
2361                 if (1 == intr_info->num)
2362                         rxp->vector = intr_info->idl[0].vector;
2363                 else
2364                         rxp->vector = intr_info->idl[i].vector;
2365
2366                 /* Setup IB */
2367
2368                 rxp->cq.ib.ib_seg_host_addr.lsb =
2369                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2370                 rxp->cq.ib.ib_seg_host_addr.msb =
2371                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2372                 rxp->cq.ib.ib_seg_host_addr_kva =
2373                 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2374                 rxp->cq.ib.intr_type = intr_info->intr_type;
2375                 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2376                         rxp->cq.ib.intr_vector = rxp->vector;
2377                 else
2378                         rxp->cq.ib.intr_vector = BIT(rxp->vector);
2379                 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2380                 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2381                 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2382
2383                 bna_rxp_add_rxqs(rxp, q0, q1);
2384
2385                 /* Setup large Q */
2386
2387                 q0->rx = rx;
2388                 q0->rxp = rxp;
2389
2390                 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2391                 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2392                 rcb_idx++; dq_idx++;
2393                 q0->rcb->q_depth = rx_cfg->q0_depth;
2394                 q0->q_depth = rx_cfg->q0_depth;
2395                 q0->multi_buffer = rx_cfg->q0_multi_buf;
2396                 q0->buffer_size = rx_cfg->q0_buf_size;
2397                 q0->num_vecs = rx_cfg->q0_num_vecs;
2398                 q0->rcb->rxq = q0;
2399                 q0->rcb->bnad = bna->bnad;
2400                 q0->rcb->id = 0;
2401                 q0->rx_packets = q0->rx_bytes = 0;
2402                 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2403                 q0->rxbuf_map_failed = 0;
2404
2405                 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2406                         &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2407
2408                 if (rx->rcb_setup_cbfn)
2409                         rx->rcb_setup_cbfn(bnad, q0->rcb);
2410
2411                 /* Setup small Q */
2412
2413                 if (q1) {
2414                         q1->rx = rx;
2415                         q1->rxp = rxp;
2416
2417                         q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2418                         q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2419                         rcb_idx++; hq_idx++;
2420                         q1->rcb->q_depth = rx_cfg->q1_depth;
2421                         q1->q_depth = rx_cfg->q1_depth;
2422                         q1->multi_buffer = BNA_STATUS_T_DISABLED;
2423                         q1->num_vecs = 1;
2424                         q1->rcb->rxq = q1;
2425                         q1->rcb->bnad = bna->bnad;
2426                         q1->rcb->id = 1;
2427                         q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2428                                         rx_cfg->hds_config.forced_offset
2429                                         : rx_cfg->q1_buf_size;
2430                         q1->rx_packets = q1->rx_bytes = 0;
2431                         q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2432                         q1->rxbuf_map_failed = 0;
2433
2434                         bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2435                                 &hqpt_mem[i], &hsqpt_mem[i],
2436                                 &hpage_mem[i]);
2437
2438                         if (rx->rcb_setup_cbfn)
2439                                 rx->rcb_setup_cbfn(bnad, q1->rcb);
2440                 }
2441
2442                 /* Setup CQ */
2443
2444                 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2445                 cq_depth = rx_cfg->q0_depth +
2446                         ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2447                          0 : rx_cfg->q1_depth);
2448                 /* if multi-buffer is enabled, the sum of q0_depth
2449                  * and q1_depth need not be a power of 2
2450                  */
2451                 cq_depth = roundup_pow_of_two(cq_depth);
2452                 rxp->cq.ccb->q_depth = cq_depth;
2453                 rxp->cq.ccb->cq = &rxp->cq;
2454                 rxp->cq.ccb->rcb[0] = q0->rcb;
2455                 q0->rcb->ccb = rxp->cq.ccb;
2456                 if (q1) {
2457                         rxp->cq.ccb->rcb[1] = q1->rcb;
2458                         q1->rcb->ccb = rxp->cq.ccb;
2459                 }
2460                 rxp->cq.ccb->hw_producer_index =
2461                         (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2462                 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2463                 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2464                 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2465                 rxp->cq.ccb->rx_coalescing_timeo =
2466                         rxp->cq.ib.coalescing_timeo;
2467                 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2468                 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2469                 rxp->cq.ccb->bnad = bna->bnad;
2470                 rxp->cq.ccb->id = i;
2471
2472                 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2473                         &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2474
2475                 if (rx->ccb_setup_cbfn)
2476                         rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2477         }
2478
2479         rx->hds_cfg = rx_cfg->hds_config;
2480
2481         bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2482
2483         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2484
2485         rx_mod->rid_mask |= BIT(rx->rid);
2486
2487         return rx;
2488 }
2489
2490 void
2491 bna_rx_destroy(struct bna_rx *rx)
2492 {
2493         struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2494         struct bna_rxq *q0 = NULL;
2495         struct bna_rxq *q1 = NULL;
2496         struct bna_rxp *rxp;
2497         struct list_head *qe;
2498
2499         bna_rxf_uninit(&rx->rxf);
2500
2501         while (!list_empty(&rx->rxp_q)) {
2502                 rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
2503                 list_del(&rxp->qe);
2504                 GET_RXQS(rxp, q0, q1);
2505                 if (rx->rcb_destroy_cbfn)
2506                         rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2507                 q0->rcb = NULL;
2508                 q0->rxp = NULL;
2509                 q0->rx = NULL;
2510                 bna_rxq_put(rx_mod, q0);
2511
2512                 if (q1) {
2513                         if (rx->rcb_destroy_cbfn)
2514                                 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2515                         q1->rcb = NULL;
2516                         q1->rxp = NULL;
2517                         q1->rx = NULL;
2518                         bna_rxq_put(rx_mod, q1);
2519                 }
2520                 rxp->rxq.slr.large = NULL;
2521                 rxp->rxq.slr.small = NULL;
2522
2523                 if (rx->ccb_destroy_cbfn)
2524                         rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2525                 rxp->cq.ccb = NULL;
2526                 rxp->rx = NULL;
2527                 bna_rxp_put(rx_mod, rxp);
2528         }
2529
2530         list_for_each(qe, &rx_mod->rx_active_q)
2531                 if (qe == &rx->qe) {
2532                         list_del(&rx->qe);
2533                         break;
2534                 }
2535
2536         rx_mod->rid_mask &= ~BIT(rx->rid);
2537
2538         rx->bna = NULL;
2539         rx->priv = NULL;
2540         bna_rx_put(rx_mod, rx);
2541 }
2542
2543 void
2544 bna_rx_enable(struct bna_rx *rx)
2545 {
2546         if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2547                 return;
2548
2549         rx->rx_flags |= BNA_RX_F_ENABLED;
2550         if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2551                 bfa_fsm_send_event(rx, RX_E_START);
2552 }
2553
2554 void
2555 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2556                 void (*cbfn)(void *, struct bna_rx *))
2557 {
2558         if (type == BNA_SOFT_CLEANUP) {
2559                 /* h/w should not be accessed. Treat it as if we are stopped */
2560                 (*cbfn)(rx->bna->bnad, rx);
2561         } else {
2562                 rx->stop_cbfn = cbfn;
2563                 rx->stop_cbarg = rx->bna->bnad;
2564
2565                 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2566
2567                 bfa_fsm_send_event(rx, RX_E_STOP);
2568         }
2569 }
2570
2571 void
2572 bna_rx_cleanup_complete(struct bna_rx *rx)
2573 {
2574         bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2575 }
2576
2577 void
2578 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2579 {
2580         struct bna_rxf *rxf = &rx->rxf;
2581
2582         if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2583                 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2584                 rxf->vlan_strip_pending = true;
2585                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2586         }
2587 }
2588
2589 void
2590 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2591 {
2592         struct bna_rxf *rxf = &rx->rxf;
2593
2594         if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2595                 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2596                 rxf->vlan_strip_pending = true;
2597                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2598         }
2599 }
2600
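/*
 * Change the Rx mode (promiscuous / all-multicast / default) within the
 * bits selected by bitmask. Promiscuous and default modes are mutually
 * exclusive and may be owned by at most one Rx in the system; if the
 * checks pass and the filter state actually changes, RXF_E_CONFIG is sent
 * so the hardware gets reprogrammed.
 */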
2601 enum bna_cb_status
2602 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2603                 enum bna_rxmode bitmask)
2604 {
2605         struct bna_rxf *rxf = &rx->rxf;
2606         int need_hw_config = 0;
2607
2608         /* Error checks */
2609
2610         if (is_promisc_enable(new_mode, bitmask)) {
2611                 /* If promisc mode is already enabled elsewhere in the system */
2612                 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2613                         (rx->bna->promisc_rid != rxf->rx->rid))
2614                         goto err_return;
2615
2616                 /* If default mode is already enabled in the system */
2617                 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2618                         goto err_return;
2619
2620                 /* Trying to enable promiscuous and default mode together */
2621                 if (is_default_enable(new_mode, bitmask))
2622                         goto err_return;
2623         }
2624
2625         if (is_default_enable(new_mode, bitmask)) {
2626                 /* If default mode is already enabled elsewhere in the system */
2627                 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2628                         (rx->bna->default_mode_rid != rxf->rx->rid)) {
2629                                 goto err_return;
2630                 }
2631
2632                 /* If promiscuous mode is already enabled in the system */
2633                 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2634                         goto err_return;
2635         }
2636
2637         /* Process the commands */
2638
2639         if (is_promisc_enable(new_mode, bitmask)) {
2640                 if (bna_rxf_promisc_enable(rxf))
2641                         need_hw_config = 1;
2642         } else if (is_promisc_disable(new_mode, bitmask)) {
2643                 if (bna_rxf_promisc_disable(rxf))
2644                         need_hw_config = 1;
2645         }
2646
2647         if (is_allmulti_enable(new_mode, bitmask)) {
2648                 if (bna_rxf_allmulti_enable(rxf))
2649                         need_hw_config = 1;
2650         } else if (is_allmulti_disable(new_mode, bitmask)) {
2651                 if (bna_rxf_allmulti_disable(rxf))
2652                         need_hw_config = 1;
2653         }
2654
2655         /* Trigger h/w if needed */
2656
2657         if (need_hw_config) {
2658                 rxf->cam_fltr_cbfn = NULL;
2659                 rxf->cam_fltr_cbarg = rx->bna->bnad;
2660                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2661         }
2662
2663         return BNA_CB_SUCCESS;
2664
2665 err_return:
2666         return BNA_CB_FAIL;
2667 }
2668
2669 void
2670 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2671 {
2672         struct bna_rxf *rxf = &rx->rxf;
2673
2674         if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2675                 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2676                 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2677                 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2678         }
2679 }
2680
2681 void
2682 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2683 {
2684         struct bna_rxp *rxp;
2685
2686         list_for_each_entry(rxp, &rx->rxp_q, qe) {
2687                 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2688                 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2689         }
2690 }
2691
2692 void
2693 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2694 {
2695         int i, j;
2696
2697         for (i = 0; i < BNA_LOAD_T_MAX; i++)
2698                 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2699                         bna->rx_mod.dim_vector[i][j] = vector[i][j];
2700 }
2701
2702 void
2703 bna_rx_dim_update(struct bna_ccb *ccb)
2704 {
2705         struct bna *bna = ccb->cq->rx->bna;
2706         u32 load, bias;
2707         u32 pkt_rt, small_rt, large_rt;
2708         u8 coalescing_timeo;
2709
2710         if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2711                 (ccb->pkt_rate.large_pkt_cnt == 0))
2712                 return;
2713
2714         /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2715
2716         small_rt = ccb->pkt_rate.small_pkt_cnt;
2717         large_rt = ccb->pkt_rate.large_pkt_cnt;
2718
2719         pkt_rt = small_rt + large_rt;
2720
2721         if (pkt_rt < BNA_PKT_RATE_10K)
2722                 load = BNA_LOAD_T_LOW_4;
2723         else if (pkt_rt < BNA_PKT_RATE_20K)
2724                 load = BNA_LOAD_T_LOW_3;
2725         else if (pkt_rt < BNA_PKT_RATE_30K)
2726                 load = BNA_LOAD_T_LOW_2;
2727         else if (pkt_rt < BNA_PKT_RATE_40K)
2728                 load = BNA_LOAD_T_LOW_1;
2729         else if (pkt_rt < BNA_PKT_RATE_50K)
2730                 load = BNA_LOAD_T_HIGH_1;
2731         else if (pkt_rt < BNA_PKT_RATE_60K)
2732                 load = BNA_LOAD_T_HIGH_2;
2733         else if (pkt_rt < BNA_PKT_RATE_80K)
2734                 load = BNA_LOAD_T_HIGH_3;
2735         else
2736                 load = BNA_LOAD_T_HIGH_4;
2737
2738         if (small_rt > (large_rt << 1))
2739                 bias = 0;
2740         else
2741                 bias = 1;
2742
2743         ccb->pkt_rate.small_pkt_cnt = 0;
2744         ccb->pkt_rate.large_pkt_cnt = 0;
2745
2746         coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2747         ccb->rx_coalescing_timeo = coalescing_timeo;
2748
2749         /* Set it to IB */
2750         bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2751 }
2752
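/*
 * Coalescing-timeout table for dynamic interrupt moderation, indexed by
 * load level and small/large packet bias; presumably installed into
 * rx_mod.dim_vector via bna_rx_dim_reconfig() and then consumed by
 * bna_rx_dim_update() above.
 */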
2753 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2754         {12, 12},
2755         {6, 10},
2756         {5, 10},
2757         {4, 8},
2758         {3, 6},
2759         {3, 6},
2760         {2, 4},
2761         {1, 2},
2762 };
2763
2764 /* TX */
2765
2766 #define call_tx_stop_cbfn(tx)                                           \
2767 do {                                                                    \
2768         if ((tx)->stop_cbfn) {                                          \
2769                 void (*cbfn)(void *, struct bna_tx *);          \
2770                 void *cbarg;                                            \
2771                 cbfn = (tx)->stop_cbfn;                                 \
2772                 cbarg = (tx)->stop_cbarg;                               \
2773                 (tx)->stop_cbfn = NULL;                                 \
2774                 (tx)->stop_cbarg = NULL;                                \
2775                 cbfn(cbarg, (tx));                                      \
2776         }                                                               \
2777 } while (0)
2778
2779 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2780 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2781 static void bna_tx_enet_stop(struct bna_tx *tx);
2782
2783 enum bna_tx_event {
2784         TX_E_START                      = 1,
2785         TX_E_STOP                       = 2,
2786         TX_E_FAIL                       = 3,
2787         TX_E_STARTED                    = 4,
2788         TX_E_STOPPED                    = 5,
2789         TX_E_CLEANUP_DONE               = 7,
2790         TX_E_BW_UPDATE                  = 8,
2791 };
2792
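/*
 * Tx state machine. The states mirror the Rx FSM above: stopped,
 * start_wait, started, stop_wait, cleanup_wait, failed and quiesce_wait,
 * plus prio_stop_wait/prio_cleanup_wait, which handle a bandwidth or
 * priority update (TX_E_BW_UPDATE) by stopping and restarting the Tx
 * queues.
 */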
2793 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2794 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2795 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2796 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2797 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2798                         enum bna_tx_event);
2799 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2800                         enum bna_tx_event);
2801 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2802                         enum bna_tx_event);
2803 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2804 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2805                         enum bna_tx_event);
2806
2807 static void
2808 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2809 {
2810         call_tx_stop_cbfn(tx);
2811 }
2812
2813 static void
2814 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2815 {
2816         switch (event) {
2817         case TX_E_START:
2818                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2819                 break;
2820
2821         case TX_E_STOP:
2822                 call_tx_stop_cbfn(tx);
2823                 break;
2824
2825         case TX_E_FAIL:
2826                 /* No-op */
2827                 break;
2828
2829         case TX_E_BW_UPDATE:
2830                 /* No-op */
2831                 break;
2832
2833         default:
2834                 bfa_sm_fault(event);
2835         }
2836 }
2837
2838 static void
2839 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2840 {
2841         bna_bfi_tx_enet_start(tx);
2842 }
2843
2844 static void
2845 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2846 {
2847         switch (event) {
2848         case TX_E_STOP:
2849                 tx->flags &= ~BNA_TX_F_BW_UPDATED;
2850                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2851                 break;
2852
2853         case TX_E_FAIL:
2854                 tx->flags &= ~BNA_TX_F_BW_UPDATED;
2855                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2856                 break;
2857
2858         case TX_E_STARTED:
2859                 if (tx->flags & BNA_TX_F_BW_UPDATED) {
2860                         tx->flags &= ~BNA_TX_F_BW_UPDATED;
2861                         bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2862                 } else
2863                         bfa_fsm_set_state(tx, bna_tx_sm_started);
2864                 break;
2865
2866         case TX_E_BW_UPDATE:
2867                 tx->flags |= BNA_TX_F_BW_UPDATED;
2868                 break;
2869
2870         default:
2871                 bfa_sm_fault(event);
2872         }
2873 }
2874
2875 static void
2876 bna_tx_sm_started_entry(struct bna_tx *tx)
2877 {
2878         struct bna_txq *txq;
2879         int is_regular = (tx->type == BNA_TX_T_REGULAR);
2880
2881         list_for_each_entry(txq, &tx->txq_q, qe) {
2882                 txq->tcb->priority = txq->priority;
2883                 /* Start IB */
2884                 bna_ib_start(tx->bna, &txq->ib, is_regular);
2885         }
2886         tx->tx_resume_cbfn(tx->bna->bnad, tx);
2887 }
2888
2889 static void
2890 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2891 {
2892         switch (event) {
2893         case TX_E_STOP:
2894                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2895                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
2896                 bna_tx_enet_stop(tx);
2897                 break;
2898
2899         case TX_E_FAIL:
2900                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
2901                 tx->tx_stall_cbfn(tx->bna->bnad, tx);
2902                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2903                 break;
2904
2905         case TX_E_BW_UPDATE:
2906                 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2907                 break;
2908
2909         default:
2910                 bfa_sm_fault(event);
2911         }
2912 }
2913
2914 static void
2915 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
2916 {
2917 }
2918
2919 static void
2920 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2921 {
2922         switch (event) {
2923         case TX_E_FAIL:
2924         case TX_E_STOPPED:
2925                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
2926                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2927                 break;
2928
2929         case TX_E_STARTED:
2930                 /*
2931                  * We are here due to start_wait -> stop_wait transition on
2932                  * TX_E_STOP event
2933                  */
2934                 bna_tx_enet_stop(tx);
2935                 break;
2936
2937         case TX_E_BW_UPDATE:
2938                 /* No-op */
2939                 break;
2940
2941         default:
2942                 bfa_sm_fault(event);
2943         }
2944 }
2945
2946 static void
2947 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
2948 {
2949 }
2950
2951 static void
2952 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
2953 {
2954         switch (event) {
2955         case TX_E_FAIL:
2956         case TX_E_BW_UPDATE:
2957                 /* No-op */
2958                 break;
2959
2960         case TX_E_CLEANUP_DONE:
2961                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2962                 break;
2963
2964         default:
2965                 bfa_sm_fault(event);
2966         }
2967 }
2968
2969 static void
2970 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
2971 {
2972         tx->tx_stall_cbfn(tx->bna->bnad, tx);
2973         bna_tx_enet_stop(tx);
2974 }
2975
2976 static void
2977 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2978 {
2979         switch (event) {
2980         case TX_E_STOP:
2981                 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2982                 break;
2983
2984         case TX_E_FAIL:
2985                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
2986                 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2987                 break;
2988
2989         case TX_E_STOPPED:
2990                 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
2991                 break;
2992
2993         case TX_E_BW_UPDATE:
2994                 /* No-op */
2995                 break;
2996
2997         default:
2998                 bfa_sm_fault(event);
2999         }
3000 }
3001
3002 static void
3003 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3004 {
3005         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3006 }
3007
3008 static void
3009 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3010 {
3011         switch (event) {
3012         case TX_E_STOP:
3013                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3014                 break;
3015
3016         case TX_E_FAIL:
3017                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3018                 break;
3019
3020         case TX_E_BW_UPDATE:
3021                 /* No-op */
3022                 break;
3023
3024         case TX_E_CLEANUP_DONE:
3025                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3026                 break;
3027
3028         default:
3029                 bfa_sm_fault(event);
3030         }
3031 }
3032
3033 static void
3034 bna_tx_sm_failed_entry(struct bna_tx *tx)
3035 {
3036 }
3037
3038 static void
3039 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3040 {
3041         switch (event) {
3042         case TX_E_START:
3043                 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3044                 break;
3045
3046         case TX_E_STOP:
3047                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3048                 break;
3049
3050         case TX_E_FAIL:
3051                 /* No-op */
3052                 break;
3053
3054         case TX_E_CLEANUP_DONE:
3055                 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3056                 break;
3057
3058         default:
3059                 bfa_sm_fault(event);
3060         }
3061 }
3062
3063 static void
3064 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3065 {
3066 }
3067
3068 static void
3069 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3070 {
3071         switch (event) {
3072         case TX_E_STOP:
3073                 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3074                 break;
3075
3076         case TX_E_FAIL:
3077                 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3078                 break;
3079
3080         case TX_E_CLEANUP_DONE:
3081                 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3082                 break;
3083
3084         case TX_E_BW_UPDATE:
3085                 /* No-op */
3086                 break;
3087
3088         default:
3089                 bfa_sm_fault(event);
3090         }
3091 }
3092
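/*
 * Build a BFI_ENET_H2I_TX_CFG_SET_REQ describing every TxQ (queue page
 * table, priority, IB index segment address, MSI-X vector) plus the IB
 * and VLAN settings, and post it to firmware over the message queue.
 */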
3093 static void
3094 bna_bfi_tx_enet_start(struct bna_tx *tx)
3095 {
3096         struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3097         struct bna_txq *txq = NULL;
3098         int i;
3099
3100         bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3101                 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3102         cfg_req->mh.num_entries = htons(
3103                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3104
3105         cfg_req->num_queues = tx->num_txq;
3106         for (i = 0; i < tx->num_txq; i++) {
3107                 txq = txq ? list_next_entry(txq, qe)
3108                         : list_first_entry(&tx->txq_q, struct bna_txq, qe);
3109                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3110                 cfg_req->q_cfg[i].q.priority = txq->priority;
3111
3112                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3113                         txq->ib.ib_seg_host_addr.lsb;
3114                 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3115                         txq->ib.ib_seg_host_addr.msb;
3116                 cfg_req->q_cfg[i].ib.intr.msix_index =
3117                         htons((u16)txq->ib.intr_vector);
3118         }
3119
3120         cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3121         cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3122         cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3123         cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3124         cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3125                                 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3126         cfg_req->ib_cfg.coalescing_timeout =
3127                         htonl((u32)txq->ib.coalescing_timeo);
3128         cfg_req->ib_cfg.inter_pkt_timeout =
3129                         htonl((u32)txq->ib.interpkt_timeo);
3130         cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3131
3132         cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3133         cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3134         cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3135         cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3136
3137         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3138                 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3139         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3140 }
3141
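/*
 * Post a BFI_ENET_H2I_TX_CFG_CLR_REQ asking firmware to tear down the
 * Tx configuration.
 */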
3142 static void
3143 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3144 {
3145         struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3146
3147         bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3148                 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3149         req->mh.num_entries = htons(
3150                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3151         bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3152                 &req->mh);
3153         bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3154 }
3155
3156 static void
3157 bna_tx_enet_stop(struct bna_tx *tx)
3158 {
3159         struct bna_txq *txq;
3160
3161         /* Stop IB */
3162         list_for_each_entry(txq, &tx->txq_q, qe)
3163                 bna_ib_stop(tx->bna, &txq->ib);
3164
3165         bna_bfi_tx_enet_stop(tx);
3166 }
3167
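/*
 * Populate the hardware queue page table (one DMA address per
 * PAGE_SIZE page) and the shadow software QPT holding the matching
 * kernel virtual addresses for this TxQ.
 */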
3168 static void
3169 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3170                 struct bna_mem_descr *qpt_mem,
3171                 struct bna_mem_descr *swqpt_mem,
3172                 struct bna_mem_descr *page_mem)
3173 {
3174         u8 *kva;
3175         u64 dma;
3176         struct bna_dma_addr bna_dma;
3177         int i;
3178
3179         txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3180         txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3181         txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3182         txq->qpt.page_count = page_count;
3183         txq->qpt.page_size = page_size;
3184
3185         txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3186         txq->tcb->sw_q = page_mem->kva;
3187
3188         kva = page_mem->kva;
3189         BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3190
3191         for (i = 0; i < page_count; i++) {
3192                 txq->tcb->sw_qpt[i] = kva;
3193                 kva += PAGE_SIZE;
3194
3195                 BNA_SET_DMA_ADDR(dma, &bna_dma);
3196                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3197                         bna_dma.lsb;
3198                 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3199                         bna_dma.msb;
3200                 dma += PAGE_SIZE;
3201         }
3202 }
3203
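/*
 * Take a Tx object off the free list: regular Tx from the head,
 * loopback Tx from the tail.
 */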
3204 static struct bna_tx *
3205 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3206 {
3207         struct bna_tx *tx = NULL;
3208
3209         if (list_empty(&tx_mod->tx_free_q))
3210                 return NULL;
3211         if (type == BNA_TX_T_REGULAR)
3212                 tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
3213         else
3214                 tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
3215         list_del(&tx->qe);
3216         tx->type = type;
3217
3218         return tx;
3219 }
3220
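/*
 * Return the TxQs to the free pool, unlink the Tx from the active list
 * and reinsert it into the free list, which is kept sorted by rid.
 */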
3221 static void
3222 bna_tx_free(struct bna_tx *tx)
3223 {
3224         struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3225         struct bna_txq *txq;
3226         struct list_head *qe;
3227
3228         while (!list_empty(&tx->txq_q)) {
3229                 txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
3230                 txq->tcb = NULL;
3231                 txq->tx = NULL;
3232                 list_move_tail(&txq->qe, &tx_mod->txq_free_q);
3233         }
3234
3235         list_for_each(qe, &tx_mod->tx_active_q) {
3236                 if (qe == &tx->qe) {
3237                         list_del(&tx->qe);
3238                         break;
3239                 }
3240         }
3241
3242         tx->bna = NULL;
3243         tx->priv = NULL;
3244
3245         list_for_each_prev(qe, &tx_mod->tx_free_q)
3246                 if (((struct bna_tx *)qe)->rid < tx->rid)
3247                         break;
3248
3249         list_add(&tx->qe, qe);
3250 }
3251
3252 static void
3253 bna_tx_start(struct bna_tx *tx)
3254 {
3255         tx->flags |= BNA_TX_F_ENET_STARTED;
3256         if (tx->flags & BNA_TX_F_ENABLED)
3257                 bfa_fsm_send_event(tx, TX_E_START);
3258 }
3259
3260 static void
3261 bna_tx_stop(struct bna_tx *tx)
3262 {
3263         tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3264         tx->stop_cbarg = &tx->bna->tx_mod;
3265
3266         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3267         bfa_fsm_send_event(tx, TX_E_STOP);
3268 }
3269
3270 static void
3271 bna_tx_fail(struct bna_tx *tx)
3272 {
3273         tx->flags &= ~BNA_TX_F_ENET_STARTED;
3274         bfa_fsm_send_event(tx, TX_E_FAIL);
3275 }
3276
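/*
 * Firmware response to TX_CFG_SET: record the hardware id, turn the
 * firmware-supplied IB and queue doorbell offsets into addresses in
 * the mapped PCI BAR, reset the producer/consumer indexes and send
 * TX_E_STARTED to the FSM.
 */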
3277 void
3278 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3279 {
3280         struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3281         struct bna_txq *txq = NULL;
3282         int i;
3283
3284         bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3285                 sizeof(struct bfi_enet_tx_cfg_rsp));
3286
3287         tx->hw_id = cfg_rsp->hw_id;
3288
3289         for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
3290              i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
3291                 /* Set up doorbells */
3292                 txq->tcb->i_dbell->doorbell_addr =
3293                         tx->bna->pcidev.pci_bar_kva
3294                         + ntohl(cfg_rsp->q_handles[i].i_dbell);
3295                 txq->tcb->q_dbell =
3296                         tx->bna->pcidev.pci_bar_kva
3297                         + ntohl(cfg_rsp->q_handles[i].q_dbell);
3298                 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3299
3300                 /* Initialize producer/consumer indexes */
3301                 (*txq->tcb->hw_consumer_index) = 0;
3302                 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3303         }
3304
3305         bfa_fsm_send_event(tx, TX_E_STARTED);
3306 }
3307
3308 void
3309 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3310 {
3311         bfa_fsm_send_event(tx, TX_E_STOPPED);
3312 }
3313
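/* Bandwidth-update AEN: send TX_E_BW_UPDATE to every active Tx. */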
3314 void
3315 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3316 {
3317         struct bna_tx *tx;
3318
3319         list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3320                 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3321 }
3322
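/*
 * Describe the memory and interrupt resources a Tx with num_txq TxQs
 * needs: TCBs, hardware and software queue page tables, the queue
 * pages themselves, IB index segments and one MSI-X vector per TxQ.
 * The queue size is rounded up to whole pages; for example, assuming a
 * 4 KiB PAGE_SIZE and a 64-byte BFI_TXQ_WI_SIZE (the work-item size is
 * an assumption here), txq_depth = 512 gives q_size = 32 KiB and
 * page_count = 8.
 */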
3323 void
3324 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3325 {
3326         u32 q_size;
3327         u32 page_count;
3328         struct bna_mem_info *mem_info;
3329
3330         res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3331         mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3332         mem_info->mem_type = BNA_MEM_T_KVA;
3333         mem_info->len = sizeof(struct bna_tcb);
3334         mem_info->num = num_txq;
3335
3336         q_size = txq_depth * BFI_TXQ_WI_SIZE;
3337         q_size = ALIGN(q_size, PAGE_SIZE);
3338         page_count = q_size >> PAGE_SHIFT;
3339
3340         res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3341         mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3342         mem_info->mem_type = BNA_MEM_T_DMA;
3343         mem_info->len = page_count * sizeof(struct bna_dma_addr);
3344         mem_info->num = num_txq;
3345
3346         res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3347         mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3348         mem_info->mem_type = BNA_MEM_T_KVA;
3349         mem_info->len = page_count * sizeof(void *);
3350         mem_info->num = num_txq;
3351
3352         res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3353         mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3354         mem_info->mem_type = BNA_MEM_T_DMA;
3355         mem_info->len = PAGE_SIZE * page_count;
3356         mem_info->num = num_txq;
3357
3358         res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3359         mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3360         mem_info->mem_type = BNA_MEM_T_DMA;
3361         mem_info->len = BFI_IBIDX_SIZE;
3362         mem_info->num = num_txq;
3363
3364         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3365         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3366                         BNA_INTR_T_MSIX;
3367         res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3368 }
3369
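/*
 * Allocate a Tx and its TxQs from the free pools, record the bnad
 * callbacks, program each TxQ's IB and TCB from the resources obtained
 * via bna_tx_res_req() and leave the Tx in the stopped state.
 */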
3370 struct bna_tx *
3371 bna_tx_create(struct bna *bna, struct bnad *bnad,
3372                 struct bna_tx_config *tx_cfg,
3373                 const struct bna_tx_event_cbfn *tx_cbfn,
3374                 struct bna_res_info *res_info, void *priv)
3375 {
3376         struct bna_intr_info *intr_info;
3377         struct bna_tx_mod *tx_mod = &bna->tx_mod;
3378         struct bna_tx *tx;
3379         struct bna_txq *txq;
3380         int page_count;
3381         int i;
3382
3383         intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3384         page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3385                                         PAGE_SIZE;
3386
3387         /*
3388          * Get resources
3389          */
3390
3391         if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3392                 return NULL;
3393
3394         /* Tx */
3395
3396         tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3397         if (!tx)
3398                 return NULL;
3399         tx->bna = bna;
3400         tx->priv = priv;
3401
3402         /* TxQs */
3403
3404         INIT_LIST_HEAD(&tx->txq_q);
3405         for (i = 0; i < tx_cfg->num_txq; i++) {
3406                 if (list_empty(&tx_mod->txq_free_q))
3407                         goto err_return;
3408
3409                 txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
3410                 list_move_tail(&txq->qe, &tx->txq_q);
3411                 txq->tx = tx;
3412         }
3413
3414         /*
3415          * Initialize
3416          */
3417
3418         /* Tx */
3419
3420         tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3421         tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3422         /* The following callbacks are mandatory */
3423         tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3424         tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3425         tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3426
3427         list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3428
3429         tx->num_txq = tx_cfg->num_txq;
3430
3431         tx->flags = 0;
3432         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3433                 switch (tx->type) {
3434                 case BNA_TX_T_REGULAR:
3435                         if (!(tx->bna->tx_mod.flags &
3436                                 BNA_TX_MOD_F_ENET_LOOPBACK))
3437                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3438                         break;
3439                 case BNA_TX_T_LOOPBACK:
3440                         if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3441                                 tx->flags |= BNA_TX_F_ENET_STARTED;
3442                         break;
3443                 }
3444         }
3445
3446         /* TxQ */
3447
3448         i = 0;
3449         list_for_each_entry(txq, &tx->txq_q, qe) {
3450                 txq->tcb = (struct bna_tcb *)
3451                 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3452                 txq->tx_packets = 0;
3453                 txq->tx_bytes = 0;
3454
3455                 /* IB */
3456                 txq->ib.ib_seg_host_addr.lsb =
3457                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3458                 txq->ib.ib_seg_host_addr.msb =
3459                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3460                 txq->ib.ib_seg_host_addr_kva =
3461                 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3462                 txq->ib.intr_type = intr_info->intr_type;
3463                 txq->ib.intr_vector = (intr_info->num == 1) ?
3464                                         intr_info->idl[0].vector :
3465                                         intr_info->idl[i].vector;
3466                 if (intr_info->intr_type == BNA_INTR_T_INTX)
3467                         txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3468                 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3469                 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3470                 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3471
3472                 /* TCB */
3473
3474                 txq->tcb->q_depth = tx_cfg->txq_depth;
3475                 txq->tcb->unmap_q = (void *)
3476                 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3477                 txq->tcb->hw_consumer_index =
3478                         (u32 *)txq->ib.ib_seg_host_addr_kva;
3479                 txq->tcb->i_dbell = &txq->ib.door_bell;
3480                 txq->tcb->intr_type = txq->ib.intr_type;
3481                 txq->tcb->intr_vector = txq->ib.intr_vector;
3482                 txq->tcb->txq = txq;
3483                 txq->tcb->bnad = bnad;
3484                 txq->tcb->id = i;
3485
3486                 /* QPT, SWQPT, Pages */
3487                 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3488                         &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3489                         &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3490                         &res_info[BNA_TX_RES_MEM_T_PAGE].
3491                                   res_u.mem_info.mdl[i]);
3492
3493                 /* Callback to bnad for setting up TCB */
3494                 if (tx->tcb_setup_cbfn)
3495                         (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3496
3497                 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3498                         txq->priority = txq->tcb->id;
3499                 else
3500                         txq->priority = tx_mod->default_prio;
3501
3502                 i++;
3503         }
3504
3505         tx->txf_vlan_id = 0;
3506
3507         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3508
3509         tx_mod->rid_mask |= BIT(tx->rid);
3510
3511         return tx;
3512
3513 err_return:
3514         bna_tx_free(tx);
3515         return NULL;
3516 }
3517
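/*
 * Rough lifecycle as driven by the bnad layer (a sketch only; the
 * exact call sites live in bnad.c, and the hard-cleanup enumerator
 * name BNA_HARD_CLEANUP is assumed from enum bna_cleanup_type):
 *
 *      bna_tx_res_req(num_txq, txq_depth, res_info);
 *      tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *      bna_tx_enable(tx);
 *      ...
 *      bna_tx_disable(tx, BNA_HARD_CLEANUP, cbfn);
 *      bna_tx_cleanup_complete(tx);    (once the tx_cleanup callback has run)
 *      bna_tx_destroy(tx);
 */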
3518 void
3519 bna_tx_destroy(struct bna_tx *tx)
3520 {
3521         struct bna_txq *txq;
3522
3523         list_for_each_entry(txq, &tx->txq_q, qe)
3524                 if (tx->tcb_destroy_cbfn)
3525                         (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3526
3527         tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3528         bna_tx_free(tx);
3529 }
3530
3531 void
3532 bna_tx_enable(struct bna_tx *tx)
3533 {
3534         if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3535                 return;
3536
3537         tx->flags |= BNA_TX_F_ENABLED;
3538
3539         if (tx->flags & BNA_TX_F_ENET_STARTED)
3540                 bfa_fsm_send_event(tx, TX_E_START);
3541 }
3542
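/*
 * Soft cleanup just invokes the callback; hard cleanup clears
 * BNA_TX_F_ENABLED and sends TX_E_STOP, with cbfn saved so it runs
 * once the Tx has actually stopped.
 */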
3543 void
3544 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3545                 void (*cbfn)(void *, struct bna_tx *))
3546 {
3547         if (type == BNA_SOFT_CLEANUP) {
3548                 (*cbfn)(tx->bna->bnad, tx);
3549                 return;
3550         }
3551
3552         tx->stop_cbfn = cbfn;
3553         tx->stop_cbarg = tx->bna->bnad;
3554
3555         tx->flags &= ~BNA_TX_F_ENABLED;
3556
3557         bfa_fsm_send_event(tx, TX_E_STOP);
3558 }
3559
3560 void
3561 bna_tx_cleanup_complete(struct bna_tx *tx)
3562 {
3563         bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3564 }
3565
3566 static void
3567 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3568 {
3569         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3570
3571         bfa_wc_down(&tx_mod->tx_stop_wc);
3572 }
3573
3574 static void
3575 bna_tx_mod_cb_tx_stopped_all(void *arg)
3576 {
3577         struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3578
3579         if (tx_mod->stop_cbfn)
3580                 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3581         tx_mod->stop_cbfn = NULL;
3582 }
3583
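/*
 * Carve the Tx and TxQ arrays out of the module resources and seed the
 * free lists; rid i is simply the array index.
 */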
3584 void
3585 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3586                 struct bna_res_info *res_info)
3587 {
3588         int i;
3589
3590         tx_mod->bna = bna;
3591         tx_mod->flags = 0;
3592
3593         tx_mod->tx = (struct bna_tx *)
3594                 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3595         tx_mod->txq = (struct bna_txq *)
3596                 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3597
3598         INIT_LIST_HEAD(&tx_mod->tx_free_q);
3599         INIT_LIST_HEAD(&tx_mod->tx_active_q);
3600
3601         INIT_LIST_HEAD(&tx_mod->txq_free_q);
3602
3603         for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3604                 tx_mod->tx[i].rid = i;
3605                 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3606                 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3607         }
3608
3609         tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3610         tx_mod->default_prio = 0;
3611         tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3612         tx_mod->iscsi_prio = -1;
3613 }
3614
3615 void
3616 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3617 {
3618         tx_mod->bna = NULL;
3619 }
3620
3621 void
3622 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3623 {
3624         struct bna_tx *tx;
3625
3626         tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3627         if (type == BNA_TX_T_LOOPBACK)
3628                 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3629
3630         list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3631                 if (tx->type == type)
3632                         bna_tx_start(tx);
3633 }
3634
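/*
 * Stop every active Tx of the given type, using a wait counter so that
 * the enet stop callback runs only after the last Tx has stopped.
 */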
3635 void
3636 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3637 {
3638         struct bna_tx *tx;
3639
3640         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3641         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3642
3643         tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3644
3645         bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3646
3647         list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3648                 if (tx->type == type) {
3649                         bfa_wc_up(&tx_mod->tx_stop_wc);
3650                         bna_tx_stop(tx);
3651                 }
3652
3653         bfa_wc_wait(&tx_mod->tx_stop_wc);
3654 }
3655
3656 void
3657 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3658 {
3659         struct bna_tx *tx;
3660
3661         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3662         tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3663
3664         list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3665                 bna_tx_fail(tx);
3666 }
3667
3668 void
3669 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3670 {
3671         struct bna_txq *txq;
3672
3673         list_for_each_entry(txq, &tx->txq_q, qe)
3674                 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3675 }