]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/scsi/bfa/bfa_svc.c
[SCSI] bfa: remove all OS wrappers
[karo-tx-linux.git] / drivers / scsi / bfa / bfa_svc.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 #include "bfad_drv.h"
19 #include "bfa_plog.h"
20 #include "bfa_cs.h"
21 #include "bfa_modules.h"
22
23 BFA_TRC_FILE(HAL, FCXP);
24 BFA_MODULE(fcxp);
25 BFA_MODULE(sgpg);
26 BFA_MODULE(lps);
27 BFA_MODULE(fcport);
28 BFA_MODULE(rport);
29 BFA_MODULE(uf);
30
31 /*
32  * LPS related definitions
33  */
34 #define BFA_LPS_MIN_LPORTS      (1)
35 #define BFA_LPS_MAX_LPORTS      (256)
36
37 /*
38  * Maximum Vports supported per physical port or vf.
39  */
40 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
41 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
42
43
44 /*
45  * FC PORT related definitions
46  */
47 /*
48  * The port is considered disabled if corresponding physical port or IOC are
49  * disabled explicitly
50  */
51 #define BFA_PORT_IS_DISABLED(bfa) \
52         ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53         (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
54
55
56 /*
57  * BFA port state machine events
58  */
enum bfa_fcport_sm_event {
        BFA_FCPORT_SM_START     = 1,    /*  start port state machine    */
        BFA_FCPORT_SM_STOP      = 2,    /*  stop port state machine     */
        BFA_FCPORT_SM_ENABLE    = 3,    /*  enable port         */
        BFA_FCPORT_SM_DISABLE   = 4,    /*  disable port state machine */
        BFA_FCPORT_SM_FWRSP     = 5,    /*  firmware enable/disable rsp */
        BFA_FCPORT_SM_LINKUP    = 6,    /*  firmware linkup event       */
        BFA_FCPORT_SM_LINKDOWN  = 7,    /*  firmware linkdown event     */
        BFA_FCPORT_SM_QRESUME   = 8,    /*  CQ space available  */
        BFA_FCPORT_SM_HWFAIL    = 9,    /*  IOC h/w failure             */
};
70
71 /*
72  * BFA port link notification state machine events
73  */
74
75 enum bfa_fcport_ln_sm_event {
76         BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
77         BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
78         BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
79 };
80
81 /*
82  * RPORT related definitions
83  */
/*
 * Deliver the rport-offline callback: directly when invoked from FCS
 * context ((__rp)->bfa->fcs set), otherwise deferred via the hardware
 * completion-callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {                                 \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_offline((__rp)->rport_drv);      \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                                __bfa_cb_rport_offline, (__rp));      \
        }                                                               \
} while (0)
92
/*
 * Deliver the rport-online callback: directly when invoked from FCS
 * context, otherwise deferred via the hardware completion-callback queue.
 */
#define bfa_rport_online_cb(__rp) do {                                  \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_online((__rp)->rport_drv);      \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                                  __bfa_cb_rport_online, (__rp));      \
                }                                                       \
} while (0)
101
102 /*
103  * forward declarations FCXP related functions
104  */
105 static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
106 static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
107                                 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
108 static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
109                                 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
110 static void     bfa_fcxp_qresume(void *cbarg);
111 static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
112                                 struct bfi_fcxp_send_req_s *send_req);
113
114 /*
115  * forward declarations for LPS functions
116  */
117 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
118                                 u32 *dm_len);
119 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
120                                 struct bfa_iocfc_cfg_s *cfg,
121                                 struct bfa_meminfo_s *meminfo,
122                                 struct bfa_pcidev_s *pcidev);
123 static void bfa_lps_detach(struct bfa_s *bfa);
124 static void bfa_lps_start(struct bfa_s *bfa);
125 static void bfa_lps_stop(struct bfa_s *bfa);
126 static void bfa_lps_iocdisable(struct bfa_s *bfa);
127 static void bfa_lps_login_rsp(struct bfa_s *bfa,
128                                 struct bfi_lps_login_rsp_s *rsp);
129 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
130                                 struct bfi_lps_logout_rsp_s *rsp);
131 static void bfa_lps_reqq_resume(void *lps_arg);
132 static void bfa_lps_free(struct bfa_lps_s *lps);
133 static void bfa_lps_send_login(struct bfa_lps_s *lps);
134 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
135 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
136 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
137 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
138
139 /*
140  * forward declaration for LPS state machine
141  */
142 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
143 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
144 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
145                                         event);
146 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
147 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
148 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
149                                         event);
150
151 /*
152  * forward declaration for FC Port functions
153  */
154 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
155 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
156 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
157 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
158 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
159 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
160 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
161                         enum bfa_port_linkstate event, bfa_boolean_t trunk);
162 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
163                                 enum bfa_port_linkstate event);
164 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
165 static void bfa_fcport_stats_get_timeout(void *cbarg);
166 static void bfa_fcport_stats_clr_timeout(void *cbarg);
167 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
168
169 /*
170  * forward declaration for FC PORT state machine
171  */
172 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
173                                         enum bfa_fcport_sm_event event);
174 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
175                                         enum bfa_fcport_sm_event event);
176 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
177                                         enum bfa_fcport_sm_event event);
178 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
179                                         enum bfa_fcport_sm_event event);
180 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
181                                         enum bfa_fcport_sm_event event);
182 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
183                                         enum bfa_fcport_sm_event event);
184 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
185                                         enum bfa_fcport_sm_event event);
186 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
187                                         enum bfa_fcport_sm_event event);
188 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
189                                         enum bfa_fcport_sm_event event);
190 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
191                                         enum bfa_fcport_sm_event event);
192 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
193                                         enum bfa_fcport_sm_event event);
194 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
195                                         enum bfa_fcport_sm_event event);
196
197 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
198                                         enum bfa_fcport_ln_sm_event event);
199 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
200                                         enum bfa_fcport_ln_sm_event event);
201 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
202                                         enum bfa_fcport_ln_sm_event event);
203 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
204                                         enum bfa_fcport_ln_sm_event event);
205 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
206                                         enum bfa_fcport_ln_sm_event event);
207 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
208                                         enum bfa_fcport_ln_sm_event event);
209 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
210                                         enum bfa_fcport_ln_sm_event event);
211
/*
 * Map each FC port state-machine handler to its externally visible
 * bfa_port state.  Note both iocdown and iocfail report as IOCDOWN.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
        {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
        {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
        {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
        {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
        {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
        {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
        {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
        {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
        {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
226
227
228 /*
229  * forward declaration for RPORT related functions
230  */
231 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
232 static void             bfa_rport_free(struct bfa_rport_s *rport);
233 static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
234 static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
235 static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
236 static void             __bfa_cb_rport_online(void *cbarg,
237                                                 bfa_boolean_t complete);
238 static void             __bfa_cb_rport_offline(void *cbarg,
239                                                 bfa_boolean_t complete);
240
241 /*
242  * forward declaration for RPORT state machine
243  */
244 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
245                                         enum bfa_rport_event event);
246 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
247                                         enum bfa_rport_event event);
248 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
249                                         enum bfa_rport_event event);
250 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
251                                         enum bfa_rport_event event);
252 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
253                                         enum bfa_rport_event event);
254 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
255                                         enum bfa_rport_event event);
256 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
257                                         enum bfa_rport_event event);
258 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
259                                         enum bfa_rport_event event);
260 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
261                                         enum bfa_rport_event event);
262 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
263                                         enum bfa_rport_event event);
264 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
265                                         enum bfa_rport_event event);
266 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
267                                         enum bfa_rport_event event);
268 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
269                                         enum bfa_rport_event event);
270
271 /*
272  * PLOG related definitions
273  */
274 static int
275 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
276 {
277         if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
278                 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
279                 return 1;
280
281         if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
282                 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
283                 return 1;
284
285         return 0;
286 }
287
288 static u64
289 bfa_get_log_time(void)
290 {
291         u64 system_time = 0;
292         struct timeval tv;
293         do_gettimeofday(&tv);
294
295         /* We are interested in seconds only. */
296         system_time = tv.tv_sec;
297         return system_time;
298 }
299
300 static void
301 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
302 {
303         u16 tail;
304         struct bfa_plog_rec_s *pl_recp;
305
306         if (plog->plog_enabled == 0)
307                 return;
308
309         if (plkd_validate_logrec(pl_rec)) {
310                 bfa_assert(0);
311                 return;
312         }
313
314         tail = plog->tail;
315
316         pl_recp = &(plog->plog_recs[tail]);
317
318         memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
319
320         pl_recp->tv = bfa_get_log_time();
321         BFA_PL_LOG_REC_INCR(plog->tail);
322
323         if (plog->head == plog->tail)
324                 BFA_PL_LOG_REC_INCR(plog->head);
325 }
326
327 void
328 bfa_plog_init(struct bfa_plog_s *plog)
329 {
330         memset((char *)plog, 0, sizeof(struct bfa_plog_s));
331
332         memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
333         plog->head = plog->tail = 0;
334         plog->plog_enabled = 1;
335 }
336
337 void
338 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
339                 enum bfa_plog_eid event,
340                 u16 misc, char *log_str)
341 {
342         struct bfa_plog_rec_s  lp;
343
344         if (plog->plog_enabled) {
345                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
346                 lp.mid = mid;
347                 lp.eid = event;
348                 lp.log_type = BFA_PL_LOG_TYPE_STRING;
349                 lp.misc = misc;
350                 strncpy(lp.log_entry.string_log, log_str,
351                         BFA_PL_STRING_LOG_SZ - 1);
352                 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
353                 bfa_plog_add(plog, &lp);
354         }
355 }
356
357 void
358 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
359                 enum bfa_plog_eid event,
360                 u16 misc, u32 *intarr, u32 num_ints)
361 {
362         struct bfa_plog_rec_s  lp;
363         u32 i;
364
365         if (num_ints > BFA_PL_INT_LOG_SZ)
366                 num_ints = BFA_PL_INT_LOG_SZ;
367
368         if (plog->plog_enabled) {
369                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
370                 lp.mid = mid;
371                 lp.eid = event;
372                 lp.log_type = BFA_PL_LOG_TYPE_INT;
373                 lp.misc = misc;
374
375                 for (i = 0; i < num_ints; i++)
376                         lp.log_entry.int_log[i] = intarr[i];
377
378                 lp.log_num_ints = (u8) num_ints;
379
380                 bfa_plog_add(plog, &lp);
381         }
382 }
383
384 void
385 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
386                         enum bfa_plog_eid event,
387                         u16 misc, struct fchs_s *fchdr)
388 {
389         struct bfa_plog_rec_s  lp;
390         u32     *tmp_int = (u32 *) fchdr;
391         u32     ints[BFA_PL_INT_LOG_SZ];
392
393         if (plog->plog_enabled) {
394                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
395
396                 ints[0] = tmp_int[0];
397                 ints[1] = tmp_int[1];
398                 ints[2] = tmp_int[4];
399
400                 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
401         }
402 }
403
404 void
405 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
406                       enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
407                       u32 pld_w0)
408 {
409         struct bfa_plog_rec_s  lp;
410         u32     *tmp_int = (u32 *) fchdr;
411         u32     ints[BFA_PL_INT_LOG_SZ];
412
413         if (plog->plog_enabled) {
414                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
415
416                 ints[0] = tmp_int[0];
417                 ints[1] = tmp_int[1];
418                 ints[2] = tmp_int[4];
419                 ints[3] = pld_w0;
420
421                 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
422         }
423 }
424
425
426 /*
427  *  fcxp_pvt BFA FCXP private functions
428  */
429
/*
 * Carve the FCXP request and response payload buffers out of the
 * module's DMA-able memory region.  The region cursor (kva/pa pair in
 * *mi) is advanced past both pools; statement order matters because
 * each pool starts where the previous carve left the cursor.
 */
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
        u8             *dm_kva = NULL;
        u64     dm_pa;
        u32     buf_pool_sz;

        dm_kva = bfa_meminfo_dma_virt(mi);
        dm_pa = bfa_meminfo_dma_phys(mi);

        buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

        /*
         * Initialize the fcxp req payload list
         */
        mod->req_pld_list_kva = dm_kva;
        mod->req_pld_list_pa = dm_pa;
        dm_kva += buf_pool_sz;
        dm_pa += buf_pool_sz;
        memset(mod->req_pld_list_kva, 0, buf_pool_sz);

        /*
         * Initialize the fcxp rsp payload list
         */
        buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
        mod->rsp_pld_list_kva = dm_kva;
        mod->rsp_pld_list_pa = dm_pa;
        dm_kva += buf_pool_sz;
        dm_pa += buf_pool_sz;
        memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

        /* Publish the advanced cursor back to the shared meminfo. */
        bfa_meminfo_dma_virt(mi) = dm_kva;
        bfa_meminfo_dma_phys(mi) = dm_pa;
}
464
465 static void
466 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
467 {
468         u16     i;
469         struct bfa_fcxp_s *fcxp;
470
471         fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
472         memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
473
474         INIT_LIST_HEAD(&mod->fcxp_free_q);
475         INIT_LIST_HEAD(&mod->fcxp_active_q);
476
477         mod->fcxp_list = fcxp;
478
479         for (i = 0; i < mod->num_fcxps; i++) {
480                 fcxp->fcxp_mod = mod;
481                 fcxp->fcxp_tag = i;
482
483                 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
484                 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
485                 fcxp->reqq_waiting = BFA_FALSE;
486
487                 fcxp = fcxp + 1;
488         }
489
490         bfa_meminfo_kva(mi) = (void *)fcxp;
491 }
492
493 static void
494 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
495                  u32 *dm_len)
496 {
497         u16     num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
498
499         if (num_fcxp_reqs == 0)
500                 return;
501
502         /*
503          * Account for req/rsp payload
504          */
505         *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
506         if (cfg->drvcfg.min_cfg)
507                 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
508         else
509                 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
510
511         /*
512          * Account for fcxp structs
513          */
514         *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
515 }
516
/*
 * FCXP module attach: initialize the module state, record the payload
 * buffer sizes, then claim payload DMA memory and fcxp structures from
 * the meminfo region laid out by bfa_fcxp_meminfo().
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

        memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
        mod->bfa = bfa;
        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

        /*
         * Initialize FCXP request and response payload sizes.
         * Responses use the large buffer unless min_cfg — must mirror
         * the sizing logic in bfa_fcxp_meminfo().
         */
        mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
        if (!cfg->drvcfg.min_cfg)
                mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

        INIT_LIST_HEAD(&mod->wait_q);

        claim_fcxp_req_rsp_mem(mod, meminfo);
        claim_fcxps_mem(mod, meminfo);
}
539
/*
 * FCXP module detach hook — intentionally empty; module memory lives
 * in the shared meminfo region and needs no per-module teardown here.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
544
/* FCXP module start hook — intentionally a no-op. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
549
/* FCXP module stop hook — intentionally a no-op. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
554
/*
 * IOC went down: fail every active fcxp with BFA_STATUS_IOC_FAILURE.
 * Internally-driven fcxps (caller == NULL) are completed and freed
 * inline; caller-owned ones are completed via the deferred callback
 * queue so the owner frees them.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s *fcxp;
        struct list_head              *qe, *qen;

        /* _safe variant: entries may be unlinked during the walk. */
        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
                if (fcxp->caller == NULL) {
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
                        bfa_fcxp_free(fcxp);
                } else {
                        fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                     __bfa_fcxp_send_cbfn, fcxp);
                }
        }
}
575
576 static struct bfa_fcxp_s *
577 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
578 {
579         struct bfa_fcxp_s *fcxp;
580
581         bfa_q_deq(&fm->fcxp_free_q, &fcxp);
582
583         if (fcxp)
584                 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
585
586         return fcxp;
587 }
588
589 static void
590 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
591                struct bfa_s *bfa,
592                u8 *use_ibuf,
593                u32 *nr_sgles,
594                bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
595                bfa_fcxp_get_sglen_t *r_sglen_cbfn,
596                struct list_head *r_sgpg_q,
597                int n_sgles,
598                bfa_fcxp_get_sgaddr_t sga_cbfn,
599                bfa_fcxp_get_sglen_t sglen_cbfn)
600 {
601
602         bfa_assert(bfa != NULL);
603
604         bfa_trc(bfa, fcxp->fcxp_tag);
605
606         if (n_sgles == 0) {
607                 *use_ibuf = 1;
608         } else {
609                 bfa_assert(*sga_cbfn != NULL);
610                 bfa_assert(*sglen_cbfn != NULL);
611
612                 *use_ibuf = 0;
613                 *r_sga_cbfn = sga_cbfn;
614                 *r_sglen_cbfn = sglen_cbfn;
615
616                 *nr_sgles = n_sgles;
617
618                 /*
619                  * alloc required sgpgs
620                  */
621                 if (n_sgles > BFI_SGE_INLINE)
622                         bfa_assert(0);
623         }
624
625 }
626
/*
 * Initialize a freshly allocated fcxp: record the caller handle, then
 * set up the request and response directions independently via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
               void *caller, struct bfa_s *bfa, int nreq_sgles,
               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
               bfa_fcxp_get_sglen_t req_sglen_cbfn,
               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
               bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

        bfa_assert(bfa != NULL);

        bfa_trc(bfa, fcxp->fcxp_tag);

        fcxp->caller = caller;

        /* Request direction. */
        bfa_fcxp_init_reqrsp(fcxp, bfa,
                &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
                &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
                nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

        /* Response direction. */
        bfa_fcxp_init_reqrsp(fcxp, bfa,
                &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
                &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
                nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
653
/*
 * Release an fcxp.  If an allocation request is waiting, hand the
 * fcxp straight to that waiter (re-initialized with the waiter's
 * parameters) instead of returning it to the free pool; otherwise
 * move it from the active back to the free queue.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
        struct bfa_fcxp_wqe_s *wqe;

        bfa_q_deq(&mod->wait_q, &wqe);
        if (wqe) {
                bfa_trc(mod->bfa, fcxp->fcxp_tag);

                /* Re-purpose the fcxp for the waiting allocator. */
                bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
                        wqe->nrsp_sgles, wqe->req_sga_cbfn,
                        wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
                        wqe->rsp_sglen_cbfn);

                wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
                return;
        }

        /* No waiters: active -> free. */
        bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
        list_del(&fcxp->qe);
        list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
677
/*
 * Completion stub installed for fcxps whose real completion has been
 * discarded — deliberately does nothing.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
                   bfa_status_t req_status, u32 rsp_len,
                   u32 resid_len, struct fchs_s *rsp_fchs)
{
        /* discarded fcxp completion */
}
685
686 static void
687 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
688 {
689         struct bfa_fcxp_s *fcxp = cbarg;
690
691         if (complete) {
692                 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
693                                 fcxp->rsp_status, fcxp->rsp_len,
694                                 fcxp->residue_len, &fcxp->rsp_fchs);
695         } else {
696                 bfa_fcxp_free(fcxp);
697         }
698 }
699
/*
 * Firmware send-response handler: byte-swap the response fields, look
 * up the fcxp by tag, log the RX, and complete it.  Internally-driven
 * fcxps (caller == NULL) are completed and freed inline; caller-owned
 * ones have the response stashed on the fcxp and are completed via the
 * deferred callback queue (owner frees).
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s       *fcxp;
        u16             fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

        bfa_trc(bfa, fcxp_tag);

        fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

        /*
         * @todo f/w should not set residue to non-0 when everything
         *       is received.
         */
        if (fcxp_rsp->req_status == BFA_STATUS_OK)
                fcxp_rsp->residue_len = 0;
        else
                fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

        fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

        bfa_assert(fcxp->send_cbfn != NULL);

        hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

        if (fcxp->send_cbfn != NULL) {
                bfa_trc(mod->bfa, (NULL == fcxp->caller));
                if (fcxp->caller == NULL) {
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        fcxp_rsp->req_status, fcxp_rsp->rsp_len,
                                        fcxp_rsp->residue_len, &fcxp_rsp->fchs);
                        /*
                         * fcxp automatically freed on return from the callback
                         */
                        bfa_fcxp_free(fcxp);
                } else {
                        /* Stash the response for the deferred callback. */
                        fcxp->rsp_status = fcxp_rsp->req_status;
                        fcxp->rsp_len = fcxp_rsp->rsp_len;
                        fcxp->residue_len = fcxp_rsp->residue_len;
                        fcxp->rsp_fchs = fcxp_rsp->fchs;

                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                        __bfa_fcxp_send_cbfn, fcxp);
                }
        } else {
                /* Defensive: asserted above, trace if it still happens. */
                bfa_trc(bfa, (NULL == fcxp->send_cbfn));
        }
}
749
750 static void
751 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
752 {
753         union bfi_addr_u      sga_zero = { {0} };
754
755         sge->sg_len = reqlen;
756         sge->flags = BFI_SGE_DATA_LAST;
757         bfa_dma_addr_set(sge[0].sga, req_pa);
758         bfa_sge_to_be(sge);
759         sge++;
760
761         sge->sga = sga_zero;
762         sge->sg_len = reqlen;
763         sge->flags = BFI_SGE_PGDLEN;
764         bfa_sge_to_be(sge);
765 }
766
767 static void
768 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
769                  struct fchs_s *fchs)
770 {
771         /*
772          * TODO: TX ox_id
773          */
774         if (reqlen > 0) {
775                 if (fcxp->use_ireqbuf) {
776                         u32     pld_w0 =
777                                 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
778
779                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
780                                         BFA_PL_EID_TX,
781                                         reqlen + sizeof(struct fchs_s), fchs,
782                                         pld_w0);
783                 } else {
784                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
785                                         BFA_PL_EID_TX,
786                                         reqlen + sizeof(struct fchs_s),
787                                         fchs);
788                 }
789         } else {
790                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
791                                reqlen + sizeof(struct fchs_s), fchs);
792         }
793 }
794
795 static void
796 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
797                  struct bfi_fcxp_send_rsp_s *fcxp_rsp)
798 {
799         if (fcxp_rsp->rsp_len > 0) {
800                 if (fcxp->use_irspbuf) {
801                         u32     pld_w0 =
802                                 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
803
804                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
805                                               BFA_PL_EID_RX,
806                                               (u16) fcxp_rsp->rsp_len,
807                                               &fcxp_rsp->fchs, pld_w0);
808                 } else {
809                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
810                                        BFA_PL_EID_RX,
811                                        (u16) fcxp_rsp->rsp_len,
812                                        &fcxp_rsp->fchs);
813                 }
814         } else {
815                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
816                                (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
817         }
818 }
819
820 /*
 * Handler to resume sending fcxp when space is available in the CPE queue.
822  */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	/* Clear the wait flag before re-queueing the deferred request. */
	fcxp->reqq_waiting = BFA_FALSE;
	/*
	 * NOTE(review): send_req is used without a NULL check -- presumably
	 * this callback only fires once CPE queue space is available, so
	 * bfa_reqq_next() cannot fail here; confirm against the reqq
	 * wait/resume contract.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
834
835 /*
 * Queue fcxp send request to firmware.
837  */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_lpuid(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		/* Fall back to the FC maximum if the rport reports none. */
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		/* No rport (e.g. well-known address): no firmware handle. */
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			/* Only single-element SG lists are supported here. */
			bfa_assert(fcxp->nreq_sgles == 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(reqi->req_tot_len == 0);
			/*
			 * NOTE(review): the zero-length request path fills
			 * rsp_sge rather than req_sge -- verify against the
			 * BFI firmware interface whether this is intended.
			 */
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		/* Internal response buffer must fit the expected response. */
		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			/* Only single-element SG lists are supported here. */
			bfa_assert(fcxp->nrsp_sgles == 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(rspi->rsp_maxlen == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	/* Hand the message to firmware and trace the queue pointers. */
	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
918
919 /*
920  *  hal_fcxp_api BFA FCXP API
921  */
922
923 /*
924  * Allocate an FCXP instance to send a response or to send a request
925  * that has a response. Request/response buffers are allocated by caller.
926  *
927  * @param[in]   bfa             BFA bfa instance
928  * @param[in]   nreq_sgles      Number of SG elements required for request
929  *                              buffer. 0, if fcxp internal buffers are used.
930  *                              Use bfa_fcxp_get_reqbuf() to get the
931  *                              internal req buffer.
932  * @param[in]   req_sgles       SG elements describing request buffer. Will be
933  *                              copied in by BFA and hence can be freed on
934  *                              return from this function.
935  * @param[in]   get_req_sga     function ptr to be called to get a request SG
936  *                              Address (given the sge index).
937  * @param[in]   get_req_sglen   function ptr to be called to get a request SG
938  *                              len (given the sge index).
939  * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
940  *                              Address (given the sge index).
941  * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
942  *                              len (given the sge index).
943  *
944  * @return FCXP instance. NULL on failure.
945  */
946 struct bfa_fcxp_s *
947 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
948                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
949                bfa_fcxp_get_sglen_t req_sglen_cbfn,
950                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
951                bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
952 {
953         struct bfa_fcxp_s *fcxp = NULL;
954
955         bfa_assert(bfa != NULL);
956
957         fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
958         if (fcxp == NULL)
959                 return NULL;
960
961         bfa_trc(bfa, fcxp->fcxp_tag);
962
963         bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
964                         req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
965
966         return fcxp;
967 }
968
969 /*
970  * Get the internal request buffer pointer
971  *
972  * @param[in]   fcxp    BFA fcxp pointer
973  *
974  * @return              pointer to the internal request buffer
975  */
976 void *
977 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
978 {
979         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
980         void    *reqbuf;
981
982         bfa_assert(fcxp->use_ireqbuf == 1);
983         reqbuf = ((u8 *)mod->req_pld_list_kva) +
984                 fcxp->fcxp_tag * mod->req_pld_sz;
985         return reqbuf;
986 }
987
988 u32
989 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
990 {
991         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
992
993         return mod->req_pld_sz;
994 }
995
996 /*
997  * Get the internal response buffer pointer
998  *
999  * @param[in]   fcxp    BFA fcxp pointer
1000  *
1001  * @return              pointer to the internal request buffer
1002  */
1003 void *
1004 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1005 {
1006         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1007         void    *rspbuf;
1008
1009         bfa_assert(fcxp->use_irspbuf == 1);
1010
1011         rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1012                 fcxp->fcxp_tag * mod->rsp_pld_sz;
1013         return rspbuf;
1014 }
1015
1016 /*
1017  *              Free the BFA FCXP
1018  *
1019  * @param[in]   fcxp                    BFA fcxp pointer
1020  *
1021  * @return              void
1022  */
1023 void
1024 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1025 {
1026         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1027
1028         bfa_assert(fcxp != NULL);
1029         bfa_trc(mod->bfa, fcxp->fcxp_tag);
1030         bfa_fcxp_put(fcxp);
1031 }
1032
1033 /*
1034  * Send a FCXP request
1035  *
1036  * @param[in]   fcxp    BFA fcxp pointer
1037  * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
1038  * @param[in]   vf_id   virtual Fabric ID
1039  * @param[in]   lp_tag  lport tag
 * @param[in]	cts	use Continuous sequence
1041  * @param[in]   cos     fc Class of Service
1042  * @param[in]   reqlen  request length, does not include FCHS length
1043  * @param[in]   fchs    fc Header Pointer. The header content will be copied
1044  *                      in by BFA.
1045  *
1046  * @param[in]   cbfn    call back function to be called on receiving
1047  *                                                              the response
1048  * @param[in]   cbarg   arg for cbfn
1049  * @param[in]   rsp_timeout
1050  *                      response timeout
1051  *
 * @return		void
1053  */
1054 void
1055 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1056               u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1057               u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1058               void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1059 {
1060         struct bfa_s                    *bfa  = fcxp->fcxp_mod->bfa;
1061         struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
1062         struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
1063         struct bfi_fcxp_send_req_s      *send_req;
1064
1065         bfa_trc(bfa, fcxp->fcxp_tag);
1066
1067         /*
1068          * setup request/response info
1069          */
1070         reqi->bfa_rport = rport;
1071         reqi->vf_id = vf_id;
1072         reqi->lp_tag = lp_tag;
1073         reqi->class = cos;
1074         rspi->rsp_timeout = rsp_timeout;
1075         reqi->cts = cts;
1076         reqi->fchs = *fchs;
1077         reqi->req_tot_len = reqlen;
1078         rspi->rsp_maxlen = rsp_maxlen;
1079         fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1080         fcxp->send_cbarg = cbarg;
1081
1082         /*
1083          * If no room in CPE queue, wait for space in request queue
1084          */
1085         send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1086         if (!send_req) {
1087                 bfa_trc(bfa, fcxp->fcxp_tag);
1088                 fcxp->reqq_waiting = BFA_TRUE;
1089                 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1090                 return;
1091         }
1092
1093         bfa_fcxp_queue(fcxp, send_req);
1094 }
1095
1096 /*
1097  * Abort a BFA FCXP
1098  *
1099  * @param[in]   fcxp    BFA fcxp pointer
1100  *
1101  * @return              void
1102  */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented yet; trap any caller that reaches here. */
	bfa_assert(0);
	return BFA_STATUS_OK;
}
1110
1111 void
1112 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1113                bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1114                void *caller, int nreq_sgles,
1115                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1116                bfa_fcxp_get_sglen_t req_sglen_cbfn,
1117                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1118                bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1119 {
1120         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1121
1122         bfa_assert(list_empty(&mod->fcxp_free_q));
1123
1124         wqe->alloc_cbfn = alloc_cbfn;
1125         wqe->alloc_cbarg = alloc_cbarg;
1126         wqe->caller = caller;
1127         wqe->bfa = bfa;
1128         wqe->nreq_sgles = nreq_sgles;
1129         wqe->nrsp_sgles = nrsp_sgles;
1130         wqe->req_sga_cbfn = req_sga_cbfn;
1131         wqe->req_sglen_cbfn = req_sglen_cbfn;
1132         wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1133         wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1134
1135         list_add_tail(&wqe->qe, &mod->wait_q);
1136 }
1137
1138 void
1139 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1140 {
1141         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1142
1143         bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
1144         list_del(&wqe->qe);
1145 }
1146
1147 void
1148 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1149 {
1150         /*
1151          * If waiting for room in request queue, cancel reqq wait
1152          * and free fcxp.
1153          */
1154         if (fcxp->reqq_waiting) {
1155                 fcxp->reqq_waiting = BFA_FALSE;
1156                 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1157                 bfa_fcxp_free(fcxp);
1158                 return;
1159         }
1160
1161         fcxp->send_cbfn = bfa_fcxp_null_comp;
1162 }
1163
1164
1165
1166 /*
1167  *  hal_fcxp_public BFA FCXP public functions
1168  */
1169
1170 void
1171 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1172 {
1173         switch (msg->mhdr.msg_id) {
1174         case BFI_FCXP_I2H_SEND_RSP:
1175                 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1176                 break;
1177
1178         default:
1179                 bfa_trc(bfa, msg->mhdr.msg_id);
1180                 bfa_assert(0);
1181         }
1182 }
1183
1184 u32
1185 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1186 {
1187         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1188
1189         return mod->rsp_pld_sz;
1190 }
1191
1192
1193 /*
1194  *  BFA LPS state machine functions
1195  */
1196
1197 /*
1198  * Init state -- no login
1199  */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/*
		 * Send the login now if there is request-queue space,
		 * otherwise park in loginwait until space frees up.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in: complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Already logged out; nothing to do. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects a loopback and
		 * discards the lps request.  Firmware will eventually send
		 * out the timeout; just ignore the response here.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1248
1249 /*
1250  * login is in progress -- awaiting response from firmware
1251  */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Go online on success; back to init on reject/timeout. */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the waiter (fcs directly, or via callback queue). */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Link went down while waiting for the firmware response. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1290
1291 /*
1292  * login pending - awaiting space in request queue
1293  */
1294 static void
1295 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1296 {
1297         bfa_trc(lps->bfa, lps->lp_tag);
1298         bfa_trc(lps->bfa, event);
1299
1300         switch (event) {
1301         case BFA_LPS_SM_RESUME:
1302                 bfa_sm_set_state(lps, bfa_lps_sm_login);
1303                 break;
1304
1305         case BFA_LPS_SM_OFFLINE:
1306                 bfa_sm_set_state(lps, bfa_lps_sm_init);
1307                 bfa_reqq_wcancel(&lps->wqe);
1308                 break;
1309
1310         case BFA_LPS_SM_RX_CVL:
1311                 /*
1312                  * Login was not even sent out; so when getting out
1313                  * of this state, it will appear like a login retry
1314                  * after Clear virtual link
1315                  */
1316                 break;
1317
1318         default:
1319                 bfa_sm_fault(lps->bfa, event);
1320         }
1321 }
1322
1323 /*
1324  * login complete
1325  */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/*
		 * Send the logout now if there is request-queue space,
		 * otherwise park in logowait until space frees up.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* FCoE clear-virtual-link received: drop back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Link down or vport delete: silently return to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1363
1364 /*
1365  * logout in progress - awaiting firmware response
1366  */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Logout acknowledged by firmware; notify the waiter. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Link went down; abandon the pending logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1387
1388 /*
1389  * logout pending -- awaiting space in request queue
1390  */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space is available; send the deferred logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Link down while waiting; cancel the reqq wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1412
1413
1414
1415 /*
1416  *  lps_pvt BFA LPS private functions
1417  */
1418
1419 /*
1420  * return memory requirement
1421  */
1422 static void
1423 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1424         u32 *dm_len)
1425 {
1426         if (cfg->drvcfg.min_cfg)
1427                 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1428         else
1429                 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1430 }
1431
1432 /*
1433  * bfa module attach at initialization time
1434  */
1435 static void
1436 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1437         struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1438 {
1439         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1440         struct bfa_lps_s        *lps;
1441         int                     i;
1442
1443         memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1444         mod->num_lps = BFA_LPS_MAX_LPORTS;
1445         if (cfg->drvcfg.min_cfg)
1446                 mod->num_lps = BFA_LPS_MIN_LPORTS;
1447         else
1448                 mod->num_lps = BFA_LPS_MAX_LPORTS;
1449         mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1450
1451         bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1452
1453         INIT_LIST_HEAD(&mod->lps_free_q);
1454         INIT_LIST_HEAD(&mod->lps_active_q);
1455
1456         for (i = 0; i < mod->num_lps; i++, lps++) {
1457                 lps->bfa        = bfa;
1458                 lps->lp_tag     = (u8) i;
1459                 lps->reqq       = BFA_REQQ_LPS;
1460                 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1461                 list_add_tail(&lps->qe, &mod->lps_free_q);
1462         }
1463 }
1464
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* Nothing to tear down for the LPS module. */
}
1469
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* LPS requires no work at module start time. */
}
1474
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* LPS requires no work at module stop time. */
}
1479
1480 /*
1481  * IOC in disabled state -- consider all lps offline
1482  */
1483 static void
1484 bfa_lps_iocdisable(struct bfa_s *bfa)
1485 {
1486         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1487         struct bfa_lps_s        *lps;
1488         struct list_head                *qe, *qen;
1489
1490         list_for_each_safe(qe, qen, &mod->lps_active_q) {
1491                 lps = (struct bfa_lps_s *) qe;
1492                 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1493         }
1494 }
1495
1496 /*
1497  * Firmware login response
1498  */
1499 static void
1500 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1501 {
1502         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1503         struct bfa_lps_s        *lps;
1504
1505         bfa_assert(rsp->lp_tag < mod->num_lps);
1506         lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1507
1508         lps->status = rsp->status;
1509         switch (rsp->status) {
1510         case BFA_STATUS_OK:
1511                 lps->fport      = rsp->f_port;
1512                 lps->npiv_en    = rsp->npiv_en;
1513                 lps->lp_pid     = rsp->lp_pid;
1514                 lps->pr_bbcred  = be16_to_cpu(rsp->bb_credit);
1515                 lps->pr_pwwn    = rsp->port_name;
1516                 lps->pr_nwwn    = rsp->node_name;
1517                 lps->auth_req   = rsp->auth_req;
1518                 lps->lp_mac     = rsp->lp_mac;
1519                 lps->brcd_switch = rsp->brcd_switch;
1520                 lps->fcf_mac    = rsp->fcf_mac;
1521
1522                 break;
1523
1524         case BFA_STATUS_FABRIC_RJT:
1525                 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1526                 lps->lsrjt_expl = rsp->lsrjt_expl;
1527
1528                 break;
1529
1530         case BFA_STATUS_EPROTOCOL:
1531                 lps->ext_status = rsp->ext_status;
1532
1533                 break;
1534
1535         default:
1536                 /* Nothing to do with other status */
1537                 break;
1538         }
1539
1540         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1541 }
1542
1543 /*
1544  * Firmware logout response
1545  */
1546 static void
1547 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1548 {
1549         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1550         struct bfa_lps_s        *lps;
1551
1552         bfa_assert(rsp->lp_tag < mod->num_lps);
1553         lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1554
1555         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1556 }
1557
1558 /*
1559  * Firmware received a Clear virtual link request (for FCoE)
1560  */
1561 static void
1562 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1563 {
1564         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1565         struct bfa_lps_s        *lps;
1566
1567         lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1568
1569         bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1570 }
1571
1572 /*
1573  * Space is available in request queue, resume queueing request to firmware.
1574  */
1575 static void
1576 bfa_lps_reqq_resume(void *lps_arg)
1577 {
1578         struct bfa_lps_s        *lps = lps_arg;
1579
1580         bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1581 }
1582
1583 /*
1584  * lps is freed -- triggered by vport delete
1585  */
1586 static void
1587 bfa_lps_free(struct bfa_lps_s *lps)
1588 {
1589         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1590
1591         lps->lp_pid = 0;
1592         list_del(&lps->qe);
1593         list_add_tail(&lps->qe, &mod->lps_free_q);
1594 }
1595
1596 /*
1597  * send login request to firmware
1598  */
1599 static void
1600 bfa_lps_send_login(struct bfa_lps_s *lps)
1601 {
1602         struct bfi_lps_login_req_s      *m;
1603
1604         m = bfa_reqq_next(lps->bfa, lps->reqq);
1605         bfa_assert(m);
1606
1607         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1608                 bfa_lpuid(lps->bfa));
1609
1610         m->lp_tag       = lps->lp_tag;
1611         m->alpa         = lps->alpa;
1612         m->pdu_size     = cpu_to_be16(lps->pdusz);
1613         m->pwwn         = lps->pwwn;
1614         m->nwwn         = lps->nwwn;
1615         m->fdisc        = lps->fdisc;
1616         m->auth_en      = lps->auth_en;
1617
1618         bfa_reqq_produce(lps->bfa, lps->reqq);
1619 }
1620
1621 /*
1622  * send logout request to firmware
1623  */
1624 static void
1625 bfa_lps_send_logout(struct bfa_lps_s *lps)
1626 {
1627         struct bfi_lps_logout_req_s *m;
1628
1629         m = bfa_reqq_next(lps->bfa, lps->reqq);
1630         bfa_assert(m);
1631
1632         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1633                 bfa_lpuid(lps->bfa));
1634
1635         m->lp_tag    = lps->lp_tag;
1636         m->port_name = lps->pwwn;
1637         bfa_reqq_produce(lps->bfa, lps->reqq);
1638 }
1639
1640 /*
1641  * Indirect login completion handler for non-fcs
1642  */
1643 static void
1644 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1645 {
1646         struct bfa_lps_s *lps   = arg;
1647
1648         if (!complete)
1649                 return;
1650
1651         if (lps->fdisc)
1652                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1653         else
1654                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1655 }
1656
1657 /*
1658  * Login completion handler -- direct call for fcs, queue for others
1659  */
1660 static void
1661 bfa_lps_login_comp(struct bfa_lps_s *lps)
1662 {
1663         if (!lps->bfa->fcs) {
1664                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1665                         lps);
1666                 return;
1667         }
1668
1669         if (lps->fdisc)
1670                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1671         else
1672                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1673 }
1674
1675 /*
1676  * Indirect logout completion handler for non-fcs
1677  */
1678 static void
1679 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1680 {
1681         struct bfa_lps_s *lps   = arg;
1682
1683         if (!complete)
1684                 return;
1685
1686         if (lps->fdisc)
1687                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1688 }
1689
1690 /*
1691  * Logout completion handler -- direct call for fcs, queue for others
1692  */
1693 static void
1694 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1695 {
1696         if (!lps->bfa->fcs) {
1697                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1698                         lps);
1699                 return;
1700         }
1701         if (lps->fdisc)
1702                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1703 }
1704
1705 /*
1706  * Clear virtual link completion handler for non-fcs
1707  */
1708 static void
1709 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1710 {
1711         struct bfa_lps_s *lps   = arg;
1712
1713         if (!complete)
1714                 return;
1715
1716         /* Clear virtual link to base port will result in link down */
1717         if (lps->fdisc)
1718                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1719 }
1720
1721 /*
1722  * Received Clear virtual link event --direct call for fcs,
1723  * queue for others
1724  */
1725 static void
1726 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1727 {
1728         if (!lps->bfa->fcs) {
1729                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1730                         lps);
1731                 return;
1732         }
1733
1734         /* Clear virtual link to base port will result in link down */
1735         if (lps->fdisc)
1736                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1737 }
1738
1739
1740
1741 /*
1742  *  lps_public BFA LPS public functions
1743  */
1744
1745 u32
1746 bfa_lps_get_max_vport(struct bfa_s *bfa)
1747 {
1748         if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1749                 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1750         else
1751                 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1752 }
1753
1754 /*
 * Allocate a lport service tag.
1756  */
1757 struct bfa_lps_s  *
1758 bfa_lps_alloc(struct bfa_s *bfa)
1759 {
1760         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1761         struct bfa_lps_s        *lps = NULL;
1762
1763         bfa_q_deq(&mod->lps_free_q, &lps);
1764
1765         if (lps == NULL)
1766                 return NULL;
1767
1768         list_add_tail(&lps->qe, &mod->lps_active_q);
1769
1770         bfa_sm_set_state(lps, bfa_lps_sm_init);
1771         return lps;
1772 }
1773
1774 /*
1775  * Free lport service tag. This can be called anytime after an alloc.
1776  * No need to wait for any pending login/logout completions.
1777  */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
        /* Hand the delete request to the lps state machine. */
        bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1783
1784 /*
1785  * Initiate a lport login.
1786  */
1787 void
1788 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1789         wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1790 {
1791         lps->uarg       = uarg;
1792         lps->alpa       = alpa;
1793         lps->pdusz      = pdusz;
1794         lps->pwwn       = pwwn;
1795         lps->nwwn       = nwwn;
1796         lps->fdisc      = BFA_FALSE;
1797         lps->auth_en    = auth_en;
1798         bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1799 }
1800
1801 /*
1802  * Initiate a lport fdisc login.
1803  */
1804 void
1805 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1806         wwn_t nwwn)
1807 {
1808         lps->uarg       = uarg;
1809         lps->alpa       = 0;
1810         lps->pdusz      = pdusz;
1811         lps->pwwn       = pwwn;
1812         lps->nwwn       = nwwn;
1813         lps->fdisc      = BFA_TRUE;
1814         lps->auth_en    = BFA_FALSE;
1815         bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1816 }
1817
1818
1819 /*
 * Initiate a lport FDISC logout.
1821  */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
        /* Hand the logout request to the lps state machine. */
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1827
1828
1829 /*
1830  * Return lport services tag given the pid
1831  */
1832 u8
1833 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1834 {
1835         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1836         struct bfa_lps_s        *lps;
1837         int                     i;
1838
1839         for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1840                 if (lps->lp_pid == pid)
1841                         return lps->lp_tag;
1842         }
1843
1844         /* Return base port tag anyway */
1845         return 0;
1846 }
1847
1848
1849 /*
1850  * return port id assigned to the base lport
1851  */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);

        /* Tag 0 is the base lport. */
        return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}
1859
1860 /*
1861  * LPS firmware message class handler.
1862  */
/*
 * LPS firmware message class handler: dispatch an incoming BFI message
 * to the matching response/event handler.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        union bfi_lps_i2h_msg_u msg;

        bfa_trc(bfa, m->mhdr.msg_id);
        msg.msg = m;

        /* NOTE(review): these appear to be i2h (firmware-to-host) messages
         * despite the H2I-prefixed enum names -- confirm against the bfi
         * header definitions. */
        switch (m->mhdr.msg_id) {
        case BFI_LPS_H2I_LOGIN_RSP:
                bfa_lps_login_rsp(bfa, msg.login_rsp);
                break;

        case BFI_LPS_H2I_LOGOUT_RSP:
                bfa_lps_logout_rsp(bfa, msg.logout_rsp);
                break;

        case BFI_LPS_H2I_CVL_EVENT:
                bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
                break;

        default:
                /* Unexpected message id from firmware: trace and assert. */
                bfa_trc(bfa, m->mhdr.msg_id);
                bfa_assert(0);
        }
}
1889
1890 /*
1891  * FC PORT state machine functions
1892  */
/*
 * Port is uninitialized: waiting for the driver start sequence.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
                        enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /*
                 * Start event after IOC is configured and BFA is started.
                 */
                if (bfa_fcport_send_enable(fcport)) {
                        bfa_trc(fcport->bfa, BFA_TRUE);
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                } else {
                        /* No request-queue space: wait for it to drain. */
                        bfa_trc(fcport->bfa, BFA_FALSE);
                        bfa_sm_set_state(fcport,
                                        bfa_fcport_sm_enabling_qwait);
                }
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Port is persistently configured to be in enabled state. Do
                 * not change state. Port enabling is done when START event is
                 * received.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * If a port is persistently configured to be disabled, the
                 * first event will be a port disable request.
                 */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
1938
/*
 * Port enable is pending: waiting for request-queue space before the
 * enable request can be sent to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_QRESUME:
                /* Queue space became available: send the enable now. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                bfa_fcport_send_enable(fcport);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_reqq_wcancel(&fcport->reqq_wait);
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already enable is in progress.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * The enable request was never sent (still waiting for
                 * queue space), so cancel the wait and go straight to
                 * disabled -- nothing needs to be sent to firmware.
                 */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_reqq_wcancel(&fcport->reqq_wait);
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
1995
/*
 * Enable request has been sent to firmware: waiting for the response.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_FWRSP:
        case BFA_FCPORT_SM_LINKDOWN:
                /* Enable acknowledged (or link already down): link is down
                 * until a LINKUP event arrives. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
                break;

        case BFA_FCPORT_SM_LINKUP:
                bfa_fcport_update_linkinfo(fcport);
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

                bfa_assert(fcport->event_cbfn);
                bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already being enabled.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        /* No queue space; send disable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_disabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2050
/*
 * Port is enabled but the physical link is down.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_LINKUP:
                bfa_fcport_update_linkinfo(fcport);
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
                bfa_assert(fcport->event_cbfn);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
                if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
                        /* Non-FC mode: record FIP FCF discovery outcome. */
                        bfa_trc(fcport->bfa,
                                pevent->link_state.vc_fcf.fcf.fipenabled);
                        bfa_trc(fcport->bfa,
                                pevent->link_state.vc_fcf.fcf.fipfailed);

                        if (pevent->link_state.vc_fcf.fcf.fipfailed)
                                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                        BFA_PL_EID_FIP_FCF_DISC, 0,
                                        "FIP FCF Discovery Failed");
                        else
                                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                        BFA_PL_EID_FIP_FCF_DISC, 0,
                                        "FIP FCF Discovered");
                }

                bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port online: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link down event.
                 */
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already enabled.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        /* No queue space; send disable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_disabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2129
/*
 * Port is enabled and the link is up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
        enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already enabled.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        /* No queue space; send disable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_disabling_qwait);

                /* Link goes down as part of the disable. */
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port offline: WWN = %s\n", pwwn_buf);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_LINKDOWN:
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
                wwn2str(pwwn_buf, fcport->pwwn);
                /* An unexpected link loss (port/IOC not disabled) is an
                 * error; an expected one is informational. */
                if (BFA_PORT_IS_DISABLED(fcport->bfa))
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
                else
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_fcport_reset_linkinfo(fcport);
                wwn2str(pwwn_buf, fcport->pwwn);
                if (BFA_PORT_IS_DISABLED(fcport->bfa))
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
                else
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                wwn2str(pwwn_buf, fcport->pwwn);
                if (BFA_PORT_IS_DISABLED(fcport->bfa))
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
                else
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2211
/*
 * Port disable is pending: waiting for request-queue space before the
 * disable request can be sent to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
                                 enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_QRESUME:
                /* Queue space became available: send the disable now. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                bfa_fcport_send_disable(fcport);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Enable arrived while a disable is queued: toggle --
                 * disable then enable once queue space is available. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already being disabled.
                 */
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2256
/*
 * A disable followed by an enable is pending: waiting for request-queue
 * space. On resume, both requests are issued back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
                                 enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_QRESUME:
                /* Send the deferred disable, then the enable. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                bfa_fcport_send_disable(fcport);
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        /* Enable did not fit; wait for queue space again. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Enable is already part of the pending toggle. */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /* Drop the pending enable: back to plain disable-wait. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2303
/*
 * Disable request has been sent to firmware: waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_FWRSP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already being disabled.
                 */
                break;

        case BFA_FCPORT_SM_ENABLE:
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        /* No queue space; send enable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2357
/*
 * Port is disabled (firmware acknowledged the disable).
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore start event for a port that is disabled.
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_ENABLE:
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        /* No queue space; send enable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already disabled.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2405
/*
 * Port has been stopped; only a START event can re-enable it.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
                         enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        /* No queue space; send enable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);
                break;

        default:
                /*
                 * Ignore all other events.
                 */
                ;
        }
}
2428
2429 /*
2430  * Port is enabled. IOC is down/failed.
2431  */
/*
 * Port is enabled but the IOC is down/failed; a START event (after IOC
 * recovery) re-issues the enable.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
                         enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        /* No queue space; send enable once it drains. */
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);
                break;

        default:
                /*
                 * Ignore all events.
                 */
                ;
        }
}
2454
2455 /*
2456  * Port is disabled. IOC is down/failed.
2457  */
/*
 * Port is disabled and the IOC is down/failed.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
                         enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /* Port stays disabled across the restart. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Port becomes enabled, but the IOC is still down. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                /*
                 * Ignore all events.
                 */
                ;
        }
}
2480
2481 /*
2482  * Link state is down
2483  */
2484 static void
2485 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2486                 enum bfa_fcport_ln_sm_event event)
2487 {
2488         bfa_trc(ln->fcport->bfa, event);
2489
2490         switch (event) {
2491         case BFA_FCPORT_LN_SM_LINKUP:
2492                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2493                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2494                 break;
2495
2496         default:
2497                 bfa_sm_fault(ln->fcport->bfa, event);
2498         }
2499 }
2500
2501 /*
2502  * Link state is waiting for down notification
2503  */
2504 static void
2505 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2506                 enum bfa_fcport_ln_sm_event event)
2507 {
2508         bfa_trc(ln->fcport->bfa, event);
2509
2510         switch (event) {
2511         case BFA_FCPORT_LN_SM_LINKUP:
2512                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2513                 break;
2514
2515         case BFA_FCPORT_LN_SM_NOTIFICATION:
2516                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2517                 break;
2518
2519         default:
2520                 bfa_sm_fault(ln->fcport->bfa, event);
2521         }
2522 }
2523
2524 /*
2525  * Link state is waiting for down notification and there is a pending up
2526  */
2527 static void
2528 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2529                 enum bfa_fcport_ln_sm_event event)
2530 {
2531         bfa_trc(ln->fcport->bfa, event);
2532
2533         switch (event) {
2534         case BFA_FCPORT_LN_SM_LINKDOWN:
2535                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2536                 break;
2537
2538         case BFA_FCPORT_LN_SM_NOTIFICATION:
2539                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2540                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2541                 break;
2542
2543         default:
2544                 bfa_sm_fault(ln->fcport->bfa, event);
2545         }
2546 }
2547
2548 /*
2549  * Link state is up
2550  */
2551 static void
2552 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2553                 enum bfa_fcport_ln_sm_event event)
2554 {
2555         bfa_trc(ln->fcport->bfa, event);
2556
2557         switch (event) {
2558         case BFA_FCPORT_LN_SM_LINKDOWN:
2559                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2560                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2561                 break;
2562
2563         default:
2564                 bfa_sm_fault(ln->fcport->bfa, event);
2565         }
2566 }
2567
2568 /*
2569  * Link state is waiting for up notification
2570  */
2571 static void
2572 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2573                 enum bfa_fcport_ln_sm_event event)
2574 {
2575         bfa_trc(ln->fcport->bfa, event);
2576
2577         switch (event) {
2578         case BFA_FCPORT_LN_SM_LINKDOWN:
2579                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2580                 break;
2581
2582         case BFA_FCPORT_LN_SM_NOTIFICATION:
2583                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2584                 break;
2585
2586         default:
2587                 bfa_sm_fault(ln->fcport->bfa, event);
2588         }
2589 }
2590
2591 /*
2592  * Link state is waiting for up notification and there is a pending down
2593  */
2594 static void
2595 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2596                 enum bfa_fcport_ln_sm_event event)
2597 {
2598         bfa_trc(ln->fcport->bfa, event);
2599
2600         switch (event) {
2601         case BFA_FCPORT_LN_SM_LINKUP:
2602                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2603                 break;
2604
2605         case BFA_FCPORT_LN_SM_NOTIFICATION:
2606                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2607                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2608                 break;
2609
2610         default:
2611                 bfa_sm_fault(ln->fcport->bfa, event);
2612         }
2613 }
2614
2615 /*
2616  * Link state is waiting for up notification and there are pending down and up
2617  */
2618 static void
2619 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2620                         enum bfa_fcport_ln_sm_event event)
2621 {
2622         bfa_trc(ln->fcport->bfa, event);
2623
2624         switch (event) {
2625         case BFA_FCPORT_LN_SM_LINKDOWN:
2626                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2627                 break;
2628
2629         case BFA_FCPORT_LN_SM_NOTIFICATION:
2630                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2631                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2632                 break;
2633
2634         default:
2635                 bfa_sm_fault(ln->fcport->bfa, event);
2636         }
2637 }
2638
2639
2640
2641 /*
2642  *  hal_port_private
2643  */
2644
2645 static void
2646 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2647 {
2648         struct bfa_fcport_ln_s *ln = cbarg;
2649
2650         if (complete)
2651                 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2652         else
2653                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2654 }
2655
2656 /*
2657  * Send SCN notification to upper layers.
2658  * trunk - false if caller is fcport to ignore fcport event in trunked mode
2659  */
2660 static void
2661 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2662         bfa_boolean_t trunk)
2663 {
2664         if (fcport->cfg.trunked && !trunk)
2665                 return;
2666
2667         switch (event) {
2668         case BFA_PORT_LINKUP:
2669                 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2670                 break;
2671         case BFA_PORT_LINKDOWN:
2672                 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2673                 break;
2674         default:
2675                 bfa_assert(0);
2676         }
2677 }
2678
2679 static void
2680 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2681 {
2682         struct bfa_fcport_s *fcport = ln->fcport;
2683
2684         if (fcport->bfa->fcs) {
2685                 fcport->event_cbfn(fcport->event_cbarg, event);
2686                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2687         } else {
2688                 ln->ln_event = event;
2689                 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2690                         __bfa_cb_fcport_event, ln);
2691         }
2692 }
2693
2694 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2695                                                         BFA_CACHELINE_SZ))
2696
/*
 * Report the DMA memory this module needs: one cacheline-rounded statistics
 * buffer. No kernel (non-DMA) memory is required, so *ndm_len is untouched.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
2703
/*
 * Request-queue space became available; resume the port state machine,
 * which retries the deferred firmware request.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2711
2712 static void
2713 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2714 {
2715         u8              *dm_kva;
2716         u64     dm_pa;
2717
2718         dm_kva = bfa_meminfo_dma_virt(meminfo);
2719         dm_pa  = bfa_meminfo_dma_phys(meminfo);
2720
2721         fcport->stats_kva = dm_kva;
2722         fcport->stats_pa  = dm_pa;
2723         fcport->stats     = (union bfa_fcport_stats_u *) dm_kva;
2724
2725         dm_kva += FCPORT_STATS_DMA_SZ;
2726         dm_pa  += FCPORT_STATS_DMA_SZ;
2727
2728         bfa_meminfo_dma_virt(meminfo) = dm_kva;
2729         bfa_meminfo_dma_phys(meminfo) = dm_pa;
2730 }
2731
/*
 * Memory initialization: zero the fcport instance, claim its DMA statistics
 * buffer, put both state machines in their initial states and install the
 * default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	/* Zero first: everything below assumes a clean instance. */
	memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	/* maxfrsize 0 means "take the IOC default" in bfa_fcport_init(). */
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	/* Wait-element used when the request queue is full. */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2771
/*
 * Module detach hook; no per-module teardown is needed.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2776
/*
 * Called when IOC is ready. Kicks the port state machine.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2785
/*
 * Called before IOC is stopped. Stops the port state machine and resets
 * trunk state/notifies upper layers of trunk link down.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2795
/*
 * Called when IOC failure is detected. Drives the port state machine into
 * its hardware-failure state and brings the trunk down.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2807
/*
 * Refresh the cached link attributes (speed, topology, QoS, FCoE VLAN)
 * from the firmware link-state event saved in fcport->event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): ALPA is cleared here for loop topology; presumably
	 * the acquired ALPA is reported separately -- confirm. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific: VLAN arrives big-endian from firmware */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2836
/*
 * Invalidate cached link attributes (used when the link goes down).
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
}
2843
/*
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (in which case a queue-resume wait is armed and the
 * state machine will retry via bfa_fcport_qresume()).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize is sent big-endian; the struct copy above left it host
	 * order, so overwrite it explicitly. */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
2885
/*
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a queue-resume wait is armed for retry).
 */
static	bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
2921
/*
 * Copy the port/node WWNs from the IOC attribute block into the fcport.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
2931
/*
 * Send the configured TX BB credit to firmware. Best-effort: if the request
 * queue is full the update is silently dropped (only traced) -- no wait
 * element is armed, unlike enable/disable.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
2957
2958 static void
2959 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
2960         struct bfa_qos_stats_s *s)
2961 {
2962         u32     *dip = (u32 *) d;
2963         __be32  *sip = (__be32 *) s;
2964         int             i;
2965
2966         /* Now swap the 32 bit fields */
2967         for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
2968                 dip[i] = be32_to_cpu(sip[i]);
2969 }
2970
/*
 * Byte-swap firmware FCoE statistics from *s into *d. The counters are
 * 64-bit values stored as pairs of big-endian 32-bit words: each word is
 * byte-swapped, and on little-endian hosts the two halves of each pair are
 * additionally exchanged so the 64-bit value lands in host order.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	/* Step by two words = one 64-bit counter per iteration. */
	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* Little-endian: also swap the high/low 32-bit halves. */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
2990
/*
 * Deferred completion for a stats-get request. On completion, swap the DMA
 * buffer into the caller's buffer (FC QoS or FCoE layout depending on IOC
 * mode) and invoke the caller's callback; on cancellation just clear the
 * busy state.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* Report seconds elapsed since last reset. */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3021
/*
 * Stats-get timer expired before the firmware responded: cancel any pending
 * request-queue wait, mark the operation timed out, and complete the caller
 * callback via the deferred path.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3038
/*
 * Issue a stats-get request to firmware. If the request queue is full,
 * arm a wait element so this function is re-invoked when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3062
/*
 * Deferred completion for a stats-clear request. On completion, restart the
 * stats-reset timestamp and invoke the caller's callback; on cancellation
 * just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3083
/*
 * Stats-clear timer expired before the firmware responded: cancel any
 * pending request-queue wait, mark the operation timed out, and complete
 * the caller callback via the deferred path.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3100
/*
 * Issue a stats-clear request to firmware. If the request queue is full,
 * arm a wait element so this function is re-invoked when space frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3124
/*
 * Handle trunk SCN event from firmware: refresh cached trunk/link
 * attributes, log which trunk links are up, and notify upper layers if the
 * trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set <=> trunk link i is up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
		   scn->trunk_state == BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl	  = tlink->fctl;
		lattr->speed	  = tlink->speed;
		lattr->deskew	  = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed	 = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Log which of the (up to two) trunk links came up. */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3203
/*
 * IOC went down: if trunking is configured, notify upper layers of link
 * down (if the trunk was online) and reset all cached trunk link attributes
 * to their "unknown/down" defaults.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
						BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
						BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
						BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3231
3232
3233
3234 /*
3235  *  hal_port_public
3236  */
3237
/*
 * Called to initialize port attributes from IOC hardware data: WWNs,
 * maximum frame size (unless already configured), RX BB credit and the
 * supported speed. Asserts that the IOC supplied sane values.
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* maxfrsize == 0 means "not configured"; take the IOC default. */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	bfa_assert(fcport->cfg.maxfrsize);
	bfa_assert(fcport->cfg.rx_bbcredit);
	bfa_assert(fcport->speed_sup);
}
3259
/*
 * Firmware message handler: dispatches fcport I2H messages to the port
 * state machine or to the stats completion paths.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	/* Save the raw message so SM handlers can read event details. */
	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* Stale responses (old msgtag) are silently dropped. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		/* NOTE(review): reads msgtag via penable_rsp even though this
		 * is the disable response; presumably both rsp structs share
		 * the msgtag layout -- confirm against bfi definitions. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		bfa_assert(0);
	break;
	}
}
3338
3339
3340
3341 /*
3342  *  hal_port_api
3343  */
3344
/*
 * Registered callback for port events. The handler is invoked with @cbarg
 * and the link-state event whenever the port reports a link change.
 */
void
bfa_fcport_event_register(struct bfa_s *bfa,
				void (*cbfn) (void *cbarg,
				enum bfa_port_linkstate event),
				void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->event_cbfn = cbfn;
	fcport->event_cbarg = cbarg;
}
3359
3360 bfa_status_t
3361 bfa_fcport_enable(struct bfa_s *bfa)
3362 {
3363         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3364
3365         if (bfa_ioc_is_disabled(&bfa->ioc))
3366                 return BFA_STATUS_IOC_DISABLED;
3367
3368         if (fcport->diag_busy)
3369                 return BFA_STATUS_DIAG_BUSY;
3370
3371         bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3372         return BFA_STATUS_OK;
3373 }
3374
/*
 * Disable the port. Rejected while the IOC is disabled; otherwise the
 * disable event is posted to the state machine.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3385
/*
 * Configure port speed. Rejected if trunking is enabled (trunk dictates
 * speed) or if a fixed speed above the hardware-supported maximum is
 * requested.
 */
bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, speed);

	if (fcport->cfg.trunked == BFA_TRUE)
		return BFA_STATUS_TRUNK_ENABLED;
	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
		bfa_trc(bfa, fcport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	fcport->cfg.speed = speed;

	return BFA_STATUS_OK;
}
3407
3408 /*
3409  * Get current speed.
3410  */
3411 enum bfa_port_speed
3412 bfa_fcport_get_speed(struct bfa_s *bfa)
3413 {
3414         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3415
3416         return fcport->speed;
3417 }
3418
3419 /*
3420  * Configure port topology.
3421  */
3422 bfa_status_t
3423 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3424 {
3425         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3426
3427         bfa_trc(bfa, topology);
3428         bfa_trc(bfa, fcport->cfg.topology);
3429
3430         switch (topology) {
3431         case BFA_PORT_TOPOLOGY_P2P:
3432         case BFA_PORT_TOPOLOGY_LOOP:
3433         case BFA_PORT_TOPOLOGY_AUTO:
3434                 break;
3435
3436         default:
3437                 return BFA_STATUS_EINVAL;
3438         }
3439
3440         fcport->cfg.topology = topology;
3441         return BFA_STATUS_OK;
3442 }
3443
3444 /*
3445  * Get current topology.
3446  */
3447 enum bfa_port_topology
3448 bfa_fcport_get_topology(struct bfa_s *bfa)
3449 {
3450         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3451
3452         return fcport->topology;
3453 }
3454
/*
 * Configure a hard ALPA for loop topology and mark it enabled.
 */
bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, alpa);
	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_TRUE;
	fcport->cfg.hardalpa = alpa;

	return BFA_STATUS_OK;
}
3469
/*
 * Disable the hard ALPA configuration (the configured value is retained
 * but no longer used).
 */
bfa_status_t
bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}
3481
3482 bfa_boolean_t
3483 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3484 {
3485         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3486
3487         *alpa = fcport->cfg.hardalpa;
3488         return fcport->cfg.cfg_hardalpa;
3489 }
3490
3491 u8
3492 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3493 {
3494         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3495
3496         return fcport->myalpa;
3497 }
3498
/*
 * Configure the maximum receive frame size. The size must lie within the
 * FC PDU range and be either a power of two or exactly the FC maximum
 * (2112, which is not a power of two).
 */
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, fcport->cfg.maxfrsize);

	/* with in range */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/* power of 2, if not the max frame size of 2112 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	fcport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
3518
3519 u16
3520 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3521 {
3522         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3523
3524         return fcport->cfg.maxfrsize;
3525 }
3526
3527 u8
3528 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3529 {
3530         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3531
3532         return fcport->cfg.rx_bbcredit;
3533 }
3534
/*
 * Set the transmit BB credit and push it to firmware (best effort; see
 * bfa_fcport_send_txcredit()). Note the value is truncated to u8.
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	bfa_fcport_send_txcredit(fcport);
}
3543
3544 /*
3545  * Get port attributes.
3546  */
3547
3548 wwn_t
3549 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3550 {
3551         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3552         if (node)
3553                 return fcport->nwwn;
3554         else
3555                 return fcport->pwwn;
3556 }
3557
/*
 * Fill *attr with a snapshot of the port: WWNs, configuration, negotiated
 * speed/topology, beacon state and the current state-machine state
 * (overridden when the IOC is disabled or the firmware mismatches).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	/* Report SM state, unless the IOC itself is down or mismatched. */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3601
3602 #define BFA_FCPORT_STATS_TOV    1000
3603
/*
 * Fetch port statistics (FCQoS or FCoE).
 *
 * Asynchronous: the firmware reply (or the BFA_FCPORT_STATS_TOV timeout)
 * completes the request via @cbfn(@cbarg).  Only one stats operation may
 * be outstanding at a time; a concurrent request fails with
 * BFA_STATUS_DEVBUSY.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	/* claim the single stats slot and stash the completion info */
	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_ret   = stats;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3629
/*
 * Reset port statistics (FCQoS or FCoE).
 *
 * Asynchronous, mirroring bfa_fcport_get_stats(): completion is reported
 * through @cbfn(@cbarg), a timer guards against a lost firmware reply,
 * and a concurrent stats operation fails with BFA_STATUS_DEVBUSY.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3653
3654
3655 /*
3656  * Fetch port attributes.
3657  */
3658 bfa_boolean_t
3659 bfa_fcport_is_disabled(struct bfa_s *bfa)
3660 {
3661         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3662
3663         return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3664                 BFA_PORT_ST_DISABLED;
3665
3666 }
3667
3668 bfa_boolean_t
3669 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3670 {
3671         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3672
3673         return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3674
3675 }
3676
3677
3678 /*
3679  * Get default minimum ratelim speed
3680  */
3681 enum bfa_port_speed
3682 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3683 {
3684         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3685
3686         bfa_trc(bfa, fcport->cfg.trl_def_speed);
3687         return fcport->cfg.trl_def_speed;
3688
3689 }
3690
3691 bfa_boolean_t
3692 bfa_fcport_is_linkup(struct bfa_s *bfa)
3693 {
3694         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3695
3696         return  (!fcport->cfg.trunked &&
3697                  bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3698                 (fcport->cfg.trunked &&
3699                  fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3700 }
3701
/*
 * Report whether QoS is enabled in the port configuration.
 */
bfa_boolean_t
bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->cfg.qos_enabled;
}
3709
3710 /*
3711  * Rport State machine functions
3712  */
3713 /*
3714  * Beginning state, only online event expected.
3715  */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event in uninit is a driver bug */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3733
/*
 * Created state: rport allocated but not yet known to firmware.  ONLINE
 * pushes the create request to firmware (or parks in the qfull state if
 * the request queue is full); DELETE frees the rport without firmware
 * interaction since firmware never saw it.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3765
3766 /*
3767  * Waiting for rport create response from firmware.
3768  */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* firmware acked the create: rport is now fully online */
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* can't delete mid-create; remember it until FWRSP arrives */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* likewise, defer the offline until the create completes */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3802
3803 /*
3804  * Request queue is full, awaiting queue resume to send create request.
3805  */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space freed up: retry the deferred create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* create never reached firmware, so free directly;
		 * cancel the queue-wait entry first */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3843
3844 /*
3845  * Online state - normal parking state.
3846  */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		/* best-effort; no state change whether or not it was sent */
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS state-change notification delivered by the ISR via
		 * event_arg.fw_msg (valid only for the duration of this
		 * event) */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		/* NOTE(review): stored before the flow-id byte swap below —
		 * rp->qos_attr keeps the wire-order value; confirm consumers
		 * expect that */
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* convert flow ids to CPU order in place before comparing */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3911
3912 /*
3913  * Firmware rport is being deleted - awaiting f/w response.
3914  */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* firmware acked the delete: report offline to the driver */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3944
/*
 * Request queue was full when the firmware delete was attempted; waiting
 * for queue resume to (re)send the delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* still queue-waiting; the deleting_qfull state keeps the
		 * reqq wait entry alive */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3974
3975 /*
3976  * Offline state.
3977  */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* not known to firmware while offline: free directly */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4009
4010 /*
4011  * Rport is deleted, waiting for firmware response to delete.
4012  */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* delete acked by firmware: release the rport */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died: no ack will ever come, free anyway */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4036
/*
 * Rport deleted by the driver while the firmware delete is still waiting
 * for request-queue space.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* cancel the queue-wait entry before freeing the rport */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4061
4062 /*
4063  * Waiting for rport create response from firmware. A delete is pending.
4064  */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4092
4093 /*
4094  * Waiting for rport create response from firmware. Rport offline is pending.
4095  */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred offline (an
		 * fwdelete, since offline removes the rport from firmware) */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* pending offline upgraded to a pending delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4127
4128 /*
4129  * IOC h/w failed.
4130  */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* acknowledge the offline but stay in iocdisable */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered: re-create the rport in firmware */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already handling an IOC failure; nothing more to do */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4165
4166
4167
4168 /*
4169  *  bfa_rport_private BFA rport private functions
4170  */
4171
/*
 * Deferred-callback shim: notify the driver of rport online.  @complete
 * is false when the callback is being cancelled rather than executed.
 */
static void
__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rp = cbarg;

	if (complete)
		bfa_cb_rport_online(rp->rport_drv);
}
4180
/*
 * Deferred-callback shim: notify the driver of rport offline.  @complete
 * is false when the callback is being cancelled rather than executed.
 */
static void
__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rp = cbarg;

	if (complete)
		bfa_cb_rport_offline(rp->rport_drv);
}
4189
/*
 * Request-queue resume callback (registered via bfa_reqq_winit in
 * bfa_rport_attach): drives the state machine out of a *_qfull state.
 */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s	*rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
4197
/*
 * Module meminfo hook: report kernel-memory needs for the rport pool and
 * clamp the configured rport count to the minimum.  No DMA memory is
 * needed, so @dm_len is deliberately untouched.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
4207
/*
 * Module attach hook: carve the rport array out of the kva region sized
 * by bfa_rport_meminfo(), initialize every rport, and build the free
 * list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* the rport count must be a non-zero power of two */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * Tag 0 is never placed on the free list, so it is never
		 * allocated; presumably reserved — TODO confirm the
		 * firmware handle contract.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4246
/* Empty module hook required by the BFA_MODULE(rport) interface. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4251
/* Empty module hook required by the BFA_MODULE(rport) interface. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4256
/* Empty module hook required by the BFA_MODULE(rport) interface. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4261
/*
 * IOC disable hook: fail every active rport.  The _safe iterator is
 * required because HWFAIL can move an rport off the active queue
 * (e.g. states that free the rport back to the free list).
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4274
/*
 * Pop an rport from the free list and move it to the active list.
 * Returns NULL when the pool is exhausted.
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
4286
/*
 * Return an rport to the free list.  The rport must currently be on the
 * active list (asserted).
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
4296
/*
 * Build and queue a firmware rport-create request from rp->rport_info.
 * Returns BFA_FALSE (after arming the queue-wait entry so QRESUME fires
 * later) if the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz crosses the wire big-endian; other fields as stored */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4329
/*
 * Queue a firmware rport-delete request for rp->fw_handle.  Returns
 * BFA_FALSE (after arming the queue-wait entry) if the request queue is
 * full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4354
/*
 * Queue a firmware set-speed request.  Best-effort: unlike the
 * create/delete senders, a full request queue is NOT retried — the
 * request is simply traced and dropped (no queue-wait entry is armed).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4380
4381
4382
4383 /*
4384  *  bfa_rport_public
4385  */
4386
4387 /*
4388  * Rport interrupt processing.
4389  */
/*
 * Rport interrupt processing: dispatch a firmware-to-host rport message
 * to the owning rport's state machine.  The rport is located via the
 * bfa_handle (our rport_tag) echoed back by firmware.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* latch the firmware handle used by all later requests */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* fw_msg is only valid for the duration of this event */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
4426
4427
4428
4429 /*
4430  *  bfa_rport_api
4431  */
4432
/*
 * Allocate an rport from the pool and start its state machine.
 * @rport_drv is the driver-layer cookie passed back in the online /
 * offline / qos callbacks.  Returns NULL when the pool is exhausted.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	/* freshly allocated rports must be in the uninit state */
	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
4452
/*
 * Bring an rport online with the given remote-port parameters (copied,
 * so the caller's struct need not outlive this call).
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/*
	 * A zero max_frmsz is a peer-device bug: the assert flags it
	 * (NOTE(review): assuming bfa_assert warns rather than panics —
	 * confirm, otherwise the recovery below is unreachable) and the
	 * check below recovers from it.
	 */
	bfa_assert(rport_info->max_frmsz != 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4470
/*
 * Set the rport's operating speed and push it to firmware via the state
 * machine.  A concrete speed is required (auto-negotiation is not a
 * valid per-rport setting).
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4480
4481
4482 /*
4483  * SGPG related functions
4484  */
4485
4486 /*
4487  * Compute and return memory needed by FCP(im) module.
4488  */
/*
 * Module meminfo hook: report kernel and DMA memory needs for the
 * scatter-gather page pool, clamping the configured count to the
 * minimum.  The extra "+ 1" page appears to be alignment slack for the
 * BFA_SGPG_ROUNDUP done in bfa_sgpg_attach() — TODO confirm.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;

	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
}
4499
4500
/*
 * Module attach hook: carve the SG-page pool out of the meminfo regions.
 * Three parallel arrays are built — host-side descriptors (hsgpg, kva),
 * firmware-visible pages (sgpg, DMA virtual), and their physical
 * addresses — all offset by the same alignment pad so index i of each
 * refers to the same page.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* lets a u64 physical address be stored as the bfi address type */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	/* round the physical base up; apply the same pad to kva and dma-va
	 * so the three arrays stay in lockstep */
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* base must be aligned to the page-struct size */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		/* store the PA in the endianness firmware expects */
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* consume the memory used from each region */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4555
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
	/* Intentionally empty: SGPG module detach hook; nothing to release. */
}
4560
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
	/* Intentionally empty: SGPG module start hook; no start-time work. */
}
4565
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
	/* Intentionally empty: SGPG module stop hook; no stop-time work. */
}
4570
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
	/* Intentionally empty: no SGPG state needs cleanup on IOC disable. */
}
4575
4576
4577
4578 /*
4579  *  hal_sgpg_public BFA SGPG public functions
4580  */
4581
4582 bfa_status_t
4583 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4584 {
4585         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4586         struct bfa_sgpg_s *hsgpg;
4587         int i;
4588
4589         bfa_trc_fp(bfa, nsgpgs);
4590
4591         if (mod->free_sgpgs < nsgpgs)
4592                 return BFA_STATUS_ENOMEM;
4593
4594         for (i = 0; i < nsgpgs; i++) {
4595                 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4596                 bfa_assert(hsgpg);
4597                 list_add_tail(&hsgpg->qe, sgpg_q);
4598         }
4599
4600         mod->free_sgpgs -= nsgpgs;
4601         return BFA_STATUS_OK;
4602 }
4603
/*
 * Return 'nsgpg' SG pages to the module free list, then use the newly
 * freed pages to satisfy pending waiters in FIFO order.  A waiter may be
 * granted fewer pages than it asked for (partial grant); only when its
 * full request has been satisfied is it dequeued and its callback run.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	/* splice the returned pages back onto the free list */
	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* cap the grant at what is actually free (partial grant) */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		/* cannot fail: nsgpg <= mod->free_sgpgs by construction */
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* request fully satisfied: dequeue and notify */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4637
4638 void
4639 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
4640 {
4641         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4642
4643         bfa_assert(nsgpg > 0);
4644         bfa_assert(nsgpg > mod->free_sgpgs);
4645
4646         wqe->nsgpg_total = wqe->nsgpg = nsgpg;
4647
4648         /*
4649          * allocate any left to this one first
4650          */
4651         if (mod->free_sgpgs) {
4652                 /*
4653                  * no one else is waiting for SGPG
4654                  */
4655                 bfa_assert(list_empty(&mod->sgpg_wait_q));
4656                 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
4657                 wqe->nsgpg -= mod->free_sgpgs;
4658                 mod->free_sgpgs = 0;
4659         }
4660
4661         list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
4662 }
4663
4664 void
4665 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4666 {
4667         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4668
4669         bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4670         list_del(&wqe->qe);
4671
4672         if (wqe->nsgpg_total != wqe->nsgpg)
4673                 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4674                                    wqe->nsgpg_total - wqe->nsgpg);
4675 }
4676
4677 void
4678 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4679                    void *cbarg)
4680 {
4681         INIT_LIST_HEAD(&wqe->sgpg_q);
4682         wqe->cbfn = cbfn;
4683         wqe->cbarg = cbarg;
4684 }
4685
4686 /*
4687  *  UF related functions
4688  */
4689 /*
4690  *****************************************************************************
4691  * Internal functions
4692  *****************************************************************************
4693  */
4694 static void
4695 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4696 {
4697         struct bfa_uf_s   *uf = cbarg;
4698         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4699
4700         if (complete)
4701                 ufm->ufrecv(ufm->cbarg, uf);
4702 }
4703
4704 static void
4705 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4706 {
4707         u32 uf_pb_tot_sz;
4708
4709         ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
4710         ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
4711         uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
4712                                                         BFA_DMA_ALIGN_SZ);
4713
4714         bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
4715         bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
4716
4717         memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
4718 }
4719
4720 static void
4721 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4722 {
4723         struct bfi_uf_buf_post_s *uf_bp_msg;
4724         struct bfi_sge_s      *sge;
4725         union bfi_addr_u      sga_zero = { {0} };
4726         u16 i;
4727         u16 buf_len;
4728
4729         ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
4730         uf_bp_msg = ufm->uf_buf_posts;
4731
4732         for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4733              i++, uf_bp_msg++) {
4734                 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4735
4736                 uf_bp_msg->buf_tag = i;
4737                 buf_len = sizeof(struct bfa_uf_buf_s);
4738                 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4739                 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4740                             bfa_lpuid(ufm->bfa));
4741
4742                 sge = uf_bp_msg->sge;
4743                 sge[0].sg_len = buf_len;
4744                 sge[0].flags = BFI_SGE_DATA_LAST;
4745                 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
4746                 bfa_sge_to_be(sge);
4747
4748                 sge[1].sg_len = buf_len;
4749                 sge[1].flags = BFI_SGE_PGDLEN;
4750                 sge[1].sga = sga_zero;
4751                 bfa_sge_to_be(&sge[1]);
4752         }
4753
4754         /*
4755          * advance pointer beyond consumed memory
4756          */
4757         bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
4758 }
4759
4760 static void
4761 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4762 {
4763         u16 i;
4764         struct bfa_uf_s   *uf;
4765
4766         /*
4767          * Claim block of memory for UF list
4768          */
4769         ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
4770
4771         /*
4772          * Initialize UFs and queue it in UF free queue
4773          */
4774         for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
4775                 memset(uf, 0, sizeof(struct bfa_uf_s));
4776                 uf->bfa = ufm->bfa;
4777                 uf->uf_tag = i;
4778                 uf->pb_len = sizeof(struct bfa_uf_buf_s);
4779                 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
4780                 uf->buf_pa = ufm_pbs_pa(ufm, i);
4781                 list_add_tail(&uf->qe, &ufm->uf_free_q);
4782         }
4783
4784         /*
4785          * advance memory pointer
4786          */
4787         bfa_meminfo_kva(mi) = (u8 *) uf;
4788 }
4789
/*
 * Carve the UF module's memory out of the meminfo region.  Order matters:
 * the posted DMA buffers must be claimed first, because both claim_ufs()
 * and claim_uf_post_msgs() record per-buffer addresses derived from
 * ufm->uf_pbs_kva / ufm->uf_pbs_pa.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4797
4798 static void
4799 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4800 {
4801         u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4802
4803         /*
4804          * dma-able memory for UF posted bufs
4805          */
4806         *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4807                                                         BFA_DMA_ALIGN_SZ);
4808
4809         /*
4810          * kernel Virtual memory for UFs and UF buf post msg copies
4811          */
4812         *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4813         *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4814 }
4815
4816 static void
4817 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4818                   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4819 {
4820         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4821
4822         memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
4823         ufm->bfa = bfa;
4824         ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
4825         INIT_LIST_HEAD(&ufm->uf_free_q);
4826         INIT_LIST_HEAD(&ufm->uf_posted_q);
4827
4828         uf_mem_claim(ufm, meminfo);
4829 }
4830
static void
bfa_uf_detach(struct bfa_s *bfa)
{
	/* Intentionally empty: UF module detach hook; nothing to release. */
}
4835
4836 static struct bfa_uf_s *
4837 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4838 {
4839         struct bfa_uf_s   *uf;
4840
4841         bfa_q_deq(&uf_mod->uf_free_q, &uf);
4842         return uf;
4843 }
4844
4845 static void
4846 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
4847 {
4848         list_add_tail(&uf->qe, &uf_mod->uf_free_q);
4849 }
4850
4851 static bfa_status_t
4852 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
4853 {
4854         struct bfi_uf_buf_post_s *uf_post_msg;
4855
4856         uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
4857         if (!uf_post_msg)
4858                 return BFA_STATUS_FAILED;
4859
4860         memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
4861                       sizeof(struct bfi_uf_buf_post_s));
4862         bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
4863
4864         bfa_trc(ufm->bfa, uf->uf_tag);
4865
4866         list_add_tail(&uf->qe, &ufm->uf_posted_q);
4867         return BFA_STATUS_OK;
4868 }
4869
4870 static void
4871 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4872 {
4873         struct bfa_uf_s   *uf;
4874
4875         while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4876                 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
4877                         break;
4878         }
4879 }
4880
4881 static void
4882 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4883 {
4884         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4885         u16 uf_tag = m->buf_tag;
4886         struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4887         struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4888         u8 *buf = &uf_buf->d[0];
4889         struct fchs_s *fchs;
4890
4891         m->frm_len = be16_to_cpu(m->frm_len);
4892         m->xfr_len = be16_to_cpu(m->xfr_len);
4893
4894         fchs = (struct fchs_s *)uf_buf;
4895
4896         list_del(&uf->qe);      /* dequeue from posted queue */
4897
4898         uf->data_ptr = buf;
4899         uf->data_len = m->xfr_len;
4900
4901         bfa_assert(uf->data_len >= sizeof(struct fchs_s));
4902
4903         if (uf->data_len == sizeof(struct fchs_s)) {
4904                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
4905                                uf->data_len, (struct fchs_s *)buf);
4906         } else {
4907                 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
4908                 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
4909                                       BFA_PL_EID_RX, uf->data_len,
4910                                       (struct fchs_s *)buf, pld_w0);
4911         }
4912
4913         if (bfa->fcs)
4914                 __bfa_cb_uf_recv(uf, BFA_TRUE);
4915         else
4916                 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
4917 }
4918
static void
bfa_uf_stop(struct bfa_s *bfa)
{
	/* Intentionally empty: UF module stop hook; no stop-time work. */
}
4923
4924 static void
4925 bfa_uf_iocdisable(struct bfa_s *bfa)
4926 {
4927         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4928         struct bfa_uf_s *uf;
4929         struct list_head *qe, *qen;
4930
4931         list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
4932                 uf = (struct bfa_uf_s *) qe;
4933                 list_del(&uf->qe);
4934                 bfa_uf_put(ufm, uf);
4935         }
4936 }
4937
/*
 * Module start: hand every free UF buffer to the firmware.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_uf_post_all(ufm);
}
4943
4944 /*
 * Register handler for all unsolicited receive frames.
4946  *
4947  * @param[in]   bfa             BFA instance
4948  * @param[in]   ufrecv  receive handler function
4949  * @param[in]   cbarg   receive handler arg
4950  */
4951 void
4952 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
4953 {
4954         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4955
4956         ufm->ufrecv = ufrecv;
4957         ufm->cbarg = cbarg;
4958 }
4959
4960 /*
4961  *      Free an unsolicited frame back to BFA.
4962  *
4963  * @param[in]           uf              unsolicited frame to be freed
4964  *
4965  * @return None
4966  */
4967 void
4968 bfa_uf_free(struct bfa_uf_s *uf)
4969 {
4970         bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
4971         bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
4972 }
4973
4974
4975
4976 /*
4977  *  uf_pub BFA uf module public functions
4978  */
4979 void
4980 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
4981 {
4982         bfa_trc(bfa, msg->mhdr.msg_id);
4983
4984         switch (msg->mhdr.msg_id) {
4985         case BFI_UF_I2H_FRM_RCVD:
4986                 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
4987                 break;
4988
4989         default:
4990                 bfa_trc(bfa, msg->mhdr.msg_id);
4991                 bfa_assert(0);
4992         }
4993 }
4994
4995