]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/scsi/bfa/bfa_svc.c
ath9k: add AR9580 support
[karo-tx-linux.git] / drivers / scsi / bfa / bfa_svc.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 #include "bfad_drv.h"
19 #include "bfa_plog.h"
20 #include "bfa_cs.h"
21 #include "bfa_modules.h"
22
23 BFA_TRC_FILE(HAL, FCXP);
24 BFA_MODULE(fcdiag);
25 BFA_MODULE(fcxp);
26 BFA_MODULE(sgpg);
27 BFA_MODULE(lps);
28 BFA_MODULE(fcport);
29 BFA_MODULE(rport);
30 BFA_MODULE(uf);
31
32 /*
33  * LPS related definitions
34  */
35 #define BFA_LPS_MIN_LPORTS      (1)
36 #define BFA_LPS_MAX_LPORTS      (256)
37
38 /*
39  * Maximum Vports supported per physical port or vf.
40  */
41 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
42 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
43
44
45 /*
46  * FC PORT related definitions
47  */
48 /*
49  * The port is considered disabled if corresponding physical port or IOC are
50  * disabled explicitly
51  */
52 #define BFA_PORT_IS_DISABLED(bfa) \
53         ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
54         (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
55
56 /*
57  * BFA port state machine events
58  */
enum bfa_fcport_sm_event {
        BFA_FCPORT_SM_START     = 1,    /* start port state machine     */
        BFA_FCPORT_SM_STOP      = 2,    /* stop port state machine      */
        BFA_FCPORT_SM_ENABLE    = 3,    /* enable port                  */
        BFA_FCPORT_SM_DISABLE   = 4,    /* disable port state machine   */
        BFA_FCPORT_SM_FWRSP     = 5,    /* firmware enable/disable rsp  */
        BFA_FCPORT_SM_LINKUP    = 6,    /* firmware linkup event        */
        BFA_FCPORT_SM_LINKDOWN  = 7,    /* firmware linkdown event      */
        BFA_FCPORT_SM_QRESUME   = 8,    /* CQ space available           */
        BFA_FCPORT_SM_HWFAIL    = 9,    /* IOC h/w failure              */
};
70
71 /*
72  * BFA port link notification state machine events
73  */
74
75 enum bfa_fcport_ln_sm_event {
76         BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
77         BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
78         BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
79 };
80
81 /*
82  * RPORT related definitions
83  */
/*
 * Deliver the rport offline callback: called directly when
 * (__rp)->bfa->fcs is set, otherwise deferred through the
 * completion-callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {                                 \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_offline((__rp)->rport_drv);      \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                                __bfa_cb_rport_offline, (__rp));      \
        }                                                               \
} while (0)
92
/*
 * Deliver the rport online callback: called directly when
 * (__rp)->bfa->fcs is set, otherwise deferred through the
 * completion-callback queue.
 */
#define bfa_rport_online_cb(__rp) do {                                  \
        if ((__rp)->bfa->fcs) {                                         \
                bfa_cb_rport_online((__rp)->rport_drv);                 \
        } else {                                                        \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                             __bfa_cb_rport_online, (__rp));            \
        }                                                               \
} while (0)
101
102 /*
103  * forward declarations FCXP related functions
104  */
105 static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
106 static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
107                                 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
108 static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
109                                 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
110 static void     bfa_fcxp_qresume(void *cbarg);
111 static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
112                                 struct bfi_fcxp_send_req_s *send_req);
113
114 /*
115  * forward declarations for LPS functions
116  */
117 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
118                 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
119 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
120                                 struct bfa_iocfc_cfg_s *cfg,
121                                 struct bfa_pcidev_s *pcidev);
122 static void bfa_lps_detach(struct bfa_s *bfa);
123 static void bfa_lps_start(struct bfa_s *bfa);
124 static void bfa_lps_stop(struct bfa_s *bfa);
125 static void bfa_lps_iocdisable(struct bfa_s *bfa);
126 static void bfa_lps_login_rsp(struct bfa_s *bfa,
127                                 struct bfi_lps_login_rsp_s *rsp);
128 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
129 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
130                                 struct bfi_lps_logout_rsp_s *rsp);
131 static void bfa_lps_reqq_resume(void *lps_arg);
132 static void bfa_lps_free(struct bfa_lps_s *lps);
133 static void bfa_lps_send_login(struct bfa_lps_s *lps);
134 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
135 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
136 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
137 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
138 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
139
140 /*
141  * forward declaration for LPS state machine
142  */
143 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
144 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
145 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
146                                         event);
147 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
148 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
149                                         enum bfa_lps_event event);
150 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
151 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
152                                         event);
153
154 /*
155  * forward declaration for FC Port functions
156  */
157 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
158 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
159 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
160 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
161 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
162 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
163 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
164                         enum bfa_port_linkstate event, bfa_boolean_t trunk);
165 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
166                                 enum bfa_port_linkstate event);
167 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
168 static void bfa_fcport_stats_get_timeout(void *cbarg);
169 static void bfa_fcport_stats_clr_timeout(void *cbarg);
170 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
171
172 /*
173  * forward declaration for FC PORT state machine
174  */
175 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
176                                         enum bfa_fcport_sm_event event);
177 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
178                                         enum bfa_fcport_sm_event event);
179 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
180                                         enum bfa_fcport_sm_event event);
181 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
182                                         enum bfa_fcport_sm_event event);
183 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
184                                         enum bfa_fcport_sm_event event);
185 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
186                                         enum bfa_fcport_sm_event event);
187 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
188                                         enum bfa_fcport_sm_event event);
189 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
190                                         enum bfa_fcport_sm_event event);
191 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
192                                         enum bfa_fcport_sm_event event);
193 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
194                                         enum bfa_fcport_sm_event event);
195 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
196                                         enum bfa_fcport_sm_event event);
197 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
198                                         enum bfa_fcport_sm_event event);
199
200 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
201                                         enum bfa_fcport_ln_sm_event event);
202 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
203                                         enum bfa_fcport_ln_sm_event event);
204 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
205                                         enum bfa_fcport_ln_sm_event event);
206 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
207                                         enum bfa_fcport_ln_sm_event event);
208 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
209                                         enum bfa_fcport_ln_sm_event event);
210 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
211                                         enum bfa_fcport_ln_sm_event event);
212 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
213                                         enum bfa_fcport_ln_sm_event event);
214
/*
 * Maps each FC port state-machine handler to the externally reported
 * port state.  Note both iocdown and iocfail report BFA_PORT_ST_IOCDOWN.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
        {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
        {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
        {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
        {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
        {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
        {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
        {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
        {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
        {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
229
230
231 /*
232  * forward declaration for RPORT related functions
233  */
234 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
235 static void             bfa_rport_free(struct bfa_rport_s *rport);
236 static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
237 static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
238 static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
239 static void             __bfa_cb_rport_online(void *cbarg,
240                                                 bfa_boolean_t complete);
241 static void             __bfa_cb_rport_offline(void *cbarg,
242                                                 bfa_boolean_t complete);
243
244 /*
245  * forward declaration for RPORT state machine
246  */
247 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
248                                         enum bfa_rport_event event);
249 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
250                                         enum bfa_rport_event event);
251 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
252                                         enum bfa_rport_event event);
253 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
254                                         enum bfa_rport_event event);
255 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
256                                         enum bfa_rport_event event);
257 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
258                                         enum bfa_rport_event event);
259 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
260                                         enum bfa_rport_event event);
261 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
262                                         enum bfa_rport_event event);
263 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
264                                         enum bfa_rport_event event);
265 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
266                                         enum bfa_rport_event event);
267 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
268                                         enum bfa_rport_event event);
269 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
270                                         enum bfa_rport_event event);
271 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
272                                         enum bfa_rport_event event);
273
274 /*
275  * PLOG related definitions
276  */
277 static int
278 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
279 {
280         if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
281                 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
282                 return 1;
283
284         if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
285                 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
286                 return 1;
287
288         return 0;
289 }
290
291 static u64
292 bfa_get_log_time(void)
293 {
294         u64 system_time = 0;
295         struct timeval tv;
296         do_gettimeofday(&tv);
297
298         /* We are interested in seconds only. */
299         system_time = tv.tv_sec;
300         return system_time;
301 }
302
303 static void
304 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
305 {
306         u16 tail;
307         struct bfa_plog_rec_s *pl_recp;
308
309         if (plog->plog_enabled == 0)
310                 return;
311
312         if (plkd_validate_logrec(pl_rec)) {
313                 WARN_ON(1);
314                 return;
315         }
316
317         tail = plog->tail;
318
319         pl_recp = &(plog->plog_recs[tail]);
320
321         memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
322
323         pl_recp->tv = bfa_get_log_time();
324         BFA_PL_LOG_REC_INCR(plog->tail);
325
326         if (plog->head == plog->tail)
327                 BFA_PL_LOG_REC_INCR(plog->head);
328 }
329
330 void
331 bfa_plog_init(struct bfa_plog_s *plog)
332 {
333         memset((char *)plog, 0, sizeof(struct bfa_plog_s));
334
335         memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
336         plog->head = plog->tail = 0;
337         plog->plog_enabled = 1;
338 }
339
340 void
341 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
342                 enum bfa_plog_eid event,
343                 u16 misc, char *log_str)
344 {
345         struct bfa_plog_rec_s  lp;
346
347         if (plog->plog_enabled) {
348                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
349                 lp.mid = mid;
350                 lp.eid = event;
351                 lp.log_type = BFA_PL_LOG_TYPE_STRING;
352                 lp.misc = misc;
353                 strncpy(lp.log_entry.string_log, log_str,
354                         BFA_PL_STRING_LOG_SZ - 1);
355                 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
356                 bfa_plog_add(plog, &lp);
357         }
358 }
359
360 void
361 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
362                 enum bfa_plog_eid event,
363                 u16 misc, u32 *intarr, u32 num_ints)
364 {
365         struct bfa_plog_rec_s  lp;
366         u32 i;
367
368         if (num_ints > BFA_PL_INT_LOG_SZ)
369                 num_ints = BFA_PL_INT_LOG_SZ;
370
371         if (plog->plog_enabled) {
372                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
373                 lp.mid = mid;
374                 lp.eid = event;
375                 lp.log_type = BFA_PL_LOG_TYPE_INT;
376                 lp.misc = misc;
377
378                 for (i = 0; i < num_ints; i++)
379                         lp.log_entry.int_log[i] = intarr[i];
380
381                 lp.log_num_ints = (u8) num_ints;
382
383                 bfa_plog_add(plog, &lp);
384         }
385 }
386
387 void
388 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
389                         enum bfa_plog_eid event,
390                         u16 misc, struct fchs_s *fchdr)
391 {
392         struct bfa_plog_rec_s  lp;
393         u32     *tmp_int = (u32 *) fchdr;
394         u32     ints[BFA_PL_INT_LOG_SZ];
395
396         if (plog->plog_enabled) {
397                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
398
399                 ints[0] = tmp_int[0];
400                 ints[1] = tmp_int[1];
401                 ints[2] = tmp_int[4];
402
403                 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
404         }
405 }
406
407 void
408 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
409                       enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
410                       u32 pld_w0)
411 {
412         struct bfa_plog_rec_s  lp;
413         u32     *tmp_int = (u32 *) fchdr;
414         u32     ints[BFA_PL_INT_LOG_SZ];
415
416         if (plog->plog_enabled) {
417                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
418
419                 ints[0] = tmp_int[0];
420                 ints[1] = tmp_int[1];
421                 ints[2] = tmp_int[4];
422                 ints[3] = pld_w0;
423
424                 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
425         }
426 }
427
428
429 /*
430  *  fcxp_pvt BFA FCXP private functions
431  */
432
433 static void
434 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
435 {
436         u16     i;
437         struct bfa_fcxp_s *fcxp;
438
439         fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
440         memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
441
442         INIT_LIST_HEAD(&mod->fcxp_free_q);
443         INIT_LIST_HEAD(&mod->fcxp_active_q);
444         INIT_LIST_HEAD(&mod->fcxp_unused_q);
445
446         mod->fcxp_list = fcxp;
447
448         for (i = 0; i < mod->num_fcxps; i++) {
449                 fcxp->fcxp_mod = mod;
450                 fcxp->fcxp_tag = i;
451
452                 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
453                 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
454                 fcxp->reqq_waiting = BFA_FALSE;
455
456                 fcxp = fcxp + 1;
457         }
458
459         bfa_mem_kva_curp(mod) = (void *)fcxp;
460 }
461
/*
 * Compute FCXP module memory requirements: one DMA buffer per request
 * (request plus response payload) spread across DMA segments, and one
 * bfa_fcxp_s structure per request in kernel virtual memory.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
        struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
        struct bfa_mem_dma_s *seg_ptr;
        u16     nsegs, idx, per_seg_fcxp;
        u16     num_fcxps = cfg->fwcfg.num_fcxp_reqs;
        u32     per_fcxp_sz;

        if (num_fcxps == 0)
                return;

        /* In min-cfg mode both payload buffers use the small size. */
        if (cfg->drvcfg.min_cfg)
                per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
        else
                per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

        /* dma memory */
        nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
        per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

        /* Distribute per-fcxp buffers over the DMA segments; the last
         * segment takes whatever remains. */
        bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
                if (num_fcxps >= per_seg_fcxp) {
                        num_fcxps -= per_seg_fcxp;
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                per_seg_fcxp * per_fcxp_sz);
                } else
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                num_fcxps * per_fcxp_sz);
        }

        /* kva memory */
        bfa_mem_kva_setup(minfo, fcxp_kva,
                cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
499
500 static void
501 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
502                 struct bfa_pcidev_s *pcidev)
503 {
504         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
505
506         mod->bfa = bfa;
507         mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
508
509         /*
510          * Initialize FCXP request and response payload sizes.
511          */
512         mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
513         if (!cfg->drvcfg.min_cfg)
514                 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
515
516         INIT_LIST_HEAD(&mod->wait_q);
517
518         claim_fcxps_mem(mod);
519 }
520
/* FCXP module detach hook: nothing to tear down. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
525
/* FCXP module start hook: no start-time processing required. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
530
/* FCXP module stop hook: no stop-time processing required. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
535
/*
 * IOC disable notification: recycle unused fcxps and fail every
 * active fcxp back to its owner with BFA_STATUS_IOC_FAILURE.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s *fcxp;
        struct list_head              *qe, *qen;

        /* Enqueue unused fcxp resources to free_q */
        list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);

        /* Safe iteration: the callbacks below remove entries. */
        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
                if (fcxp->caller == NULL) {
                        /* No caller context: complete synchronously
                         * and free the fcxp immediately. */
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
                        bfa_fcxp_free(fcxp);
                } else {
                        /* Caller present: defer completion through the
                         * callback queue. */
                        fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                     __bfa_fcxp_send_cbfn, fcxp);
                }
        }
}
559
560 static struct bfa_fcxp_s *
561 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
562 {
563         struct bfa_fcxp_s *fcxp;
564
565         bfa_q_deq(&fm->fcxp_free_q, &fcxp);
566
567         if (fcxp)
568                 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
569
570         return fcxp;
571 }
572
573 static void
574 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
575                struct bfa_s *bfa,
576                u8 *use_ibuf,
577                u32 *nr_sgles,
578                bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
579                bfa_fcxp_get_sglen_t *r_sglen_cbfn,
580                struct list_head *r_sgpg_q,
581                int n_sgles,
582                bfa_fcxp_get_sgaddr_t sga_cbfn,
583                bfa_fcxp_get_sglen_t sglen_cbfn)
584 {
585
586         WARN_ON(bfa == NULL);
587
588         bfa_trc(bfa, fcxp->fcxp_tag);
589
590         if (n_sgles == 0) {
591                 *use_ibuf = 1;
592         } else {
593                 WARN_ON(*sga_cbfn == NULL);
594                 WARN_ON(*sglen_cbfn == NULL);
595
596                 *use_ibuf = 0;
597                 *r_sga_cbfn = sga_cbfn;
598                 *r_sglen_cbfn = sglen_cbfn;
599
600                 *nr_sgles = n_sgles;
601
602                 /*
603                  * alloc required sgpgs
604                  */
605                 if (n_sgles > BFI_SGE_INLINE)
606                         WARN_ON(1);
607         }
608
609 }
610
611 static void
612 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
613                void *caller, struct bfa_s *bfa, int nreq_sgles,
614                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
615                bfa_fcxp_get_sglen_t req_sglen_cbfn,
616                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
617                bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
618 {
619
620         WARN_ON(bfa == NULL);
621
622         bfa_trc(bfa, fcxp->fcxp_tag);
623
624         fcxp->caller = caller;
625
626         bfa_fcxp_init_reqrsp(fcxp, bfa,
627                 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
628                 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
629                 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
630
631         bfa_fcxp_init_reqrsp(fcxp, bfa,
632                 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
633                 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
634                 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
635
636 }
637
638 static void
639 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
640 {
641         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
642         struct bfa_fcxp_wqe_s *wqe;
643
644         bfa_q_deq(&mod->wait_q, &wqe);
645         if (wqe) {
646                 bfa_trc(mod->bfa, fcxp->fcxp_tag);
647
648                 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
649                         wqe->nrsp_sgles, wqe->req_sga_cbfn,
650                         wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
651                         wqe->rsp_sglen_cbfn);
652
653                 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
654                 return;
655         }
656
657         WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
658         list_del(&fcxp->qe);
659         list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
660 }
661
/*
 * No-op send-completion callback; the response of a discarded fcxp
 * is intentionally ignored.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
                   bfa_status_t req_status, u32 rsp_len,
                   u32 resid_len, struct fchs_s *rsp_fchs)
{
        /* discarded fcxp completion */
}
669
670 static void
671 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
672 {
673         struct bfa_fcxp_s *fcxp = cbarg;
674
675         if (complete) {
676                 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
677                                 fcxp->rsp_status, fcxp->rsp_len,
678                                 fcxp->residue_len, &fcxp->rsp_fchs);
679         } else {
680                 bfa_fcxp_free(fcxp);
681         }
682 }
683
/*
 * Firmware send-response handler: locate the fcxp by tag, byte-swap
 * the response fields, trace it, and deliver completion to the
 * requester either synchronously or via the callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s       *fcxp;
        u16             fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

        bfa_trc(bfa, fcxp_tag);

        fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

        /*
         * @todo f/w should not set residue to non-0 when everything
         *       is received.
         */
        if (fcxp_rsp->req_status == BFA_STATUS_OK)
                fcxp_rsp->residue_len = 0;
        else
                fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

        fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

        WARN_ON(fcxp->send_cbfn == NULL);

        hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

        if (fcxp->send_cbfn != NULL) {
                bfa_trc(mod->bfa, (NULL == fcxp->caller));
                if (fcxp->caller == NULL) {
                        /* No caller context: invoke the callback
                         * synchronously and free the fcxp here. */
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        fcxp_rsp->req_status, fcxp_rsp->rsp_len,
                                        fcxp_rsp->residue_len, &fcxp_rsp->fchs);
                        /*
                         * fcxp automatically freed on return from the callback
                         */
                        bfa_fcxp_free(fcxp);
                } else {
                        /* Caller present: stash the response in the
                         * fcxp and defer the callback. */
                        fcxp->rsp_status = fcxp_rsp->req_status;
                        fcxp->rsp_len = fcxp_rsp->rsp_len;
                        fcxp->residue_len = fcxp_rsp->residue_len;
                        fcxp->rsp_fchs = fcxp_rsp->fchs;

                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                        __bfa_fcxp_send_cbfn, fcxp);
                }
        } else {
                bfa_trc(bfa, (NULL == fcxp->send_cbfn));
        }
}
733
734 static void
735 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
736                  struct fchs_s *fchs)
737 {
738         /*
739          * TODO: TX ox_id
740          */
741         if (reqlen > 0) {
742                 if (fcxp->use_ireqbuf) {
743                         u32     pld_w0 =
744                                 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
745
746                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
747                                         BFA_PL_EID_TX,
748                                         reqlen + sizeof(struct fchs_s), fchs,
749                                         pld_w0);
750                 } else {
751                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
752                                         BFA_PL_EID_TX,
753                                         reqlen + sizeof(struct fchs_s),
754                                         fchs);
755                 }
756         } else {
757                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
758                                reqlen + sizeof(struct fchs_s), fchs);
759         }
760 }
761
762 static void
763 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
764                  struct bfi_fcxp_send_rsp_s *fcxp_rsp)
765 {
766         if (fcxp_rsp->rsp_len > 0) {
767                 if (fcxp->use_irspbuf) {
768                         u32     pld_w0 =
769                                 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
770
771                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
772                                               BFA_PL_EID_RX,
773                                               (u16) fcxp_rsp->rsp_len,
774                                               &fcxp_rsp->fchs, pld_w0);
775                 } else {
776                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
777                                        BFA_PL_EID_RX,
778                                        (u16) fcxp_rsp->rsp_len,
779                                        &fcxp_rsp->fchs);
780                 }
781         } else {
782                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
783                                (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
784         }
785 }
786
/*
 * Handler to resume sending fcxp when space is available in cpe queue.
 */
790 static void
791 bfa_fcxp_qresume(void *cbarg)
792 {
793         struct bfa_fcxp_s               *fcxp = cbarg;
794         struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
795         struct bfi_fcxp_send_req_s      *send_req;
796
797         fcxp->reqq_waiting = BFA_FALSE;
798         send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
799         bfa_fcxp_queue(fcxp, send_req);
800 }
801
802 /*
803  * Queue fcxp send request to foimrware.
804  */
805 static void
806 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
807 {
808         struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
809         struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
810         struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
811         struct bfa_rport_s              *rport = reqi->bfa_rport;
812
813         bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
814                     bfa_fn_lpu(bfa));
815
816         send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
817         if (rport) {
818                 send_req->rport_fw_hndl = rport->fw_handle;
819                 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
820                 if (send_req->max_frmsz == 0)
821                         send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
822         } else {
823                 send_req->rport_fw_hndl = 0;
824                 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
825         }
826
827         send_req->vf_id = cpu_to_be16(reqi->vf_id);
828         send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
829         send_req->class = reqi->class;
830         send_req->rsp_timeout = rspi->rsp_timeout;
831         send_req->cts = reqi->cts;
832         send_req->fchs = reqi->fchs;
833
834         send_req->req_len = cpu_to_be32(reqi->req_tot_len);
835         send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
836
837         /*
838          * setup req sgles
839          */
840         if (fcxp->use_ireqbuf == 1) {
841                 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
842                                         BFA_FCXP_REQ_PLD_PA(fcxp));
843         } else {
844                 if (fcxp->nreq_sgles > 0) {
845                         WARN_ON(fcxp->nreq_sgles != 1);
846                         bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
847                                 fcxp->req_sga_cbfn(fcxp->caller, 0));
848                 } else {
849                         WARN_ON(reqi->req_tot_len != 0);
850                         bfa_alen_set(&send_req->rsp_alen, 0, 0);
851                 }
852         }
853
854         /*
855          * setup rsp sgles
856          */
857         if (fcxp->use_irspbuf == 1) {
858                 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
859
860                 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
861                                         BFA_FCXP_RSP_PLD_PA(fcxp));
862         } else {
863                 if (fcxp->nrsp_sgles > 0) {
864                         WARN_ON(fcxp->nrsp_sgles != 1);
865                         bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
866                                 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
867
868                 } else {
869                         WARN_ON(rspi->rsp_maxlen != 0);
870                         bfa_alen_set(&send_req->rsp_alen, 0, 0);
871                 }
872         }
873
874         hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
875
876         bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
877
878         bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
879         bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
880 }
881
882 /*
883  * Allocate an FCXP instance to send a response or to send a request
884  * that has a response. Request/response buffers are allocated by caller.
885  *
886  * @param[in]   bfa             BFA bfa instance
887  * @param[in]   nreq_sgles      Number of SG elements required for request
888  *                              buffer. 0, if fcxp internal buffers are used.
889  *                              Use bfa_fcxp_get_reqbuf() to get the
890  *                              internal req buffer.
891  * @param[in]   req_sgles       SG elements describing request buffer. Will be
892  *                              copied in by BFA and hence can be freed on
893  *                              return from this function.
894  * @param[in]   get_req_sga     function ptr to be called to get a request SG
895  *                              Address (given the sge index).
896  * @param[in]   get_req_sglen   function ptr to be called to get a request SG
897  *                              len (given the sge index).
898  * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
899  *                              Address (given the sge index).
900  * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
901  *                              len (given the sge index).
902  *
903  * @return FCXP instance. NULL on failure.
904  */
905 struct bfa_fcxp_s *
906 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
907                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
908                bfa_fcxp_get_sglen_t req_sglen_cbfn,
909                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
910                bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
911 {
912         struct bfa_fcxp_s *fcxp = NULL;
913
914         WARN_ON(bfa == NULL);
915
916         fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
917         if (fcxp == NULL)
918                 return NULL;
919
920         bfa_trc(bfa, fcxp->fcxp_tag);
921
922         bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
923                         req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
924
925         return fcxp;
926 }
927
928 /*
929  * Get the internal request buffer pointer
930  *
931  * @param[in]   fcxp    BFA fcxp pointer
932  *
933  * @return              pointer to the internal request buffer
934  */
935 void *
936 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
937 {
938         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
939         void    *reqbuf;
940
941         WARN_ON(fcxp->use_ireqbuf != 1);
942         reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
943                                 mod->req_pld_sz + mod->rsp_pld_sz);
944         return reqbuf;
945 }
946
947 u32
948 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
949 {
950         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
951
952         return mod->req_pld_sz;
953 }
954
955 /*
956  * Get the internal response buffer pointer
957  *
958  * @param[in]   fcxp    BFA fcxp pointer
959  *
960  * @return              pointer to the internal request buffer
961  */
962 void *
963 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
964 {
965         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
966         void    *fcxp_buf;
967
968         WARN_ON(fcxp->use_irspbuf != 1);
969
970         fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
971                                 mod->req_pld_sz + mod->rsp_pld_sz);
972
973         /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
974         return ((u8 *) fcxp_buf) + mod->req_pld_sz;
975 }
976
977 /*
978  * Free the BFA FCXP
979  *
980  * @param[in]   fcxp                    BFA fcxp pointer
981  *
982  * @return              void
983  */
984 void
985 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
986 {
987         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
988
989         WARN_ON(fcxp == NULL);
990         bfa_trc(mod->bfa, fcxp->fcxp_tag);
991         bfa_fcxp_put(fcxp);
992 }
993
994 /*
995  * Send a FCXP request
996  *
997  * @param[in]   fcxp    BFA fcxp pointer
998  * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
999  * @param[in]   vf_id   virtual Fabric ID
1000  * @param[in]   lp_tag  lport tag
1001  * @param[in]   cts     use Continuous sequence
1002  * @param[in]   cos     fc Class of Service
1003  * @param[in]   reqlen  request length, does not include FCHS length
1004  * @param[in]   fchs    fc Header Pointer. The header content will be copied
1005  *                      in by BFA.
1006  *
1007  * @param[in]   cbfn    call back function to be called on receiving
1008  *                                                              the response
1009  * @param[in]   cbarg   arg for cbfn
1010  * @param[in]   rsp_timeout
1011  *                      response timeout
1012  *
1013  * @return              bfa_status_t
1014  */
1015 void
1016 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1017               u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1018               u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1019               void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1020 {
1021         struct bfa_s                    *bfa  = fcxp->fcxp_mod->bfa;
1022         struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
1023         struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
1024         struct bfi_fcxp_send_req_s      *send_req;
1025
1026         bfa_trc(bfa, fcxp->fcxp_tag);
1027
1028         /*
1029          * setup request/response info
1030          */
1031         reqi->bfa_rport = rport;
1032         reqi->vf_id = vf_id;
1033         reqi->lp_tag = lp_tag;
1034         reqi->class = cos;
1035         rspi->rsp_timeout = rsp_timeout;
1036         reqi->cts = cts;
1037         reqi->fchs = *fchs;
1038         reqi->req_tot_len = reqlen;
1039         rspi->rsp_maxlen = rsp_maxlen;
1040         fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1041         fcxp->send_cbarg = cbarg;
1042
1043         /*
1044          * If no room in CPE queue, wait for space in request queue
1045          */
1046         send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1047         if (!send_req) {
1048                 bfa_trc(bfa, fcxp->fcxp_tag);
1049                 fcxp->reqq_waiting = BFA_TRUE;
1050                 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1051                 return;
1052         }
1053
1054         bfa_fcxp_queue(fcxp, send_req);
1055 }
1056
/*
 * Abort a BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		BFA_STATUS_OK (abort is not actually implemented)
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* not implemented: any call indicates a driver logic error */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1071
1072 void
1073 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1074                bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1075                void *caller, int nreq_sgles,
1076                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1077                bfa_fcxp_get_sglen_t req_sglen_cbfn,
1078                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1079                bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1080 {
1081         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1082
1083         WARN_ON(!list_empty(&mod->fcxp_free_q));
1084
1085         wqe->alloc_cbfn = alloc_cbfn;
1086         wqe->alloc_cbarg = alloc_cbarg;
1087         wqe->caller = caller;
1088         wqe->bfa = bfa;
1089         wqe->nreq_sgles = nreq_sgles;
1090         wqe->nrsp_sgles = nrsp_sgles;
1091         wqe->req_sga_cbfn = req_sga_cbfn;
1092         wqe->req_sglen_cbfn = req_sglen_cbfn;
1093         wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1094         wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1095
1096         list_add_tail(&wqe->qe, &mod->wait_q);
1097 }
1098
1099 void
1100 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1101 {
1102         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1103
1104         WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
1105         list_del(&wqe->qe);
1106 }
1107
1108 void
1109 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1110 {
1111         /*
1112          * If waiting for room in request queue, cancel reqq wait
1113          * and free fcxp.
1114          */
1115         if (fcxp->reqq_waiting) {
1116                 fcxp->reqq_waiting = BFA_FALSE;
1117                 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1118                 bfa_fcxp_free(fcxp);
1119                 return;
1120         }
1121
1122         fcxp->send_cbfn = bfa_fcxp_null_comp;
1123 }
1124
1125 void
1126 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1127 {
1128         switch (msg->mhdr.msg_id) {
1129         case BFI_FCXP_I2H_SEND_RSP:
1130                 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1131                 break;
1132
1133         default:
1134                 bfa_trc(bfa, msg->mhdr.msg_id);
1135                 WARN_ON(1);
1136         }
1137 }
1138
1139 u32
1140 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1141 {
1142         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1143
1144         return mod->rsp_pld_sz;
1145 }
1146
1147 void
1148 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1149 {
1150         struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
1151         struct list_head        *qe;
1152         int     i;
1153
1154         for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1155                 bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
1156                 list_add_tail(qe, &mod->fcxp_unused_q);
1157         }
1158 }
1159
/*
 *  BFA LPS state machine functions
 */

/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* send now if there is reqq space, otherwise wait for it */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		/* port-log the request type: FDISC for vports, else FLOGI */
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* not logged in -- complete the logout immediately */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* nothing to undo before a login */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects a loopback and
		 * discards the lps request. Firmware will eventually send
		 * out the timeout. Just ignore.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1215
/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* lps->status was filled in by bfa_lps_login_rsp() */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* login failed: back to init and log the failure */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* notify the login initiator in either case */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon the in-flight login */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* cannot program the PID until login completes; just trace */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1269
/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/*
		 * Reqq space became available. Note: the actual send is
		 * done by the resume path; here we only transition.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon: leave the reqq wait queue and return to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1302
/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* send logout now if reqq has space, else wait for it */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* program the N2N-assigned PID; wait for reqq space if full */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1351
/*
 * Online (login complete), waiting for reqq space to send the
 * N2N-assigned PID to firmware.
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* space available: go back online and send the PID */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/*
		 * The wqe is still queued; when reqq space resumes in
		 * logowait, the logout itself will be sent.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* cancel the pending wait before going back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1393
/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* logout acknowledged by firmware */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon the in-flight logout */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1418
/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space became available: send the deferred logout */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon: leave the reqq wait queue and return to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1444
1445
1446
1447 /*
1448  *  lps_pvt BFA LPS private functions
1449  */
1450
1451 /*
1452  * return memory requirement
1453  */
1454 static void
1455 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1456                 struct bfa_s *bfa)
1457 {
1458         struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1459
1460         if (cfg->drvcfg.min_cfg)
1461                 bfa_mem_kva_setup(minfo, lps_kva,
1462                         sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1463         else
1464                 bfa_mem_kva_setup(minfo, lps_kva,
1465                         sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1466 }
1467
1468 /*
1469  * bfa module attach at initialization time
1470  */
1471 static void
1472 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1473         struct bfa_pcidev_s *pcidev)
1474 {
1475         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1476         struct bfa_lps_s        *lps;
1477         int                     i;
1478
1479         mod->num_lps = BFA_LPS_MAX_LPORTS;
1480         if (cfg->drvcfg.min_cfg)
1481                 mod->num_lps = BFA_LPS_MIN_LPORTS;
1482         else
1483                 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484         mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1485
1486         bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1487
1488         INIT_LIST_HEAD(&mod->lps_free_q);
1489         INIT_LIST_HEAD(&mod->lps_active_q);
1490         INIT_LIST_HEAD(&mod->lps_login_q);
1491
1492         for (i = 0; i < mod->num_lps; i++, lps++) {
1493                 lps->bfa        = bfa;
1494                 lps->bfa_tag    = (u8) i;
1495                 lps->reqq       = BFA_REQQ_LPS;
1496                 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1497                 list_add_tail(&lps->qe, &mod->lps_free_q);
1498         }
1499 }
1500
/*
 * Module detach entry point -- LPS needs no per-module teardown.
 */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1505
/*
 * Module start entry point -- nothing to do for LPS.
 */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1510
/*
 * Module stop entry point -- nothing to do for LPS.
 */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1515
1516 /*
1517  * IOC in disabled state -- consider all lps offline
1518  */
1519 static void
1520 bfa_lps_iocdisable(struct bfa_s *bfa)
1521 {
1522         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1523         struct bfa_lps_s        *lps;
1524         struct list_head                *qe, *qen;
1525
1526         list_for_each_safe(qe, qen, &mod->lps_active_q) {
1527                 lps = (struct bfa_lps_s *) qe;
1528                 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1529         }
1530         list_for_each_safe(qe, qen, &mod->lps_login_q) {
1531                 lps = (struct bfa_lps_s *) qe;
1532                 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1533         }
1534         list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1535 }
1536
1537 /*
1538  * Firmware login response
1539  */
1540 static void
1541 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1542 {
1543         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1544         struct bfa_lps_s        *lps;
1545
1546         WARN_ON(rsp->bfa_tag >= mod->num_lps);
1547         lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1548
1549         lps->status = rsp->status;
1550         switch (rsp->status) {
1551         case BFA_STATUS_OK:
1552                 lps->fw_tag     = rsp->fw_tag;
1553                 lps->fport      = rsp->f_port;
1554                 if (lps->fport)
1555                         lps->lp_pid = rsp->lp_pid;
1556                 lps->npiv_en    = rsp->npiv_en;
1557                 lps->pr_bbcred  = be16_to_cpu(rsp->bb_credit);
1558                 lps->pr_pwwn    = rsp->port_name;
1559                 lps->pr_nwwn    = rsp->node_name;
1560                 lps->auth_req   = rsp->auth_req;
1561                 lps->lp_mac     = rsp->lp_mac;
1562                 lps->brcd_switch = rsp->brcd_switch;
1563                 lps->fcf_mac    = rsp->fcf_mac;
1564                 lps->pr_bbscn   = rsp->bb_scn;
1565
1566                 break;
1567
1568         case BFA_STATUS_FABRIC_RJT:
1569                 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1570                 lps->lsrjt_expl = rsp->lsrjt_expl;
1571
1572                 break;
1573
1574         case BFA_STATUS_EPROTOCOL:
1575                 lps->ext_status = rsp->ext_status;
1576
1577                 break;
1578
1579         case BFA_STATUS_VPORT_MAX:
1580                 if (!rsp->ext_status)
1581                         bfa_lps_no_res(lps, rsp->ext_status);
1582                 break;
1583
1584         default:
1585                 /* Nothing to do with other status */
1586                 break;
1587         }
1588
1589         list_del(&lps->qe);
1590         list_add_tail(&lps->qe, &mod->lps_active_q);
1591         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1592 }
1593
1594 static void
1595 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1596 {
1597         struct bfa_s            *bfa = first_lps->bfa;
1598         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1599         struct list_head        *qe, *qe_next;
1600         struct bfa_lps_s        *lps;
1601
1602         bfa_trc(bfa, count);
1603
1604         qe = bfa_q_next(first_lps);
1605
1606         while (count && qe) {
1607                 qe_next = bfa_q_next(qe);
1608                 lps = (struct bfa_lps_s *)qe;
1609                 bfa_trc(bfa, lps->bfa_tag);
1610                 lps->status = first_lps->status;
1611                 list_del(&lps->qe);
1612                 list_add_tail(&lps->qe, &mod->lps_active_q);
1613                 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1614                 qe = qe_next;
1615                 count--;
1616         }
1617 }
1618
1619 /*
1620  * Firmware logout response
1621  */
1622 static void
1623 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1624 {
1625         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1626         struct bfa_lps_s        *lps;
1627
1628         WARN_ON(rsp->bfa_tag >= mod->num_lps);
1629         lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1630
1631         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1632 }
1633
1634 /*
1635  * Firmware received a Clear virtual link request (for FCoE)
1636  */
1637 static void
1638 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1639 {
1640         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1641         struct bfa_lps_s        *lps;
1642
1643         lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1644
1645         bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1646 }
1647
1648 /*
1649  * Space is available in request queue, resume queueing request to firmware.
1650  */
1651 static void
1652 bfa_lps_reqq_resume(void *lps_arg)
1653 {
1654         struct bfa_lps_s        *lps = lps_arg;
1655
1656         bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1657 }
1658
1659 /*
1660  * lps is freed -- triggered by vport delete
1661  */
1662 static void
1663 bfa_lps_free(struct bfa_lps_s *lps)
1664 {
1665         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1666
1667         lps->lp_pid = 0;
1668         list_del(&lps->qe);
1669         list_add_tail(&lps->qe, &mod->lps_free_q);
1670 }
1671
1672 /*
1673  * send login request to firmware
1674  */
1675 static void
1676 bfa_lps_send_login(struct bfa_lps_s *lps)
1677 {
1678         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1679         struct bfi_lps_login_req_s      *m;
1680
1681         m = bfa_reqq_next(lps->bfa, lps->reqq);
1682         WARN_ON(!m);
1683
1684         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1685                 bfa_fn_lpu(lps->bfa));
1686
1687         m->bfa_tag      = lps->bfa_tag;
1688         m->alpa         = lps->alpa;
1689         m->pdu_size     = cpu_to_be16(lps->pdusz);
1690         m->pwwn         = lps->pwwn;
1691         m->nwwn         = lps->nwwn;
1692         m->fdisc        = lps->fdisc;
1693         m->auth_en      = lps->auth_en;
1694         m->bb_scn       = lps->bb_scn;
1695
1696         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1697         list_del(&lps->qe);
1698         list_add_tail(&lps->qe, &mod->lps_login_q);
1699 }
1700
1701 /*
1702  * send logout request to firmware
1703  */
1704 static void
1705 bfa_lps_send_logout(struct bfa_lps_s *lps)
1706 {
1707         struct bfi_lps_logout_req_s *m;
1708
1709         m = bfa_reqq_next(lps->bfa, lps->reqq);
1710         WARN_ON(!m);
1711
1712         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1713                 bfa_fn_lpu(lps->bfa));
1714
1715         m->fw_tag = lps->fw_tag;
1716         m->port_name = lps->pwwn;
1717         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1718 }
1719
1720 /*
1721  * send n2n pid set request to firmware
1722  */
1723 static void
1724 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1725 {
1726         struct bfi_lps_n2n_pid_req_s *m;
1727
1728         m = bfa_reqq_next(lps->bfa, lps->reqq);
1729         WARN_ON(!m);
1730
1731         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1732                 bfa_fn_lpu(lps->bfa));
1733
1734         m->fw_tag = lps->fw_tag;
1735         m->lp_pid = lps->lp_pid;
1736         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1737 }
1738
1739 /*
1740  * Indirect login completion handler for non-fcs
1741  */
1742 static void
1743 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1744 {
1745         struct bfa_lps_s *lps   = arg;
1746
1747         if (!complete)
1748                 return;
1749
1750         if (lps->fdisc)
1751                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1752         else
1753                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1754 }
1755
1756 /*
1757  * Login completion handler -- direct call for fcs, queue for others
1758  */
1759 static void
1760 bfa_lps_login_comp(struct bfa_lps_s *lps)
1761 {
1762         if (!lps->bfa->fcs) {
1763                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1764                         lps);
1765                 return;
1766         }
1767
1768         if (lps->fdisc)
1769                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1770         else
1771                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1772 }
1773
1774 /*
1775  * Indirect logout completion handler for non-fcs
1776  */
1777 static void
1778 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1779 {
1780         struct bfa_lps_s *lps   = arg;
1781
1782         if (!complete)
1783                 return;
1784
1785         if (lps->fdisc)
1786                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1787 }
1788
1789 /*
1790  * Logout completion handler -- direct call for fcs, queue for others
1791  */
1792 static void
1793 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1794 {
1795         if (!lps->bfa->fcs) {
1796                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1797                         lps);
1798                 return;
1799         }
1800         if (lps->fdisc)
1801                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1802 }
1803
1804 /*
1805  * Clear virtual link completion handler for non-fcs
1806  */
1807 static void
1808 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1809 {
1810         struct bfa_lps_s *lps   = arg;
1811
1812         if (!complete)
1813                 return;
1814
1815         /* Clear virtual link to base port will result in link down */
1816         if (lps->fdisc)
1817                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1818 }
1819
1820 /*
1821  * Received Clear virtual link event --direct call for fcs,
1822  * queue for others
1823  */
1824 static void
1825 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1826 {
1827         if (!lps->bfa->fcs) {
1828                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1829                         lps);
1830                 return;
1831         }
1832
1833         /* Clear virtual link to base port will result in link down */
1834         if (lps->fdisc)
1835                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1836 }
1837
1838
1839
1840 /*
1841  *  lps_public BFA LPS public functions
1842  */
1843
1844 u32
1845 bfa_lps_get_max_vport(struct bfa_s *bfa)
1846 {
1847         if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1848                 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1849         else
1850                 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1851 }
1852
1853 /*
1854  * Allocate a lport srvice tag.
1855  */
1856 struct bfa_lps_s  *
1857 bfa_lps_alloc(struct bfa_s *bfa)
1858 {
1859         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1860         struct bfa_lps_s        *lps = NULL;
1861
1862         bfa_q_deq(&mod->lps_free_q, &lps);
1863
1864         if (lps == NULL)
1865                 return NULL;
1866
1867         list_add_tail(&lps->qe, &mod->lps_active_q);
1868
1869         bfa_sm_set_state(lps, bfa_lps_sm_init);
1870         return lps;
1871 }
1872
/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine performs the actual cleanup and free. */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1882
/*
 * Initiate a lport login (FLOGI).
 *
 * @uarg	caller context, returned in the completion callback
 * @alpa	loop address to log in with
 * @pdusz	PDU size, sent to firmware in the login request
 * @pwwn/@nwwn	port and node world-wide names
 * @auth_en	whether authentication is enabled for this login
 * @bb_scn	BB credit SCN value passed through to firmware
 *
 * Stashes the parameters on the lps and kicks the state machine; the
 * actual firmware request is sent by bfa_lps_send_login().
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;	/* FLOGI, not FDISC */
	lps->auth_en	= auth_en;
	lps->bb_scn	= bb_scn;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
1900
/*
 * Initiate a lport fdisc login (virtual port).
 *
 * Same flow as bfa_lps_flogi(), but marks the request as FDISC and
 * forces alpa = 0 and auth_en = false before kicking the state machine.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	lps->alpa	= 0;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
1917
1918
/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The state machine sends the logout request to firmware. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1927
1928 u8
1929 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1930 {
1931         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1932
1933         return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1934 }
1935
1936 /*
1937  * Return lport services tag given the pid
1938  */
1939 u8
1940 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1941 {
1942         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1943         struct bfa_lps_s        *lps;
1944         int                     i;
1945
1946         for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1947                 if (lps->lp_pid == pid)
1948                         return lps->bfa_tag;
1949         }
1950
1951         /* Return base port tag anyway */
1952         return 0;
1953 }
1954
1955
1956 /*
1957  * return port id assigned to the base lport
1958  */
1959 u32
1960 bfa_lps_get_base_pid(struct bfa_s *bfa)
1961 {
1962         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1963
1964         return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1965 }
1966
/*
 * Set PID in case of n2n (which is assigned during PLOGI).
 *
 * Stores the pid on the lps and notifies the state machine, which sends
 * the pid to firmware via bfa_lps_send_set_n2n_pid().
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1979
/*
 * LPS firmware message class handler.
 *
 * Dispatches incoming BFI LPS messages (login/logout responses and CVL
 * events) to their handlers. An unknown message id is a firmware/driver
 * protocol mismatch, hence the WARN_ON.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
2009
2010 /*
2011  * FC PORT state machine functions
2012  */
/*
 * Port state: uninitialized. Waits for the initial START (or a
 * persistent ENABLE/DISABLE configuration) after IOC setup.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No request-queue space; wait for queue resume. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2060
/*
 * Port state: enable requested, but waiting for request-queue space to
 * send the enable to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred enable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* Cancel the queued wait before recording the IOC failure. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2117
/*
 * Port state: enable request sent to firmware; waiting for the firmware
 * response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enabled but no link yet. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2172
/*
 * Port state: enabled, link is down. Waits for linkup or an
 * enable/disable/stop request.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			/* FCoE mode: record the FIP FCF discovery outcome. */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2251
/*
 * Port state: enabled and link is up. Handles disable/linkdown/stop and
 * hardware failure, resetting link info and notifying on each exit.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Unexpected linkdown (port not disabled) is an error. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2333
/*
 * Port state: disable requested, waiting for request-queue space to
 * send the disable to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while a disable is still queued: toggle instead. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2378
/*
 * Port state: a disable is queue-waiting and an enable arrived on top
 * of it; on queue resume the disable is sent first, then the enable.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the pending disable first, then the enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already pending. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Cancel the pending enable: back to plain disable-wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2425
/*
 * Port state: disable request sent to firmware; waiting for the
 * firmware response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2479
/*
 * Port state: disabled. Only an ENABLE (or IOC failure / STOP) moves
 * the port out of this state; START is ignored.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2527
/*
 * Port state: stopped. Only a START re-enables the port; everything
 * else is ignored.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2550
/*
 * Port is enabled. IOC is down/failed.
 * A START (after IOC recovery) re-enables the port; everything else
 * is ignored.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			/* No request-queue space; wait for queue resume. */
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2576
/*
 * Port is disabled. IOC is down/failed.
 * START returns the port to disabled; ENABLE records the intent by
 * moving to iocdown so the port enables once the IOC recovers.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2602
/*
 * Link-notify state: link is down. A linkup queues the up notification
 * and moves to the waiting-for-up-notification state.
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2622
/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Link came back before the down callback completed. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback completed; settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2645
/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled by another down; back to plain dn_nf. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/*
		 * Down callback done; deliver the deferred LINKUP and wait
		 * for its completion in up_nf.
		 */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2669
/*
 * Link state is up
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/*
		 * Queue the LINKDOWN callback to upper layers and wait in
		 * dn_nf until its completion notification arrives.
		 */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2689
/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Link dropped before the up callback completed. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback completed; settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2712
/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Another up arrived; now both a down and an up are pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/*
		 * Up callback done; deliver the deferred LINKDOWN and wait
		 * for its completion in dn_nf.
		 */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2736
/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled by another down; only down remains. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/*
		 * Up callback done; deliver the deferred LINKDOWN while the
		 * pending up is carried forward via dn_up_nf.
		 */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2760
2761 static void
2762 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2763 {
2764         struct bfa_fcport_ln_s *ln = cbarg;
2765
2766         if (complete)
2767                 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2768         else
2769                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2770 }
2771
/*
 * Send SCN notification to upper layers.
 * trunk - false if caller is fcport to ignore fcport event in trunked mode
 */
static void
bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
	bfa_boolean_t trunk)
{
	/*
	 * In trunked mode only trunk-originated events are forwarded;
	 * per-port fcport events (trunk == BFA_FALSE) are suppressed.
	 */
	if (fcport->cfg.trunked && !trunk)
		return;

	switch (event) {
	case BFA_PORT_LINKUP:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
		break;
	case BFA_PORT_LINKDOWN:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
		break;
	default:
		WARN_ON(1);
	}
}
2794
/*
 * Deliver a link-state event to the upper layer.  With FCS present the
 * callback is invoked synchronously and the state machine is notified at
 * once; otherwise the event is saved and delivered via the deferred
 * callback queue (__bfa_cb_fcport_event).
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}
2809
/* DMA buffer size for firmware port stats, rounded up to a cache line. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
							BFA_CACHELINE_SZ))

/*
 * Module meminfo hook: report the DMA memory fcport needs so the
 * framework can allocate it before attach.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		   struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
2821
2822 static void
2823 bfa_fcport_qresume(void *cbarg)
2824 {
2825         struct bfa_fcport_s *fcport = cbarg;
2826
2827         bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2828 }
2829
2830 static void
2831 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2832 {
2833         struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2834
2835         fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2836         fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
2837         fcport->stats = (union bfa_fcport_stats_u *)
2838                                 bfa_mem_dma_virt(fcport_dma);
2839 }
2840
/*
 * Memory initialization.
 *
 * Module attach hook: wire up the port and link-notify objects, claim the
 * stats DMA buffer, initialize both state machines, and install default
 * port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	/* 0 == "not set"; fixed up from IOC data in bfa_fcport_init() */
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	/* qresume callback re-drives the SM when reqq space frees up */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2879
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
	/* Intentionally empty: no per-module teardown is required. */
}
2884
/*
 * Called when IOC is ready.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2893
/*
 * Called before IOC is stopped.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	/* Trunk attributes are also reset when the IOC goes away. */
	bfa_trunk_iocdisable(bfa);
}
2903
/*
 * Called when IOC failure is detected.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2915
/*
 * Capture link attributes (speed, topology, QoS, FCoE VLAN) from the
 * firmware link-state event saved in fcport->event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2944
/* Clear cached link attributes when the link goes down or is reset. */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->bbsc_op_state = BFA_FALSE;
}
2952
/*
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a reqq waiter is armed; bfa_fcport_qresume retries).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* firmware expects maxfrsize in big-endian byte order */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
2995
/*
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a reqq waiter is armed; bfa_fcport_qresume retries).
 */
static	bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}
3031
/* Copy the port/node WWNs from the IOC attributes onto the port. */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3041
/*
 * Send the configured TX BB-credit and BB_SCN service parameters to
 * firmware.  Note: if the request queue is full the update is only
 * traced and dropped -- no reqq waiter is armed for this message.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_fn_lpu(fcport->bfa));
	/* credit is carried big-endian on the wire */
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
	m->bb_scn = fcport->cfg.bb_scn;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
}
3068
3069 static void
3070 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3071         struct bfa_qos_stats_s *s)
3072 {
3073         u32     *dip = (u32 *) d;
3074         __be32  *sip = (__be32 *) s;
3075         int             i;
3076
3077         /* Now swap the 32 bit fields */
3078         for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3079                 dip[i] = be32_to_cpu(sip[i]);
3080 }
3081
3082 static void
3083 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3084         struct bfa_fcoe_stats_s *s)
3085 {
3086         u32     *dip = (u32 *) d;
3087         __be32  *sip = (__be32 *) s;
3088         int             i;
3089
3090         for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3091              i = i + 2) {
3092 #ifdef __BIG_ENDIAN
3093                 dip[i] = be32_to_cpu(sip[i]);
3094                 dip[i + 1] = be32_to_cpu(sip[i + 1]);
3095 #else
3096                 dip[i] = be32_to_cpu(sip[i + 1]);
3097                 dip[i + 1] = be32_to_cpu(sip[i]);
3098 #endif
3099         }
3100 }
3101
/*
 * Deferred completion for a stats-get request.  On completion, byte-swap
 * the firmware stats into the caller's buffer and invoke the caller's
 * callback; on cancellation, just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3132
/*
 * Timer handler for a stats-get request that received no firmware
 * response: cancel any pending reqq wait and complete the request with
 * BFA_STATUS_ETIMER via the deferred callback.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3149
/*
 * Issue the stats-get request to firmware.  If the request queue is
 * full, arm a reqq waiter that re-enters this function when space frees
 * up (also used directly as that waiter's callback).
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3173
/*
 * Deferred completion for a stats-clear request.  On completion, restart
 * the stats-reset timestamp and invoke the caller's callback; on
 * cancellation, just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3194
/*
 * Timer handler for a stats-clear request that received no firmware
 * response: cancel any pending reqq wait and complete the request with
 * BFA_STATUS_ETIMER via the deferred callback.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3211
/*
 * Issue the stats-clear request to firmware.  If the request queue is
 * full, arm a reqq waiter that re-enters this function when space frees
 * up (also used directly as that waiter's callback).
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3235
/*
 * Handle trunk SCN event from firmware.
 *
 * Copies the per-link trunk attributes out of the SCN, logs which links
 * are up, and notifies upper layers if the overall trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set => trunk link i is up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		   scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl       = tlink->fctl;
		lattr->speed      = tlink->speed;
		lattr->deskew     = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed    = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Log the up/down pattern: 3 = both links, 2 = link 1, 1 = link 0 */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3314
/*
 * IOC went down: in trunked mode, report link-down to upper layers and
 * reset all cached trunk/link attributes to their offline defaults.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
						BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
						BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
						BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3342
/*
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* 0 means "not configured": fall back to the IOC default */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3367
/*
 * Firmware message handler.
 *
 * Dispatches every fcport i2h message: enable/disable responses, link
 * events, trunk SCNs, stats responses, and enable/disable AENs.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	/* save the raw message so SM handlers can read link details */
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* stale responses (old msgtag) are silently discarded */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			if (fcport->use_flash_cfg) {
				/*
				 * Adopt the flash-based configuration
				 * returned by firmware.  NOTE(review):
				 * cpu_to_be16 is used to byte-swap the
				 * big-endian firmware fields into CPU
				 * order; be16_to_cpu would state the
				 * intent better (same byte swap).
				 */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
	break;
	}
}
3466
3467 /*
3468  * Registered callback for port events.
3469  */
3470 void
3471 bfa_fcport_event_register(struct bfa_s *bfa,
3472                                 void (*cbfn) (void *cbarg,
3473                                 enum bfa_port_linkstate event),
3474                                 void *cbarg)
3475 {
3476         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3477
3478         fcport->event_cbfn = cbfn;
3479         fcport->event_cbarg = cbarg;
3480 }
3481
/*
 * Request port enable.  Fails with BFA_STATUS_PBC if the port is
 * disabled by pre-boot configuration, BFA_STATUS_IOC_DISABLED if the
 * IOC is down, or BFA_STATUS_DIAG_BUSY while diagnostics own the port.
 */
bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
3499
/*
 * Request port disable.  Fails with BFA_STATUS_PBC if the port is
 * disabled by pre-boot configuration, or BFA_STATUS_IOC_DISABLED if the
 * IOC is down.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3512
3513 /* If PBC is disabled on port, return error */
3514 bfa_status_t
3515 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3516 {
3517         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3518         struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3519         struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3520
3521         if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3522                 bfa_trc(bfa, fcport->pwwn);
3523                 return BFA_STATUS_PBC;
3524         }
3525         return BFA_STATUS_OK;
3526 }
3527
/*
 * Configure port speed.
 *
 * Validates @speed against the trunking configuration, the supported
 * speed ceiling, and (for Mezzanine cards) card/IOC-specific speed
 * rules, then commits it to the port configuration.
 */
bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, speed);

	/* When trunking is on, the trunk dictates the speed */
	if (fcport->cfg.trunked == BFA_TRUE)
		return BFA_STATUS_TRUNK_ENABLED;
	/* AUTO is always acceptable; anything else must not exceed speed_sup */
	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
		bfa_trc(bfa, fcport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
			/* For CT2, 1G is not supported */
			if ((speed == BFA_PORT_SPEED_1GBPS) &&
			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
				return BFA_STATUS_UNSUPP_SPEED;

			/* Already checked for Auto Speed and Max Speed supp */
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			/* Non-FC (FCoE) mezz IOC: only 10G is accepted here */
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	fcport->cfg.speed = speed;

	return BFA_STATUS_OK;
}
3571
3572 /*
3573  * Get current speed.
3574  */
3575 enum bfa_port_speed
3576 bfa_fcport_get_speed(struct bfa_s *bfa)
3577 {
3578         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3579
3580         return fcport->speed;
3581 }
3582
3583 /*
3584  * Configure port topology.
3585  */
3586 bfa_status_t
3587 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3588 {
3589         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3590
3591         bfa_trc(bfa, topology);
3592         bfa_trc(bfa, fcport->cfg.topology);
3593
3594         switch (topology) {
3595         case BFA_PORT_TOPOLOGY_P2P:
3596         case BFA_PORT_TOPOLOGY_LOOP:
3597         case BFA_PORT_TOPOLOGY_AUTO:
3598                 break;
3599
3600         default:
3601                 return BFA_STATUS_EINVAL;
3602         }
3603
3604         fcport->cfg.topology = topology;
3605         return BFA_STATUS_OK;
3606 }
3607
3608 /*
3609  * Get current topology.
3610  */
3611 enum bfa_port_topology
3612 bfa_fcport_get_topology(struct bfa_s *bfa)
3613 {
3614         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3615
3616         return fcport->topology;
3617 }
3618
3619 bfa_status_t
3620 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3621 {
3622         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3623
3624         bfa_trc(bfa, alpa);
3625         bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3626         bfa_trc(bfa, fcport->cfg.hardalpa);
3627
3628         fcport->cfg.cfg_hardalpa = BFA_TRUE;
3629         fcport->cfg.hardalpa = alpa;
3630
3631         return BFA_STATUS_OK;
3632 }
3633
3634 bfa_status_t
3635 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3636 {
3637         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3638
3639         bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3640         bfa_trc(bfa, fcport->cfg.hardalpa);
3641
3642         fcport->cfg.cfg_hardalpa = BFA_FALSE;
3643         return BFA_STATUS_OK;
3644 }
3645
3646 bfa_boolean_t
3647 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3648 {
3649         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3650
3651         *alpa = fcport->cfg.hardalpa;
3652         return fcport->cfg.cfg_hardalpa;
3653 }
3654
3655 u8
3656 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3657 {
3658         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3659
3660         return fcport->myalpa;
3661 }
3662
3663 bfa_status_t
3664 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3665 {
3666         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3667
3668         bfa_trc(bfa, maxfrsize);
3669         bfa_trc(bfa, fcport->cfg.maxfrsize);
3670
3671         /* with in range */
3672         if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3673                 return BFA_STATUS_INVLD_DFSZ;
3674
3675         /* power of 2, if not the max frame size of 2112 */
3676         if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3677                 return BFA_STATUS_INVLD_DFSZ;
3678
3679         fcport->cfg.maxfrsize = maxfrsize;
3680         return BFA_STATUS_OK;
3681 }
3682
3683 u16
3684 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3685 {
3686         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3687
3688         return fcport->cfg.maxfrsize;
3689 }
3690
3691 u8
3692 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3693 {
3694         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3695
3696         return fcport->cfg.rx_bbcredit;
3697 }
3698
/*
 * Set the transmit buffer-to-buffer credit and BB_SCN value, then push
 * the new credit configuration to the firmware.
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * NOTE(review): tx_bbcredit is truncated from u16 to u8 here --
	 * values above 255 are silently clipped; confirm callers never
	 * pass larger credit counts.
	 */
	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	fcport->cfg.bb_scn = bb_scn;
	/* a non-zero BB_SCN marks BB credit recovery as operational */
	if (bb_scn)
		fcport->bbsc_op_state = BFA_TRUE;
	bfa_fcport_send_txcredit(fcport);
}
3710
3711 /*
3712  * Get port attributes.
3713  */
3714
3715 wwn_t
3716 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3717 {
3718         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3719         if (node)
3720                 return fcport->nwwn;
3721         else
3722                 return fcport->pwwn;
3723 }
3724
/*
 * Populate @attr with a snapshot of the port's configured and
 * operational attributes: WWNs, speed, topology, beacon state and the
 * derived port state.  @attr is fully zeroed first.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-assigned WWNs come from the IOC manufacturing block */
	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	/* start from the full port config, then overlay live values */
	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	/* state machine gives the baseline port state ... */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	attr->bbsc_op_status =  fcport->bbsc_op_state;

	/* PBC Disabled State */
	/* ... which IOC-level conditions below may override */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
		else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_ACQ_ADDR;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3775
3776 #define BFA_FCPORT_STATS_TOV    1000
3777
/*
 * Fetch port statistics (FCQoS or FCoE).
 *
 * Asynchronous: the request is sent to firmware and a
 * BFA_FCPORT_STATS_TOV-millisecond guard timer is armed; @cbfn/@cbarg
 * are stored for completion delivery (presumably invoked from the ISR
 * or timeout path -- not visible here).  Only one stats operation
 * (get or clear) may be outstanding at a time.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* reject overlapping stats requests */
	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_ret   = stats;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3803
/*
 * Reset port statistics (FCQoS or FCoE).
 *
 * Asynchronous, mirroring bfa_fcport_get_stats(): sends the clear
 * request to firmware, arms a BFA_FCPORT_STATS_TOV guard timer and
 * stores @cbfn/@cbarg for completion delivery.  Shares the stats_busy
 * flag with the get path, so get and clear are mutually exclusive.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3827
3828
3829 /*
3830  * Fetch port attributes.
3831  */
3832 bfa_boolean_t
3833 bfa_fcport_is_disabled(struct bfa_s *bfa)
3834 {
3835         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3836
3837         return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3838                 BFA_PORT_ST_DISABLED;
3839
3840 }
3841
3842 bfa_boolean_t
3843 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3844 {
3845         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3846
3847         return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3848
3849 }
3850
3851 /*
3852  *      Enable/Disable FAA feature in port config
3853  */
3854 void
3855 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3856 {
3857         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3858
3859         bfa_trc(bfa, state);
3860         fcport->cfg.faa_state = state;
3861 }
3862
3863 /*
3864  * Get default minimum ratelim speed
3865  */
3866 enum bfa_port_speed
3867 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3868 {
3869         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3870
3871         bfa_trc(bfa, fcport->cfg.trl_def_speed);
3872         return fcport->cfg.trl_def_speed;
3873
3874 }
3875
/*
 * Update the port's beacon (locator LED) settings.
 *
 * @dev is the bfa instance (void* because this is used as a generic
 * callback); @beacon controls the port beacon, @link_e2e_beacon the
 * end-to-end link beacon.  Old and new values are traced.
 */
void
bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
		  bfa_boolean_t link_e2e_beacon)
{
	struct bfa_s *bfa = dev;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, beacon);
	bfa_trc(bfa, link_e2e_beacon);
	bfa_trc(bfa, fcport->beacon);
	bfa_trc(bfa, fcport->link_e2e_beacon);

	fcport->beacon = beacon;
	fcport->link_e2e_beacon = link_e2e_beacon;
}
3891
3892 bfa_boolean_t
3893 bfa_fcport_is_linkup(struct bfa_s *bfa)
3894 {
3895         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3896
3897         return  (!fcport->cfg.trunked &&
3898                  bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3899                 (fcport->cfg.trunked &&
3900                  fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3901 }
3902
3903 bfa_boolean_t
3904 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3905 {
3906         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3907
3908         return fcport->cfg.qos_enabled;
3909 }
3910
3911 bfa_boolean_t
3912 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3913 {
3914         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3915
3916         return fcport->cfg.trunked;
3917 }
3918
3919 /*
3920  * Rport State machine functions
3921  */
/*
 * Beginning state, only online event expected.
 *
 * CREATE moves the rport to the created state; any other event in
 * this state is a driver bug.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3942
/*
 * Rport is created (software only); waiting to be brought online,
 * which triggers creation of the firmware-side rport.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull variant waits for request-queue space first */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		/* nothing exists in firmware yet; free immediately */
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3974
/*
 * Waiting for rport create response from firmware.
 *
 * DELETE/OFFLINE arriving now cannot be acted on until the create
 * response returns, so they park in the *_pending states.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4011
/*
 * Request queue is full, awaiting queue resume to send create request.
 *
 * Because the create was never sent to firmware, DELETE/OFFLINE/HWFAIL
 * here must cancel the queue wait before leaving the state.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue has room now; retry the create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4052
/*
 * Online state - normal parking state.
 *
 * Handles teardown (OFFLINE/DELETE), speed updates and QoS state
 * change notifications from firmware.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		/*
		 * NOTE(review): rp->qos_attr is copied before the flow_id
		 * byte-swap below, so it retains the wire-order flow_id;
		 * confirm consumers of rp->qos_attr expect that.
		 */
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* convert flow ids to host order for the comparison below */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about actual changes */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4120
/*
 * Firmware rport is being deleted - awaiting f/w response.
 *
 * A DELETE arriving now upgrades the teardown from "offline" to
 * "delete" (the rport will be freed once firmware responds).
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4153
/*
 * Request queue is full; awaiting queue resume to send the firmware
 * delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		/* delete was never queued; cancel the queue wait */
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4183
/*
 * Offline state.
 *
 * The firmware-side rport does not exist here; ONLINE recreates it,
 * DELETE frees the software rport directly.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4218
/*
 * Rport is deleted, waiting for firmware response to delete.
 *
 * Both the firmware response and a hardware failure end with the
 * rport freed and returned to the uninit state.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4245
/*
 * Rport is deleted; waiting for request-queue space to send the
 * firmware delete request.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		/* delete was never queued; cancel the queue wait */
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4270
/*
 * Waiting for rport create response from firmware. A delete is pending.
 *
 * Once the create response arrives, the deferred delete is issued.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4301
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 *
 * Once the create response arrives, the deferred offline (firmware
 * delete) is issued.  A DELETE in the meantime escalates to
 * delete-pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4336
/*
 * IOC h/w failed.
 *
 * No firmware interaction is possible; OFFLINE/DELETE complete in
 * software only, ONLINE attempts a fresh firmware create, and further
 * HWFAIL events are ignored.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already in the failed state; nothing more to do */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4374
4375
4376
4377 /*
4378  *  bfa_rport_private BFA rport private functions
4379  */
4380
4381 static void
4382 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4383 {
4384         struct bfa_rport_s *rp = cbarg;
4385
4386         if (complete)
4387                 bfa_cb_rport_online(rp->rport_drv);
4388 }
4389
4390 static void
4391 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4392 {
4393         struct bfa_rport_s *rp = cbarg;
4394
4395         if (complete)
4396                 bfa_cb_rport_offline(rp->rport_drv);
4397 }
4398
4399 static void
4400 bfa_rport_qresume(void *cbarg)
4401 {
4402         struct bfa_rport_s      *rp = cbarg;
4403
4404         bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4405 }
4406
4407 static void
4408 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4409                 struct bfa_s *bfa)
4410 {
4411         struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4412
4413         if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4414                 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4415
4416         /* kva memory */
4417         bfa_mem_kva_setup(minfo, rport_kva,
4418                 cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4419 }
4420
/*
 * Module attach: carve the rport array out of the pre-reserved KVA
 * block, initialize every rport and build the free/active/unused
 * queues.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* rport count must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 *  - is unused
		 * (entry 0 is kept off the free list; tag 0 apparently
		 * reserved -- TODO confirm against firmware interface)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4460
/* Module detach hook: nothing to clean up for rports. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4465
/*
 * Module start callback - nothing to do for the rport module.
 */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4470
/*
 * Module stop callback - nothing to do for the rport module.
 */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4475
4476 static void
4477 bfa_rport_iocdisable(struct bfa_s *bfa)
4478 {
4479         struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4480         struct bfa_rport_s *rport;
4481         struct list_head *qe, *qen;
4482
4483         /* Enqueue unused rport resources to free_q */
4484         list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4485
4486         list_for_each_safe(qe, qen, &mod->rp_active_q) {
4487                 rport = (struct bfa_rport_s *) qe;
4488                 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4489         }
4490 }
4491
4492 static struct bfa_rport_s *
4493 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4494 {
4495         struct bfa_rport_s *rport;
4496
4497         bfa_q_deq(&mod->rp_free_q, &rport);
4498         if (rport)
4499                 list_add_tail(&rport->qe, &mod->rp_active_q);
4500
4501         return rport;
4502 }
4503
4504 static void
4505 bfa_rport_free(struct bfa_rport_s *rport)
4506 {
4507         struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4508
4509         WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4510         list_del(&rport->qe);
4511         list_add_tail(&rport->qe, &mod->rp_free_q);
4512 }
4513
/*
 * Post an rport create request to firmware. Returns BFA_FALSE and
 * parks on the request-queue wait list when no message slot is
 * available; the state machine is resumed via bfa_rport_qresume()
 * once space frees up.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* fill in the create request from the cached rport_info */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4546
4547 static bfa_boolean_t
4548 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4549 {
4550         struct bfi_rport_delete_req_s *m;
4551
4552         /*
4553          * check for room in queue to send request now
4554          */
4555         m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4556         if (!m) {
4557                 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4558                 return BFA_FALSE;
4559         }
4560
4561         bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4562                         bfa_fn_lpu(rp->bfa));
4563         m->fw_handle = rp->fw_handle;
4564
4565         /*
4566          * queue I/O message to firmware
4567          */
4568         bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4569         return BFA_TRUE;
4570 }
4571
4572 static bfa_boolean_t
4573 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4574 {
4575         struct bfa_rport_speed_req_s *m;
4576
4577         /*
4578          * check for room in queue to send request now
4579          */
4580         m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4581         if (!m) {
4582                 bfa_trc(rp->bfa, rp->rport_info.speed);
4583                 return BFA_FALSE;
4584         }
4585
4586         bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4587                         bfa_fn_lpu(rp->bfa));
4588         m->fw_handle = rp->fw_handle;
4589         m->speed = (u8)rp->rport_info.speed;
4590
4591         /*
4592          * queue I/O message to firmware
4593          */
4594         bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4595         return BFA_TRUE;
4596 }
4597
4598
4599
4600 /*
4601  *  bfa_rport_public
4602  */
4603
/*
 * Rport interrupt processing: dispatch firmware-to-host rport
 * messages to the owning rport's state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* record the firmware handle and QoS attributes returned
		 * by the create response before advancing the SM */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* stash the raw firmware event for the SM handler */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
4643
4644 void
4645 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4646 {
4647         struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
4648         struct list_head        *qe;
4649         int     i;
4650
4651         for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4652                 bfa_q_deq_tail(&mod->rp_free_q, &qe);
4653                 list_add_tail(qe, &mod->rp_unused_q);
4654         }
4655 }
4656
4657 /*
4658  *  bfa_rport_api
4659  */
4660
4661 struct bfa_rport_s *
4662 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4663 {
4664         struct bfa_rport_s *rp;
4665
4666         rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4667
4668         if (rp == NULL)
4669                 return NULL;
4670
4671         rp->bfa = bfa;
4672         rp->rport_drv = rport_drv;
4673         memset(&rp->stats, 0, sizeof(rp->stats));
4674
4675         WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4676         bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4677
4678         return rp;
4679 }
4680
/*
 * Bring an rport online with the remote-port parameters supplied by
 * the caller. A zero max_frmsz triggers a warning and is patched up
 * to FC_MIN_PDUSZ before the info is cached on the rport.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4698
/*
 * Record a new operating speed for the rport and ask the state
 * machine to propagate it to firmware.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	/* callers must pass an explicit, resolved speed */
	WARN_ON(speed == 0);
	WARN_ON(speed == BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4708
4709
4710 /*
4711  * SGPG related functions
4712  */
4713
/*
 * Compute the DMA and KVA memory needed by the SGPG module.
 * Clamps the configured SG-page count to [BFA_SGPG_MIN, BFA_SGPG_MAX],
 * then spreads the DMA requirement over the module's memory segments.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* clamp the configured count into the supported range */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* fill each DMA segment; the last one takes the remainder */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory: one bfa_sgpg_s descriptor per SG page */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
4751
/*
 * Attach-time initialization of the SGPG module: claim the KVA
 * descriptor array and the DMA segments, align each segment's first
 * SG page to an SG-page boundary, and link every (descriptor, DMA
 * page) pair onto the free queue.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;	/* host-side descriptor */
	struct bfi_sgpg_s *sgpg;	/* firmware-visible SG page */
	u64 align_len;			/* pad to SG-page alignment */
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* view of the same physical address in both representations */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* round the segment start up to an SG-page boundary */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		/* SG pages that fit in this segment after alignment */
		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance KVA pointer beyond the consumed descriptor array */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
4814
/*
 * Module detach callback - the SGPG module has no detach-time work.
 */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4819
/*
 * Module start callback - nothing to do for the SGPG module.
 */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4824
/*
 * Module stop callback - nothing to do for the SGPG module.
 */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4829
/*
 * IOC disable callback - no SGPG state needs to be failed over.
 */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4834
4835 bfa_status_t
4836 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4837 {
4838         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4839         struct bfa_sgpg_s *hsgpg;
4840         int i;
4841
4842         if (mod->free_sgpgs < nsgpgs)
4843                 return BFA_STATUS_ENOMEM;
4844
4845         for (i = 0; i < nsgpgs; i++) {
4846                 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4847                 WARN_ON(!hsgpg);
4848                 list_add_tail(&hsgpg->qe, sgpg_q);
4849         }
4850
4851         mod->free_sgpgs -= nsgpgs;
4852         return BFA_STATUS_OK;
4853 }
4854
/*
 * Return nsgpg scatter/gather pages to the free pool, then use the
 * replenished pool to satisfy queued waiters in FIFO order. A waiter
 * whose demand is fully met is dequeued and its callback invoked.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the waiter what it needs, capped at what is free */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* demand fully satisfied: complete this waiter */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4886
/*
 * Queue a wait element for nsgpg scatter/gather pages. Whatever is
 * currently free is handed to this waiter immediately; the remainder
 * is delivered from bfa_sgpg_mfree() as pages are returned.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* waiting is only legal when demand exceeds the free count */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4912
4913 void
4914 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4915 {
4916         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4917
4918         WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4919         list_del(&wqe->qe);
4920
4921         if (wqe->nsgpg_total != wqe->nsgpg)
4922                 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4923                                    wqe->nsgpg_total - wqe->nsgpg);
4924 }
4925
4926 void
4927 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4928                    void *cbarg)
4929 {
4930         INIT_LIST_HEAD(&wqe->sgpg_q);
4931         wqe->cbfn = cbfn;
4932         wqe->cbarg = cbarg;
4933 }
4934
4935 /*
4936  *  UF related functions
4937  */
4938 /*
4939  *****************************************************************************
4940  * Internal functions
4941  *****************************************************************************
4942  */
4943 static void
4944 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4945 {
4946         struct bfa_uf_s   *uf = cbarg;
4947         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4948
4949         if (complete)
4950                 ufm->ufrecv(ufm->cbarg, uf);
4951 }
4952
4953 static void
4954 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
4955 {
4956         struct bfi_uf_buf_post_s *uf_bp_msg;
4957         u16 i;
4958         u16 buf_len;
4959
4960         ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
4961         uf_bp_msg = ufm->uf_buf_posts;
4962
4963         for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4964              i++, uf_bp_msg++) {
4965                 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4966
4967                 uf_bp_msg->buf_tag = i;
4968                 buf_len = sizeof(struct bfa_uf_buf_s);
4969                 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4970                 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4971                             bfa_fn_lpu(ufm->bfa));
4972                 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
4973         }
4974
4975         /*
4976          * advance pointer beyond consumed memory
4977          */
4978         bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
4979 }
4980
/*
 * Claim the UF descriptor array from the module KVA area, initialize
 * each descriptor with its tag and DMA buffer kva/pa, and queue all
 * descriptors on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		/* per-UF DMA buffer carved from the module DMA segments */
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5010
/*
 * Claim all KVA memory needed by the UF module: the UF descriptor
 * array first, then the pre-built buffer-post messages.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5017
/*
 * Compute the DMA and KVA memory needed by the UF module: one DMA
 * buffer per UF spread over the memory segments, plus a descriptor
 * and a pre-built post message per UF in KVA.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* fill each DMA segment; the last one takes the remainder */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5045
5046 static void
5047 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5048                 struct bfa_pcidev_s *pcidev)
5049 {
5050         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5051
5052         ufm->bfa = bfa;
5053         ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5054         INIT_LIST_HEAD(&ufm->uf_free_q);
5055         INIT_LIST_HEAD(&ufm->uf_posted_q);
5056         INIT_LIST_HEAD(&ufm->uf_unused_q);
5057
5058         uf_mem_claim(ufm);
5059 }
5060
/*
 * Module detach callback - the UF module has no detach-time work.
 */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5065
5066 static struct bfa_uf_s *
5067 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5068 {
5069         struct bfa_uf_s   *uf;
5070
5071         bfa_q_deq(&uf_mod->uf_free_q, &uf);
5072         return uf;
5073 }
5074
/*
 * Return a UF descriptor to the free list.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5080
5081 static bfa_status_t
5082 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5083 {
5084         struct bfi_uf_buf_post_s *uf_post_msg;
5085
5086         uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5087         if (!uf_post_msg)
5088                 return BFA_STATUS_FAILED;
5089
5090         memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5091                       sizeof(struct bfi_uf_buf_post_s));
5092         bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5093
5094         bfa_trc(ufm->bfa, uf->uf_tag);
5095
5096         list_add_tail(&uf->qe, &ufm->uf_posted_q);
5097         return BFA_STATUS_OK;
5098 }
5099
5100 static void
5101 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5102 {
5103         struct bfa_uf_s   *uf;
5104
5105         while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5106                 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5107                         break;
5108         }
5109 }
5110
5111 static void
5112 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5113 {
5114         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5115         u16 uf_tag = m->buf_tag;
5116         struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5117         struct bfa_uf_buf_s *uf_buf;
5118         uint8_t *buf;
5119         struct fchs_s *fchs;
5120
5121         uf_buf = (struct bfa_uf_buf_s *)
5122                         bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5123         buf = &uf_buf->d[0];
5124
5125         m->frm_len = be16_to_cpu(m->frm_len);
5126         m->xfr_len = be16_to_cpu(m->xfr_len);
5127
5128         fchs = (struct fchs_s *)uf_buf;
5129
5130         list_del(&uf->qe);      /* dequeue from posted queue */
5131
5132         uf->data_ptr = buf;
5133         uf->data_len = m->xfr_len;
5134
5135         WARN_ON(uf->data_len < sizeof(struct fchs_s));
5136
5137         if (uf->data_len == sizeof(struct fchs_s)) {
5138                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5139                                uf->data_len, (struct fchs_s *)buf);
5140         } else {
5141                 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5142                 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5143                                       BFA_PL_EID_RX, uf->data_len,
5144                                       (struct fchs_s *)buf, pld_w0);
5145         }
5146
5147         if (bfa->fcs)
5148                 __bfa_cb_uf_recv(uf, BFA_TRUE);
5149         else
5150                 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5151 }
5152
/*
 * Module stop callback - nothing to do for the UF module.
 */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5157
5158 static void
5159 bfa_uf_iocdisable(struct bfa_s *bfa)
5160 {
5161         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5162         struct bfa_uf_s *uf;
5163         struct list_head *qe, *qen;
5164
5165         /* Enqueue unused uf resources to free_q */
5166         list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5167
5168         list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5169                 uf = (struct bfa_uf_s *) qe;
5170                 list_del(&uf->qe);
5171                 bfa_uf_put(ufm, uf);
5172         }
5173 }
5174
/*
 * Module start callback: post all free UF buffers to firmware so it
 * can start receiving unsolicited frames.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5180
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
5196
/*
 *	Free an unsolicited frame back to BFA.
 *
 * @param[in]		uf		unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	/* return the UF and immediately try to repost free buffers */
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
5210
5211
5212
5213 /*
5214  *  uf_pub BFA uf module public functions
5215  */
/*
 * UF interrupt handler: dispatch firmware-to-host UF messages.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5231
5232 void
5233 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5234 {
5235         struct bfa_uf_mod_s     *mod = BFA_UF_MOD(bfa);
5236         struct list_head        *qe;
5237         int     i;
5238
5239         for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5240                 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5241                 list_add_tail(qe, &mod->uf_unused_q);
5242         }
5243 }
5244
5245 /*
5246  *      BFA fcdiag module
5247  */
5248 #define BFA_DIAG_QTEST_TOV      1000    /* msec */
5249
5250 /*
5251  *      Set port status to busy
5252  */
5253 static void
5254 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5255 {
5256         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5257
5258         if (fcdiag->lb.lock)
5259                 fcport->diag_busy = BFA_TRUE;
5260         else
5261                 fcport->diag_busy = BFA_FALSE;
5262 }
5263
/*
 * Meminfo callback - the fcdiag module claims no memory of its own.
 */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}
5269
/*
 * Attach-time initialization of the fcdiag module: cache the bfa and
 * trace-module pointers.
 */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	fcdiag->bfa             = bfa;
	fcdiag->trcmod  = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
}
5279
/*
 * IOC failure: abort an in-progress loopback test, completing it with
 * BFA_STATUS_IOC_FAILURE and releasing the lock / port busy status.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}
}
5292
/*
 * Module detach callback - nothing to do for the fcdiag module.
 */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
5297
/*
 * Module start callback - nothing to do for the fcdiag module.
 */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
5302
/*
 * Module stop callback - nothing to do for the fcdiag module.
 */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
5307
/*
 * Queue-test timer expiry: no response arrived in time. Report
 * BFA_STATUS_ETIMER to the caller along with how many iterations
 * completed, and release the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s       *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	/* mark the timer dead so a late response is ignored */
	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5329
5330 static bfa_status_t
5331 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5332 {
5333         u32     i;
5334         struct bfi_diag_qtest_req_s *req;
5335
5336         req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5337         if (!req)
5338                 return BFA_STATUS_DEVBUSY;
5339
5340         /* build host command */
5341         bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5342                 bfa_fn_lpu(fcdiag->bfa));
5343
5344         for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5345                 req->data[i] = QTEST_PAT_DEFAULT;
5346
5347         bfa_trc(fcdiag, fcdiag->qtest.queue);
5348         /* ring door bell */
5349         bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5350         return BFA_STATUS_OK;
5351 }
5352
/*
 * Queue-test response handler: verify the returned payload, then
 * either send the next iteration (same queue, or the next queue when
 * testing all queues) or finish the test and report the result.
 * Responses arriving after the timer fired are ignored.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: each word is expected to be the bitwise
	 * complement of the pattern that was sent */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations remain on this queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* this queue is done; move on to the next one */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5410
/*
 * Loopback-test response handler: byte-swap the firmware frame
 * counters into the caller's result structure, complete the request
 * via the registered callback, and release the lock / busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	/* counters arrive in wire (big-endian) byte order */
	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
5429
5430 static bfa_status_t
5431 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5432                         struct bfa_diag_loopback_s *loopback)
5433 {
5434         struct bfi_diag_lb_req_s *lb_req;
5435
5436         lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5437         if (!lb_req)
5438                 return BFA_STATUS_DEVBUSY;
5439
5440         /* build host command */
5441         bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5442                 bfa_fn_lpu(fcdiag->bfa));
5443
5444         lb_req->lb_mode = loopback->lb_mode;
5445         lb_req->speed = loopback->speed;
5446         lb_req->loopcnt = loopback->loopcnt;
5447         lb_req->pattern = loopback->pattern;
5448
5449         /* ring door bell */
5450         bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5451
5452         bfa_trc(fcdiag, loopback->lb_mode);
5453         bfa_trc(fcdiag, loopback->speed);
5454         bfa_trc(fcdiag, loopback->loopcnt);
5455         bfa_trc(fcdiag, loopback->pattern);
5456         return BFA_STATUS_OK;
5457 }
5458
5459 /*
5460  *      cpe/rme intr handler
5461  */
5462 void
5463 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5464 {
5465         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5466
5467         switch (msg->mhdr.msg_id) {
5468         case BFI_DIAG_I2H_LOOPBACK:
5469                 bfa_fcdiag_loopback_comp(fcdiag,
5470                                 (struct bfi_diag_lb_rsp_s *) msg);
5471                 break;
5472         case BFI_DIAG_I2H_QTEST:
5473                 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5474                 break;
5475         default:
5476                 bfa_trc(fcdiag, msg->mhdr.msg_id);
5477                 WARN_ON(1);
5478         }
5479 }
5480
5481 /*
5482  *      Loopback test
5483  *
5484  *   @param[in] *bfa            - bfa data struct
5485  *   @param[in] opmode          - port operation mode
5486  *   @param[in] speed           - port speed
5487  *   @param[in] lpcnt           - loop count
5488  *   @param[in] pat                     - pattern to build packet
5489  *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5490  *   @param[in] cbfn            - callback function
5491  *   @param[in] cbarg           - callback functioin arg
5492  *
5493  *   @param[out]
5494  */
5495 bfa_status_t
5496 bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5497                 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5498                 struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5499                 void *cbarg)
5500 {
5501         struct  bfa_diag_loopback_s loopback;
5502         struct bfa_port_attr_s attr;
5503         bfa_status_t status;
5504         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5505
5506         if (!bfa_iocfc_is_operational(bfa))
5507                 return BFA_STATUS_IOC_NON_OP;
5508
5509         /* if port is PBC disabled, return error */
5510         if (bfa_fcport_is_pbcdisabled(bfa)) {
5511                 bfa_trc(fcdiag, BFA_STATUS_PBC);
5512                 return BFA_STATUS_PBC;
5513         }
5514
5515         if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5516                 bfa_trc(fcdiag, opmode);
5517                 return BFA_STATUS_PORT_NOT_DISABLED;
5518         }
5519
5520         /* Check if the speed is supported */
5521         bfa_fcport_get_attr(bfa, &attr);
5522         bfa_trc(fcdiag, attr.speed_supported);
5523         if (speed > attr.speed_supported)
5524                 return BFA_STATUS_UNSUPP_SPEED;
5525
5526         /* For Mezz card, port speed entered needs to be checked */
5527         if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5528                 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5529                         if ((speed == BFA_PORT_SPEED_1GBPS) &&
5530                             (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5531                                 return BFA_STATUS_UNSUPP_SPEED;
5532                         if (!(speed == BFA_PORT_SPEED_1GBPS ||
5533                               speed == BFA_PORT_SPEED_2GBPS ||
5534                               speed == BFA_PORT_SPEED_4GBPS ||
5535                               speed == BFA_PORT_SPEED_8GBPS ||
5536                               speed == BFA_PORT_SPEED_16GBPS ||
5537                               speed == BFA_PORT_SPEED_AUTO))
5538                                 return BFA_STATUS_UNSUPP_SPEED;
5539                 } else {
5540                         if (speed != BFA_PORT_SPEED_10GBPS)
5541                                 return BFA_STATUS_UNSUPP_SPEED;
5542                 }
5543         }
5544
5545         /* check to see if there is another destructive diag cmd running */
5546         if (fcdiag->lb.lock) {
5547                 bfa_trc(fcdiag, fcdiag->lb.lock);
5548                 return BFA_STATUS_DEVBUSY;
5549         }
5550
5551         fcdiag->lb.lock = 1;
5552         loopback.lb_mode = opmode;
5553         loopback.speed = speed;
5554         loopback.loopcnt = lpcnt;
5555         loopback.pattern = pat;
5556         fcdiag->lb.result = result;
5557         fcdiag->lb.cbfn = cbfn;
5558         fcdiag->lb.cbarg = cbarg;
5559         memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5560         bfa_fcdiag_set_busy_status(fcdiag);
5561
5562         /* Send msg to fw */
5563         status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5564         return status;
5565 }
5566
5567 /*
5568  *      DIAG queue test command
5569  *
5570  *   @param[in] *bfa            - bfa data struct
5571  *   @param[in] force           - 1: don't do ioc op checking
5572  *   @param[in] queue           - queue no. to test
5573  *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
5574  *   @param[in] cbfn            - callback function
5575  *   @param[in] *cbarg          - callback functioin arg
5576  *
5577  *   @param[out]
5578  */
5579 bfa_status_t
5580 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
5581                 struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
5582                 void *cbarg)
5583 {
5584         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5585         bfa_status_t status;
5586         bfa_trc(fcdiag, force);
5587         bfa_trc(fcdiag, queue);
5588
5589         if (!force && !bfa_iocfc_is_operational(bfa))
5590                 return BFA_STATUS_IOC_NON_OP;
5591
5592         /* check to see if there is another destructive diag cmd running */
5593         if (fcdiag->qtest.lock) {
5594                 bfa_trc(fcdiag, fcdiag->qtest.lock);
5595                 return BFA_STATUS_DEVBUSY;
5596         }
5597
5598         /* Initialization */
5599         fcdiag->qtest.lock = 1;
5600         fcdiag->qtest.cbfn = cbfn;
5601         fcdiag->qtest.cbarg = cbarg;
5602         fcdiag->qtest.result = result;
5603         fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5604
5605         /* Init test results */
5606         fcdiag->qtest.result->status = BFA_STATUS_OK;
5607         fcdiag->qtest.result->count  = 0;
5608
5609         /* send */
5610         if (queue < BFI_IOC_MAX_CQS) {
5611                 fcdiag->qtest.result->queue  = (u8)queue;
5612                 fcdiag->qtest.queue = (u8)queue;
5613                 fcdiag->qtest.all   = 0;
5614         } else {
5615                 fcdiag->qtest.result->queue  = 0;
5616                 fcdiag->qtest.queue = 0;
5617                 fcdiag->qtest.all   = 1;
5618         }
5619         status = bfa_fcdiag_queuetest_send(fcdiag);
5620
5621         /* Start a timer */
5622         if (status == BFA_STATUS_OK) {
5623                 bfa_timer_start(bfa, &fcdiag->qtest.timer,
5624                                 bfa_fcdiag_queuetest_timeout, fcdiag,
5625                                 BFA_DIAG_QTEST_TOV);
5626                 fcdiag->qtest.timer_active = 1;
5627         }
5628         return status;
5629 }
5630
5631 /*
5632  * DIAG PLB is running
5633  *
5634  *   @param[in] *bfa    - bfa data struct
5635  *
5636  *   @param[out]
5637  */
5638 bfa_status_t
5639 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5640 {
5641         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5642         return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5643 }