2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
21 #include "bfa_modules.h"
23 BFA_TRC_FILE(HAL, FCXP);
33 * LPS related definitions
/* Bounds on the number of logical ports (lports) the LPS module manages */
35 #define BFA_LPS_MIN_LPORTS (1)
36 #define BFA_LPS_MAX_LPORTS (256)
39 * Maximum Vports supported per physical port or vf.
/* Per-ASIC vport limits; CB/CT presumably name the two adapter ASIC families
 * — NOTE(review): confirm against bfa_ioc asic-type definitions */
41 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
46 * FC PORT related definitions
49 * The port is considered disabled if corresponding physical port or IOC are
/* True when either the FC port itself or the owning IOC is disabled */
52 #define BFA_PORT_IS_DISABLED(bfa) \
53 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
54 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
57 * BFA port state machine events
/* Events consumed by the bfa_fcport_sm_* state handlers below.
 * NOTE(review): the closing "};" of this enum is not visible in this extract. */
59 enum bfa_fcport_sm_event {
60 BFA_FCPORT_SM_START = 1, /* start port state machine */
61 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
62 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
63 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
64 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
65 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
66 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
67 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
68 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
72 * BFA port link notification state machine events
/* Events consumed by the bfa_fcport_ln_sm_* handlers (link-notify SM).
 * NOTE(review): the closing "};" of this enum is not visible in this extract. */
75 enum bfa_fcport_ln_sm_event {
76 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
77 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
78 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
82 * RPORT related definitions
/* Notify rport-offline: call FCS synchronously when FCS owns the rport,
 * otherwise queue __bfa_cb_rport_offline on the HCB callback queue.
 * NOTE(review): the "else" arm of this macro is not visible in this extract. */
84 #define bfa_rport_offline_cb(__rp) do { \
85 if ((__rp)->bfa->fcs) \
86 bfa_cb_rport_offline((__rp)->rport_drv); \
88 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
89 __bfa_cb_rport_offline, (__rp)); \
/* Same dispatch pattern for the rport-online notification */
93 #define bfa_rport_online_cb(__rp) do { \
94 if ((__rp)->bfa->fcs) \
95 bfa_cb_rport_online((__rp)->rport_drv); \
97 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
98 __bfa_cb_rport_online, (__rp)); \
103 * forward declarations FCXP related functions
105 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
106 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
107 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
108 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
109 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
110 static void bfa_fcxp_qresume(void *cbarg);
111 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
112 struct bfi_fcxp_send_req_s *send_req);
115 * forward declarations for LPS functions
117 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
118 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
119 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
120 struct bfa_iocfc_cfg_s *cfg,
121 struct bfa_pcidev_s *pcidev);
122 static void bfa_lps_detach(struct bfa_s *bfa);
123 static void bfa_lps_start(struct bfa_s *bfa);
124 static void bfa_lps_stop(struct bfa_s *bfa);
125 static void bfa_lps_iocdisable(struct bfa_s *bfa);
126 static void bfa_lps_login_rsp(struct bfa_s *bfa,
127 struct bfi_lps_login_rsp_s *rsp);
128 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
129 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
130 struct bfi_lps_logout_rsp_s *rsp);
131 static void bfa_lps_reqq_resume(void *lps_arg);
132 static void bfa_lps_free(struct bfa_lps_s *lps);
133 static void bfa_lps_send_login(struct bfa_lps_s *lps);
134 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
135 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
136 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
137 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
138 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
141 * forward declaration for LPS state machine
143 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
144 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
145 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
147 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
148 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
149 enum bfa_lps_event event);
150 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
151 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
155 * forward declaration for FC Port functions
157 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
158 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
159 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
160 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
161 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
162 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
163 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
164 enum bfa_port_linkstate event, bfa_boolean_t trunk);
165 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
166 enum bfa_port_linkstate event);
167 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
168 static void bfa_fcport_stats_get_timeout(void *cbarg);
169 static void bfa_fcport_stats_clr_timeout(void *cbarg);
170 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
173 * forward declaration for FC PORT state machine
175 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
176 enum bfa_fcport_sm_event event);
177 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
178 enum bfa_fcport_sm_event event);
179 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
180 enum bfa_fcport_sm_event event);
181 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
182 enum bfa_fcport_sm_event event);
183 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
184 enum bfa_fcport_sm_event event);
185 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
186 enum bfa_fcport_sm_event event);
187 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
188 enum bfa_fcport_sm_event event);
189 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
190 enum bfa_fcport_sm_event event);
191 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
192 enum bfa_fcport_sm_event event);
193 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
194 enum bfa_fcport_sm_event event);
195 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
196 enum bfa_fcport_sm_event event);
197 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
198 enum bfa_fcport_sm_event event);
200 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
201 enum bfa_fcport_ln_sm_event event);
202 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
203 enum bfa_fcport_ln_sm_event event);
204 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
205 enum bfa_fcport_ln_sm_event event);
206 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
207 enum bfa_fcport_ln_sm_event event);
208 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
209 enum bfa_fcport_ln_sm_event event);
210 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
211 enum bfa_fcport_ln_sm_event event);
212 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
213 enum bfa_fcport_ln_sm_event event);
/* Maps each FC-port state-machine handler to its externally visible
 * bfa_port_state enum value (used to report the current port state).
 * NOTE(review): the closing "};" of this table is not visible in this extract. */
215 static struct bfa_sm_table_s hal_port_sm_table[] = {
216 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
217 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
218 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
219 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
220 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
221 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
222 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
223 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
224 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
225 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
226 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
227 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
232 * forward declaration for RPORT related functions
234 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
235 static void bfa_rport_free(struct bfa_rport_s *rport);
236 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
237 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
238 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
239 static void __bfa_cb_rport_online(void *cbarg,
240 bfa_boolean_t complete);
241 static void __bfa_cb_rport_offline(void *cbarg,
242 bfa_boolean_t complete);
245 * forward declaration for RPORT state machine
247 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
248 enum bfa_rport_event event);
249 static void bfa_rport_sm_created(struct bfa_rport_s *rp,
250 enum bfa_rport_event event);
251 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
252 enum bfa_rport_event event);
253 static void bfa_rport_sm_online(struct bfa_rport_s *rp,
254 enum bfa_rport_event event);
255 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
256 enum bfa_rport_event event);
257 static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
258 enum bfa_rport_event event);
259 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
260 enum bfa_rport_event event);
261 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
262 enum bfa_rport_event event);
263 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
264 enum bfa_rport_event event);
265 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
266 enum bfa_rport_event event);
267 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
268 enum bfa_rport_event event);
269 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
270 enum bfa_rport_event event);
271 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
275 * PLOG related definitions
278 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
280 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
281 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
284 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
285 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
292 bfa_get_log_time(void)
296 do_gettimeofday(&tv);
298 /* We are interested in seconds only. */
299 system_time = tv.tv_sec;
304 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
307 struct bfa_plog_rec_s *pl_recp;
309 if (plog->plog_enabled == 0)
312 if (plkd_validate_logrec(pl_rec)) {
319 pl_recp = &(plog->plog_recs[tail]);
321 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
323 pl_recp->tv = bfa_get_log_time();
324 BFA_PL_LOG_REC_INCR(plog->tail);
326 if (plog->head == plog->tail)
327 BFA_PL_LOG_REC_INCR(plog->head);
331 bfa_plog_init(struct bfa_plog_s *plog)
333 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
335 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
336 plog->head = plog->tail = 0;
337 plog->plog_enabled = 1;
341 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
342 enum bfa_plog_eid event,
343 u16 misc, char *log_str)
345 struct bfa_plog_rec_s lp;
347 if (plog->plog_enabled) {
348 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
351 lp.log_type = BFA_PL_LOG_TYPE_STRING;
353 strncpy(lp.log_entry.string_log, log_str,
354 BFA_PL_STRING_LOG_SZ - 1);
355 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
356 bfa_plog_add(plog, &lp);
361 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
362 enum bfa_plog_eid event,
363 u16 misc, u32 *intarr, u32 num_ints)
365 struct bfa_plog_rec_s lp;
368 if (num_ints > BFA_PL_INT_LOG_SZ)
369 num_ints = BFA_PL_INT_LOG_SZ;
371 if (plog->plog_enabled) {
372 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
375 lp.log_type = BFA_PL_LOG_TYPE_INT;
378 for (i = 0; i < num_ints; i++)
379 lp.log_entry.int_log[i] = intarr[i];
381 lp.log_num_ints = (u8) num_ints;
383 bfa_plog_add(plog, &lp);
388 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
389 enum bfa_plog_eid event,
390 u16 misc, struct fchs_s *fchdr)
392 struct bfa_plog_rec_s lp;
393 u32 *tmp_int = (u32 *) fchdr;
394 u32 ints[BFA_PL_INT_LOG_SZ];
396 if (plog->plog_enabled) {
397 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
399 ints[0] = tmp_int[0];
400 ints[1] = tmp_int[1];
401 ints[2] = tmp_int[4];
403 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
408 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
409 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
412 struct bfa_plog_rec_s lp;
413 u32 *tmp_int = (u32 *) fchdr;
414 u32 ints[BFA_PL_INT_LOG_SZ];
416 if (plog->plog_enabled) {
417 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
419 ints[0] = tmp_int[0];
420 ints[1] = tmp_int[1];
421 ints[2] = tmp_int[4];
424 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
430 * fcxp_pvt BFA FCXP private functions
434 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
437 struct bfa_fcxp_s *fcxp;
439 fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
440 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
442 INIT_LIST_HEAD(&mod->fcxp_free_q);
443 INIT_LIST_HEAD(&mod->fcxp_active_q);
444 INIT_LIST_HEAD(&mod->fcxp_unused_q);
446 mod->fcxp_list = fcxp;
448 for (i = 0; i < mod->num_fcxps; i++) {
449 fcxp->fcxp_mod = mod;
452 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
453 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
454 fcxp->reqq_waiting = BFA_FALSE;
459 bfa_mem_kva_curp(mod) = (void *)fcxp;
463 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
466 struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
467 struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
468 struct bfa_mem_dma_s *seg_ptr;
469 u16 nsegs, idx, per_seg_fcxp;
470 u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs;
476 if (cfg->drvcfg.min_cfg)
477 per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
479 per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
482 nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
483 per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
485 bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
486 if (num_fcxps >= per_seg_fcxp) {
487 num_fcxps -= per_seg_fcxp;
488 bfa_mem_dma_setup(minfo, seg_ptr,
489 per_seg_fcxp * per_fcxp_sz);
491 bfa_mem_dma_setup(minfo, seg_ptr,
492 num_fcxps * per_fcxp_sz);
496 bfa_mem_kva_setup(minfo, fcxp_kva,
497 cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
501 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
502 struct bfa_pcidev_s *pcidev)
504 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
507 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
510 * Initialize FCXP request and response payload sizes.
512 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
513 if (!cfg->drvcfg.min_cfg)
514 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
516 INIT_LIST_HEAD(&mod->wait_q);
518 claim_fcxps_mem(mod);
/* IOC module hook — FCXP has nothing to tear down. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
/* IOC module hook — no start-time work for FCXP. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
/* IOC module hook — no stop-time work for FCXP. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
537 bfa_fcxp_iocdisable(struct bfa_s *bfa)
539 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
540 struct bfa_fcxp_s *fcxp;
541 struct list_head *qe, *qen;
543 /* Enqueue unused fcxp resources to free_q */
544 list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
546 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
547 fcxp = (struct bfa_fcxp_s *) qe;
548 if (fcxp->caller == NULL) {
549 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
550 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
553 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
554 bfa_cb_queue(bfa, &fcxp->hcb_qe,
555 __bfa_fcxp_send_cbfn, fcxp);
560 static struct bfa_fcxp_s *
561 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
563 struct bfa_fcxp_s *fcxp;
565 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
568 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
574 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
578 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
579 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
580 struct list_head *r_sgpg_q,
582 bfa_fcxp_get_sgaddr_t sga_cbfn,
583 bfa_fcxp_get_sglen_t sglen_cbfn)
586 WARN_ON(bfa == NULL);
588 bfa_trc(bfa, fcxp->fcxp_tag);
593 WARN_ON(*sga_cbfn == NULL);
594 WARN_ON(*sglen_cbfn == NULL);
597 *r_sga_cbfn = sga_cbfn;
598 *r_sglen_cbfn = sglen_cbfn;
603 * alloc required sgpgs
605 if (n_sgles > BFI_SGE_INLINE)
612 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
613 void *caller, struct bfa_s *bfa, int nreq_sgles,
614 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
615 bfa_fcxp_get_sglen_t req_sglen_cbfn,
616 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
617 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
620 WARN_ON(bfa == NULL);
622 bfa_trc(bfa, fcxp->fcxp_tag);
624 fcxp->caller = caller;
626 bfa_fcxp_init_reqrsp(fcxp, bfa,
627 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
628 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
629 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
631 bfa_fcxp_init_reqrsp(fcxp, bfa,
632 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
633 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
634 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
639 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
641 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
642 struct bfa_fcxp_wqe_s *wqe;
644 bfa_q_deq(&mod->wait_q, &wqe);
646 bfa_trc(mod->bfa, fcxp->fcxp_tag);
648 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
649 wqe->nrsp_sgles, wqe->req_sga_cbfn,
650 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
651 wqe->rsp_sglen_cbfn);
653 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
657 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
659 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
663 bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
664 bfa_status_t req_status, u32 rsp_len,
665 u32 resid_len, struct fchs_s *rsp_fchs)
667 /* discarded fcxp completion */
671 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
673 struct bfa_fcxp_s *fcxp = cbarg;
676 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
677 fcxp->rsp_status, fcxp->rsp_len,
678 fcxp->residue_len, &fcxp->rsp_fchs);
685 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
687 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
688 struct bfa_fcxp_s *fcxp;
689 u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
691 bfa_trc(bfa, fcxp_tag);
693 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
696 * @todo f/w should not set residue to non-0 when everything
699 if (fcxp_rsp->req_status == BFA_STATUS_OK)
700 fcxp_rsp->residue_len = 0;
702 fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
704 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
706 WARN_ON(fcxp->send_cbfn == NULL);
708 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
710 if (fcxp->send_cbfn != NULL) {
711 bfa_trc(mod->bfa, (NULL == fcxp->caller));
712 if (fcxp->caller == NULL) {
713 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
714 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
715 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
717 * fcxp automatically freed on return from the callback
721 fcxp->rsp_status = fcxp_rsp->req_status;
722 fcxp->rsp_len = fcxp_rsp->rsp_len;
723 fcxp->residue_len = fcxp_rsp->residue_len;
724 fcxp->rsp_fchs = fcxp_rsp->fchs;
726 bfa_cb_queue(bfa, &fcxp->hcb_qe,
727 __bfa_fcxp_send_cbfn, fcxp);
730 bfa_trc(bfa, (NULL == fcxp->send_cbfn));
735 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
742 if (fcxp->use_ireqbuf) {
744 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
746 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
748 reqlen + sizeof(struct fchs_s), fchs,
751 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
753 reqlen + sizeof(struct fchs_s),
757 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
758 reqlen + sizeof(struct fchs_s), fchs);
763 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
764 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
766 if (fcxp_rsp->rsp_len > 0) {
767 if (fcxp->use_irspbuf) {
769 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
771 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
773 (u16) fcxp_rsp->rsp_len,
774 &fcxp_rsp->fchs, pld_w0);
776 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
778 (u16) fcxp_rsp->rsp_len,
782 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
783 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
788 * Handler to resume sending fcxp when space in available in cpe queue.
791 bfa_fcxp_qresume(void *cbarg)
793 struct bfa_fcxp_s *fcxp = cbarg;
794 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
795 struct bfi_fcxp_send_req_s *send_req;
797 fcxp->reqq_waiting = BFA_FALSE;
798 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
799 bfa_fcxp_queue(fcxp, send_req);
803 * Queue fcxp send request to foimrware.
806 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
808 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
809 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
810 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
811 struct bfa_rport_s *rport = reqi->bfa_rport;
813 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
816 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
818 send_req->rport_fw_hndl = rport->fw_handle;
819 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
820 if (send_req->max_frmsz == 0)
821 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
823 send_req->rport_fw_hndl = 0;
824 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
827 send_req->vf_id = cpu_to_be16(reqi->vf_id);
828 send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
829 send_req->class = reqi->class;
830 send_req->rsp_timeout = rspi->rsp_timeout;
831 send_req->cts = reqi->cts;
832 send_req->fchs = reqi->fchs;
834 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
835 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
840 if (fcxp->use_ireqbuf == 1) {
841 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
842 BFA_FCXP_REQ_PLD_PA(fcxp));
844 if (fcxp->nreq_sgles > 0) {
845 WARN_ON(fcxp->nreq_sgles != 1);
846 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
847 fcxp->req_sga_cbfn(fcxp->caller, 0));
849 WARN_ON(reqi->req_tot_len != 0);
850 bfa_alen_set(&send_req->rsp_alen, 0, 0);
857 if (fcxp->use_irspbuf == 1) {
858 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
860 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
861 BFA_FCXP_RSP_PLD_PA(fcxp));
863 if (fcxp->nrsp_sgles > 0) {
864 WARN_ON(fcxp->nrsp_sgles != 1);
865 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
866 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
869 WARN_ON(rspi->rsp_maxlen != 0);
870 bfa_alen_set(&send_req->rsp_alen, 0, 0);
874 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
876 bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
878 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
879 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
883 * Allocate an FCXP instance to send a response or to send a request
884 * that has a response. Request/response buffers are allocated by caller.
886 * @param[in] bfa BFA bfa instance
887 * @param[in] nreq_sgles Number of SG elements required for request
888 * buffer. 0, if fcxp internal buffers are used.
889 * Use bfa_fcxp_get_reqbuf() to get the
890 * internal req buffer.
891 * @param[in] req_sgles SG elements describing request buffer. Will be
892 * copied in by BFA and hence can be freed on
893 * return from this function.
894 * @param[in] get_req_sga function ptr to be called to get a request SG
895 * Address (given the sge index).
896 * @param[in] get_req_sglen function ptr to be called to get a request SG
897 * len (given the sge index).
898 * @param[in] get_rsp_sga function ptr to be called to get a response SG
899 * Address (given the sge index).
900 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
901 * len (given the sge index).
903 * @return FCXP instance. NULL on failure.
906 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
907 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
908 bfa_fcxp_get_sglen_t req_sglen_cbfn,
909 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
910 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
912 struct bfa_fcxp_s *fcxp = NULL;
914 WARN_ON(bfa == NULL);
916 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
920 bfa_trc(bfa, fcxp->fcxp_tag);
922 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
923 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
929 * Get the internal request buffer pointer
931 * @param[in] fcxp BFA fcxp pointer
933 * @return pointer to the internal request buffer
936 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
938 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
941 WARN_ON(fcxp->use_ireqbuf != 1);
942 reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
943 mod->req_pld_sz + mod->rsp_pld_sz);
948 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
950 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
952 return mod->req_pld_sz;
956 * Get the internal response buffer pointer
958 * @param[in] fcxp BFA fcxp pointer
960 * @return pointer to the internal request buffer
963 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
965 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
968 WARN_ON(fcxp->use_irspbuf != 1);
970 fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
971 mod->req_pld_sz + mod->rsp_pld_sz);
973 /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
974 return ((u8 *) fcxp_buf) + mod->req_pld_sz;
980 * @param[in] fcxp BFA fcxp pointer
985 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
987 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
989 WARN_ON(fcxp == NULL);
990 bfa_trc(mod->bfa, fcxp->fcxp_tag);
995 * Send a FCXP request
997 * @param[in] fcxp BFA fcxp pointer
998 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
999 * @param[in] vf_id virtual Fabric ID
1000 * @param[in] lp_tag lport tag
1001 * @param[in] cts use Continuous sequence
1002 * @param[in] cos fc Class of Service
1003 * @param[in] reqlen request length, does not include FCHS length
1004 * @param[in] fchs fc Header Pointer. The header content will be copied
1007 * @param[in] cbfn call back function to be called on receiving
1009 * @param[in] cbarg arg for cbfn
1010 * @param[in] rsp_timeout
1013 * @return bfa_status_t
1016 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1017 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1018 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1019 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1021 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1022 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1023 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1024 struct bfi_fcxp_send_req_s *send_req;
1026 bfa_trc(bfa, fcxp->fcxp_tag);
1029 * setup request/response info
1031 reqi->bfa_rport = rport;
1032 reqi->vf_id = vf_id;
1033 reqi->lp_tag = lp_tag;
1035 rspi->rsp_timeout = rsp_timeout;
1038 reqi->req_tot_len = reqlen;
1039 rspi->rsp_maxlen = rsp_maxlen;
1040 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1041 fcxp->send_cbarg = cbarg;
1044 * If no room in CPE queue, wait for space in request queue
1046 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1048 bfa_trc(bfa, fcxp->fcxp_tag);
1049 fcxp->reqq_waiting = BFA_TRUE;
1050 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1054 bfa_fcxp_queue(fcxp, send_req);
1060 * @param[in] fcxp BFA fcxp pointer
1065 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1067 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1069 return BFA_STATUS_OK;
1073 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1074 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1075 void *caller, int nreq_sgles,
1076 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1077 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1078 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1079 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1081 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1083 WARN_ON(!list_empty(&mod->fcxp_free_q));
1085 wqe->alloc_cbfn = alloc_cbfn;
1086 wqe->alloc_cbarg = alloc_cbarg;
1087 wqe->caller = caller;
1089 wqe->nreq_sgles = nreq_sgles;
1090 wqe->nrsp_sgles = nrsp_sgles;
1091 wqe->req_sga_cbfn = req_sga_cbfn;
1092 wqe->req_sglen_cbfn = req_sglen_cbfn;
1093 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1094 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1096 list_add_tail(&wqe->qe, &mod->wait_q);
1100 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1102 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1104 WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
1109 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1112 * If waiting for room in request queue, cancel reqq wait
1115 if (fcxp->reqq_waiting) {
1116 fcxp->reqq_waiting = BFA_FALSE;
1117 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1118 bfa_fcxp_free(fcxp);
1122 fcxp->send_cbfn = bfa_fcxp_null_comp;
1126 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1128 switch (msg->mhdr.msg_id) {
1129 case BFI_FCXP_I2H_SEND_RSP:
1130 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1134 bfa_trc(bfa, msg->mhdr.msg_id);
1140 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1142 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1144 return mod->rsp_pld_sz;
1148 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1150 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1151 struct list_head *qe;
1154 for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1155 bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
1156 list_add_tail(qe, &mod->fcxp_unused_q);
1161 * BFA LPS state machine functions
1165 * Init state -- no login
1168 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1170 bfa_trc(lps->bfa, lps->bfa_tag);
1171 bfa_trc(lps->bfa, event);
1174 case BFA_LPS_SM_LOGIN:
1175 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1176 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1177 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1179 bfa_sm_set_state(lps, bfa_lps_sm_login);
1180 bfa_lps_send_login(lps);
1184 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1185 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1187 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1188 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1191 case BFA_LPS_SM_LOGOUT:
1192 bfa_lps_logout_comp(lps);
1195 case BFA_LPS_SM_DELETE:
1199 case BFA_LPS_SM_RX_CVL:
1200 case BFA_LPS_SM_OFFLINE:
1203 case BFA_LPS_SM_FWRSP:
1205 * Could happen when fabric detects loopback and discards
1206 * the lps request. Fw will eventually sent out the timeout
1212 bfa_sm_fault(lps->bfa, event);
1217 * login is in progress -- awaiting response from firmware
1220 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1222 bfa_trc(lps->bfa, lps->bfa_tag);
1223 bfa_trc(lps->bfa, event);
1226 case BFA_LPS_SM_FWRSP:
1227 if (lps->status == BFA_STATUS_OK) {
1228 bfa_sm_set_state(lps, bfa_lps_sm_online);
1230 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1231 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1233 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1234 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1235 /* If N2N, send the assigned PID to FW */
1236 bfa_trc(lps->bfa, lps->fport);
1237 bfa_trc(lps->bfa, lps->lp_pid);
1239 if (!lps->fport && lps->lp_pid)
1240 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1242 bfa_sm_set_state(lps, bfa_lps_sm_init);
1244 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1245 BFA_PL_EID_LOGIN, 0,
1246 "FDISC Fail (RJT or timeout)");
1248 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1249 BFA_PL_EID_LOGIN, 0,
1250 "FLOGI Fail (RJT or timeout)");
1252 bfa_lps_login_comp(lps);
1255 case BFA_LPS_SM_OFFLINE:
1256 case BFA_LPS_SM_DELETE:
1257 bfa_sm_set_state(lps, bfa_lps_sm_init);
1260 case BFA_LPS_SM_SET_N2N_PID:
1261 bfa_trc(lps->bfa, lps->fport);
1262 bfa_trc(lps->bfa, lps->lp_pid);
1266 bfa_sm_fault(lps->bfa, event);
1271 * login pending - awaiting space in request queue
/*
 * LPS "loginwait" state: login pending, waiting for request-queue space.
 * RESUME re-issues the login; OFFLINE/DELETE cancel the queue wait.
 */
1274 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1276 bfa_trc(lps->bfa, lps->bfa_tag);
1277 bfa_trc(lps->bfa, event);
1280 case BFA_LPS_SM_RESUME:
1281 bfa_sm_set_state(lps, bfa_lps_sm_login);
1284 case BFA_LPS_SM_OFFLINE:
1285 case BFA_LPS_SM_DELETE:
1286 bfa_sm_set_state(lps, bfa_lps_sm_init);
1287 bfa_reqq_wcancel(&lps->wqe);
1290 case BFA_LPS_SM_RX_CVL:
1292 * Login was not even sent out; so when getting out
1293 * of this state, it will appear like a login retry
1294 * after Clear virtual link
1299 bfa_sm_fault(lps->bfa, event);
/*
 * LPS "online" state: login complete. Handles logout (direct or queued on
 * reqq-full), FIP Clear-Virtual-Link, N2N PID assignment, and offline/delete.
 */
1307 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1309 bfa_trc(lps->bfa, lps->bfa_tag);
1310 bfa_trc(lps->bfa, event);
1313 case BFA_LPS_SM_LOGOUT:
/* No reqq space: park in logowait until BFA_LPS_SM_RESUME. */
1314 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1315 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1316 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1318 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1319 bfa_lps_send_logout(lps);
1321 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1322 BFA_PL_EID_LOGO, 0, "Logout");
1325 case BFA_LPS_SM_RX_CVL:
1326 bfa_sm_set_state(lps, bfa_lps_sm_init);
1328 /* Let the vport module know about this event */
1329 bfa_lps_cvl_event(lps);
1330 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1331 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1334 case BFA_LPS_SM_SET_N2N_PID:
/* Same reqq-full pattern for the N2N PID-set request. */
1335 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1336 bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1337 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1339 bfa_lps_send_set_n2n_pid(lps);
1342 case BFA_LPS_SM_OFFLINE:
1343 case BFA_LPS_SM_DELETE:
1344 bfa_sm_set_state(lps, bfa_lps_sm_init);
1348 bfa_sm_fault(lps->bfa, event);
/*
 * LPS online, but an N2N PID-set request is waiting for reqq space.
 * Events that leave this state must cancel the queue wait (wqe).
 */
1356 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1358 bfa_trc(lps->bfa, lps->bfa_tag);
1359 bfa_trc(lps->bfa, event);
1362 case BFA_LPS_SM_RESUME:
1363 bfa_sm_set_state(lps, bfa_lps_sm_online);
1364 bfa_lps_send_set_n2n_pid(lps);
1367 case BFA_LPS_SM_LOGOUT:
1368 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1369 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1370 BFA_PL_EID_LOGO, 0, "Logout");
1373 case BFA_LPS_SM_RX_CVL:
1374 bfa_sm_set_state(lps, bfa_lps_sm_init);
1375 bfa_reqq_wcancel(&lps->wqe);
1377 /* Let the vport module know about this event */
1378 bfa_lps_cvl_event(lps);
1379 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1380 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1383 case BFA_LPS_SM_OFFLINE:
1384 case BFA_LPS_SM_DELETE:
1385 bfa_sm_set_state(lps, bfa_lps_sm_init);
1386 bfa_reqq_wcancel(&lps->wqe);
1390 bfa_sm_fault(lps->bfa, event);
1395 * logout in progress - awaiting firmware response
/*
 * LPS "logout" state: logout request sent; awaiting firmware response.
 */
1398 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1400 bfa_trc(lps->bfa, lps->bfa_tag);
1401 bfa_trc(lps->bfa, event);
1404 case BFA_LPS_SM_FWRSP:
1405 bfa_sm_set_state(lps, bfa_lps_sm_init);
1406 bfa_lps_logout_comp(lps);
1409 case BFA_LPS_SM_OFFLINE:
1410 case BFA_LPS_SM_DELETE:
1411 bfa_sm_set_state(lps, bfa_lps_sm_init);
1415 bfa_sm_fault(lps->bfa, event);
1420 * logout pending -- awaiting space in request queue
/*
 * LPS "logowait" state: logout pending, waiting for request-queue space.
 */
1423 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1425 bfa_trc(lps->bfa, lps->bfa_tag);
1426 bfa_trc(lps->bfa, event);
1429 case BFA_LPS_SM_RESUME:
1430 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1431 bfa_lps_send_logout(lps);
1434 case BFA_LPS_SM_OFFLINE:
1435 case BFA_LPS_SM_DELETE:
1436 bfa_sm_set_state(lps, bfa_lps_sm_init);
1437 bfa_reqq_wcancel(&lps->wqe);
1441 bfa_sm_fault(lps->bfa, event);
1448 * lps_pvt BFA LPS private functions
1452 * return memory requirement
/*
 * Report KVA memory needed for the lps array: 1 lport in min-cfg mode,
 * otherwise the full 256 (BFA_LPS_MAX_LPORTS).
 */
1455 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1458 struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1460 if (cfg->drvcfg.min_cfg)
1461 bfa_mem_kva_setup(minfo, lps_kva,
1462 sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1464 bfa_mem_kva_setup(minfo, lps_kva,
1465 sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1469 * bfa module attach at initialization time
/*
 * Claim the lps array from KVA, initialize each lps (tag, reqq, queue-wait
 * element) and place all entries on the free queue.
 */
1472 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1473 struct bfa_pcidev_s *pcidev)
1475 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1476 struct bfa_lps_s *lps;
1479 mod->num_lps = BFA_LPS_MAX_LPORTS;
1480 if (cfg->drvcfg.min_cfg)
1481 mod->num_lps = BFA_LPS_MIN_LPORTS;
1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1486 bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1488 INIT_LIST_HEAD(&mod->lps_free_q);
1489 INIT_LIST_HEAD(&mod->lps_active_q);
1490 INIT_LIST_HEAD(&mod->lps_login_q);
1492 for (i = 0; i < mod->num_lps; i++, lps++) {
1494 lps->bfa_tag = (u8) i;
1495 lps->reqq = BFA_REQQ_LPS;
1496 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1497 list_add_tail(&lps->qe, &mod->lps_free_q);
/* Empty module hooks: LPS needs no detach/start/stop work. */
1502 bfa_lps_detach(struct bfa_s *bfa)
1507 bfa_lps_start(struct bfa_s *bfa)
1512 bfa_lps_stop(struct bfa_s *bfa)
1517 * IOC in disabled state -- consider all lps offline
/*
 * IOC disabled: send OFFLINE to every active and login-pending lps, then
 * fold the login queue back into the active queue.
 */
1520 bfa_lps_iocdisable(struct bfa_s *bfa)
1522 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1523 struct bfa_lps_s *lps;
1524 struct list_head *qe, *qen;
1526 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1527 lps = (struct bfa_lps_s *) qe;
1528 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1530 list_for_each_safe(qe, qen, &mod->lps_login_q) {
1531 lps = (struct bfa_lps_s *) qe;
1532 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1534 list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1538 * Firmware login response
/*
 * Copy status-specific fields from the firmware login response into the
 * lps, move it to the active queue and kick the state machine with FWRSP.
 */
1541 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1543 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1544 struct bfa_lps_s *lps;
1546 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1547 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1549 lps->status = rsp->status;
1550 switch (rsp->status) {
1552 lps->fw_tag = rsp->fw_tag;
1553 lps->fport = rsp->f_port;
1555 lps->lp_pid = rsp->lp_pid;
1556 lps->npiv_en = rsp->npiv_en;
1557 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
1558 lps->pr_pwwn = rsp->port_name;
1559 lps->pr_nwwn = rsp->node_name;
1560 lps->auth_req = rsp->auth_req;
1561 lps->lp_mac = rsp->lp_mac;
1562 lps->brcd_switch = rsp->brcd_switch;
1563 lps->fcf_mac = rsp->fcf_mac;
1564 lps->pr_bbscn = rsp->bb_scn;
1568 case BFA_STATUS_FABRIC_RJT:
1569 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1570 lps->lsrjt_expl = rsp->lsrjt_expl;
1574 case BFA_STATUS_EPROTOCOL:
1575 lps->ext_status = rsp->ext_status;
1579 case BFA_STATUS_VPORT_MAX:
1580 if (!rsp->ext_status)
1581 bfa_lps_no_res(lps, rsp->ext_status);
1585 /* Nothing to do with other status */
1590 list_add_tail(&lps->qe, &mod->lps_active_q);
1591 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
/*
 * Vport-max hit: propagate first_lps's status to up to 'count' subsequent
 * lps entries on the login queue and complete them with FWRSP too.
 */
1595 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1597 struct bfa_s *bfa = first_lps->bfa;
1598 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1599 struct list_head *qe, *qe_next;
1600 struct bfa_lps_s *lps;
1602 bfa_trc(bfa, count);
1604 qe = bfa_q_next(first_lps);
1606 while (count && qe) {
1607 qe_next = bfa_q_next(qe);
1608 lps = (struct bfa_lps_s *)qe;
1609 bfa_trc(bfa, lps->bfa_tag);
1610 lps->status = first_lps->status;
1612 list_add_tail(&lps->qe, &mod->lps_active_q);
1613 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1620 * Firmware logout response
/*
 * Firmware logout response: look up the lps by tag and deliver FWRSP.
 */
1623 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1625 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1626 struct bfa_lps_s *lps;
1628 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1629 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1631 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1635 * Firmware received a Clear virtual link request (for FCoE)
1638 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1640 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1641 struct bfa_lps_s *lps;
1643 lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1645 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1649 * Space is available in request queue, resume queueing request to firmware.
1652 bfa_lps_reqq_resume(void *lps_arg)
1654 struct bfa_lps_s *lps = lps_arg;
1656 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1660 * lps is freed -- triggered by vport delete
1663 bfa_lps_free(struct bfa_lps_s *lps)
1665 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1669 list_add_tail(&lps->qe, &mod->lps_free_q);
1673 * send login request to firmware
/*
 * Build a BFI_LPS_H2I_LOGIN_REQ from the lps fields, produce it on the
 * reqq and queue the lps on the login queue until the response arrives.
 */
1676 bfa_lps_send_login(struct bfa_lps_s *lps)
1678 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1679 struct bfi_lps_login_req_s *m;
1681 m = bfa_reqq_next(lps->bfa, lps->reqq);
1684 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1685 bfa_fn_lpu(lps->bfa));
1687 m->bfa_tag = lps->bfa_tag;
1688 m->alpa = lps->alpa;
1689 m->pdu_size = cpu_to_be16(lps->pdusz);
1690 m->pwwn = lps->pwwn;
1691 m->nwwn = lps->nwwn;
1692 m->fdisc = lps->fdisc;
1693 m->auth_en = lps->auth_en;
1694 m->bb_scn = lps->bb_scn;
1696 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1698 list_add_tail(&lps->qe, &mod->lps_login_q);
1702 * send logout request to firmware
1705 bfa_lps_send_logout(struct bfa_lps_s *lps)
1707 struct bfi_lps_logout_req_s *m;
1709 m = bfa_reqq_next(lps->bfa, lps->reqq);
1712 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1713 bfa_fn_lpu(lps->bfa));
1715 m->fw_tag = lps->fw_tag;
1716 m->port_name = lps->pwwn;
1717 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1721 * send n2n pid set request to firmware
1724 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1726 struct bfi_lps_n2n_pid_req_s *m;
1728 m = bfa_reqq_next(lps->bfa, lps->reqq);
1731 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1732 bfa_fn_lpu(lps->bfa));
1734 m->fw_tag = lps->fw_tag;
1735 m->lp_pid = lps->lp_pid;
1736 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1740 * Indirect login completion handler for non-fcs
/*
 * Completion handlers. The pattern for all three events (login, logout,
 * CVL): when an FCS is attached (lps->bfa->fcs) call the bfad callback
 * directly; otherwise queue the *_cb variant via bfa_cb_queue() so the
 * callback runs in the deferred-callback context.
 */
1743 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1745 struct bfa_lps_s *lps = arg;
1751 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1753 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1757 * Login completion handler -- direct call for fcs, queue for others
1760 bfa_lps_login_comp(struct bfa_lps_s *lps)
1762 if (!lps->bfa->fcs) {
1763 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1769 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1771 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1775 * Indirect logout completion handler for non-fcs
1778 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1780 struct bfa_lps_s *lps = arg;
1786 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1790 * Logout completion handler -- direct call for fcs, queue for others
1793 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1795 if (!lps->bfa->fcs) {
1796 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1801 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1805 * Clear virtual link completion handler for non-fcs
1808 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1810 struct bfa_lps_s *lps = arg;
1815 /* Clear virtual link to base port will result in link down */
1817 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1821 * Received Clear virtual link event --direct call for fcs,
1825 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1827 if (!lps->bfa->fcs) {
1828 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1833 /* Clear virtual link to base port will result in link down */
1835 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1841 * lps_public BFA LPS public functions
/*
 * Max vports depends on ASIC: 190 for CT (Catapult), 255 for CB (Crossbow).
 */
1845 bfa_lps_get_max_vport(struct bfa_s *bfa)
1847 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1848 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1850 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1854 * Allocate a lport srvice tag.
1857 bfa_lps_alloc(struct bfa_s *bfa)
1859 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1860 struct bfa_lps_s *lps = NULL;
/* Dequeue from the free queue; entry moves to the active queue in init state. */
1862 bfa_q_deq(&mod->lps_free_q, &lps);
1867 list_add_tail(&lps->qe, &mod->lps_active_q);
1869 bfa_sm_set_state(lps, bfa_lps_sm_init);
1874 * Free lport service tag. This can be called anytime after an alloc.
1875 * No need to wait for any pending login/logout completions.
1878 bfa_lps_delete(struct bfa_lps_s *lps)
1880 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1884 * Initiate a lport login.
1887 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1888 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
1895 lps->fdisc = BFA_FALSE;
1896 lps->auth_en = auth_en;
1897 lps->bb_scn = bb_scn;
1898 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1902 * Initiate a lport fdisc login.
1905 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1913 lps->fdisc = BFA_TRUE;
1914 lps->auth_en = BFA_FALSE;
1915 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1920 * Initiate a lport FDSIC logout.
1923 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1925 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1929 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1931 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1933 return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1937 * Return lport services tag given the pid
1940 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1942 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1943 struct bfa_lps_s *lps;
1946 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1947 if (lps->lp_pid == pid)
1948 return lps->bfa_tag;
1951 /* Return base port tag anyway */
1957 * return port id assigned to the base lport
1960 bfa_lps_get_base_pid(struct bfa_s *bfa)
1962 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
/* Tag 0 is the base lport by convention in this module. */
1964 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1968 * Set PID in case of n2n (which is assigned during PLOGI)
1971 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1973 bfa_trc(lps->bfa, lps->bfa_tag);
1974 bfa_trc(lps->bfa, n2n_pid);
1976 lps->lp_pid = n2n_pid;
1977 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1981 * LPS firmware message class handler.
/*
 * Dispatch firmware-to-host LPS messages by msg_id; unknown ids are
 * only traced.
 */
1984 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1986 union bfi_lps_i2h_msg_u msg;
1988 bfa_trc(bfa, m->mhdr.msg_id);
1991 switch (m->mhdr.msg_id) {
1992 case BFI_LPS_I2H_LOGIN_RSP:
1993 bfa_lps_login_rsp(bfa, msg.login_rsp);
1996 case BFI_LPS_I2H_LOGOUT_RSP:
1997 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2000 case BFI_LPS_I2H_CVL_EVENT:
2001 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2005 bfa_trc(bfa, m->mhdr.msg_id);
2011 * FC PORT state machine functions
/*
 * FC port "uninit" state: before START. START triggers the first enable
 * (flash config is consulted on this first pass via use_flash_cfg).
 */
2014 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2015 enum bfa_fcport_sm_event event)
2017 bfa_trc(fcport->bfa, event)
2020 case BFA_FCPORT_SM_START:
2022 * Start event after IOC is configured and BFA is started.
2024 fcport->use_flash_cfg = BFA_TRUE;
/* Enable immediately if reqq space allows, else wait in qwait state. */
2026 if (bfa_fcport_send_enable(fcport)) {
2027 bfa_trc(fcport->bfa, BFA_TRUE);
2028 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2030 bfa_trc(fcport->bfa, BFA_FALSE);
2031 bfa_sm_set_state(fcport,
2032 bfa_fcport_sm_enabling_qwait);
2036 case BFA_FCPORT_SM_ENABLE:
2038 * Port is persistently configured to be in enabled state. Do
2039 * not change state. Port enabling is done when START event is
2044 case BFA_FCPORT_SM_DISABLE:
2046 * If a port is persistently configured to be disabled, the
2047 * first event will a port disable request.
2049 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled)
2052 case BFA_FCPORT_SM_HWFAIL:
2053 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown)
2057 bfa_sm_fault(fcport->bfa, event)
/*
 * FC port enable requested but waiting for request-queue space.
 * Exits must cancel the reqq wait (reqq_wait).
 */
2062 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2063 enum bfa_fcport_sm_event event)
2065 char pwwn_buf[BFA_STRING_32];
2066 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2067 bfa_trc(fcport->bfa, event);
2070 case BFA_FCPORT_SM_QRESUME:
2071 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2072 bfa_fcport_send_enable(fcport);
2075 case BFA_FCPORT_SM_STOP:
2076 bfa_reqq_wcancel(&fcport->reqq_wait);
2077 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2080 case BFA_FCPORT_SM_ENABLE:
2082 * Already enable is in progress.
2086 case BFA_FCPORT_SM_DISABLE:
2088 * Just send disable request to firmware when room becomes
2089 * available in request queue.
2091 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2092 bfa_reqq_wcancel(&fcport->reqq_wait);
2093 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2094 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2095 wwn2str(pwwn_buf, fcport->pwwn);
2096 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2097 "Base port disabled: WWN = %s\n", pwwn_buf);
2100 case BFA_FCPORT_SM_LINKUP:
2101 case BFA_FCPORT_SM_LINKDOWN:
2103 * Possible to get link events when doing back-to-back
2108 case BFA_FCPORT_SM_HWFAIL:
2109 bfa_reqq_wcancel(&fcport->reqq_wait);
2110 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2114 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port enable request sent to firmware; waiting for FWRSP/link events.
 */
2119 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2120 enum bfa_fcport_sm_event event)
2122 char pwwn_buf[BFA_STRING_32];
2123 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2124 bfa_trc(fcport->bfa, event);
2127 case BFA_FCPORT_SM_FWRSP:
2128 case BFA_FCPORT_SM_LINKDOWN:
2129 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2132 case BFA_FCPORT_SM_LINKUP:
2133 bfa_fcport_update_linkinfo(fcport);
2134 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2136 WARN_ON(!fcport->event_cbfn);
2137 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2140 case BFA_FCPORT_SM_ENABLE:
2142 * Already being enabled.
2146 case BFA_FCPORT_SM_DISABLE:
2147 if (bfa_fcport_send_disable(fcport))
2148 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2150 bfa_sm_set_state(fcport,
2151 bfa_fcport_sm_disabling_qwait);
2153 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2154 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2155 wwn2str(pwwn_buf, fcport->pwwn);
2156 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2157 "Base port disabled: WWN = %s\n", pwwn_buf);
2160 case BFA_FCPORT_SM_STOP:
2161 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2164 case BFA_FCPORT_SM_HWFAIL:
2165 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2169 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port enabled, link down. LINKUP transitions to linkup and (in FCoE
 * mode) records FIP FCF discovery results in the port log.
 */
2174 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2175 enum bfa_fcport_sm_event event)
2177 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2178 char pwwn_buf[BFA_STRING_32];
2179 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2181 bfa_trc(fcport->bfa, event);
2184 case BFA_FCPORT_SM_LINKUP:
2185 bfa_fcport_update_linkinfo(fcport);
2186 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2187 WARN_ON(!fcport->event_cbfn);
2188 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2189 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
/* Non-FC (FCoE) mode: log FIP FCF discovery outcome. */
2190 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2192 bfa_trc(fcport->bfa,
2193 pevent->link_state.vc_fcf.fcf.fipenabled);
2194 bfa_trc(fcport->bfa,
2195 pevent->link_state.vc_fcf.fcf.fipfailed);
2197 if (pevent->link_state.vc_fcf.fcf.fipfailed)
2198 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2199 BFA_PL_EID_FIP_FCF_DISC, 0,
2200 "FIP FCF Discovery Failed");
2202 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2203 BFA_PL_EID_FIP_FCF_DISC, 0,
2204 "FIP FCF Discovered");
2207 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2208 wwn2str(pwwn_buf, fcport->pwwn);
2209 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2210 "Base port online: WWN = %s\n", pwwn_buf);
2213 case BFA_FCPORT_SM_LINKDOWN:
2215 * Possible to get link down event.
2219 case BFA_FCPORT_SM_ENABLE:
2225 case BFA_FCPORT_SM_DISABLE:
2226 if (bfa_fcport_send_disable(fcport))
2227 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2229 bfa_sm_set_state(fcport,
2230 bfa_fcport_sm_disabling_qwait);
2232 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2233 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2234 wwn2str(pwwn_buf, fcport->pwwn);
2235 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2236 "Base port disabled: WWN = %s\n", pwwn_buf);
2239 case BFA_FCPORT_SM_STOP:
2240 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2243 case BFA_FCPORT_SM_HWFAIL:
2244 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2248 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port link up. Downgrades (DISABLE/LINKDOWN/STOP/HWFAIL) reset link
 * info and send a LINKDOWN SCN; log severity depends on whether the port
 * was administratively disabled (INFO) or lost fabric connectivity (ERR).
 */
2253 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2254 enum bfa_fcport_sm_event event)
2256 char pwwn_buf[BFA_STRING_32];
2257 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2259 bfa_trc(fcport->bfa, event);
2262 case BFA_FCPORT_SM_ENABLE:
2268 case BFA_FCPORT_SM_DISABLE:
2269 if (bfa_fcport_send_disable(fcport))
2270 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2272 bfa_sm_set_state(fcport,
2273 bfa_fcport_sm_disabling_qwait);
2275 bfa_fcport_reset_linkinfo(fcport);
2276 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2277 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2278 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2279 wwn2str(pwwn_buf, fcport->pwwn);
2280 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2281 "Base port offline: WWN = %s\n", pwwn_buf);
2282 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2283 "Base port disabled: WWN = %s\n", pwwn_buf);
2286 case BFA_FCPORT_SM_LINKDOWN:
2287 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2288 bfa_fcport_reset_linkinfo(fcport);
2289 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2290 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2291 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2292 wwn2str(pwwn_buf, fcport->pwwn);
2293 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2294 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2295 "Base port offline: WWN = %s\n", pwwn_buf);
2297 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2298 "Base port (WWN = %s) "
2299 "lost fabric connectivity\n", pwwn_buf);
2302 case BFA_FCPORT_SM_STOP:
2303 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2304 bfa_fcport_reset_linkinfo(fcport);
2305 wwn2str(pwwn_buf, fcport->pwwn);
2306 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2307 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2308 "Base port offline: WWN = %s\n", pwwn_buf);
2310 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2311 "Base port (WWN = %s) "
2312 "lost fabric connectivity\n", pwwn_buf);
2315 case BFA_FCPORT_SM_HWFAIL:
2316 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2317 bfa_fcport_reset_linkinfo(fcport);
2318 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2319 wwn2str(pwwn_buf, fcport->pwwn);
2320 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2321 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2322 "Base port offline: WWN = %s\n", pwwn_buf);
2324 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2325 "Base port (WWN = %s) "
2326 "lost fabric connectivity\n", pwwn_buf);
2330 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port disable requested but waiting for request-queue space.
 * ENABLE here means disable+enable back-to-back -> toggling_qwait.
 */
2335 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2336 enum bfa_fcport_sm_event event)
2338 bfa_trc(fcport->bfa, event);
2341 case BFA_FCPORT_SM_QRESUME:
2342 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2343 bfa_fcport_send_disable(fcport);
2346 case BFA_FCPORT_SM_STOP:
2347 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2348 bfa_reqq_wcancel(&fcport->reqq_wait);
2351 case BFA_FCPORT_SM_ENABLE:
2352 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2355 case BFA_FCPORT_SM_DISABLE:
2357 * Already being disabled.
2361 case BFA_FCPORT_SM_LINKUP:
2362 case BFA_FCPORT_SM_LINKDOWN:
2364 * Possible to get link events when doing back-to-back
2369 case BFA_FCPORT_SM_HWFAIL:
2370 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2371 bfa_reqq_wcancel(&fcport->reqq_wait);
2375 bfa_sm_fault(fcport->bfa, event);
/*
 * Disable-then-enable ("toggle") both pending on reqq space. On QRESUME
 * the disable is sent first, then the enable (or enabling_qwait if the
 * queue fills again).
 */
2380 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2381 enum bfa_fcport_sm_event event)
2383 bfa_trc(fcport->bfa, event);
2386 case BFA_FCPORT_SM_QRESUME:
2387 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2388 bfa_fcport_send_disable(fcport);
2389 if (bfa_fcport_send_enable(fcport))
2390 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2392 bfa_sm_set_state(fcport,
2393 bfa_fcport_sm_enabling_qwait);
2396 case BFA_FCPORT_SM_STOP:
2397 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2398 bfa_reqq_wcancel(&fcport->reqq_wait);
2401 case BFA_FCPORT_SM_ENABLE:
2404 case BFA_FCPORT_SM_DISABLE:
2405 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2408 case BFA_FCPORT_SM_LINKUP:
2409 case BFA_FCPORT_SM_LINKDOWN:
2411 * Possible to get link events when doing back-to-back
2416 case BFA_FCPORT_SM_HWFAIL:
2417 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2418 bfa_reqq_wcancel(&fcport->reqq_wait);
2422 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port disable request sent to firmware; awaiting FWRSP.
 */
2427 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2428 enum bfa_fcport_sm_event event)
2430 char pwwn_buf[BFA_STRING_32];
2431 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2432 bfa_trc(fcport->bfa, event);
2435 case BFA_FCPORT_SM_FWRSP:
2436 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2439 case BFA_FCPORT_SM_DISABLE:
2441 * Already being disabled.
2445 case BFA_FCPORT_SM_ENABLE:
2446 if (bfa_fcport_send_enable(fcport))
2447 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2449 bfa_sm_set_state(fcport,
2450 bfa_fcport_sm_enabling_qwait);
2452 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2453 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2454 wwn2str(pwwn_buf, fcport->pwwn);
2455 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2456 "Base port enabled: WWN = %s\n", pwwn_buf);
2459 case BFA_FCPORT_SM_STOP:
2460 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2463 case BFA_FCPORT_SM_LINKUP:
2464 case BFA_FCPORT_SM_LINKDOWN:
2466 * Possible to get link events when doing back-to-back
2471 case BFA_FCPORT_SM_HWFAIL:
2472 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2476 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port administratively disabled. Only ENABLE (re-enable), STOP and
 * HWFAIL change state; START is ignored for a disabled port.
 */
2481 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2482 enum bfa_fcport_sm_event event)
2484 char pwwn_buf[BFA_STRING_32];
2485 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2486 bfa_trc(fcport->bfa, event);
2489 case BFA_FCPORT_SM_START:
2491 * Ignore start event for a port that is disabled.
2495 case BFA_FCPORT_SM_STOP:
2496 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2499 case BFA_FCPORT_SM_ENABLE:
2500 if (bfa_fcport_send_enable(fcport))
2501 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2503 bfa_sm_set_state(fcport,
2504 bfa_fcport_sm_enabling_qwait);
2506 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2507 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2508 wwn2str(pwwn_buf, fcport->pwwn);
2509 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2510 "Base port enabled: WWN = %s\n", pwwn_buf);
2513 case BFA_FCPORT_SM_DISABLE:
2519 case BFA_FCPORT_SM_HWFAIL:
2520 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2524 bfa_sm_fault(fcport->bfa, event);
/*
 * FC port stopped: only START re-enables; everything else is ignored.
 */
2529 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2530 enum bfa_fcport_sm_event event)
2532 bfa_trc(fcport->bfa, event);
2535 case BFA_FCPORT_SM_START:
2536 if (bfa_fcport_send_enable(fcport))
2537 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2539 bfa_sm_set_state(fcport,
2540 bfa_fcport_sm_enabling_qwait);
2545 * Ignore all other events.
2552 * Port is enabled. IOC is down/failed.
/*
 * Port is enabled but IOC is down/failed: START retries the enable once
 * the IOC recovers; all other events are ignored.
 */
2555 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2556 enum bfa_fcport_sm_event event)
2558 bfa_trc(fcport->bfa, event);
2561 case BFA_FCPORT_SM_START:
2562 if (bfa_fcport_send_enable(fcport))
2563 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2565 bfa_sm_set_state(fcport,
2566 bfa_fcport_sm_enabling_qwait);
2571 * Ignore all events.
2578 * Port is disabled. IOC is down/failed.
/*
 * Port is disabled and IOC is down/failed: START restores "disabled";
 * ENABLE arms the port so recovery (iocdown) will re-enable it.
 */
2581 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2582 enum bfa_fcport_sm_event event)
2584 bfa_trc(fcport->bfa, event);
2587 case BFA_FCPORT_SM_START:
2588 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2591 case BFA_FCPORT_SM_ENABLE:
2592 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2597 * Ignore all events.
2604 * Link state is down
/*
 * Link-notification (ln) state machine. States encode "current link state"
 * plus any notification still in flight ("_nf") and pending opposite
 * events ("dn_up", "up_dn", "up_dn_up"), so upper layers always see
 * linkup/linkdown callbacks in order, one at a time.
 */
2607 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2608 enum bfa_fcport_ln_sm_event event)
2610 bfa_trc(ln->fcport->bfa, event);
2613 case BFA_FCPORT_LN_SM_LINKUP:
2614 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2615 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2619 bfa_sm_fault(ln->fcport->bfa, event);
2624 * Link state is waiting for down notification
2627 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2628 enum bfa_fcport_ln_sm_event event)
2630 bfa_trc(ln->fcport->bfa, event);
2633 case BFA_FCPORT_LN_SM_LINKUP:
2634 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2637 case BFA_FCPORT_LN_SM_NOTIFICATION:
2638 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2642 bfa_sm_fault(ln->fcport->bfa, event);
2647 * Link state is waiting for down notification and there is a pending up
2650 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2651 enum bfa_fcport_ln_sm_event event)
2653 bfa_trc(ln->fcport->bfa, event);
2656 case BFA_FCPORT_LN_SM_LINKDOWN:
2657 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2660 case BFA_FCPORT_LN_SM_NOTIFICATION:
2661 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2662 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2666 bfa_sm_fault(ln->fcport->bfa, event);
2674 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2675 enum bfa_fcport_ln_sm_event event)
2677 bfa_trc(ln->fcport->bfa, event);
2680 case BFA_FCPORT_LN_SM_LINKDOWN:
2681 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2682 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2686 bfa_sm_fault(ln->fcport->bfa, event);
2691 * Link state is waiting for up notification
2694 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2695 enum bfa_fcport_ln_sm_event event)
2697 bfa_trc(ln->fcport->bfa, event);
2700 case BFA_FCPORT_LN_SM_LINKDOWN:
2701 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2704 case BFA_FCPORT_LN_SM_NOTIFICATION:
2705 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2709 bfa_sm_fault(ln->fcport->bfa, event);
2714 * Link state is waiting for up notification and there is a pending down
2717 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2718 enum bfa_fcport_ln_sm_event event)
2720 bfa_trc(ln->fcport->bfa, event);
2723 case BFA_FCPORT_LN_SM_LINKUP:
2724 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2727 case BFA_FCPORT_LN_SM_NOTIFICATION:
2728 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2729 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2733 bfa_sm_fault(ln->fcport->bfa, event);
2738 * Link state is waiting for up notification and there are pending down and up
2741 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2742 enum bfa_fcport_ln_sm_event event)
2744 bfa_trc(ln->fcport->bfa, event);
2747 case BFA_FCPORT_LN_SM_LINKDOWN:
2748 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2751 case BFA_FCPORT_LN_SM_NOTIFICATION:
2752 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2753 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2757 bfa_sm_fault(ln->fcport->bfa, event);
/*
 * Deferred-callback trampoline: deliver the saved ln_event to the
 * registered event callback, then tell the ln SM the notification is done.
 */
2762 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2764 struct bfa_fcport_ln_s *ln = cbarg;
2767 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2769 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2773 * Send SCN notification to upper layers.
2774 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2777 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2778 bfa_boolean_t trunk)
2780 if (fcport->cfg.trunked && !trunk)
2784 case BFA_PORT_LINKUP:
2785 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2787 case BFA_PORT_LINKDOWN:
2788 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
/*
 * Deliver the link event: direct call + immediate NOTIFICATION when an
 * FCS is attached, otherwise queue __bfa_cb_fcport_event.
 */
2796 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2798 struct bfa_fcport_s *fcport = ln->fcport;
2800 if (fcport->bfa->fcs) {
2801 fcport->event_cbfn(fcport->event_cbarg, event);
2802 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2804 ln->ln_event = event;
2805 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2806 __bfa_cb_fcport_event, ln);
2810 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
/* DMA memory requirement for the fcport stats block. */
2814 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2817 struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2819 bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
/* Request-queue space available again: resume the fcport SM. */
2823 bfa_fcport_qresume(void *cbarg)
2825 struct bfa_fcport_s *fcport = cbarg;
2827 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
/* Claim the stats DMA area (kva + physical address) for this fcport. */
2831 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2833 struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2835 fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2836 fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
2837 fcport->stats = (union bfa_fcport_stats_u *)
2838 bfa_mem_dma_virt(fcport_dma);
2842 * Memory initialization.
/*
 * Module attach: claim DMA memory, set initial state machines (port uninit,
 * link-notify down), record the stats-reset timestamp and install default
 * port configuration (P2P topology, auto speed, trunking off).
 */
2845 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2846 struct bfa_pcidev_s *pcidev)
2848 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2849 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2850 struct bfa_fcport_ln_s *ln = &fcport->ln;
2854 ln->fcport = fcport;
2856 bfa_fcport_mem_claim(fcport);
2858 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2859 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2862 * initialize time stamp for stats reset
2864 do_gettimeofday(&tv);
2865 fcport->stats_reset_time = tv.tv_sec;
2868 * initialize and set default configuration
2870 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2871 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2872 port_cfg->trunked = BFA_FALSE;
/* maxfrsize 0 means "take the IOC value" later in bfa_fcport_init() */
2873 port_cfg->maxfrsize = 0;
2875 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
/* arm the wait element used when the request queue is full */
2877 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
/* Module detach: nothing to release (body elided/empty in this view). */
2881 bfa_fcport_detach(struct bfa_s *bfa)
2886 * Called when IOC is ready.
2889 bfa_fcport_start(struct bfa_s *bfa)
2891 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2895 * Called before IOC is stopped.
2898 bfa_fcport_stop(struct bfa_s *bfa)
2900 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
/* also take trunk attributes offline, same as the IOC-failure path below */
2901 bfa_trunk_iocdisable(bfa);
2905 * Called when IOC failure is detected.
2908 bfa_fcport_iocdisable(struct bfa_s *bfa)
2910 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2912 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2913 bfa_trunk_iocdisable(bfa);
/*
 * Cache link attributes from the firmware linkup event message: speed,
 * topology, QoS attributes, trunk state and the FCoE VLAN.
 */
2917 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2919 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2920 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2922 fcport->speed = pevent->link_state.speed;
2923 fcport->topology = pevent->link_state.topology;
/* loop topology carries extra info (body elided in this view) */
2925 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2929 fcport->qos_attr = pevent->link_state.qos_attr;
2930 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2933 * update trunk state if applicable
2935 if (!fcport->cfg.trunked)
2936 trunk->attr.state = BFA_TRUNK_DISABLED;
2938 /* update FCoE specific */
2939 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
2941 bfa_trc(fcport->bfa, fcport->speed);
2942 bfa_trc(fcport->bfa, fcport->topology);
/* Reset cached link attributes to the "no link" defaults. */
2946 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2948 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2949 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2950 fcport->bbsc_op_state = BFA_FALSE;
2954 * Send port enable message to firmware.
/*
 * Returns BFA_FALSE when the request queue is full (caller waits on
 * reqq_wait and retries via qresume), BFA_TRUE once the message is queued.
 */
2956 static bfa_boolean_t
2957 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2959 struct bfi_fcport_enable_req_s *m;
2962 * Increment message tag before queue check, so that responses to old
2963 * requests are discarded.
2968 * check for room in queue to send request now
2970 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
/* queue full: park on the wait queue (elided early return follows) */
2972 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2973 &fcport->reqq_wait);
2977 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
2978 bfa_fn_lpu(fcport->bfa));
2979 m->nwwn = fcport->nwwn;
2980 m->pwwn = fcport->pwwn;
2981 m->port_cfg = fcport->cfg;
2982 m->msgtag = fcport->msgtag;
/* maxfrsize goes to firmware in big-endian */
2983 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
2984 m->use_flash_cfg = fcport->use_flash_cfg;
2985 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
2986 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
2987 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
2990 * queue I/O message to firmware
2992 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
2997 * Send port disable message to firmware.
/*
 * Mirror of bfa_fcport_send_enable(): returns BFA_FALSE when the request
 * queue is full (caller retries on qresume), BFA_TRUE once queued.
 */
2999 static bfa_boolean_t
3000 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3002 struct bfi_fcport_req_s *m;
3005 * Increment message tag before queue check, so that responses to old
3006 * requests are discarded.
3011 * check for room in queue to send request now
3013 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
/* queue full: park on the wait queue (elided early return follows) */
3015 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3016 &fcport->reqq_wait);
3020 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3021 bfa_fn_lpu(fcport->bfa));
3022 m->msgtag = fcport->msgtag;
3025 * queue I/O message to firmware
3027 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
/* Copy the factory/IOC WWNs into the fcport instance. */
3033 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3035 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3036 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3038 bfa_trc(fcport->bfa, fcport->pwwn);
3039 bfa_trc(fcport->bfa, fcport->nwwn);
/*
 * Push the configured TX BB_credit and bb_scn values to firmware.
 * NOTE(review): the queue-full path is elided here — from this view it is
 * unclear whether a full queue is retried or silently dropped; confirm.
 */
3043 bfa_fcport_send_txcredit(void *port_cbarg)
3046 struct bfa_fcport_s *fcport = port_cbarg;
3047 struct bfi_fcport_set_svc_params_req_s *m;
3050 * check for room in queue to send request now
3052 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3054 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3058 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3059 bfa_fn_lpu(fcport->bfa));
3060 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3061 m->bb_scn = fcport->cfg.bb_scn;
3064 * queue I/O message to firmware
3066 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
/*
 * Byte-swap the firmware QoS statistics (big-endian 32-bit words) into
 * host order, word by word.
 */
3070 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3071 struct bfa_qos_stats_s *s)
3073 u32 *dip = (u32 *) d;
3074 __be32 *sip = (__be32 *) s;
3077 /* Now swap the 32 bit fields */
3078 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3079 dip[i] = be32_to_cpu(sip[i]);
/*
 * Byte-swap FCoE statistics. Elements are handled in pairs: one branch
 * (condition elided in this view) swaps in place, the other also exchanges
 * the two words of what is presumably a 64-bit counter — confirm against
 * full source.
 */
3083 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3084 struct bfa_fcoe_stats_s *s)
3086 u32 *dip = (u32 *) d;
3087 __be32 *sip = (__be32 *) s;
3090 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3093 dip[i] = be32_to_cpu(sip[i]);
3094 dip[i + 1] = be32_to_cpu(sip[i + 1]);
3096 dip[i] = be32_to_cpu(sip[i + 1]);
3097 dip[i + 1] = be32_to_cpu(sip[i]);
/*
 * Deferred completion of a stats-get request: on success, byte-swap the
 * DMA'd stats (FC QoS vs FCoE chosen by IOC mode), stamp the
 * seconds-since-reset field, invoke the caller's callback and clear busy.
 */
3103 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3105 struct bfa_fcport_s *fcport = cbarg;
3108 if (fcport->stats_status == BFA_STATUS_OK) {
3111 /* Swap FC QoS or FCoE stats */
3112 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
3113 bfa_fcport_qos_stats_swap(
3114 &fcport->stats_ret->fcqos,
3115 &fcport->stats->fcqos);
3117 bfa_fcport_fcoe_stats_swap(
3118 &fcport->stats_ret->fcoe,
3119 &fcport->stats->fcoe);
3121 do_gettimeofday(&tv);
3122 fcport->stats_ret->fcoe.secs_reset =
3123 tv.tv_sec - fcport->stats_reset_time;
3126 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3128 fcport->stats_busy = BFA_FALSE;
3129 fcport->stats_status = BFA_STATUS_OK;
/*
 * Stats-get timer expiry: cancel a pending queue-full wait if any, mark the
 * request timed out and complete it through the deferred callback.
 */
3134 bfa_fcport_stats_get_timeout(void *cbarg)
3136 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3138 bfa_trc(fcport->bfa, fcport->stats_qfull);
3140 if (fcport->stats_qfull) {
3141 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3142 fcport->stats_qfull = BFA_FALSE;
3145 fcport->stats_status = BFA_STATUS_ETIMER;
3146 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
/*
 * Queue a STATS_GET request to firmware; if the request queue is full,
 * re-arm itself as the queue-resume callback and wait.
 */
3151 bfa_fcport_send_stats_get(void *cbarg)
3153 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3154 struct bfi_fcport_req_s *msg;
3156 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
/* queue full: retry when space frees up (elided early return follows) */
3159 fcport->stats_qfull = BFA_TRUE;
3160 bfa_reqq_winit(&fcport->stats_reqq_wait,
3161 bfa_fcport_send_stats_get, fcport);
3162 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3163 &fcport->stats_reqq_wait);
3166 fcport->stats_qfull = BFA_FALSE;
3168 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3169 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3170 bfa_fn_lpu(fcport->bfa));
3171 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
/*
 * Deferred completion of a stats-clear request: restart the reset
 * timestamp, invoke the caller's callback and clear the busy flag.
 */
3175 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3177 struct bfa_fcport_s *fcport = cbarg;
3183 * re-initialize time stamp for stats reset
3185 do_gettimeofday(&tv);
3186 fcport->stats_reset_time = tv.tv_sec;
3188 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3190 fcport->stats_busy = BFA_FALSE;
3191 fcport->stats_status = BFA_STATUS_OK;
/*
 * Stats-clear timer expiry: same pattern as the stats-get timeout —
 * cancel any queue-full wait, flag ETIMER and complete via callback.
 */
3196 bfa_fcport_stats_clr_timeout(void *cbarg)
3198 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3200 bfa_trc(fcport->bfa, fcport->stats_qfull);
3202 if (fcport->stats_qfull) {
3203 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3204 fcport->stats_qfull = BFA_FALSE;
3207 fcport->stats_status = BFA_STATUS_ETIMER;
3208 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3209 __bfa_cb_fcport_stats_clr, fcport);
/*
 * Queue a STATS_CLEAR request to firmware, with the same queue-full
 * retry scheme as bfa_fcport_send_stats_get().
 */
3213 bfa_fcport_send_stats_clear(void *cbarg)
3215 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3216 struct bfi_fcport_req_s *msg;
3218 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
/* queue full: retry when space frees up (elided early return follows) */
3221 fcport->stats_qfull = BFA_TRUE;
3222 bfa_reqq_winit(&fcport->stats_reqq_wait,
3223 bfa_fcport_send_stats_clear, fcport);
3224 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3225 &fcport->stats_reqq_wait);
3228 fcport->stats_qfull = BFA_FALSE;
3230 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3231 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3232 bfa_fn_lpu(fcport->bfa));
3233 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3237 * Handle trunk SCN event from firmware.
/*
 * Snapshot trunk state/speed and per-link attributes from the firmware SCN
 * message, log a plog entry, and notify upper layers when the trunk state
 * changed (or the trunk went offline).
 */
3240 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3242 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3243 struct bfi_fcport_trunk_link_s *tlink;
3244 struct bfa_trunk_link_attr_s *lattr;
3245 enum bfa_trunk_state state_prev;
3249 bfa_trc(fcport->bfa, fcport->cfg.trunked);
3250 WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3251 scn->trunk_state != BFA_TRUNK_OFFLINE);
3253 bfa_trc(fcport->bfa, trunk->attr.state);
3254 bfa_trc(fcport->bfa, scn->trunk_state);
3255 bfa_trc(fcport->bfa, scn->trunk_speed);
3258 * Save off new state for trunk attribute query
3260 state_prev = trunk->attr.state;
/* only track f/w state while trunking is configured and not disabled */
3261 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3262 trunk->attr.state = scn->trunk_state;
3263 trunk->attr.speed = scn->trunk_speed;
3264 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3265 lattr = &trunk->attr.link_attr[i];
3266 tlink = &scn->tlink[i];
3268 lattr->link_state = tlink->state;
3269 lattr->trunk_wwn = tlink->trunk_wwn;
3270 lattr->fctl = tlink->fctl;
3271 lattr->speed = tlink->speed;
3272 lattr->deskew = be32_to_cpu(tlink->deskew);
/* any UP member link implies the port itself is up in P2P */
3274 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3275 fcport->speed = tlink->speed;
3276 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3280 bfa_trc(fcport->bfa, lattr->link_state);
3281 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3282 bfa_trc(fcport->bfa, lattr->fctl);
3283 bfa_trc(fcport->bfa, lattr->speed);
3284 bfa_trc(fcport->bfa, lattr->deskew);
/* plog which member links came up (switch/conditions elided in this view) */
3289 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3290 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3293 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3294 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3297 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3298 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3301 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3302 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3306 * Notify upper layers if trunk state changed.
3308 if ((state_prev != trunk->attr.state) ||
3309 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3310 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3311 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
/*
 * IOC-disable handling for trunking: declare the trunk offline, push a
 * linkdown to upper layers when it was online, and reset every per-link
 * attribute to its "link down" default.
 */
3316 bfa_trunk_iocdisable(struct bfa_s *bfa)
3318 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3322 * In trunked mode, notify upper layers that link is down
3324 if (fcport->cfg.trunked) {
3325 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3326 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3328 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3329 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3330 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3331 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3332 fcport->trunk.attr.link_attr[i].fctl =
3333 BFA_TRUNK_LINK_FCTL_NORMAL;
3334 fcport->trunk.attr.link_attr[i].link_state =
3335 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3336 fcport->trunk.attr.link_attr[i].speed =
3337 BFA_PORT_SPEED_UNKNOWN;
3338 fcport->trunk.attr.link_attr[i].deskew = 0;
3344 * Called to initialize port attributes
/*
 * Late init: pull WWNs, default frame size, RX BB_credit and supported
 * speed from the IOC, and note the PBC-disabled state.
 */
3347 bfa_fcport_init(struct bfa_s *bfa)
3349 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3352 * Initialize port attributes from IOC hardware data.
3354 bfa_fcport_set_wwns(fcport);
3355 if (fcport->cfg.maxfrsize == 0)
3356 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3357 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3358 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3360 if (bfa_fcport_is_pbcdisabled(bfa))
3361 bfa->modules.port.pbc_disabled = BFA_TRUE;
/* sanity: these must all be non-zero after IOC init */
3363 WARN_ON(!fcport->cfg.maxfrsize);
3364 WARN_ON(!fcport->cfg.rx_bbcredit);
3365 WARN_ON(!fcport->speed_sup);
3369 * Firmware message handler.
/*
 * Dispatch firmware-to-host fcport messages: enable/disable responses
 * (matched by msgtag), link events, trunk SCNs, stats responses and
 * enable/disable AENs — each translated into a state-machine event or a
 * deferred stats callback.
 */
3372 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3374 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3375 union bfi_fcport_i2h_msg_u i2hmsg;
3378 fcport->event_arg.i2hmsg = i2hmsg;
3380 bfa_trc(bfa, msg->mhdr.msg_id);
3381 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3383 switch (msg->mhdr.msg_id) {
3384 case BFI_FCPORT_I2H_ENABLE_RSP:
/* stale responses (msgtag mismatch) are ignored */
3385 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
/* first enable after load may return flash-stored config */
3387 if (fcport->use_flash_cfg) {
3388 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3389 fcport->cfg.maxfrsize =
3390 cpu_to_be16(fcport->cfg.maxfrsize);
3391 fcport->cfg.path_tov =
3392 cpu_to_be16(fcport->cfg.path_tov);
3393 fcport->cfg.q_depth =
3394 cpu_to_be16(fcport->cfg.q_depth);
3396 if (fcport->cfg.trunked)
3397 fcport->trunk.attr.state =
3400 fcport->trunk.attr.state =
3402 fcport->use_flash_cfg = BFA_FALSE;
3405 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3409 case BFI_FCPORT_I2H_DISABLE_RSP:
3410 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3411 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3414 case BFI_FCPORT_I2H_EVENT:
3415 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3416 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3418 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3421 case BFI_FCPORT_I2H_TRUNK_SCN:
3422 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3425 case BFI_FCPORT_I2H_STATS_GET_RSP:
3427 * check for timer pop before processing the rsp
3429 if (fcport->stats_busy == BFA_FALSE ||
3430 fcport->stats_status == BFA_STATUS_ETIMER)
3433 bfa_timer_stop(&fcport->timer);
3434 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3435 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3436 __bfa_cb_fcport_stats_get, fcport);
3439 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3441 * check for timer pop before processing the rsp
3443 if (fcport->stats_busy == BFA_FALSE ||
3444 fcport->stats_status == BFA_STATUS_ETIMER)
3447 bfa_timer_stop(&fcport->timer);
3448 fcport->stats_status = BFA_STATUS_OK;
3449 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3450 __bfa_cb_fcport_stats_clr, fcport);
3453 case BFI_FCPORT_I2H_ENABLE_AEN:
3454 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3457 case BFI_FCPORT_I2H_DISABLE_AEN:
3458 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3468 * Registered callback for port events.
/* Register the (single) upper-layer callback for link-state events. */
3471 bfa_fcport_event_register(struct bfa_s *bfa,
3472 void (*cbfn) (void *cbarg,
3473 enum bfa_port_linkstate event),
3476 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3478 fcport->event_cbfn = cbfn;
3479 fcport->event_cbarg = cbarg;
/*
 * Administratively enable the port. Rejected when pre-boot config (PBC)
 * disabled the port, the IOC is down, or diagnostics own the port.
 */
3483 bfa_fcport_enable(struct bfa_s *bfa)
3485 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3487 if (bfa_fcport_is_pbcdisabled(bfa))
3488 return BFA_STATUS_PBC;
3490 if (bfa_ioc_is_disabled(&bfa->ioc))
3491 return BFA_STATUS_IOC_DISABLED;
3493 if (fcport->diag_busy)
3494 return BFA_STATUS_DIAG_BUSY;
3496 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3497 return BFA_STATUS_OK;
/* Administratively disable the port (same PBC/IOC guards as enable). */
3501 bfa_fcport_disable(struct bfa_s *bfa)
3503 if (bfa_fcport_is_pbcdisabled(bfa))
3504 return BFA_STATUS_PBC;
3506 if (bfa_ioc_is_disabled(&bfa->ioc))
3507 return BFA_STATUS_IOC_DISABLED;
3509 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3510 return BFA_STATUS_OK;
3513 /* If PBC is disabled on port, return error */
3515 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3517 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3518 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3519 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3521 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3522 bfa_trc(bfa, fcport->pwwn);
3523 return BFA_STATUS_PBC;
3525 return BFA_STATUS_OK;
3529 * Configure port speed.
/*
 * Validate and store the requested port speed. Rejected while trunking is
 * enabled, when the speed exceeds the supported maximum, and for Mezz-card
 * specific restrictions (no 1G on CT2 FC; FCoE side is 10G only).
 */
3532 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3534 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3536 bfa_trc(bfa, speed);
3538 if (fcport->cfg.trunked == BFA_TRUE)
3539 return BFA_STATUS_TRUNK_ENABLED;
3540 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3541 bfa_trc(bfa, fcport->speed_sup);
3542 return BFA_STATUS_UNSUPP_SPEED;
3545 /* For Mezz card, port speed entered needs to be checked */
3546 if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
3547 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3548 /* For CT2, 1G is not supported */
3549 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3550 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3551 return BFA_STATUS_UNSUPP_SPEED;
3553 /* Already checked for Auto Speed and Max Speed supp */
3554 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3555 speed == BFA_PORT_SPEED_2GBPS ||
3556 speed == BFA_PORT_SPEED_4GBPS ||
3557 speed == BFA_PORT_SPEED_8GBPS ||
3558 speed == BFA_PORT_SPEED_16GBPS ||
3559 speed == BFA_PORT_SPEED_AUTO))
3560 return BFA_STATUS_UNSUPP_SPEED;
/* non-FC (FCoE) mezz path (elided else): only 10G is valid */
3562 if (speed != BFA_PORT_SPEED_10GBPS)
3563 return BFA_STATUS_UNSUPP_SPEED;
3567 fcport->cfg.speed = speed;
3569 return BFA_STATUS_OK;
3573 * Get current speed.
3576 bfa_fcport_get_speed(struct bfa_s *bfa)
3578 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3580 return fcport->speed;
3584 * Configure port topology.
/* Accept only P2P, LOOP or AUTO; anything else is EINVAL. */
3587 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3589 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3591 bfa_trc(bfa, topology);
3592 bfa_trc(bfa, fcport->cfg.topology);
3595 case BFA_PORT_TOPOLOGY_P2P:
3596 case BFA_PORT_TOPOLOGY_LOOP:
3597 case BFA_PORT_TOPOLOGY_AUTO:
3601 return BFA_STATUS_EINVAL;
3604 fcport->cfg.topology = topology;
3605 return BFA_STATUS_OK;
3609 * Get current topology.
3611 enum bfa_port_topology
3612 bfa_fcport_get_topology(struct bfa_s *bfa)
3614 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3616 return fcport->topology;
/* Enable hard-ALPA addressing and record the configured ALPA value. */
3620 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3622 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3625 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3626 bfa_trc(bfa, fcport->cfg.hardalpa);
3628 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3629 fcport->cfg.hardalpa = alpa;
3631 return BFA_STATUS_OK;
/* Disable hard-ALPA addressing (configured value is left in place). */
3635 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3637 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3639 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3640 bfa_trc(bfa, fcport->cfg.hardalpa);
3642 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3643 return BFA_STATUS_OK;
/* Return the configured ALPA via *alpa; result is whether it is enabled. */
3647 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3649 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3651 *alpa = fcport->cfg.hardalpa;
3652 return fcport->cfg.cfg_hardalpa;
/* ALPA actually assigned to this port on the loop. */
3656 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3658 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3660 return fcport->myalpa;
/*
 * Set the maximum FC frame size: must be within [FC_MIN_PDUSZ,
 * FC_MAX_PDUSZ] and a power of two unless it is exactly FC_MAX_PDUSZ (2112).
 */
3664 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3666 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3668 bfa_trc(bfa, maxfrsize);
3669 bfa_trc(bfa, fcport->cfg.maxfrsize);
3672 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3673 return BFA_STATUS_INVLD_DFSZ;
3675 /* power of 2, if not the max frame size of 2112 */
3676 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3677 return BFA_STATUS_INVLD_DFSZ;
3679 fcport->cfg.maxfrsize = maxfrsize;
3680 return BFA_STATUS_OK;
3684 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3686 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3688 return fcport->cfg.maxfrsize;
3692 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3694 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3696 return fcport->cfg.rx_bbcredit;
/*
 * Store TX BB_credit/bb_scn and push them to firmware immediately.
 * NOTE(review): tx_bbcredit is narrowed from u16 to a u8 config field here —
 * values > 255 would be truncated; confirm valid range with full source.
 */
3700 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3702 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3704 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3705 fcport->cfg.bb_scn = bb_scn;
3707 fcport->bbsc_op_state = BFA_TRUE;
3708 bfa_fcport_send_txcredit(fcport);
3712 * Get port attributes.
/* Return node WWN when 'node' is true, otherwise the port WWN. */
3716 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3718 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3720 return fcport->nwwn;
3722 return fcport->pwwn;
/*
 * Populate a full bfa_port_attr_s snapshot: WWNs, configuration copy,
 * speed/topology/beacon attributes and an effective port state that is
 * overridden by PBC-disabled, IOC-disabled, firmware-mismatch or
 * address-acquisition conditions.
 */
3726 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3728 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3730 memset(attr, 0, sizeof(struct bfa_port_attr_s));
3732 attr->nwwn = fcport->nwwn;
3733 attr->pwwn = fcport->pwwn;
3735 attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3736 attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
3738 memcpy(&attr->pport_cfg, &fcport->cfg,
3739 sizeof(struct bfa_port_cfg_s));
3740 /* speed attributes */
3741 attr->pport_cfg.speed = fcport->cfg.speed;
3742 attr->speed_supported = fcport->speed_sup;
3743 attr->speed = fcport->speed;
3744 attr->cos_supported = FC_CLASS_3;
3746 /* topology attributes */
3747 attr->pport_cfg.topology = fcport->cfg.topology;
3748 attr->topology = fcport->topology;
3749 attr->pport_cfg.trunked = fcport->cfg.trunked;
3751 /* beacon attributes */
3752 attr->beacon = fcport->beacon;
3753 attr->link_e2e_beacon = fcport->link_e2e_beacon;
3755 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
3756 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
3757 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3758 attr->bbsc_op_status = fcport->bbsc_op_state;
3760 /* PBC Disabled State */
3761 if (bfa_fcport_is_pbcdisabled(bfa))
3762 attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3764 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3765 attr->port_state = BFA_PORT_ST_IOCDIS;
3766 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3767 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3768 else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
3769 attr->port_state = BFA_PORT_ST_ACQ_ADDR;
3773 attr->fcoe_vlan = fcport->fcoe_vlan;
/* Timeout (ms units per bfa_timer convention — confirm) for stats ops. */
3776 #define BFA_FCPORT_STATS_TOV 1000
3779 * Fetch port statistics (FCQoS or FCoE).
/*
 * Start an asynchronous stats fetch: one outstanding request at a time
 * (DEVBUSY otherwise); completion arrives via cbfn after the firmware
 * response or via the guard timer on timeout.
 */
3782 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3783 bfa_cb_port_t cbfn, void *cbarg)
3785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3787 if (fcport->stats_busy) {
3788 bfa_trc(bfa, fcport->stats_busy);
3789 return BFA_STATUS_DEVBUSY;
3792 fcport->stats_busy = BFA_TRUE;
3793 fcport->stats_ret = stats;
3794 fcport->stats_cbfn = cbfn;
3795 fcport->stats_cbarg = cbarg;
3797 bfa_fcport_send_stats_get(fcport);
3799 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3800 fcport, BFA_FCPORT_STATS_TOV);
3801 return BFA_STATUS_OK;
3805 * Reset port statistics (FCQoS or FCoE).
/* Asynchronous stats clear; same busy/timer pattern as get_stats above. */
3808 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3810 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3812 if (fcport->stats_busy) {
3813 bfa_trc(bfa, fcport->stats_busy);
3814 return BFA_STATUS_DEVBUSY;
3817 fcport->stats_busy = BFA_TRUE;
3818 fcport->stats_cbfn = cbfn;
3819 fcport->stats_cbarg = cbarg;
3821 bfa_fcport_send_stats_clear(fcport);
3823 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3824 fcport, BFA_FCPORT_STATS_TOV);
3825 return BFA_STATUS_OK;
3830 * Fetch port attributes.
/* True when the port state machine is in the DISABLED state. */
3833 bfa_fcport_is_disabled(struct bfa_s *bfa)
3835 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3837 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3838 BFA_PORT_ST_DISABLED;
/* True when target rate limiting is configured. */
3843 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3845 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3847 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3852 * Enable/Disable FAA feature in port config
3855 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3857 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3859 bfa_trc(bfa, state);
3860 fcport->cfg.faa_state = state;
3864 * Get default minimum ratelim speed
3867 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3869 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3871 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3872 return fcport->cfg.trl_def_speed;
/* Record the requested beaconing flags (port and end-to-end link). */
3877 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
3878 bfa_boolean_t link_e2e_beacon)
3880 struct bfa_s *bfa = dev;
3881 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3883 bfa_trc(bfa, beacon);
3884 bfa_trc(bfa, link_e2e_beacon);
3885 bfa_trc(bfa, fcport->beacon);
3886 bfa_trc(bfa, fcport->link_e2e_beacon);
3888 fcport->beacon = beacon;
3889 fcport->link_e2e_beacon = link_e2e_beacon;
/*
 * Link is up when either the port SM is in linkup (non-trunked) or the
 * trunk is online (trunked).
 */
3893 bfa_fcport_is_linkup(struct bfa_s *bfa)
3895 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3897 return (!fcport->cfg.trunked &&
3898 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3899 (fcport->cfg.trunked &&
3900 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3904 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3906 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3908 return fcport->cfg.qos_enabled;
3912 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3914 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3916 return fcport->cfg.trunked;
3920 * Rport State machine functions
3923 * Beginning state, only online event expected.
/* uninit: only CREATE is legal; anything else is a state-machine fault. */
3926 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
3928 bfa_trc(rp->bfa, rp->rport_tag);
3929 bfa_trc(rp->bfa, event);
3932 case BFA_RPORT_SM_CREATE:
3933 bfa_stats(rp, sm_un_cr);
3934 bfa_sm_set_state(rp, bfa_rport_sm_created);
3938 bfa_stats(rp, sm_un_unexp);
3939 bfa_sm_fault(rp->bfa, event);
/*
 * created: ONLINE sends fwcreate (or queues it on reqq-full), DELETE
 * returns to uninit, HWFAIL parks in iocdisable.
 */
3944 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
3946 bfa_trc(rp->bfa, rp->rport_tag);
3947 bfa_trc(rp->bfa, event);
3950 case BFA_RPORT_SM_ONLINE:
3951 bfa_stats(rp, sm_cr_on);
3952 if (bfa_rport_send_fwcreate(rp))
3953 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
3955 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
3958 case BFA_RPORT_SM_DELETE:
3959 bfa_stats(rp, sm_cr_del);
3960 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
3964 case BFA_RPORT_SM_HWFAIL:
3965 bfa_stats(rp, sm_cr_hwf);
3966 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3970 bfa_stats(rp, sm_cr_unexp);
3971 bfa_sm_fault(rp->bfa, event);
3976 * Waiting for rport create response from firmware.
/*
 * fwcreate: FWRSP completes onlining; DELETE/OFFLINE arriving before the
 * response are deferred through the *_pending states.
 */
3979 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
3981 bfa_trc(rp->bfa, rp->rport_tag);
3982 bfa_trc(rp->bfa, event);
3985 case BFA_RPORT_SM_FWRSP:
3986 bfa_stats(rp, sm_fwc_rsp);
3987 bfa_sm_set_state(rp, bfa_rport_sm_online);
3988 bfa_rport_online_cb(rp);
3991 case BFA_RPORT_SM_DELETE:
3992 bfa_stats(rp, sm_fwc_del);
3993 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
3996 case BFA_RPORT_SM_OFFLINE:
3997 bfa_stats(rp, sm_fwc_off);
3998 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4001 case BFA_RPORT_SM_HWFAIL:
4002 bfa_stats(rp, sm_fwc_hwf);
4003 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4007 bfa_stats(rp, sm_fwc_unexp);
4008 bfa_sm_fault(rp->bfa, event);
4013 * Request queue is full, awaiting queue resume to send create request.
/* fwcreate_qfull: every exit path must cancel the reqq wait element. */
4016 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4018 bfa_trc(rp->bfa, rp->rport_tag);
4019 bfa_trc(rp->bfa, event);
4022 case BFA_RPORT_SM_QRESUME:
4023 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4024 bfa_rport_send_fwcreate(rp);
4027 case BFA_RPORT_SM_DELETE:
4028 bfa_stats(rp, sm_fwc_del);
4029 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4030 bfa_reqq_wcancel(&rp->reqq_wait);
4034 case BFA_RPORT_SM_OFFLINE:
4035 bfa_stats(rp, sm_fwc_off);
4036 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4037 bfa_reqq_wcancel(&rp->reqq_wait);
4038 bfa_rport_offline_cb(rp);
4041 case BFA_RPORT_SM_HWFAIL:
4042 bfa_stats(rp, sm_fwc_hwf);
4043 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4044 bfa_reqq_wcancel(&rp->reqq_wait);
4048 bfa_stats(rp, sm_fwc_unexp);
4049 bfa_sm_fault(rp->bfa, event);
4054 * Online state - normal parking state.
/*
 * online: OFFLINE/DELETE start a firmware delete (qfull variants when the
 * request queue is full); QOS_SCN byte-swaps the firmware flow-ids and
 * notifies the driver of flow-id and priority changes separately.
 */
4057 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4059 struct bfi_rport_qos_scn_s *qos_scn;
4061 bfa_trc(rp->bfa, rp->rport_tag);
4062 bfa_trc(rp->bfa, event);
4065 case BFA_RPORT_SM_OFFLINE:
4066 bfa_stats(rp, sm_on_off);
4067 if (bfa_rport_send_fwdelete(rp))
4068 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4070 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4073 case BFA_RPORT_SM_DELETE:
4074 bfa_stats(rp, sm_on_del);
4075 if (bfa_rport_send_fwdelete(rp))
4076 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4078 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4081 case BFA_RPORT_SM_HWFAIL:
4082 bfa_stats(rp, sm_on_hwf);
4083 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4086 case BFA_RPORT_SM_SET_SPEED:
4087 bfa_rport_send_fwspeed(rp);
4090 case BFA_RPORT_SM_QOS_SCN:
4091 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4092 rp->qos_attr = qos_scn->new_qos_attr;
4093 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4094 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4095 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4096 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
/* flow ids arrive big-endian from firmware; convert in place */
4098 qos_scn->old_qos_attr.qos_flow_id =
4099 be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4100 qos_scn->new_qos_attr.qos_flow_id =
4101 be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4103 if (qos_scn->old_qos_attr.qos_flow_id !=
4104 qos_scn->new_qos_attr.qos_flow_id)
4105 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4106 qos_scn->old_qos_attr,
4107 qos_scn->new_qos_attr);
4108 if (qos_scn->old_qos_attr.qos_priority !=
4109 qos_scn->new_qos_attr.qos_priority)
4110 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4111 qos_scn->old_qos_attr,
4112 qos_scn->new_qos_attr);
4116 bfa_stats(rp, sm_on_unexp);
4117 bfa_sm_fault(rp->bfa, event);
4122 * Firmware rport is being deleted - awaiting f/w response.
4125 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4127 bfa_trc(rp->bfa, rp->rport_tag);
4128 bfa_trc(rp->bfa, event);
4131 case BFA_RPORT_SM_FWRSP:
4132 bfa_stats(rp, sm_fwd_rsp);
4133 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4134 bfa_rport_offline_cb(rp);
4137 case BFA_RPORT_SM_DELETE:
4138 bfa_stats(rp, sm_fwd_del);
4139 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4142 case BFA_RPORT_SM_HWFAIL:
4143 bfa_stats(rp, sm_fwd_hwf);
4144 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4145 bfa_rport_offline_cb(rp);
4149 bfa_stats(rp, sm_fwd_unexp);
4150 bfa_sm_fault(rp->bfa, event);
4155 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4157 bfa_trc(rp->bfa, rp->rport_tag);
4158 bfa_trc(rp->bfa, event);
4161 case BFA_RPORT_SM_QRESUME:
4162 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4163 bfa_rport_send_fwdelete(rp);
4166 case BFA_RPORT_SM_DELETE:
4167 bfa_stats(rp, sm_fwd_del);
4168 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4171 case BFA_RPORT_SM_HWFAIL:
4172 bfa_stats(rp, sm_fwd_hwf);
4173 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4174 bfa_reqq_wcancel(&rp->reqq_wait);
4175 bfa_rport_offline_cb(rp);
4179 bfa_stats(rp, sm_fwd_unexp);
4180 bfa_sm_fault(rp->bfa, event);
4188 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4190 bfa_trc(rp->bfa, rp->rport_tag);
4191 bfa_trc(rp->bfa, event);
4194 case BFA_RPORT_SM_DELETE:
4195 bfa_stats(rp, sm_off_del);
4196 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4200 case BFA_RPORT_SM_ONLINE:
4201 bfa_stats(rp, sm_off_on);
4202 if (bfa_rport_send_fwcreate(rp))
4203 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4205 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4208 case BFA_RPORT_SM_HWFAIL:
4209 bfa_stats(rp, sm_off_hwf);
4210 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4214 bfa_stats(rp, sm_off_unexp);
4215 bfa_sm_fault(rp->bfa, event);
4220 * Rport is deleted, waiting for firmware response to delete.
4223 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4225 bfa_trc(rp->bfa, rp->rport_tag);
4226 bfa_trc(rp->bfa, event);
4229 case BFA_RPORT_SM_FWRSP:
4230 bfa_stats(rp, sm_del_fwrsp);
4231 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4235 case BFA_RPORT_SM_HWFAIL:
4236 bfa_stats(rp, sm_del_hwf);
4237 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4242 bfa_sm_fault(rp->bfa, event);
/*
 * Rport is deleted but the firmware delete request is still queued
 * behind a full request queue.
 */
4247 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4249 bfa_trc(rp->bfa, rp->rport_tag);
4250 bfa_trc(rp->bfa, event);
/* Queue space available: issue the deferred firmware delete. */
4253 case BFA_RPORT_SM_QRESUME:
4254 bfa_stats(rp, sm_del_fwrsp);
4255 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4256 bfa_rport_send_fwdelete(rp);
/* IOC failure: cancel the queue wait; delete completes implicitly. */
4259 case BFA_RPORT_SM_HWFAIL:
4260 bfa_stats(rp, sm_del_hwf);
4261 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4262 bfa_reqq_wcancel(&rp->reqq_wait);
4267 bfa_sm_fault(rp->bfa, event);
4272 * Waiting for rport create response from firmware. A delete is pending.
4275 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4276 enum bfa_rport_event event)
4278 bfa_trc(rp->bfa, rp->rport_tag);
4279 bfa_trc(rp->bfa, event);
/* Create response arrived: immediately chase it with the delete;
 * fall to the qfull variant if the request queue is full. */
4282 case BFA_RPORT_SM_FWRSP:
4283 bfa_stats(rp, sm_delp_fwrsp);
4284 if (bfa_rport_send_fwdelete(rp))
4285 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4287 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4290 case BFA_RPORT_SM_HWFAIL:
4291 bfa_stats(rp, sm_delp_hwf);
4292 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4297 bfa_stats(rp, sm_delp_unexp);
4298 bfa_sm_fault(rp->bfa, event);
4303 * Waiting for rport create response from firmware. Rport offline is pending.
4306 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4307 enum bfa_rport_event event)
4309 bfa_trc(rp->bfa, rp->rport_tag);
4310 bfa_trc(rp->bfa, event);
/* Create response arrived: undo it with a firmware delete so the
 * rport can go offline; handle a full request queue separately. */
4313 case BFA_RPORT_SM_FWRSP:
4314 bfa_stats(rp, sm_offp_fwrsp);
4315 if (bfa_rport_send_fwdelete(rp))
4316 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4318 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
/* Host delete supersedes the pending offline. */
4321 case BFA_RPORT_SM_DELETE:
4322 bfa_stats(rp, sm_offp_del);
4323 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4326 case BFA_RPORT_SM_HWFAIL:
4327 bfa_stats(rp, sm_offp_hwf);
4328 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4332 bfa_stats(rp, sm_offp_unexp);
4333 bfa_sm_fault(rp->bfa, event);
/*
 * IOC is disabled/failed; absorb events until the rport is taken
 * offline, deleted, or brought back online.
 */
4341 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4343 bfa_trc(rp->bfa, rp->rport_tag);
4344 bfa_trc(rp->bfa, event);
/* Offline: just deliver the offline callback; no firmware to talk to. */
4347 case BFA_RPORT_SM_OFFLINE:
4348 bfa_stats(rp, sm_iocd_off);
4349 bfa_rport_offline_cb(rp);
4352 case BFA_RPORT_SM_DELETE:
4353 bfa_stats(rp, sm_iocd_del);
4354 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
/* Online after IOC recovery: re-create the rport in firmware. */
4358 case BFA_RPORT_SM_ONLINE:
4359 bfa_stats(rp, sm_iocd_on);
4360 if (bfa_rport_send_fwcreate(rp))
4361 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4363 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
/* Repeated HWFAIL while already ioc-disabled is ignored. */
4366 case BFA_RPORT_SM_HWFAIL:
4370 bfa_stats(rp, sm_iocd_unexp);
4371 bfa_sm_fault(rp->bfa, event);
4378 * bfa_rport_private BFA rport private functions
/* Completion callback: notify the driver layer that the rport is online. */
4382 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4384 struct bfa_rport_s *rp = cbarg;
4387 bfa_cb_rport_online(rp->rport_drv);
/* Completion callback: notify the driver layer that the rport is offline. */
4391 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4393 struct bfa_rport_s *rp = cbarg;
4396 bfa_cb_rport_offline(rp->rport_drv);
/* Request-queue wait callback: resume the state machine when CQ space frees up. */
4400 bfa_rport_qresume(void *cbarg)
4402 struct bfa_rport_s *rp = cbarg;
4404 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
/*
 * Compute KVA memory needed by the rport module: one bfa_rport_s per
 * configured rport, with the count clamped up to BFA_RPORT_MIN.
 */
4408 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4411 struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4413 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4414 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4417 bfa_mem_kva_setup(minfo, rport_kva,
4418 cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
/*
 * Module attach: carve the rport array out of the claimed KVA block,
 * initialize every rport, and place them all on the free queue.
 */
4422 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4423 struct bfa_pcidev_s *pcidev)
4425 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4426 struct bfa_rport_s *rp;
4429 INIT_LIST_HEAD(&mod->rp_free_q);
4430 INIT_LIST_HEAD(&mod->rp_active_q);
4431 INIT_LIST_HEAD(&mod->rp_unused_q);
4433 rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4435 mod->num_rports = cfg->fwcfg.num_rports;
/* num_rports must be a non-zero power of two. */
4437 WARN_ON(!mod->num_rports ||
4438 (mod->num_rports & (mod->num_rports - 1)));
4440 for (i = 0; i < mod->num_rports; i++, rp++) {
4441 memset(rp, 0, sizeof(struct bfa_rport_s));
4444 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4450 list_add_tail(&rp->qe, &mod->rp_free_q);
4452 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
/* Advance the KVA cursor past the consumed rport array. */
4458 bfa_mem_kva_curp(mod) = (u8 *) rp;
/* Intentionally empty lifecycle hooks for the rport module. */
4462 bfa_rport_detach(struct bfa_s *bfa)
4467 bfa_rport_start(struct bfa_s *bfa)
4472 bfa_rport_stop(struct bfa_s *bfa)
/*
 * IOC disable: reclaim unused rports and fail all active ones by
 * injecting an HWFAIL event into each rport's state machine.
 */
4477 bfa_rport_iocdisable(struct bfa_s *bfa)
4479 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4480 struct bfa_rport_s *rport;
4481 struct list_head *qe, *qen;
4483 /* Enqueue unused rport resources to free_q */
4484 list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
/* Safe iteration: the HWFAIL event may remove the rport from the list. */
4486 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4487 rport = (struct bfa_rport_s *) qe;
4488 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
/* Allocate an rport from the free queue and move it to the active queue. */
4492 static struct bfa_rport_s *
4493 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4495 struct bfa_rport_s *rport;
4497 bfa_q_deq(&mod->rp_free_q, &rport);
4499 list_add_tail(&rport->qe, &mod->rp_active_q);
/* Return an rport to the free queue; it must currently be active. */
4505 bfa_rport_free(struct bfa_rport_s *rport)
4507 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4509 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4510 list_del(&rport->qe);
4511 list_add_tail(&rport->qe, &mod->rp_free_q);
/*
 * Build and post a firmware rport-create request.  If the request queue
 * is full, register a wait callback and let QRESUME retry later.
 */
4514 static bfa_boolean_t
4515 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4517 struct bfi_rport_create_req_s *m;
4520 * check for room in queue to send request now
4522 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4524 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
/* Fill the create request from the cached rport_info. */
4528 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4529 bfa_fn_lpu(rp->bfa));
4530 m->bfa_handle = rp->rport_tag;
/* max_frmsz goes to firmware in big-endian. */
4531 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4532 m->pid = rp->rport_info.pid;
4533 m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4534 m->local_pid = rp->rport_info.local_pid;
4535 m->fc_class = rp->rport_info.fc_class;
4536 m->vf_en = rp->rport_info.vf_en;
4537 m->vf_id = rp->rport_info.vf_id;
4538 m->cisc = rp->rport_info.cisc;
4541 * queue I/O message to firmware
4543 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
/*
 * Build and post a firmware rport-delete request, or register a queue
 * wait if the request queue is currently full.
 */
4547 static bfa_boolean_t
4548 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4550 struct bfi_rport_delete_req_s *m;
4553 * check for room in queue to send request now
4555 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4557 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4561 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4562 bfa_fn_lpu(rp->bfa));
/* fw_handle was returned by firmware in the create response. */
4563 m->fw_handle = rp->fw_handle;
4566 * queue I/O message to firmware
4568 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
/*
 * Post a set-speed request for the rport to firmware.  Note this path
 * only traces (no reqq wait) when the request queue has no space.
 */
4572 static bfa_boolean_t
4573 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4575 struct bfa_rport_speed_req_s *m;
4578 * check for room in queue to send request now
4580 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4582 bfa_trc(rp->bfa, rp->rport_info.speed);
4586 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4587 bfa_fn_lpu(rp->bfa));
4588 m->fw_handle = rp->fw_handle;
4589 m->speed = (u8)rp->rport_info.speed;
4592 * queue I/O message to firmware
4594 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4605 * Rport interrupt processing.
4608 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4610 union bfi_rport_i2h_msg_u msg;
4611 struct bfa_rport_s *rp;
4613 bfa_trc(bfa, m->mhdr.msg_id);
/* Dispatch firmware-to-host rport messages by message id. */
4617 switch (m->mhdr.msg_id) {
/* Create response: record the firmware handle/QoS and advance the SM. */
4618 case BFI_RPORT_I2H_CREATE_RSP:
4619 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4620 rp->fw_handle = msg.create_rsp->fw_handle;
4621 rp->qos_attr = msg.create_rsp->qos_attr;
4622 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4623 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4626 case BFI_RPORT_I2H_DELETE_RSP:
4627 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4628 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4629 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
/* QoS state-change notification: stash the fw message for the SM. */
4632 case BFI_RPORT_I2H_QOS_SCN:
4633 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4634 rp->event_arg.fw_msg = msg.qos_scn_evt;
4635 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4639 bfa_trc(bfa, m->mhdr.msg_id);
/*
 * Resource reconfiguration: park the rports the firmware no longer
 * supports (num_rports - num_rport_fw of them) on the unused queue.
 */
4645 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4647 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4648 struct list_head *qe;
4651 for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4652 bfa_q_deq_tail(&mod->rp_free_q, &qe);
4653 list_add_tail(qe, &mod->rp_unused_q);
/*
 * Public rport API: create an rport bound to the given driver rport,
 * zeroing its stats and kicking the state machine with CREATE.
 */
4661 struct bfa_rport_s *
4662 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4664 struct bfa_rport_s *rp;
4666 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4672 rp->rport_drv = rport_drv;
4673 memset(&rp->stats, 0, sizeof(rp->stats));
4675 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4676 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
/* Bring the rport online with the supplied login parameters. */
4682 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4684 WARN_ON(rport_info->max_frmsz == 0);
4687 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4688 * responses. Default to minimum size.
4690 if (rport_info->max_frmsz == 0) {
4691 bfa_trc(rport->bfa, rport->rport_tag);
4692 rport_info->max_frmsz = FC_MIN_PDUSZ;
/* Struct copy: cache the caller's rport_info in the rport. */
4695 rport->rport_info = *rport_info;
4696 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
/* Set rport speed; a concrete speed is required (no AUTO, no zero). */
4700 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4702 WARN_ON(speed == 0);
4703 WARN_ON(speed == BFA_PORT_SPEED_AUTO);
4705 rport->rport_info.speed = speed;
4706 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4711 * SGPG related functions
4715 * Compute and return memory needed by FCP(im) module.
4718 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4721 struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
4722 struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
4723 struct bfa_mem_dma_s *seg_ptr;
4724 u16 nsegs, idx, per_seg_sgpg, num_sgpg;
4725 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
/* Clamp the configured SGPG count into [BFA_SGPG_MIN, BFA_SGPG_MAX]. */
4727 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4728 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4729 else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
4730 cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
4732 num_sgpg = cfg->drvcfg.num_sgpgs;
4734 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4735 per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
/* Spread the SGPGs over DMA segments; last segment holds the remainder. */
4737 bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
4738 if (num_sgpg >= per_seg_sgpg) {
4739 num_sgpg -= per_seg_sgpg;
4740 bfa_mem_dma_setup(minfo, seg_ptr,
4741 per_seg_sgpg * sgpg_sz);
4743 bfa_mem_dma_setup(minfo, seg_ptr,
4744 num_sgpg * sgpg_sz);
/* KVA for the host-side bfa_sgpg_s bookkeeping array. */
4748 bfa_mem_kva_setup(minfo, sgpg_kva,
4749 cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
/*
 * SGPG module attach: claim DMA segments, align each segment to the
 * SGPG size, pair every DMA page with a host bfa_sgpg_s, and queue
 * them all on the free list.
 */
4753 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4754 struct bfa_pcidev_s *pcidev)
4756 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4757 struct bfa_sgpg_s *hsgpg;
4758 struct bfi_sgpg_s *sgpg;
4760 struct bfa_mem_dma_s *seg_ptr;
4761 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
4762 u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
/* Union lets the same 64-bit PA be viewed as a bfi address pair. */
4766 union bfi_addr_u addr;
4767 } sgpg_pa, sgpg_pa_tmp;
4769 INIT_LIST_HEAD(&mod->sgpg_q);
4770 INIT_LIST_HEAD(&mod->sgpg_wait_q);
4772 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4774 mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
4776 num_sgpg = cfg->drvcfg.num_sgpgs;
4777 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4779 /* dma/kva mem claim */
4780 hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
4782 bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
4784 if (!bfa_mem_dma_virt(seg_ptr))
/* Round the segment base up to an SGPG-size boundary. */
4787 align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
4788 bfa_mem_dma_phys(seg_ptr);
4790 sgpg = (struct bfi_sgpg_s *)
4791 (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
4792 sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
4793 WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
4795 per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
4797 for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
4798 memset(hsgpg, 0, sizeof(*hsgpg));
4799 memset(sgpg, 0, sizeof(*sgpg));
/* Store the PA in the endianness firmware expects. */
4802 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4803 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4804 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4808 sgpg_pa.pa += sgpg_sz;
/* Advance the KVA cursor past the consumed hsgpg array. */
4812 bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
/* Intentionally empty lifecycle hooks for the SGPG module. */
4816 bfa_sgpg_detach(struct bfa_s *bfa)
4821 bfa_sgpg_start(struct bfa_s *bfa)
4826 bfa_sgpg_stop(struct bfa_s *bfa)
4831 bfa_sgpg_iocdisable(struct bfa_s *bfa)
/*
 * Allocate nsgpgs scatter-gather pages onto the caller's sgpg_q.
 * All-or-nothing: returns BFA_STATUS_ENOMEM without taking any pages
 * when fewer than nsgpgs are free.
 */
4836 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4838 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4839 struct bfa_sgpg_s *hsgpg;
4842 if (mod->free_sgpgs < nsgpgs)
4843 return BFA_STATUS_ENOMEM;
4845 for (i = 0; i < nsgpgs; i++) {
4846 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4848 list_add_tail(&hsgpg->qe, sgpg_q);
4851 mod->free_sgpgs -= nsgpgs;
4852 return BFA_STATUS_OK;
/*
 * Return nsgpg pages to the free pool, then hand out pages to waiters
 * in FIFO order, completing each waiter whose request is fully met.
 */
4856 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
4858 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4859 struct bfa_sgpg_wqe_s *wqe;
4861 mod->free_sgpgs += nsgpg;
4862 WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
4864 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
4866 if (list_empty(&mod->sgpg_wait_q))
4870 * satisfy as many waiting requests as possible
/* Partial grants are allowed: give the head waiter what we have. */
4873 wqe = bfa_q_first(&mod->sgpg_wait_q);
4874 if (mod->free_sgpgs < wqe->nsgpg)
4875 nsgpg = mod->free_sgpgs;
4878 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
4879 wqe->nsgpg -= nsgpg;
/* Waiter fully satisfied: invoke its completion callback. */
4880 if (wqe->nsgpg == 0) {
4882 wqe->cbfn(wqe->cbarg);
4884 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
/*
 * Queue a wait element for nsgpg pages.  Only called when the free
 * pool cannot satisfy the request; any pages currently free are
 * granted immediately and the remainder is waited for.
 */
4888 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
4890 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4892 WARN_ON(nsgpg <= 0);
/* Caller should only wait when there are NOT enough free pages. */
4893 WARN_ON(nsgpg <= mod->free_sgpgs);
4895 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
4898 * allocate any left to this one first
4900 if (mod->free_sgpgs) {
4902 * no one else is waiting for SGPG
4904 WARN_ON(!list_empty(&mod->sgpg_wait_q));
4905 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
4906 wqe->nsgpg -= mod->free_sgpgs;
4907 mod->free_sgpgs = 0;
4910 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
/*
 * Cancel a pending SGPG wait, returning any partially-granted pages
 * (nsgpg_total - nsgpg of them) back to the pool.
 */
4914 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4916 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4918 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4921 if (wqe->nsgpg_total != wqe->nsgpg)
4922 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4923 wqe->nsgpg_total - wqe->nsgpg);
/* Initialize a wait element with its completion callback. */
4927 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4930 INIT_LIST_HEAD(&wqe->sgpg_q);
4936 * UF related functions
4939 *****************************************************************************
4940 * Internal functions
4941 *****************************************************************************
/* Completion callback: deliver a received unsolicited frame to the
 * registered receive handler. */
4944 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4946 struct bfa_uf_s *uf = cbarg;
4947 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4950 ufm->ufrecv(ufm->cbarg, uf);
/*
 * Pre-build one BFI buf-post message per UF in claimed KVA so posting
 * a buffer later is a simple memcpy into the request queue.
 */
4954 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
4956 struct bfi_uf_buf_post_s *uf_bp_msg;
4960 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
4961 uf_bp_msg = ufm->uf_buf_posts;
4963 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4965 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4967 uf_bp_msg->buf_tag = i;
4968 buf_len = sizeof(struct bfa_uf_buf_s);
/* buf_len is sent to firmware in big-endian. */
4969 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4970 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4971 bfa_fn_lpu(ufm->bfa));
4972 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
4976 * advance pointer beyond consumed memory
4978 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
/*
 * Claim KVA for the UF array, bind each UF to its DMA buffer, and put
 * all UFs on the free queue.
 */
4982 claim_ufs(struct bfa_uf_mod_s *ufm)
4985 struct bfa_uf_s *uf;
4988 * Claim block of memory for UF list
4990 ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
4993 * Initialize UFs and queue it in UF free queue
4995 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
4996 memset(uf, 0, sizeof(struct bfa_uf_s));
4999 uf->pb_len = BFA_PER_UF_DMA_SZ;
5000 uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5001 uf->buf_pa = ufm_pbs_pa(ufm, i);
5002 list_add_tail(&uf->qe, &ufm->uf_free_q);
5006 * advance memory pointer
5008 bfa_mem_kva_curp(ufm) = (u8 *) uf;
/* Claim all UF-module memory: the UF list, then the pre-built post msgs. */
5012 uf_mem_claim(struct bfa_uf_mod_s *ufm)
5015 claim_uf_post_msgs(ufm);
/*
 * Compute DMA and KVA memory needed by the UF module: one DMA buffer
 * per UF plus host-side bfa_uf_s and pre-built post messages.
 */
5019 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5022 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5023 struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5024 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
5025 struct bfa_mem_dma_s *seg_ptr;
5026 u16 nsegs, idx, per_seg_uf = 0;
5028 nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5029 per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
/* Distribute UF buffers across DMA segments; last segment gets the rest. */
5031 bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5032 if (num_ufs >= per_seg_uf) {
5033 num_ufs -= per_seg_uf;
5034 bfa_mem_dma_setup(minfo, seg_ptr,
5035 per_seg_uf * BFA_PER_UF_DMA_SZ);
5037 bfa_mem_dma_setup(minfo, seg_ptr,
5038 num_ufs * BFA_PER_UF_DMA_SZ);
5042 bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5043 (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
/* UF module attach: init queues and claim UF memory. */
5047 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5048 struct bfa_pcidev_s *pcidev)
5050 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5053 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5054 INIT_LIST_HEAD(&ufm->uf_free_q);
5055 INIT_LIST_HEAD(&ufm->uf_posted_q);
5056 INIT_LIST_HEAD(&ufm->uf_unused_q);
/* Intentionally empty detach hook. */
5062 bfa_uf_detach(struct bfa_s *bfa)
/* Dequeue a UF from the free queue (NULL-capable via bfa_q_deq). */
5066 static struct bfa_uf_s *
5067 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5069 struct bfa_uf_s *uf;
5071 bfa_q_deq(&uf_mod->uf_free_q, &uf);
/* Return a UF to the free queue. */
5076 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5078 list_add_tail(&uf->qe, &uf_mod->uf_free_q);
/*
 * Post one UF buffer to firmware by copying its pre-built buf-post
 * message into the request queue; the UF moves to the posted queue.
 */
5082 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5084 struct bfi_uf_buf_post_s *uf_post_msg;
5086 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5088 return BFA_STATUS_FAILED;
5090 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5091 sizeof(struct bfi_uf_buf_post_s));
5092 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5094 bfa_trc(ufm->bfa, uf->uf_tag);
5096 list_add_tail(&uf->qe, &ufm->uf_posted_q);
5097 return BFA_STATUS_OK;
/* Post every free UF until the free queue empties or the reqq fills. */
5101 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5103 struct bfa_uf_s *uf;
5105 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5106 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5112 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5114 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5115 u16 uf_tag = m->buf_tag;
5116 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5117 struct bfa_uf_buf_s *uf_buf;
5119 struct fchs_s *fchs;
5121 uf_buf = (struct bfa_uf_buf_s *)
5122 bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5123 buf = &uf_buf->d[0];
5125 m->frm_len = be16_to_cpu(m->frm_len);
5126 m->xfr_len = be16_to_cpu(m->xfr_len);
5128 fchs = (struct fchs_s *)uf_buf;
5130 list_del(&uf->qe); /* dequeue from posted queue */
5133 uf->data_len = m->xfr_len;
5135 WARN_ON(uf->data_len < sizeof(struct fchs_s));
5137 if (uf->data_len == sizeof(struct fchs_s)) {
5138 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5139 uf->data_len, (struct fchs_s *)buf);
5141 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5142 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5143 BFA_PL_EID_RX, uf->data_len,
5144 (struct fchs_s *)buf, pld_w0);
5148 __bfa_cb_uf_recv(uf, BFA_TRUE);
5150 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
/* Intentionally empty stop hook. */
5154 bfa_uf_stop(struct bfa_s *bfa)
/*
 * IOC disable: reclaim unused UFs and return all posted UFs to the
 * free queue (firmware will not complete them).
 */
5159 bfa_uf_iocdisable(struct bfa_s *bfa)
5161 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5162 struct bfa_uf_s *uf;
5163 struct list_head *qe, *qen;
5165 /* Enqueue unused uf resources to free_q */
5166 list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5168 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5169 uf = (struct bfa_uf_s *) qe;
5171 bfa_uf_put(ufm, uf);
/* Module start: hand all free UF buffers to firmware. */
5176 bfa_uf_start(struct bfa_s *bfa)
5178 bfa_uf_post_all(BFA_UF_MOD(bfa));
5182 * Register handler for all unsolicted receive frames.
5184 * @param[in] bfa BFA instance
5185 * @param[in] ufrecv receive handler function
5186 * @param[in] cbarg receive handler arg
5189 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5191 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5193 ufm->ufrecv = ufrecv;
5198 * Free an unsolicited frame back to BFA.
5200 * @param[in] uf unsolicited frame to be freed
/* Freeing also re-posts all free buffers back to firmware. */
5205 bfa_uf_free(struct bfa_uf_s *uf)
5207 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5208 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5214 * uf_pub BFA uf module public functions
/* UF interrupt handler: dispatch firmware-to-host UF messages. */
5217 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5219 bfa_trc(bfa, msg->mhdr.msg_id);
5221 switch (msg->mhdr.msg_id) {
5222 case BFI_UF_I2H_FRM_RCVD:
5223 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5227 bfa_trc(bfa, msg->mhdr.msg_id);
/*
 * Resource reconfiguration: park the UFs the firmware no longer
 * supports (num_ufs - num_uf_fw of them) on the unused queue.
 */
5233 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5235 struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
5236 struct list_head *qe;
5239 for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5240 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5241 list_add_tail(qe, &mod->uf_unused_q);
5248 #define BFA_DIAG_QTEST_TOV 1000 /* msec */
5251 * Set port status to busy
/* Port is "diag busy" exactly while a loopback test holds the lock. */
5254 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5256 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5258 if (fcdiag->lb.lock)
5259 fcport->diag_busy = BFA_TRUE;
5261 fcport->diag_busy = BFA_FALSE;
/* FC-diag needs no dedicated memory; meminfo is a no-op. */
5265 bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
/* Attach only records the trace module; see comment below. */
5271 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5272 struct bfa_pcidev_s *pcidev)
5274 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5276 fcdiag->trcmod = bfa->trcmod;
5277 /* The common DIAG attach bfa_diag_attach() will do all memory claim */
/*
 * IOC disable: fail any in-flight loopback test with IOC_FAILURE,
 * release the lock, and clear the port's diag-busy status.
 */
5281 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5283 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5284 bfa_trc(fcdiag, fcdiag->lb.lock);
5285 if (fcdiag->lb.lock) {
5286 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5287 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5288 fcdiag->lb.lock = 0;
5289 bfa_fcdiag_set_busy_status(fcdiag);
/* Intentionally empty lifecycle hooks. */
5294 bfa_fcdiag_detach(struct bfa_s *bfa)
5299 bfa_fcdiag_start(struct bfa_s *bfa)
5304 bfa_fcdiag_stop(struct bfa_s *bfa)
/*
 * Queue-test timer expiry: firmware did not respond in time.  Report
 * BFA_STATUS_ETIMER with the progress made so far and release the lock.
 */
5309 bfa_fcdiag_queuetest_timeout(void *cbarg)
5311 struct bfa_fcdiag_s *fcdiag = cbarg;
5312 struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5314 bfa_trc(fcdiag, fcdiag->qtest.all);
5315 bfa_trc(fcdiag, fcdiag->qtest.count);
5317 fcdiag->qtest.timer_active = 0;
5319 res->status = BFA_STATUS_ETIMER;
/* count = number of responses received before the timeout. */
5320 res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5321 if (fcdiag->qtest.all)
5322 res->queue = fcdiag->qtest.all;
5324 bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5325 fcdiag->qtest.status = BFA_STATUS_ETIMER;
5326 fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5327 fcdiag->qtest.lock = 0;
/*
 * Send one queue-test request filled with QTEST_PAT_DEFAULT on the
 * queue under test.  Returns DEVBUSY when the request queue is full.
 */
5331 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5334 struct bfi_diag_qtest_req_s *req;
5336 req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5338 return BFA_STATUS_DEVBUSY;
5340 /* build host command */
5341 bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5342 bfa_fn_lpu(fcdiag->bfa));
/* Fill the whole payload with the known test pattern. */
5344 for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5345 req->data[i] = QTEST_PAT_DEFAULT;
5347 bfa_trc(fcdiag, fcdiag->qtest.queue);
5348 /* ring door bell */
5349 bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5350 return BFA_STATUS_OK;
/*
 * Queue-test response handler: verify the echoed payload (firmware is
 * expected to return the bitwise complement of the pattern), then
 * either continue on the same queue, advance to the next queue in
 * "all queues" mode, or finish and report the result.
 */
5354 bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5355 bfi_diag_qtest_rsp_t *rsp)
5357 struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5358 bfa_status_t status = BFA_STATUS_OK;
5361 /* Check timer, should still be active */
5362 if (!fcdiag->qtest.timer_active) {
5363 bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5368 fcdiag->qtest.count--;
/* Each response word must equal ~QTEST_PAT_DEFAULT. */
5371 for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5372 if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5373 res->status = BFA_STATUS_DATACORRUPTED;
5378 if (res->status == BFA_STATUS_OK) {
/* More iterations pending on the current queue. */
5379 if (fcdiag->qtest.count > 0) {
5380 status = bfa_fcdiag_queuetest_send(fcdiag);
5381 if (status == BFA_STATUS_OK)
5384 res->status = status;
/* "All" mode: move on to the next CQ and restart the count. */
5385 } else if (fcdiag->qtest.all > 0 &&
5386 fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5387 fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5388 fcdiag->qtest.queue++;
5389 status = bfa_fcdiag_queuetest_send(fcdiag);
5390 if (status == BFA_STATUS_OK)
5393 res->status = status;
5397 /* Stop timer when we comp all queue */
5398 if (fcdiag->qtest.timer_active) {
5399 bfa_timer_stop(&fcdiag->qtest.timer);
5400 fcdiag->qtest.timer_active = 0;
5402 res->queue = fcdiag->qtest.queue;
5403 res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5404 bfa_trc(fcdiag, res->count);
5405 bfa_trc(fcdiag, res->status);
5406 fcdiag->qtest.status = res->status;
5407 fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5408 fcdiag->qtest.lock = 0;
/*
 * Loopback response handler: copy the byte-swapped frame counters into
 * the caller's result struct, complete the callback, and release the
 * loopback lock / port diag-busy status.
 */
5412 bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5413 struct bfi_diag_lb_rsp_s *rsp)
5415 struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5417 res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
5418 res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
5419 res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
5420 res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
5421 res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
5422 res->status = rsp->res.status;
5423 fcdiag->lb.status = rsp->res.status;
5424 bfa_trc(fcdiag, fcdiag->lb.status)
5425 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5426 fcdiag->lb.lock = 0;
5427 bfa_fcdiag_set_busy_status(fcdiag);
/*
 * Build and post a loopback request on the DIAG request queue.
 * Returns DEVBUSY when no queue entry is available.
 */
5431 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5432 struct bfa_diag_loopback_s *loopback)
5434 struct bfi_diag_lb_req_s *lb_req;
5436 lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5438 return BFA_STATUS_DEVBUSY;
5440 /* build host command */
5441 bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5442 bfa_fn_lpu(fcdiag->bfa));
5444 lb_req->lb_mode = loopback->lb_mode;
5445 lb_req->speed = loopback->speed;
5446 lb_req->loopcnt = loopback->loopcnt;
5447 lb_req->pattern = loopback->pattern;
5449 /* ring door bell */
5450 bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5452 bfa_trc(fcdiag, loopback->lb_mode);
5453 bfa_trc(fcdiag, loopback->speed);
5454 bfa_trc(fcdiag, loopback->loopcnt);
5455 bfa_trc(fcdiag, loopback->pattern);
5456 return BFA_STATUS_OK;
5460 * cpe/rme intr handler
/* DIAG interrupt dispatch: route loopback and queue-test responses. */
5463 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5465 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5467 switch (msg->mhdr.msg_id) {
5468 case BFI_DIAG_I2H_LOOPBACK:
5469 bfa_fcdiag_loopback_comp(fcdiag,
5470 (struct bfi_diag_lb_rsp_s *) msg);
5472 case BFI_DIAG_I2H_QTEST:
5473 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5476 bfa_trc(fcdiag, msg->mhdr.msg_id);
5484 * @param[in] *bfa - bfa data struct
5485 * @param[in] opmode - port operation mode
5486 * @param[in] speed - port speed
5487 * @param[in] lpcnt - loop count
5488 * @param[in] pat - pattern to build packet
5489 * @param[in] *result - pt to bfa_diag_loopback_result_t data struct
5490 * @param[in] cbfn - callback function
5491 * @param[in] cbarg - callback functioin arg
/*
 * Start a diagnostic loopback test.  Validates the IOC state, PBC
 * status, port-disabled requirement, and the requested speed before
 * taking the loopback lock and sending the request to firmware.
 */
5496 bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5497 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5498 struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5501 struct bfa_diag_loopback_s loopback;
5502 struct bfa_port_attr_s attr;
5503 bfa_status_t status;
5504 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5506 if (!bfa_iocfc_is_operational(bfa))
5507 return BFA_STATUS_IOC_NON_OP;
5509 /* if port is PBC disabled, return error */
5510 if (bfa_fcport_is_pbcdisabled(bfa)) {
5511 bfa_trc(fcdiag, BFA_STATUS_PBC);
5512 return BFA_STATUS_PBC;
/* Loopback is destructive: the port must be disabled first. */
5515 if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5516 bfa_trc(fcdiag, opmode);
5517 return BFA_STATUS_PORT_NOT_DISABLED;
5520 /* Check if the speed is supported */
5521 bfa_fcport_get_attr(bfa, &attr);
5522 bfa_trc(fcdiag, attr.speed_supported);
5523 if (speed > attr.speed_supported)
5524 return BFA_STATUS_UNSUPP_SPEED;
5526 /* For Mezz card, port speed entered needs to be checked */
5527 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5528 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
/* CT2 FC ASICs do not support 1G loopback. */
5529 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5530 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5531 return BFA_STATUS_UNSUPP_SPEED;
5532 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5533 speed == BFA_PORT_SPEED_2GBPS ||
5534 speed == BFA_PORT_SPEED_4GBPS ||
5535 speed == BFA_PORT_SPEED_8GBPS ||
5536 speed == BFA_PORT_SPEED_16GBPS ||
5537 speed == BFA_PORT_SPEED_AUTO))
5538 return BFA_STATUS_UNSUPP_SPEED;
/* Non-FC (CNA) mezz ports only support 10G. */
5540 if (speed != BFA_PORT_SPEED_10GBPS)
5541 return BFA_STATUS_UNSUPP_SPEED;
5545 /* check to see if there is another destructive diag cmd running */
5546 if (fcdiag->lb.lock) {
5547 bfa_trc(fcdiag, fcdiag->lb.lock);
5548 return BFA_STATUS_DEVBUSY;
/* Take the lock and record the callback before sending. */
5551 fcdiag->lb.lock = 1;
5552 loopback.lb_mode = opmode;
5553 loopback.speed = speed;
5554 loopback.loopcnt = lpcnt;
5555 loopback.pattern = pat;
5556 fcdiag->lb.result = result;
5557 fcdiag->lb.cbfn = cbfn;
5558 fcdiag->lb.cbarg = cbarg;
5559 memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5560 bfa_fcdiag_set_busy_status(fcdiag);
5562 /* Send msg to fw */
5563 status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5568 * DIAG queue test command
5570 * @param[in] *bfa - bfa data struct
5571 * @param[in] force - 1: don't do ioc op checking
5572 * @param[in] queue - queue no. to test
5573 * @param[in] *result - pt to bfa_diag_qtest_result_t data struct
5574 * @param[in] cbfn - callback function
5575 * @param[in] *cbarg - callback functioin arg
/*
 * Start a request-queue test on one queue (queue < BFI_IOC_MAX_CQS)
 * or on all queues in sequence; arms a timeout timer on success.
 */
5580 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
5581 struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
5584 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5585 bfa_status_t status;
5586 bfa_trc(fcdiag, force);
5587 bfa_trc(fcdiag, queue);
5589 if (!force && !bfa_iocfc_is_operational(bfa))
5590 return BFA_STATUS_IOC_NON_OP;
5592 /* check to see if there is another destructive diag cmd running */
5593 if (fcdiag->qtest.lock) {
5594 bfa_trc(fcdiag, fcdiag->qtest.lock);
5595 return BFA_STATUS_DEVBUSY;
5598 /* Initialization */
5599 fcdiag->qtest.lock = 1;
5600 fcdiag->qtest.cbfn = cbfn;
5601 fcdiag->qtest.cbarg = cbarg;
5602 fcdiag->qtest.result = result;
5603 fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5605 /* Init test results */
5606 fcdiag->qtest.result->status = BFA_STATUS_OK;
5607 fcdiag->qtest.result->count = 0;
/* In-range queue number tests that one queue; otherwise test all,
 * starting from queue 0. */
5610 if (queue < BFI_IOC_MAX_CQS) {
5611 fcdiag->qtest.result->queue = (u8)queue;
5612 fcdiag->qtest.queue = (u8)queue;
5613 fcdiag->qtest.all = 0;
5615 fcdiag->qtest.result->queue = 0;
5616 fcdiag->qtest.queue = 0;
5617 fcdiag->qtest.all = 1;
5619 status = bfa_fcdiag_queuetest_send(fcdiag);
/* Arm the per-test timeout only if the first send succeeded. */
5622 if (status == BFA_STATUS_OK) {
5623 bfa_timer_start(bfa, &fcdiag->qtest.timer,
5624 bfa_fcdiag_queuetest_timeout, fcdiag,
5625 BFA_DIAG_QTEST_TOV);
5626 fcdiag->qtest.timer_active = 1;
5632 * DIAG PLB is running
5634 * @param[in] *bfa - bfa data struct
5639 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5641 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5642 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;