2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
21 #include "bfa_modules.h"
23 BFA_TRC_FILE(HAL, FCXP);
/* Bounds on the number of logical ports (LPS instances) this module manages. */
32 * LPS related definitions
34 #define BFA_LPS_MIN_LPORTS (1)
35 #define BFA_LPS_MAX_LPORTS (256)
/*
 * NOTE(review): _CB vs _CT presumably encode per-ASIC-generation vport
 * limits -- confirm against the hardware documentation.
 */
38 * Maximum Vports supported per physical port or vf.
40 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
41 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
/*
 * FC PORT related definitions
 */

/*
 * The port is considered disabled if the corresponding physical port or
 * IOC are disabled explicitly.
 *
 * Fix: the macro parameter was used unparenthesized in "&bfa->ioc"; an
 * argument such as "p + 1" would mis-bind.  Parenthesize every use of
 * the parameter.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&(bfa)->ioc) == BFA_TRUE))
/* Events fed into the fcport state machine handlers declared below. */
57 * BFA port state machine events
59 enum bfa_fcport_sm_event {
60 BFA_FCPORT_SM_START = 1, /* start port state machine */
61 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
62 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
63 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
64 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
65 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
66 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
67 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
68 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
/* Events consumed by the fcport link-notification (ln) state machines. */
72 * BFA port link notification state machine events
75 enum bfa_fcport_ln_sm_event {
76 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
77 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
78 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
/*
 * Deliver the rport-offline callback: directly when FCS is attached
 * ((__rp)->bfa->fcs non-NULL), otherwise deferred through the hcb
 * completion queue.  NOTE(review): the listing appears to have dropped
 * the "else {" / "} while (0)" framing lines of this macro.
 */
82 * RPORT related definitions
84 #define bfa_rport_offline_cb(__rp) do { \
85 if ((__rp)->bfa->fcs) \
86 bfa_cb_rport_offline((__rp)->rport_drv); \
88 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
89 __bfa_cb_rport_offline, (__rp)); \
/*
 * Online counterpart of bfa_rport_offline_cb(): direct FCS callback when
 * attached, otherwise queued via __bfa_cb_rport_online.  NOTE(review):
 * "else {" / "} while (0)" lines dropped from this listing.
 */
93 #define bfa_rport_online_cb(__rp) do { \
94 if ((__rp)->bfa->fcs) \
95 bfa_cb_rport_online((__rp)->rport_drv); \
97 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
98 __bfa_cb_rport_online, (__rp)); \
103 * forward declarations FCXP related functions
105 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
106 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
107 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
108 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
109 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
110 static void bfa_fcxp_qresume(void *cbarg);
111 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
112 struct bfi_fcxp_send_req_s *send_req);
115 * forward declarations for LPS functions
117 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
119 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
120 struct bfa_iocfc_cfg_s *cfg,
121 struct bfa_meminfo_s *meminfo,
122 struct bfa_pcidev_s *pcidev);
123 static void bfa_lps_detach(struct bfa_s *bfa);
124 static void bfa_lps_start(struct bfa_s *bfa);
125 static void bfa_lps_stop(struct bfa_s *bfa);
126 static void bfa_lps_iocdisable(struct bfa_s *bfa);
127 static void bfa_lps_login_rsp(struct bfa_s *bfa,
128 struct bfi_lps_login_rsp_s *rsp);
129 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
130 struct bfi_lps_logout_rsp_s *rsp);
131 static void bfa_lps_reqq_resume(void *lps_arg);
132 static void bfa_lps_free(struct bfa_lps_s *lps);
133 static void bfa_lps_send_login(struct bfa_lps_s *lps);
134 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
135 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
136 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
137 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
140 * forward declaration for LPS state machine
142 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
143 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
144 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
146 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
147 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
148 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
152 * forward declaration for FC Port functions
154 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
155 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
156 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
157 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
158 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
159 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
160 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
161 enum bfa_port_linkstate event, bfa_boolean_t trunk);
162 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
163 enum bfa_port_linkstate event);
164 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
165 static void bfa_fcport_stats_get_timeout(void *cbarg);
166 static void bfa_fcport_stats_clr_timeout(void *cbarg);
167 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
170 * forward declaration for FC PORT state machine
172 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
173 enum bfa_fcport_sm_event event);
174 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
175 enum bfa_fcport_sm_event event);
176 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
177 enum bfa_fcport_sm_event event);
178 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
179 enum bfa_fcport_sm_event event);
180 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
181 enum bfa_fcport_sm_event event);
182 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
183 enum bfa_fcport_sm_event event);
184 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
185 enum bfa_fcport_sm_event event);
186 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
187 enum bfa_fcport_sm_event event);
188 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
189 enum bfa_fcport_sm_event event);
190 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
191 enum bfa_fcport_sm_event event);
192 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
193 enum bfa_fcport_sm_event event);
194 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
195 enum bfa_fcport_sm_event event);
197 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
198 enum bfa_fcport_ln_sm_event event);
199 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
200 enum bfa_fcport_ln_sm_event event);
201 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
202 enum bfa_fcport_ln_sm_event event);
203 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
204 enum bfa_fcport_ln_sm_event event);
205 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
206 enum bfa_fcport_ln_sm_event event);
207 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
208 enum bfa_fcport_ln_sm_event event);
209 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
210 enum bfa_fcport_ln_sm_event event);
/*
 * Map each fcport SM handler to the externally reported port state.
 * Note: both iocdown and iocfail report BFA_PORT_ST_IOCDOWN.
 */
212 static struct bfa_sm_table_s hal_port_sm_table[] = {
213 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
214 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
215 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
216 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
217 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
218 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
219 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
220 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
221 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
222 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
223 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
224 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
229 * forward declaration for RPORT related functions
231 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
232 static void bfa_rport_free(struct bfa_rport_s *rport);
233 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
234 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
235 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
236 static void __bfa_cb_rport_online(void *cbarg,
237 bfa_boolean_t complete);
238 static void __bfa_cb_rport_offline(void *cbarg,
239 bfa_boolean_t complete);
242 * forward declaration for RPORT state machine
244 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
245 enum bfa_rport_event event);
246 static void bfa_rport_sm_created(struct bfa_rport_s *rp,
247 enum bfa_rport_event event);
248 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
249 enum bfa_rport_event event);
250 static void bfa_rport_sm_online(struct bfa_rport_s *rp,
251 enum bfa_rport_event event);
252 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
253 enum bfa_rport_event event);
254 static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
255 enum bfa_rport_event event);
256 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
257 enum bfa_rport_event event);
258 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
259 enum bfa_rport_event event);
260 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
261 enum bfa_rport_event event);
262 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
263 enum bfa_rport_event event);
264 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
265 enum bfa_rport_event event);
266 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
267 enum bfa_rport_event event);
268 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
269 enum bfa_rport_event event);
272 * PLOG related definitions
/*
 * Validate a plog record before it is copied into the ring.
 * First check: log_type must be INT or STRING.
 * NOTE(review): the second test applies to records whose log_type is
 * NOT INT yet checks log_num_ints -- looks inverted; confirm intent.
 * Return statements are on lines dropped from this listing.
 */
275 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
277 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
278 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
281 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
282 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
/*
 * Wall-clock timestamp (whole seconds) used to stamp plog records.
 * NOTE(review): do_gettimeofday() is a deprecated, non-y2038-safe API
 * in later kernels; ktime_get_real_seconds() is the modern equivalent.
 */
289 bfa_get_log_time(void)
293 do_gettimeofday(&tv);
295 /* We are interested in seconds only. */
296 system_time = tv.tv_sec;
/*
 * Append one record to the circular port log.  Drops the record when
 * logging is disabled or validation fails; stamps it with the current
 * time and advances tail (and head too, once the ring wraps, so head
 * always points at the oldest surviving record).
 */
301 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
304 struct bfa_plog_rec_s *pl_recp;
306 if (plog->plog_enabled == 0)
309 if (plkd_validate_logrec(pl_rec)) {
316 pl_recp = &(plog->plog_recs[tail]);
318 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
320 pl_recp->tv = bfa_get_log_time();
321 BFA_PL_LOG_REC_INCR(plog->tail);
323 if (plog->head == plog->tail)
324 BFA_PL_LOG_REC_INCR(plog->head);
/*
 * Reset the port log: zero the structure, write the signature string,
 * reset head/tail to an empty ring and enable logging.
 */
328 bfa_plog_init(struct bfa_plog_s *plog)
330 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
332 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
333 plog->head = plog->tail = 0;
334 plog->plog_enabled = 1;
/*
 * Log a free-form string record.  The string is bounded-copied and
 * explicitly NUL-terminated (strncpy alone does not guarantee that).
 */
338 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
339 enum bfa_plog_eid event,
340 u16 misc, char *log_str)
342 struct bfa_plog_rec_s lp;
344 if (plog->plog_enabled) {
345 memset(&lp, 0, sizeof(struct bfa_plog_rec_s))
348 lp.log_type = BFA_PL_LOG_TYPE_STRING;
350 strncpy(lp.log_entry.string_log, log_str,
351 BFA_PL_STRING_LOG_SZ - 1);
352 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
353 bfa_plog_add(plog, &lp);
/*
 * Log an array of u32 values, clamped to BFA_PL_INT_LOG_SZ entries.
 * NOTE(review): mid/event/misc assignments are on lines dropped from
 * this listing.
 */
358 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
359 enum bfa_plog_eid event,
360 u16 misc, u32 *intarr, u32 num_ints)
362 struct bfa_plog_rec_s lp;
365 if (num_ints > BFA_PL_INT_LOG_SZ)
366 num_ints = BFA_PL_INT_LOG_SZ;
368 if (plog->plog_enabled) {
369 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
372 lp.log_type = BFA_PL_LOG_TYPE_INT;
375 for (i = 0; i < num_ints; i++)
376 lp.log_entry.int_log[i] = intarr[i];
378 lp.log_num_ints = (u8) num_ints;
380 bfa_plog_add(plog, &lp);
/*
 * Log the interesting words of an FC header (words 0, 1 and 4 of the
 * raw frame header) as a 3-int record.
 * NOTE(review): reading fchdr through a u32* is a type-pun; assumed
 * fine here because struct fchs_s is wire-format and word-aligned.
 */
385 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
386 enum bfa_plog_eid event,
387 u16 misc, struct fchs_s *fchdr)
389 struct bfa_plog_rec_s lp;
390 u32 *tmp_int = (u32 *) fchdr;
391 u32 ints[BFA_PL_INT_LOG_SZ];
393 if (plog->plog_enabled) {
394 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
396 ints[0] = tmp_int[0];
397 ints[1] = tmp_int[1];
398 ints[2] = tmp_int[4];
400 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
/*
 * Like bfa_plog_fchdr() but also logs the first payload word, so the
 * record carries 4 ints.  NOTE(review): the pld_w0 parameter and the
 * "ints[3] = pld_w0" assignment are presumably on lines dropped from
 * this listing -- confirm against the full source.
 */
405 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
406 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
409 struct bfa_plog_rec_s lp;
410 u32 *tmp_int = (u32 *) fchdr;
411 u32 ints[BFA_PL_INT_LOG_SZ];
413 if (plog->plog_enabled) {
414 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
416 ints[0] = tmp_int[0];
417 ints[1] = tmp_int[1];
418 ints[2] = tmp_int[4];
421 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
427 * fcxp_pvt BFA FCXP private functions
/*
 * Carve the FCXP request and response payload pools out of the DMA-able
 * meminfo region: record base kva/pa for each pool, zero them, and
 * advance the meminfo DMA cursors past both pools.
 */
431 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
437 dm_kva = bfa_meminfo_dma_virt(mi);
438 dm_pa = bfa_meminfo_dma_phys(mi);
440 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
443 * Initialize the fcxp req payload list
445 mod->req_pld_list_kva = dm_kva;
446 mod->req_pld_list_pa = dm_pa;
447 dm_kva += buf_pool_sz;
448 dm_pa += buf_pool_sz;
449 memset(mod->req_pld_list_kva, 0, buf_pool_sz);
452 * Initialize the fcxp rsp payload list
454 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
455 mod->rsp_pld_list_kva = dm_kva;
456 mod->rsp_pld_list_pa = dm_pa;
457 dm_kva += buf_pool_sz;
458 dm_pa += buf_pool_sz;
459 memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
461 bfa_meminfo_dma_virt(mi) = dm_kva;
462 bfa_meminfo_dma_phys(mi) = dm_pa;
/*
 * Claim the kva region for the bfa_fcxp_s array, zero it, and thread
 * every element onto the free queue with its resume-wait entry
 * initialized.  Advances the meminfo kva cursor past the array.
 * NOTE(review): the per-iteration fcxp++ / tag assignment lines appear
 * to be dropped from this listing.
 */
466 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
469 struct bfa_fcxp_s *fcxp;
471 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
472 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
474 INIT_LIST_HEAD(&mod->fcxp_free_q);
475 INIT_LIST_HEAD(&mod->fcxp_active_q);
477 mod->fcxp_list = fcxp;
479 for (i = 0; i < mod->num_fcxps; i++) {
480 fcxp->fcxp_mod = mod;
483 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
484 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
485 fcxp->reqq_waiting = BFA_FALSE;
490 bfa_meminfo_kva(mi) = (void *)fcxp;
/*
 * Report memory requirements for the FCXP module: DMA space for per-fcxp
 * request and response payload buffers (response sized IBUF in min_cfg,
 * LBUF otherwise -- the else line is dropped from this listing) plus
 * non-DMA space for the bfa_fcxp_s array.
 */
494 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
497 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
499 if (num_fcxp_reqs == 0)
503 * Account for req/rsp payload
505 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
506 if (cfg->drvcfg.min_cfg)
507 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
509 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
512 * Account for fcxp structs
514 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
/*
 * Module attach: reset module state, size the payload buffers (response
 * buffer grows to LBUF unless running minimal config), then claim the
 * DMA payload pools and the fcxp array from meminfo.
 */
518 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
519 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
521 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
523 memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
525 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
528 * Initialize FCXP request and response payload sizes.
530 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
531 if (!cfg->drvcfg.min_cfg)
532 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
534 INIT_LIST_HEAD(&mod->wait_q);
536 claim_fcxp_req_rsp_mem(mod, meminfo);
537 claim_fcxps_mem(mod, meminfo);
/* Empty module lifecycle hooks: FCXP needs no work at detach/start/stop. */
541 bfa_fcxp_detach(struct bfa_s *bfa)
546 bfa_fcxp_start(struct bfa_s *bfa)
551 bfa_fcxp_stop(struct bfa_s *bfa)
/*
 * IOC failure hook: fail every in-flight fcxp.  Response-path fcxps
 * (caller == NULL) get their completion invoked directly; driver-issued
 * ones are completed through the deferred callback queue with
 * BFA_STATUS_IOC_FAILURE.
 */
556 bfa_fcxp_iocdisable(struct bfa_s *bfa)
558 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
559 struct bfa_fcxp_s *fcxp;
560 struct list_head *qe, *qen;
562 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
563 fcxp = (struct bfa_fcxp_s *) qe;
564 if (fcxp->caller == NULL) {
565 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
566 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
569 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
570 bfa_cb_queue(bfa, &fcxp->hcb_qe,
571 __bfa_fcxp_send_cbfn, fcxp);
/*
 * Pop an fcxp off the free queue and move it to the active queue.
 * NOTE(review): the NULL-check and return lines are dropped from this
 * listing; presumably returns NULL when the free queue is empty.
 */
576 static struct bfa_fcxp_s *
577 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
579 struct bfa_fcxp_s *fcxp;
581 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
584 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
/*
 * Initialize one direction (request or response) of an fcxp: record the
 * caller-supplied SG address/length callbacks, and allocate sgpgs when
 * more than the inline SGE count is needed.  The r_* output parameters
 * point into the fcxp itself (see bfa_fcxp_init()).
 * NOTE(review): several parameter and assignment lines are dropped from
 * this listing (use_ibuf/num_sgles handling).
 */
590 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
594 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
595 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
596 struct list_head *r_sgpg_q,
598 bfa_fcxp_get_sgaddr_t sga_cbfn,
599 bfa_fcxp_get_sglen_t sglen_cbfn)
602 bfa_assert(bfa != NULL);
604 bfa_trc(bfa, fcxp->fcxp_tag);
609 bfa_assert(*sga_cbfn != NULL);
610 bfa_assert(*sglen_cbfn != NULL);
613 *r_sga_cbfn = sga_cbfn;
614 *r_sglen_cbfn = sglen_cbfn;
619 * alloc required sgpgs
621 if (n_sgles > BFI_SGE_INLINE)
/*
 * Initialize a freshly allocated fcxp: remember the caller and set up
 * both the request and response directions via bfa_fcxp_init_reqrsp().
 */
628 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
629 void *caller, struct bfa_s *bfa, int nreq_sgles,
630 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
631 bfa_fcxp_get_sglen_t req_sglen_cbfn,
632 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
633 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
636 bfa_assert(bfa != NULL);
638 bfa_trc(bfa, fcxp->fcxp_tag);
640 fcxp->caller = caller;
642 bfa_fcxp_init_reqrsp(fcxp, bfa,
643 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
644 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
645 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
647 bfa_fcxp_init_reqrsp(fcxp, bfa,
648 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
649 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
650 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
/*
 * Return an fcxp to the pool.  If a waiter is queued, hand the fcxp
 * straight to it (re-initialized with the waiter's parameters) instead
 * of freeing; otherwise move it from the active to the free queue.
 */
655 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
657 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
658 struct bfa_fcxp_wqe_s *wqe;
660 bfa_q_deq(&mod->wait_q, &wqe);
662 bfa_trc(mod->bfa, fcxp->fcxp_tag);
664 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
665 wqe->nrsp_sgles, wqe->req_sga_cbfn,
666 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
667 wqe->rsp_sglen_cbfn);
669 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
673 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
675 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
/* No-op completion used when the sender supplied no callback. */
679 bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
680 bfa_status_t req_status, u32 rsp_len,
681 u32 resid_len, struct fchs_s *rsp_fchs)
683 /* discarded fcxp completion */
/*
 * Deferred completion: replay the response parameters saved on the fcxp
 * into the sender's callback.
 */
687 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
689 struct bfa_fcxp_s *fcxp = cbarg;
692 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
693 fcxp->rsp_status, fcxp->rsp_len,
694 fcxp->residue_len, &fcxp->rsp_fchs);
/*
 * Firmware send-completion handler.  Byte-swaps the response lengths,
 * clears residue on success (firmware workaround, see @todo below),
 * logs the response, then either invokes the completion directly (no
 * caller context: response-path fcxp) or defers it through the hcb
 * queue after saving the response parameters on the fcxp.
 */
701 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
703 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
704 struct bfa_fcxp_s *fcxp;
705 u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
707 bfa_trc(bfa, fcxp_tag);
709 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
712 * @todo f/w should not set residue to non-0 when everything
715 if (fcxp_rsp->req_status == BFA_STATUS_OK)
716 fcxp_rsp->residue_len = 0;
718 fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
720 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
722 bfa_assert(fcxp->send_cbfn != NULL);
724 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
726 if (fcxp->send_cbfn != NULL) {
727 bfa_trc(mod->bfa, (NULL == fcxp->caller));
728 if (fcxp->caller == NULL) {
729 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
730 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
731 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
733 * fcxp automatically freed on return from the callback
737 fcxp->rsp_status = fcxp_rsp->req_status;
738 fcxp->rsp_len = fcxp_rsp->rsp_len;
739 fcxp->residue_len = fcxp_rsp->residue_len;
740 fcxp->rsp_fchs = fcxp_rsp->fchs;
742 bfa_cb_queue(bfa, &fcxp->hcb_qe,
743 __bfa_fcxp_send_cbfn, fcxp);
746 bfa_trc(bfa, (NULL == fcxp->send_cbfn));
/*
 * Fill the two-entry inline SGE array: sge[0] describes the payload
 * buffer, and the trailing entry (written after the dropped "sge++"
 * line, presumably the page-dlen terminator with sga_zero) closes the
 * list -- confirm against the full source.
 */
751 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
753 union bfi_addr_u sga_zero = { {0} };
755 sge->sg_len = reqlen;
756 sge->flags = BFI_SGE_DATA_LAST;
757 bfa_dma_addr_set(sge[0].sga, req_pa);
762 sge->sg_len = reqlen;
763 sge->flags = BFI_SGE_PGDLEN;
/*
 * Trace an outgoing fcxp into the port log: header + first payload word
 * when the internal request buffer is in use, header only otherwise.
 */
768 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
775 if (fcxp->use_ireqbuf) {
777 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
779 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
781 reqlen + sizeof(struct fchs_s), fchs,
784 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
786 reqlen + sizeof(struct fchs_s),
790 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
791 reqlen + sizeof(struct fchs_s), fchs);
/*
 * Trace a received fcxp response into the port log; mirrors
 * hal_fcxp_tx_plog() for the RX direction (payload word included only
 * when the internal response buffer holds the data).
 */
796 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
797 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
799 if (fcxp_rsp->rsp_len > 0) {
800 if (fcxp->use_irspbuf) {
802 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
804 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
806 (u16) fcxp_rsp->rsp_len,
807 &fcxp_rsp->fchs, pld_w0);
809 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
811 (u16) fcxp_rsp->rsp_len,
815 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
816 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
821 * Handler to resume sending fcxp when space is available in cpe queue.
/*
 * Request-queue resume callback registered in claim_fcxps_mem(): clears
 * the waiting flag, takes the now-available CQ entry and queues the
 * pending send.
 */
824 bfa_fcxp_qresume(void *cbarg)
826 struct bfa_fcxp_s *fcxp = cbarg;
827 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
828 struct bfi_fcxp_send_req_s *send_req;
830 fcxp->reqq_waiting = BFA_FALSE;
831 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
832 bfa_fcxp_queue(fcxp, send_req);
836 * Queue fcxp send request to firmware.
/*
 * Build the BFI send request from the fcxp's req/rsp info and produce
 * it on the FCXP request queue.  Rport-bound sends carry the firmware
 * rport handle and its max frame size (defaulting to FC_MAX_PDUSZ when
 * unset); the else-branch lines for the no-rport case are partially
 * dropped from this listing.  SGEs point at the internal DMA buffers
 * when use_ireqbuf/use_irspbuf are set, otherwise at the caller's
 * single SG element (only one non-inline SGE is supported, see the
 * asserts).
 */
839 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
841 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
842 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
843 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
844 struct bfa_rport_s *rport = reqi->bfa_rport;
846 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
849 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
851 send_req->rport_fw_hndl = rport->fw_handle;
852 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
853 if (send_req->max_frmsz == 0)
854 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
856 send_req->rport_fw_hndl = 0;
857 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
860 send_req->vf_id = cpu_to_be16(reqi->vf_id);
861 send_req->lp_tag = reqi->lp_tag;
862 send_req->class = reqi->class;
863 send_req->rsp_timeout = rspi->rsp_timeout;
864 send_req->cts = reqi->cts;
865 send_req->fchs = reqi->fchs;
867 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
868 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
873 if (fcxp->use_ireqbuf == 1) {
874 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
875 BFA_FCXP_REQ_PLD_PA(fcxp));
877 if (fcxp->nreq_sgles > 0) {
878 bfa_assert(fcxp->nreq_sgles == 1);
879 hal_fcxp_set_local_sges(send_req->req_sge,
881 fcxp->req_sga_cbfn(fcxp->caller,
884 bfa_assert(reqi->req_tot_len == 0);
885 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
892 if (fcxp->use_irspbuf == 1) {
893 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
895 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
896 BFA_FCXP_RSP_PLD_PA(fcxp));
899 if (fcxp->nrsp_sgles > 0) {
900 bfa_assert(fcxp->nrsp_sgles == 1);
901 hal_fcxp_set_local_sges(send_req->rsp_sge,
903 fcxp->rsp_sga_cbfn(fcxp->caller,
906 bfa_assert(rspi->rsp_maxlen == 0);
907 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
911 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
913 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
915 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
916 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
920 * hal_fcxp_api BFA FCXP API
924 * Allocate an FCXP instance to send a response or to send a request
925 * that has a response. Request/response buffers are allocated by caller.
927 * @param[in] bfa BFA bfa instance
928 * @param[in] nreq_sgles Number of SG elements required for request
929 * buffer. 0, if fcxp internal buffers are used.
930 * Use bfa_fcxp_get_reqbuf() to get the
931 * internal req buffer.
932 * @param[in] req_sgles SG elements describing request buffer. Will be
933 * copied in by BFA and hence can be freed on
934 * return from this function.
935 * @param[in] get_req_sga function ptr to be called to get a request SG
936 * Address (given the sge index).
937 * @param[in] get_req_sglen function ptr to be called to get a request SG
938 * len (given the sge index).
939 * @param[in] get_rsp_sga function ptr to be called to get a response SG
940 * Address (given the sge index).
941 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
942 * len (given the sge index).
944 * @return FCXP instance. NULL on failure.
/* Pops a free fcxp (NULL-check line dropped from this listing) and
 * initializes it with the caller's SG callbacks before returning it. */
947 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
948 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
949 bfa_fcxp_get_sglen_t req_sglen_cbfn,
950 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
951 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
953 struct bfa_fcxp_s *fcxp = NULL;
955 bfa_assert(bfa != NULL);
957 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
961 bfa_trc(bfa, fcxp->fcxp_tag);
963 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
964 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
970 * Get the internal request buffer pointer
972 * @param[in] fcxp BFA fcxp pointer
974 * @return pointer to the internal request buffer
/* Index into the request payload pool by fcxp tag; valid only when
 * the fcxp was allocated with internal request buffering. */
977 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
979 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
982 bfa_assert(fcxp->use_ireqbuf == 1);
983 reqbuf = ((u8 *)mod->req_pld_list_kva) +
984 fcxp->fcxp_tag * mod->req_pld_sz;
/* Size of one internal request payload buffer (same for all fcxps). */
989 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
991 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
993 return mod->req_pld_sz;
997 * Get the internal response buffer pointer
999 * @param[in] fcxp BFA fcxp pointer
1001 * @return pointer to the internal response buffer
/* Mirrors bfa_fcxp_get_reqbuf() for the response payload pool. */
1004 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1006 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1009 bfa_assert(fcxp->use_irspbuf == 1);
1011 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1012 fcxp->fcxp_tag * mod->rsp_pld_sz;
/* Free the fcxp back to the module pool (via bfa_fcxp_put, on a line
 * dropped from this listing). */
1019 * @param[in] fcxp BFA fcxp pointer
1024 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1026 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1028 bfa_assert(fcxp != NULL);
1029 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1034 * Send a FCXP request
1036 * @param[in] fcxp BFA fcxp pointer
1037 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1038 * @param[in] vf_id virtual Fabric ID
1039 * @param[in] lp_tag lport tag
1040 * @param[in] cts use Continuous sequence
1041 * @param[in] cos fc Class of Service
1042 * @param[in] reqlen request length, does not include FCHS length
1043 * @param[in] fchs fc Header Pointer. The header content will be copied
1046 * @param[in] cbfn call back function to be called on receiving
1048 * @param[in] cbarg arg for cbfn
1049 * @param[in] rsp_timeout
1052 * @return bfa_status_t
/*
 * Records the request/response parameters on the fcxp, then either
 * queues the send immediately or, when the CPE queue is full, parks the
 * fcxp on the request-queue wait list (bfa_fcxp_qresume() finishes the
 * send later).
 */
1055 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1056 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1057 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1058 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1060 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1061 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1062 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1063 struct bfi_fcxp_send_req_s *send_req;
1065 bfa_trc(bfa, fcxp->fcxp_tag);
1068 * setup request/response info
1070 reqi->bfa_rport = rport;
1071 reqi->vf_id = vf_id;
1072 reqi->lp_tag = lp_tag;
1074 rspi->rsp_timeout = rsp_timeout;
1077 reqi->req_tot_len = reqlen;
1078 rspi->rsp_maxlen = rsp_maxlen;
/* Substitute the no-op completion so a NULL cbfn is never invoked. */
1079 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1080 fcxp->send_cbarg = cbarg;
1083 * If no room in CPE queue, wait for space in request queue
1085 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1087 bfa_trc(bfa, fcxp->fcxp_tag);
1088 fcxp->reqq_waiting = BFA_TRUE;
1089 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1093 bfa_fcxp_queue(fcxp, send_req);
/* Abort an in-flight fcxp.  NOTE(review): only a trace plus
 * BFA_STATUS_OK is visible -- abort appears unimplemented; confirm. */
1099 * @param[in] fcxp BFA fcxp pointer
1104 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1106 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1108 return BFA_STATUS_OK;
/*
 * Register a waiter for an fcxp when the free pool is exhausted (the
 * assert documents that precondition).  All allocation parameters are
 * saved on the wqe; bfa_fcxp_put() services the wait queue in FIFO
 * order and invokes alloc_cbfn with the recycled fcxp.
 */
1112 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1113 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1114 void *caller, int nreq_sgles,
1115 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1116 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1117 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1118 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1120 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1122 bfa_assert(list_empty(&mod->fcxp_free_q));
1124 wqe->alloc_cbfn = alloc_cbfn;
1125 wqe->alloc_cbarg = alloc_cbarg;
1126 wqe->caller = caller;
1128 wqe->nreq_sgles = nreq_sgles;
1129 wqe->nrsp_sgles = nrsp_sgles;
1130 wqe->req_sga_cbfn = req_sga_cbfn;
1131 wqe->req_sglen_cbfn = req_sglen_cbfn;
1132 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1133 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1135 list_add_tail(&wqe->qe, &mod->wait_q);
/* Cancel a pending alloc-wait; the list_del line is dropped from this
 * listing.  Caller must still be on the wait queue (asserted). */
1139 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1141 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1143 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
/*
 * Discard an fcxp the caller no longer wants: cancel the reqq wait and
 * free immediately if the send never left the driver; otherwise detach
 * the caller by pointing the completion at the null handler so the
 * eventual firmware response is silently dropped.
 */
1148 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1151 * If waiting for room in request queue, cancel reqq wait
1154 if (fcxp->reqq_waiting) {
1155 fcxp->reqq_waiting = BFA_FALSE;
1156 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1157 bfa_fcxp_free(fcxp);
1161 fcxp->send_cbfn = bfa_fcxp_null_comp;
1167 * hal_fcxp_public BFA FCXP public functions
/* FCXP message dispatcher: routes firmware messages by msg_id; unknown
 * ids are traced (and, per the dropped lines, presumably asserted). */
1171 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1173 switch (msg->mhdr.msg_id) {
1174 case BFI_FCXP_I2H_SEND_RSP:
1175 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1179 bfa_trc(bfa, msg->mhdr.msg_id);
/* Largest response payload the module's internal buffers can hold. */
1185 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1187 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1189 return mod->rsp_pld_sz;
1194 * BFA LPS state machine functions
1198 * Init state -- no login
/*
 * LOGIN either sends immediately or waits for reqq space; the fdisc
 * flag (tested on a dropped line) selects the FDISC vs FLOGI plog text.
 * LOGOUT in init completes trivially; DELETE/RX_CVL/OFFLINE are
 * ignored; a stale FWRSP can arrive after a fabric-loopback discard.
 * (switch/break/default scaffolding lines are dropped from this
 * listing.)
 */
1201 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1203 bfa_trc(lps->bfa, lps->lp_tag);
1204 bfa_trc(lps->bfa, event);
1207 case BFA_LPS_SM_LOGIN:
1208 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1209 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1210 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1212 bfa_sm_set_state(lps, bfa_lps_sm_login);
1213 bfa_lps_send_login(lps);
1217 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1218 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1220 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1221 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1224 case BFA_LPS_SM_LOGOUT:
1225 bfa_lps_logout_comp(lps);
1228 case BFA_LPS_SM_DELETE:
1232 case BFA_LPS_SM_RX_CVL:
1233 case BFA_LPS_SM_OFFLINE:
1236 case BFA_LPS_SM_FWRSP:
1238 * Could happen when fabric detects loopback and discards
1239 * the lps request. Fw will eventually sent out the timeout
1245 bfa_sm_fault(lps->bfa, event);
1250 * login is in progress -- awaiting response from firmware
/*
 * FWRSP with BFA_STATUS_OK moves to online; any other status returns
 * to init.  Either way the FDISC/FLOGI result is written to the port
 * log (fdisc flag tested on a dropped line) and the login completion
 * is delivered.  OFFLINE aborts back to init.
 */
1253 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1255 bfa_trc(lps->bfa, lps->lp_tag);
1256 bfa_trc(lps->bfa, event);
1259 case BFA_LPS_SM_FWRSP:
1260 if (lps->status == BFA_STATUS_OK) {
1261 bfa_sm_set_state(lps, bfa_lps_sm_online);
1263 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1264 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1266 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1267 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1269 bfa_sm_set_state(lps, bfa_lps_sm_init);
1271 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1272 BFA_PL_EID_LOGIN, 0,
1273 "FDISC Fail (RJT or timeout)");
1275 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1276 BFA_PL_EID_LOGIN, 0,
1277 "FLOGI Fail (RJT or timeout)");
1279 bfa_lps_login_comp(lps);
1282 case BFA_LPS_SM_OFFLINE:
1283 bfa_sm_set_state(lps, bfa_lps_sm_init);
1287 bfa_sm_fault(lps->bfa, event);
1292 * login pending - awaiting space in request queue
/*
 * RESUME (reqq space freed) proceeds to the login state; the actual
 * send is on a dropped line.  OFFLINE cancels the reqq wait.  RX_CVL is
 * absorbed here because the login was never sent (see comment below).
 */
1295 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1297 bfa_trc(lps->bfa, lps->lp_tag);
1298 bfa_trc(lps->bfa, event);
1301 case BFA_LPS_SM_RESUME:
1302 bfa_sm_set_state(lps, bfa_lps_sm_login);
1305 case BFA_LPS_SM_OFFLINE:
1306 bfa_sm_set_state(lps, bfa_lps_sm_init);
1307 bfa_reqq_wcancel(&lps->wqe);
1310 case BFA_LPS_SM_RX_CVL:
1312 * Login was not even sent out; so when getting out
1313 * of this state, it will appear like a login retry
1314 * after Clear virtual link
1319 bfa_sm_fault(lps->bfa, event);
/*
 * lport is logged in. Logout either goes straight to firmware or waits
 * for request-queue space; a Clear-Virtual-Link (FCoE) or offline/delete
 * drops back to init.
 */
1327 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1329 bfa_trc(lps->bfa, lps->lp_tag);
1330 bfa_trc(lps->bfa, event);
1333 case BFA_LPS_SM_LOGOUT:
/* Queue full -> park in logowait; otherwise send LOGO now. */
1334 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1335 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1336 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1338 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1339 bfa_lps_send_logout(lps);
1341 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1342 BFA_PL_EID_LOGO, 0, "Logout");
1345 case BFA_LPS_SM_RX_CVL:
1346 bfa_sm_set_state(lps, bfa_lps_sm_init);
1348 /* Let the vport module know about this event */
1349 bfa_lps_cvl_event(lps);
1350 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1351 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1354 case BFA_LPS_SM_OFFLINE:
1355 case BFA_LPS_SM_DELETE:
1356 bfa_sm_set_state(lps, bfa_lps_sm_init);
1360 bfa_sm_fault(lps->bfa, event);
1365 * logout in progress - awaiting firmware response
/*
 * LOGO request is with firmware; FWRSP completes the logout, offline
 * abandons it. Both paths end in init.
 */
1368 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1370 bfa_trc(lps->bfa, lps->lp_tag);
1371 bfa_trc(lps->bfa, event);
1374 case BFA_LPS_SM_FWRSP:
1375 bfa_sm_set_state(lps, bfa_lps_sm_init);
1376 bfa_lps_logout_comp(lps);
1379 case BFA_LPS_SM_OFFLINE:
1380 bfa_sm_set_state(lps, bfa_lps_sm_init);
1384 bfa_sm_fault(lps->bfa, event);
1389 * logout pending -- awaiting space in request queue
/*
 * Mirror of loginwait for the logout path: resume sends the LOGO,
 * offline cancels the queue wait.
 */
1392 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1394 bfa_trc(lps->bfa, lps->lp_tag);
1395 bfa_trc(lps->bfa, event);
1398 case BFA_LPS_SM_RESUME:
1399 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1400 bfa_lps_send_logout(lps);
1403 case BFA_LPS_SM_OFFLINE:
1404 bfa_sm_set_state(lps, bfa_lps_sm_init);
1405 bfa_reqq_wcancel(&lps->wqe);
1409 bfa_sm_fault(lps->bfa, event);
1416 * lps_pvt BFA LPS private functions
1420 * return memory requirement
/*
 * Add the LPS array size to the non-DMA memory requirement; min_cfg
 * shrinks the array to BFA_LPS_MIN_LPORTS entries.
 */
1423 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1426 if (cfg->drvcfg.min_cfg)
1427 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1429 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1433 * bfa module attach at initialization time
/*
 * Carve the lps array out of the pre-claimed KVA area, initialise each
 * entry (tag, request queue, queue-wait element) and put them all on the
 * free list.
 */
1436 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1437 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1439 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1440 struct bfa_lps_s *lps;
1443 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1444 mod->num_lps = BFA_LPS_MAX_LPORTS;
1445 if (cfg->drvcfg.min_cfg)
1446 mod->num_lps = BFA_LPS_MIN_LPORTS;
1448 mod->num_lps = BFA_LPS_MAX_LPORTS;
/* Advance the meminfo KVA cursor past our array. */
1449 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1451 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1453 INIT_LIST_HEAD(&mod->lps_free_q);
1454 INIT_LIST_HEAD(&mod->lps_active_q);
1456 for (i = 0; i < mod->num_lps; i++, lps++) {
1458 lps->lp_tag = (u8) i;
1459 lps->reqq = BFA_REQQ_LPS;
1460 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1461 list_add_tail(&lps->qe, &mod->lps_free_q);
/* Module lifecycle stubs -- LPS needs no work on detach/start/stop. */
1466 bfa_lps_detach(struct bfa_s *bfa)
1471 bfa_lps_start(struct bfa_s *bfa)
1476 bfa_lps_stop(struct bfa_s *bfa)
1481 * IOC in disabled state -- consider all lps offline
/*
 * Walk the active list with the _safe iterator because the OFFLINE
 * event may move entries back to the free list.
 */
1484 bfa_lps_iocdisable(struct bfa_s *bfa)
1486 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1487 struct bfa_lps_s *lps;
1488 struct list_head *qe, *qen;
1490 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1491 lps = (struct bfa_lps_s *) qe;
1492 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1497 * Firmware login response
/*
 * Decode the firmware's login response, cache the per-status fields on
 * the lps, then feed FWRSP into the state machine.
 */
1500 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1502 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1503 struct bfa_lps_s *lps;
1505 bfa_assert(rsp->lp_tag < mod->num_lps);
1506 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1508 lps->status = rsp->status;
1509 switch (rsp->status) {
/* Success: capture fabric parameters reported by firmware. */
1511 lps->fport = rsp->f_port;
1512 lps->npiv_en = rsp->npiv_en;
1513 lps->lp_pid = rsp->lp_pid;
1514 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
1515 lps->pr_pwwn = rsp->port_name;
1516 lps->pr_nwwn = rsp->node_name;
1517 lps->auth_req = rsp->auth_req;
1518 lps->lp_mac = rsp->lp_mac;
1519 lps->brcd_switch = rsp->brcd_switch;
1520 lps->fcf_mac = rsp->fcf_mac;
/* Fabric LS_RJT: keep the reject reason/explanation for callers. */
1524 case BFA_STATUS_FABRIC_RJT:
1525 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1526 lps->lsrjt_expl = rsp->lsrjt_expl;
1530 case BFA_STATUS_EPROTOCOL:
1531 lps->ext_status = rsp->ext_status;
1536 /* Nothing to do with other status */
1540 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1544 * Firmware logout response
/* Look up the lps by tag and drive FWRSP into the state machine. */
1547 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1549 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1550 struct bfa_lps_s *lps;
1552 bfa_assert(rsp->lp_tag < mod->num_lps);
1553 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1555 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1559 * Firmware received a Clear virtual link request (for FCoE)
/* Deliver the FCoE Clear-Virtual-Link event to the tagged lps. */
1562 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1564 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1565 struct bfa_lps_s *lps;
1567 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1569 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1573 * Space is available in request queue, resume queueing request to firmware.
/* Queue-wait callback registered in bfa_lps_attach(). */
1576 bfa_lps_reqq_resume(void *lps_arg)
1578 struct bfa_lps_s *lps = lps_arg;
1580 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1584 * lps is freed -- triggered by vport delete
/* Return the lps to the module free list. */
1587 bfa_lps_free(struct bfa_lps_s *lps)
1589 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1593 list_add_tail(&lps->qe, &mod->lps_free_q);
1597 * send login request to firmware
/*
 * Build a BFI_LPS_H2I_LOGIN_REQ from the parameters cached on the lps
 * (by bfa_lps_flogi()/bfa_lps_fdisc()) and hand it to the request queue.
 * Caller guarantees queue space (state machine checked before entry).
 */
1600 bfa_lps_send_login(struct bfa_lps_s *lps)
1602 struct bfi_lps_login_req_s *m;
1604 m = bfa_reqq_next(lps->bfa, lps->reqq);
1607 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1608 bfa_lpuid(lps->bfa));
1610 m->lp_tag = lps->lp_tag;
1611 m->alpa = lps->alpa;
1612 m->pdu_size = cpu_to_be16(lps->pdusz);
1613 m->pwwn = lps->pwwn;
1614 m->nwwn = lps->nwwn;
1615 m->fdisc = lps->fdisc;
1616 m->auth_en = lps->auth_en;
1618 bfa_reqq_produce(lps->bfa, lps->reqq);
1622 * send logout request to firmware
/* Build and queue a BFI_LPS_H2I_LOGOUT_REQ for this lps. */
1625 bfa_lps_send_logout(struct bfa_lps_s *lps)
1627 struct bfi_lps_logout_req_s *m;
1629 m = bfa_reqq_next(lps->bfa, lps->reqq);
1632 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1633 bfa_lpuid(lps->bfa));
1635 m->lp_tag = lps->lp_tag;
1636 m->port_name = lps->pwwn;
1637 bfa_reqq_produce(lps->bfa, lps->reqq);
1641 * Indirect login completion handler for non-fcs
/* Deferred-callback leg: invoked from the bfa callback queue. */
1644 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1646 struct bfa_lps_s *lps = arg;
1652 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1654 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1658 * Login completion handler -- direct call for fcs, queue for others
/*
 * Non-fcs callers get the completion queued (bfa_cb_queue); fcs callers
 * are called back synchronously below.
 */
1661 bfa_lps_login_comp(struct bfa_lps_s *lps)
1663 if (!lps->bfa->fcs) {
1664 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1670 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1672 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1676 * Indirect logout completion handler for non-fcs
/* Deferred-callback leg for logout completion. */
1679 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1681 struct bfa_lps_s *lps = arg;
1687 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1691 * Logout completion handler -- direct call for fcs, queue for others
1694 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1696 if (!lps->bfa->fcs) {
1697 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1702 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1706 * Clear virtual link completion handler for non-fcs
/* Deferred-callback leg for the FCoE Clear-Virtual-Link notification. */
1709 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1711 struct bfa_lps_s *lps = arg;
1716 /* Clear virtual link to base port will result in link down */
1718 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1722 * Received Clear virtual link event --direct call for fcs,
/* queue for others (same split as the login/logout completions). */
1726 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1728 if (!lps->bfa->fcs) {
1729 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1734 /* Clear virtual link to base port will result in link down */
1736 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1742 * lps_public BFA LPS public functions
/* Max vports supported: 190 on CT (FCoE) ASICs, 255 on CB. */
1746 bfa_lps_get_max_vport(struct bfa_s *bfa)
1748 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1749 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1751 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1755 * Allocate a lport srvice tag.
/*
 * Pop an lps off the free list, move it to the active list and reset
 * its state machine to init. NULL-handling of an empty free list is in
 * the elided lines between the dequeue and the list_add.
 */
1758 bfa_lps_alloc(struct bfa_s *bfa)
1760 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1761 struct bfa_lps_s *lps = NULL;
1763 bfa_q_deq(&mod->lps_free_q, &lps);
1768 list_add_tail(&lps->qe, &mod->lps_active_q);
1770 bfa_sm_set_state(lps, bfa_lps_sm_init);
1775 * Free lport service tag. This can be called anytime after an alloc.
1776 * No need to wait for any pending login/logout completions.
1779 bfa_lps_delete(struct bfa_lps_s *lps)
1781 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1785 * Initiate a lport login.
/*
 * Stash the FLOGI parameters on the lps (elided assignment lines) and
 * kick the state machine; fdisc=FALSE distinguishes FLOGI from FDISC.
 */
1788 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1789 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1796 lps->fdisc = BFA_FALSE;
1797 lps->auth_en = auth_en;
1798 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1802 * Initiate a lport fdisc login.
/* FDISC path: authentication is never used for NPIV logins here. */
1805 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1813 lps->fdisc = BFA_TRUE;
1814 lps->auth_en = BFA_FALSE;
1815 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1820 * Initiate a lport FDSIC logout.
1823 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1825 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1830 * Return lport services tag given the pid
/*
 * Linear scan of the lps array for a matching FC address; falls back to
 * the base port tag (elided return) when no match is found.
 */
1833 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1835 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1836 struct bfa_lps_s *lps;
1839 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1840 if (lps->lp_pid == pid)
1844 /* Return base port tag anyway */
1850 * return port id assigned to the base lport
/* Tag 0 is always the base lport. */
1853 bfa_lps_get_base_pid(struct bfa_s *bfa)
1855 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1857 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1861 * LPS firmware message class handler.
/*
 * Demultiplex BFI_MC_LPS messages from firmware to the per-message
 * response handlers; unknown ids are only traced.
 */
1864 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1866 union bfi_lps_i2h_msg_u msg;
1868 bfa_trc(bfa, m->mhdr.msg_id);
1871 switch (m->mhdr.msg_id) {
1872 case BFI_LPS_H2I_LOGIN_RSP:
1873 bfa_lps_login_rsp(bfa, msg.login_rsp);
1876 case BFI_LPS_H2I_LOGOUT_RSP:
1877 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
1880 case BFI_LPS_H2I_CVL_EVENT:
1881 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
1885 bfa_trc(bfa, m->mhdr.msg_id);
1891 * FC PORT state machine functions
/*
 * Initial fcport state. START issues the firmware enable (waiting for
 * request-queue space if needed); DISABLE records a persistently
 * disabled port; HWFAIL parks the SM until the IOC recovers.
 */
1894 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
1895 enum bfa_fcport_sm_event event)
1897 bfa_trc(fcport->bfa, event);
1900 case BFA_FCPORT_SM_START:
1902 * Start event after IOC is configured and BFA is started.
1904 if (bfa_fcport_send_enable(fcport)) {
1905 bfa_trc(fcport->bfa, BFA_TRUE);
1906 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
1908 bfa_trc(fcport->bfa, BFA_FALSE);
1909 bfa_sm_set_state(fcport,
1910 bfa_fcport_sm_enabling_qwait);
1914 case BFA_FCPORT_SM_ENABLE:
1916 * Port is persistently configured to be in enabled state. Do
1917 * not change state. Port enabling is done when START event is
1922 case BFA_FCPORT_SM_DISABLE:
1924 * If a port is persistently configured to be disabled, the
1925 * first event will a port disable request.
1927 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
1930 case BFA_FCPORT_SM_HWFAIL:
1931 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
1935 bfa_sm_fault(fcport->bfa, event);
/*
 * Port enable is pending because the request queue was full; QRESUME
 * retries the enable, STOP/DISABLE/HWFAIL cancel the queue wait.
 */
1940 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
1941 enum bfa_fcport_sm_event event)
1943 char pwwn_buf[BFA_STRING_32];
1944 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
1945 bfa_trc(fcport->bfa, event);
1948 case BFA_FCPORT_SM_QRESUME:
1949 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
1950 bfa_fcport_send_enable(fcport);
1953 case BFA_FCPORT_SM_STOP:
1954 bfa_reqq_wcancel(&fcport->reqq_wait);
1955 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
1958 case BFA_FCPORT_SM_ENABLE:
1960 * Already enable is in progress.
1964 case BFA_FCPORT_SM_DISABLE:
1966 * Just send disable request to firmware when room becomes
1967 * available in request queue.
1969 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
1970 bfa_reqq_wcancel(&fcport->reqq_wait);
1971 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
1972 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
1973 wwn2str(pwwn_buf, fcport->pwwn);
1974 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
1975 "Base port disabled: WWN = %s\n", pwwn_buf);
1978 case BFA_FCPORT_SM_LINKUP:
1979 case BFA_FCPORT_SM_LINKDOWN:
1981 * Possible to get link events when doing back-to-back
1986 case BFA_FCPORT_SM_HWFAIL:
1987 bfa_reqq_wcancel(&fcport->reqq_wait);
1988 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
1992 bfa_sm_fault(fcport->bfa, event);
/*
 * Enable request is with firmware. FWRSP/LINKDOWN settle into linkdown;
 * a LINKUP may race ahead of the response and goes straight to linkup.
 */
1997 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
1998 enum bfa_fcport_sm_event event)
2000 char pwwn_buf[BFA_STRING_32];
2001 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2002 bfa_trc(fcport->bfa, event);
2005 case BFA_FCPORT_SM_FWRSP:
2006 case BFA_FCPORT_SM_LINKDOWN:
2007 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2010 case BFA_FCPORT_SM_LINKUP:
2011 bfa_fcport_update_linkinfo(fcport);
2012 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2014 bfa_assert(fcport->event_cbfn);
2015 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2018 case BFA_FCPORT_SM_ENABLE:
2020 * Already being enabled.
2024 case BFA_FCPORT_SM_DISABLE:
/* Disable while enabling: send now or wait for queue space. */
2025 if (bfa_fcport_send_disable(fcport))
2026 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2028 bfa_sm_set_state(fcport,
2029 bfa_fcport_sm_disabling_qwait);
2031 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2032 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2033 wwn2str(pwwn_buf, fcport->pwwn);
2034 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2035 "Base port disabled: WWN = %s\n", pwwn_buf);
2038 case BFA_FCPORT_SM_STOP:
2039 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2042 case BFA_FCPORT_SM_HWFAIL:
2043 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2047 bfa_sm_fault(fcport->bfa, event);
/*
 * Port enabled, link down. LINKUP captures the firmware link-state
 * payload (including FCoE FIP discovery result), raises an SCN and
 * logs the transition; DISABLE/STOP/HWFAIL leave the state.
 */
2052 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2053 enum bfa_fcport_sm_event event)
2055 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2056 char pwwn_buf[BFA_STRING_32];
2057 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2059 bfa_trc(fcport->bfa, event);
2062 case BFA_FCPORT_SM_LINKUP:
2063 bfa_fcport_update_linkinfo(fcport);
2064 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2065 bfa_assert(fcport->event_cbfn);
2066 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2067 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
/* Non-FC mode (FCoE): record FIP FCF discovery outcome. */
2068 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2070 bfa_trc(fcport->bfa,
2071 pevent->link_state.vc_fcf.fcf.fipenabled);
2072 bfa_trc(fcport->bfa,
2073 pevent->link_state.vc_fcf.fcf.fipfailed);
2075 if (pevent->link_state.vc_fcf.fcf.fipfailed)
2076 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2077 BFA_PL_EID_FIP_FCF_DISC, 0,
2078 "FIP FCF Discovery Failed");
2080 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2081 BFA_PL_EID_FIP_FCF_DISC, 0,
2082 "FIP FCF Discovered");
2085 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2086 wwn2str(pwwn_buf, fcport->pwwn);
2087 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2088 "Base port online: WWN = %s\n", pwwn_buf);
2091 case BFA_FCPORT_SM_LINKDOWN:
2093 * Possible to get link down event.
2097 case BFA_FCPORT_SM_ENABLE:
2103 case BFA_FCPORT_SM_DISABLE:
2104 if (bfa_fcport_send_disable(fcport))
2105 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2107 bfa_sm_set_state(fcport,
2108 bfa_fcport_sm_disabling_qwait);
2110 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2111 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2112 wwn2str(pwwn_buf, fcport->pwwn);
2113 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2114 "Base port disabled: WWN = %s\n", pwwn_buf);
2117 case BFA_FCPORT_SM_STOP:
2118 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2121 case BFA_FCPORT_SM_HWFAIL:
2122 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2126 bfa_sm_fault(fcport->bfa, event);
/*
 * Port enabled, link up. Every exit (DISABLE, LINKDOWN, STOP, HWFAIL)
 * resets cached link info; log severity depends on whether the port was
 * administratively disabled (INFO) or lost fabric connectivity (ERR).
 */
2131 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2132 enum bfa_fcport_sm_event event)
2134 char pwwn_buf[BFA_STRING_32];
2135 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2137 bfa_trc(fcport->bfa, event);
2140 case BFA_FCPORT_SM_ENABLE:
2146 case BFA_FCPORT_SM_DISABLE:
2147 if (bfa_fcport_send_disable(fcport))
2148 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2150 bfa_sm_set_state(fcport,
2151 bfa_fcport_sm_disabling_qwait);
2153 bfa_fcport_reset_linkinfo(fcport);
2154 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2155 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2156 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2157 wwn2str(pwwn_buf, fcport->pwwn);
2158 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2159 "Base port offline: WWN = %s\n", pwwn_buf);
2160 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2161 "Base port disabled: WWN = %s\n", pwwn_buf);
2164 case BFA_FCPORT_SM_LINKDOWN:
2165 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2166 bfa_fcport_reset_linkinfo(fcport);
2167 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2168 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2169 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2170 wwn2str(pwwn_buf, fcport->pwwn);
2171 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2172 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2173 "Base port offline: WWN = %s\n", pwwn_buf);
2175 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2176 "Base port (WWN = %s) "
2177 "lost fabric connectivity\n", pwwn_buf);
2180 case BFA_FCPORT_SM_STOP:
2181 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2182 bfa_fcport_reset_linkinfo(fcport);
2183 wwn2str(pwwn_buf, fcport->pwwn);
2184 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2185 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2186 "Base port offline: WWN = %s\n", pwwn_buf);
2188 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2189 "Base port (WWN = %s) "
2190 "lost fabric connectivity\n", pwwn_buf);
2193 case BFA_FCPORT_SM_HWFAIL:
2194 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2195 bfa_fcport_reset_linkinfo(fcport);
2196 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2197 wwn2str(pwwn_buf, fcport->pwwn);
2198 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2199 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2200 "Base port offline: WWN = %s\n", pwwn_buf);
2202 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2203 "Base port (WWN = %s) "
2204 "lost fabric connectivity\n", pwwn_buf);
2208 bfa_sm_fault(fcport->bfa, event);
/*
 * Disable is pending on request-queue space. An ENABLE arriving here
 * flips us into toggling_qwait so both operations get sequenced later.
 */
2213 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2214 enum bfa_fcport_sm_event event)
2216 bfa_trc(fcport->bfa, event);
2219 case BFA_FCPORT_SM_QRESUME:
2220 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2221 bfa_fcport_send_disable(fcport);
2224 case BFA_FCPORT_SM_STOP:
2225 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2226 bfa_reqq_wcancel(&fcport->reqq_wait);
2229 case BFA_FCPORT_SM_ENABLE:
2230 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2233 case BFA_FCPORT_SM_DISABLE:
2235 * Already being disabled.
2239 case BFA_FCPORT_SM_LINKUP:
2240 case BFA_FCPORT_SM_LINKDOWN:
2242 * Possible to get link events when doing back-to-back
2247 case BFA_FCPORT_SM_HWFAIL:
2248 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2249 bfa_reqq_wcancel(&fcport->reqq_wait);
2253 bfa_sm_fault(fcport->bfa, event);
/*
 * A disable followed by an enable are both queued behind a full request
 * queue. On QRESUME, issue the disable then immediately attempt the
 * enable (waiting again if the queue refills).
 */
2258 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2259 enum bfa_fcport_sm_event event)
2261 bfa_trc(fcport->bfa, event);
2264 case BFA_FCPORT_SM_QRESUME:
2265 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2266 bfa_fcport_send_disable(fcport);
2267 if (bfa_fcport_send_enable(fcport))
2268 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2270 bfa_sm_set_state(fcport,
2271 bfa_fcport_sm_enabling_qwait);
2274 case BFA_FCPORT_SM_STOP:
2275 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2276 bfa_reqq_wcancel(&fcport->reqq_wait);
2279 case BFA_FCPORT_SM_ENABLE:
2282 case BFA_FCPORT_SM_DISABLE:
/* A new disable collapses the toggle back into a plain disable wait. */
2283 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2286 case BFA_FCPORT_SM_LINKUP:
2287 case BFA_FCPORT_SM_LINKDOWN:
2289 * Possible to get link events when doing back-to-back
2294 case BFA_FCPORT_SM_HWFAIL:
2295 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2296 bfa_reqq_wcancel(&fcport->reqq_wait);
2300 bfa_sm_fault(fcport->bfa, event);
/*
 * Disable request is with firmware; FWRSP lands in disabled. An ENABLE
 * here re-issues the enable (or queues it) and logs the re-enable.
 */
2305 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2306 enum bfa_fcport_sm_event event)
2308 char pwwn_buf[BFA_STRING_32];
2309 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2310 bfa_trc(fcport->bfa, event);
2313 case BFA_FCPORT_SM_FWRSP:
2314 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2317 case BFA_FCPORT_SM_DISABLE:
2319 * Already being disabled.
2323 case BFA_FCPORT_SM_ENABLE:
2324 if (bfa_fcport_send_enable(fcport))
2325 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2327 bfa_sm_set_state(fcport,
2328 bfa_fcport_sm_enabling_qwait);
2330 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2331 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2332 wwn2str(pwwn_buf, fcport->pwwn);
2333 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2334 "Base port enabled: WWN = %s\n", pwwn_buf);
2337 case BFA_FCPORT_SM_STOP:
2338 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2341 case BFA_FCPORT_SM_LINKUP:
2342 case BFA_FCPORT_SM_LINKDOWN:
2344 * Possible to get link events when doing back-to-back
2349 case BFA_FCPORT_SM_HWFAIL:
2350 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2354 bfa_sm_fault(fcport->bfa, event);
/*
 * Port is administratively disabled. Only ENABLE (re-enable, with
 * logging), STOP and HWFAIL cause transitions; START is ignored.
 */
2359 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2360 enum bfa_fcport_sm_event event)
2362 char pwwn_buf[BFA_STRING_32];
2363 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2364 bfa_trc(fcport->bfa, event);
2367 case BFA_FCPORT_SM_START:
2369 * Ignore start event for a port that is disabled.
2373 case BFA_FCPORT_SM_STOP:
2374 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2377 case BFA_FCPORT_SM_ENABLE:
2378 if (bfa_fcport_send_enable(fcport))
2379 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2381 bfa_sm_set_state(fcport,
2382 bfa_fcport_sm_enabling_qwait);
2384 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2385 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2386 wwn2str(pwwn_buf, fcport->pwwn);
2387 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2388 "Base port enabled: WWN = %s\n", pwwn_buf);
2391 case BFA_FCPORT_SM_DISABLE:
2397 case BFA_FCPORT_SM_HWFAIL:
2398 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2402 bfa_sm_fault(fcport->bfa, event);
/*
 * Stopped state: only START restarts the enable sequence; everything
 * else is silently ignored.
 */
2407 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2408 enum bfa_fcport_sm_event event)
2410 bfa_trc(fcport->bfa, event);
2413 case BFA_FCPORT_SM_START:
2414 if (bfa_fcport_send_enable(fcport))
2415 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2417 bfa_sm_set_state(fcport,
2418 bfa_fcport_sm_enabling_qwait);
2423 * Ignore all other events.
2430 * Port is enabled. IOC is down/failed.
/* Identical recovery pattern: START re-enables once the IOC is back. */
2433 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2434 enum bfa_fcport_sm_event event)
2436 bfa_trc(fcport->bfa, event);
2439 case BFA_FCPORT_SM_START:
2440 if (bfa_fcport_send_enable(fcport))
2441 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2443 bfa_sm_set_state(fcport,
2444 bfa_fcport_sm_enabling_qwait);
2449 * Ignore all events.
2456 * Port is disabled. IOC is down/failed.
/*
 * Disabled port with failed IOC: START returns to disabled (port stays
 * administratively down); ENABLE means the port should come up once the
 * IOC recovers, so move to iocdown.
 */
2459 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2460 enum bfa_fcport_sm_event event)
2462 bfa_trc(fcport->bfa, event);
2465 case BFA_FCPORT_SM_START:
2466 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2469 case BFA_FCPORT_SM_ENABLE:
2470 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2475 * Ignore all events.
2482 * Link state is down
/*
 * Link-notification SM, "down" family. The *_nf states wait for the
 * upper layer to acknowledge a queued notification before delivering
 * the next one, so up/down flaps are serialized, never lost.
 */
2485 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2486 enum bfa_fcport_ln_sm_event event)
2488 bfa_trc(ln->fcport->bfa, event);
2491 case BFA_FCPORT_LN_SM_LINKUP:
2492 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2493 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2497 bfa_sm_fault(ln->fcport->bfa, event);
2502 * Link state is waiting for down notification
2505 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2506 enum bfa_fcport_ln_sm_event event)
2508 bfa_trc(ln->fcport->bfa, event);
2511 case BFA_FCPORT_LN_SM_LINKUP:
2512 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2515 case BFA_FCPORT_LN_SM_NOTIFICATION:
2516 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2520 bfa_sm_fault(ln->fcport->bfa, event);
2525 * Link state is waiting for down notification and there is a pending up
2528 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2529 enum bfa_fcport_ln_sm_event event)
2531 bfa_trc(ln->fcport->bfa, event);
2534 case BFA_FCPORT_LN_SM_LINKDOWN:
2535 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
/* Down notification acked: now deliver the pending LINKUP. */
2538 case BFA_FCPORT_LN_SM_NOTIFICATION:
2539 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2540 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2544 bfa_sm_fault(ln->fcport->bfa, event);
/*
 * Link-notification SM, "up" family -- mirror image of the down states:
 * pending opposite-direction events are held until the current queued
 * notification is acknowledged (BFA_FCPORT_LN_SM_NOTIFICATION).
 */
2552 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2553 enum bfa_fcport_ln_sm_event event)
2555 bfa_trc(ln->fcport->bfa, event);
2558 case BFA_FCPORT_LN_SM_LINKDOWN:
2559 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2560 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2564 bfa_sm_fault(ln->fcport->bfa, event);
2569 * Link state is waiting for up notification
2572 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2573 enum bfa_fcport_ln_sm_event event)
2575 bfa_trc(ln->fcport->bfa, event);
2578 case BFA_FCPORT_LN_SM_LINKDOWN:
2579 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2582 case BFA_FCPORT_LN_SM_NOTIFICATION:
2583 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2587 bfa_sm_fault(ln->fcport->bfa, event);
2592 * Link state is waiting for up notification and there is a pending down
2595 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2596 enum bfa_fcport_ln_sm_event event)
2598 bfa_trc(ln->fcport->bfa, event);
2601 case BFA_FCPORT_LN_SM_LINKUP:
2602 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2605 case BFA_FCPORT_LN_SM_NOTIFICATION:
2606 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2607 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2611 bfa_sm_fault(ln->fcport->bfa, event);
2616 * Link state is waiting for up notification and there are pending down and up
2619 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2620 enum bfa_fcport_ln_sm_event event)
2622 bfa_trc(ln->fcport->bfa, event);
2625 case BFA_FCPORT_LN_SM_LINKDOWN:
2626 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2629 case BFA_FCPORT_LN_SM_NOTIFICATION:
2630 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2631 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2635 bfa_sm_fault(ln->fcport->bfa, event);
/*
 * Deferred delivery of a queued link-state callback; after invoking the
 * upper-layer handler, acknowledge to the ln state machine.
 */
2646 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2648 struct bfa_fcport_ln_s *ln = cbarg;
2651 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2653 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2657 * Send SCN notification to upper layers.
2658 * trunk - false if caller is fcport to ignore fcport event in trunked mode
/* In trunked mode, per-port events (trunk==false) are suppressed. */
2661 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2662 bfa_boolean_t trunk)
2664 if (fcport->cfg.trunked && !trunk)
2668 case BFA_PORT_LINKUP:
2669 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2671 case BFA_PORT_LINKDOWN:
2672 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
/*
 * fcs gets the callback synchronously and the ack is immediate;
 * other consumers get it via the bfa callback queue.
 */
2680 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2682 struct bfa_fcport_s *fcport = ln->fcport;
2684 if (fcport->bfa->fcs) {
2685 fcport->event_cbfn(fcport->event_cbarg, event);
2686 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2688 ln->ln_event = event;
2689 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2690 __bfa_cb_fcport_event, ln);
2694 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
/* DMA memory requirement: one rounded-up stats block per port. */
2698 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2701 *dm_len += FCPORT_STATS_DMA_SZ;
/* Request-queue-wait callback registered in bfa_fcport_attach(). */
2705 bfa_fcport_qresume(void *cbarg)
2707 struct bfa_fcport_s *fcport = cbarg;
2709 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
/*
 * Claim the DMA-able stats buffer from meminfo and advance both the
 * virtual and physical cursors past it.
 */
2713 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2718 dm_kva = bfa_meminfo_dma_virt(meminfo);
2719 dm_pa = bfa_meminfo_dma_phys(meminfo);
2721 fcport->stats_kva = dm_kva;
2722 fcport->stats_pa = dm_pa;
2723 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2725 dm_kva += FCPORT_STATS_DMA_SZ;
2726 dm_pa += FCPORT_STATS_DMA_SZ;
2728 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2729 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2733 * Memory initialization.
/*
 * Attach-time setup: claim stats DMA memory, reset both state machines
 * (port and link-notify), stamp the stats-reset time and install the
 * default port configuration.
 */
2736 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2737 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2739 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2740 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2741 struct bfa_fcport_ln_s *ln = &fcport->ln;
2744 memset(fcport, 0, sizeof(struct bfa_fcport_s));
2746 ln->fcport = fcport;
2748 bfa_fcport_mem_claim(fcport, meminfo);
2750 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2751 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2754 * initialize time stamp for stats reset
2756 do_gettimeofday(&tv);
2757 fcport->stats_reset_time = tv.tv_sec;
2760 * initialize and set default configuration
2762 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2763 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2764 port_cfg->trunked = BFA_FALSE;
2765 port_cfg->maxfrsize = 0;
2767 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2769 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
/* Detach is a no-op for fcport. */
2773 bfa_fcport_detach(struct bfa_s *bfa)
2778 * Called when IOC is ready.
2781 bfa_fcport_start(struct bfa_s *bfa)
2783 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2787 * Called before IOC is stopped.
/* Also forces trunk state down since the IOC is going away. */
2790 bfa_fcport_stop(struct bfa_s *bfa)
2792 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2793 bfa_trunk_iocdisable(bfa);
2797 * Called when IOC failure is detected.
/* Propagate HWFAIL to the port SM and take down trunk state. */
2800 bfa_fcport_iocdisable(struct bfa_s *bfa)
2802 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2804 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2805 bfa_trunk_iocdisable(bfa);
/*
 * Cache the link parameters reported in the firmware linkup event:
 * speed, topology, QoS attributes, trunk state and FCoE VLAN.
 */
2809 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2811 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2812 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2814 fcport->speed = pevent->link_state.speed;
2815 fcport->topology = pevent->link_state.topology;
2817 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2821 fcport->qos_attr = pevent->link_state.qos_attr;
2822 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2825 * update trunk state if applicable
2827 if (!fcport->cfg.trunked)
2828 trunk->attr.state = BFA_TRUNK_DISABLED;
2830 /* update FCoE specific */
2831 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
2833 bfa_trc(fcport->bfa, fcport->speed);
2834 bfa_trc(fcport->bfa, fcport->topology);
/* Invalidate cached link parameters on any link-down transition. */
2838 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2840 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2841 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2845 * Send port enable message to firmware.
/*
 * Returns BFA_TRUE when the enable request was queued, BFA_FALSE when
 * the request queue is full (a qwait is armed and the caller moves to
 * an *_qwait state).
 */
2847 static bfa_boolean_t
2848 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2850 struct bfi_fcport_enable_req_s *m;
2853 * Increment message tag before queue check, so that responses to old
2854 * requests are discarded.
2859 * check for room in queue to send request now
2861 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2863 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2864 &fcport->reqq_wait);
2868 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
2869 bfa_lpuid(fcport->bfa));
2870 m->nwwn = fcport->nwwn;
2871 m->pwwn = fcport->pwwn;
2872 m->port_cfg = fcport->cfg;
2873 m->msgtag = fcport->msgtag;
2874 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
/* Tell firmware where to DMA port statistics. */
2875 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
2876 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
2877 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
2880 * queue I/O message to firmware
2882 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2887 * Send port disable message to firmware.
/* Same queue-full contract as bfa_fcport_send_enable(). */
2889 static bfa_boolean_t
2890 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
2892 struct bfi_fcport_req_s *m;
2895 * Increment message tag before queue check, so that responses to old
2896 * requests are discarded.
2901 * check for room in queue to send request now
2903 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2905 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2906 &fcport->reqq_wait);
2910 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
2911 bfa_lpuid(fcport->bfa));
2912 m->msgtag = fcport->msgtag;
2915 * queue I/O message to firmware
2917 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/* Copy the port/node WWNs from the IOC attributes into the fcport. */
2923 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
2925 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
2926 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
2928 bfa_trc(fcport->bfa, fcport->pwwn);
2929 bfa_trc(fcport->bfa, fcport->nwwn);
/*
 * Push the configured TX BB-credit to firmware.  NOTE(review): unlike the
 * enable/disable senders there is no visible reqq_wait fallback here --
 * the queue-full path may have been dropped from this extract; confirm.
 */
2933 bfa_fcport_send_txcredit(void *port_cbarg)
2936 struct bfa_fcport_s *fcport = port_cbarg;
2937 struct bfi_fcport_set_svc_params_req_s *m;
2940 * check for room in queue to send request now
2942 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2944 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
2948 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
2949 bfa_lpuid(fcport->bfa));
/* Firmware expects the credit value in big-endian */
2950 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
2953 * queue I/O message to firmware
2955 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Byte-swap a firmware QoS stats structure (big-endian) into host order,
 * treating it as an array of 32-bit words.
 */
2959 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
2960 struct bfa_qos_stats_s *s)
2962 u32 *dip = (u32 *) d;
2963 __be32 *sip = (__be32 *) s;
2966 /* Now swap the 32 bit fields */
2967 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
2968 dip[i] = be32_to_cpu(sip[i]);
/*
 * Byte-swap firmware FCoE stats into host order.  The stats hold 64-bit
 * counters as 32-bit word pairs; the two pair orderings below were
 * presumably selected by #ifdef __BIG_ENDIAN / #else branches that this
 * extract dropped (the loop increment is also missing) -- TODO confirm
 * against the full source.
 */
2972 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
2973 struct bfa_fcoe_stats_s *s)
2975 u32 *dip = (u32 *) d;
2976 __be32 *sip = (__be32 *) s;
2979 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
/* Big-endian host: words already in natural order within the pair */
2982 dip[i] = be32_to_cpu(sip[i]);
2983 dip[i + 1] = be32_to_cpu(sip[i + 1]);
/* Little-endian host: swap the two words of each 64-bit counter */
2985 dip[i] = be32_to_cpu(sip[i + 1]);
2986 dip[i + 1] = be32_to_cpu(sip[i]);
/*
 * Completion callback for a stats-get request.  On success, byte-swap the
 * raw firmware stats into the caller's buffer (FC QoS stats for FC-mode
 * IOCs, FCoE stats otherwise) and stamp the seconds-since-reset value,
 * then invoke the caller's callback and clear the busy state.
 */
2992 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
2994 struct bfa_fcport_s *fcport = cbarg;
2997 if (fcport->stats_status == BFA_STATUS_OK) {
3000 /* Swap FC QoS or FCoE stats */
3001 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
3002 bfa_fcport_qos_stats_swap(
3003 &fcport->stats_ret->fcqos,
3004 &fcport->stats->fcqos);
3006 bfa_fcport_fcoe_stats_swap(
3007 &fcport->stats_ret->fcoe,
3008 &fcport->stats->fcoe);
3010 do_gettimeofday(&tv);
3011 fcport->stats_ret->fcoe.secs_reset =
3012 tv.tv_sec - fcport->stats_reset_time;
3015 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
/* Re-arm for the next stats request */
3017 fcport->stats_busy = BFA_FALSE;
3018 fcport->stats_status = BFA_STATUS_OK;
/*
 * Stats-get timer expired before the firmware response: cancel any
 * pending queue-full wait and complete the request with ETIMER.
 */
3023 bfa_fcport_stats_get_timeout(void *cbarg)
3025 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3027 bfa_trc(fcport->bfa, fcport->stats_qfull);
3029 if (fcport->stats_qfull) {
3030 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3031 fcport->stats_qfull = BFA_FALSE;
3034 fcport->stats_status = BFA_STATUS_ETIMER;
3035 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
/*
 * Send a stats-get request to firmware.  If the request queue is full,
 * register self as the queue-resume callback and retry later.
 */
3040 bfa_fcport_send_stats_get(void *cbarg)
3042 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3043 struct bfi_fcport_req_s *msg;
3045 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3048 fcport->stats_qfull = BFA_TRUE;
3049 bfa_reqq_winit(&fcport->stats_reqq_wait,
3050 bfa_fcport_send_stats_get, fcport);
3051 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3052 &fcport->stats_reqq_wait);
3055 fcport->stats_qfull = BFA_FALSE;
3057 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3058 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3059 bfa_lpuid(fcport->bfa));
3060 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Completion callback for a stats-clear request: restart the stats reset
 * timestamp, notify the caller and clear the busy state.
 */
3064 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3066 struct bfa_fcport_s *fcport = cbarg;
3072 * re-initialize time stamp for stats reset
3074 do_gettimeofday(&tv);
3075 fcport->stats_reset_time = tv.tv_sec;
3077 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3079 fcport->stats_busy = BFA_FALSE;
3080 fcport->stats_status = BFA_STATUS_OK;
/* Stats-clear timer expiry: mirror of bfa_fcport_stats_get_timeout(). */
3085 bfa_fcport_stats_clr_timeout(void *cbarg)
3087 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3089 bfa_trc(fcport->bfa, fcport->stats_qfull);
3091 if (fcport->stats_qfull) {
3092 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3093 fcport->stats_qfull = BFA_FALSE;
3096 fcport->stats_status = BFA_STATUS_ETIMER;
3097 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3098 __bfa_cb_fcport_stats_clr, fcport);
/* Send a stats-clear request; mirrors bfa_fcport_send_stats_get(). */
3102 bfa_fcport_send_stats_clear(void *cbarg)
3104 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3105 struct bfi_fcport_req_s *msg;
3107 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3110 fcport->stats_qfull = BFA_TRUE;
3111 bfa_reqq_winit(&fcport->stats_reqq_wait,
3112 bfa_fcport_send_stats_clear, fcport);
3113 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3114 &fcport->stats_reqq_wait);
3117 fcport->stats_qfull = BFA_FALSE;
3119 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3120 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3121 bfa_lpuid(fcport->bfa));
3122 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3126 * Handle trunk SCN event from firmware.
/*
 * Copy the new trunk state/speed and per-link attributes out of the SCN,
 * log the link combination to the plog, and notify upper layers when the
 * trunk state changed (or on trunk-offline).
 */
3129 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3131 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3132 struct bfi_fcport_trunk_link_s *tlink;
3133 struct bfa_trunk_link_attr_s *lattr;
3134 enum bfa_trunk_state state_prev;
3138 bfa_trc(fcport->bfa, fcport->cfg.trunked);
/* Firmware must only report online/offline here */
3139 bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
3140 scn->trunk_state == BFA_TRUNK_OFFLINE);
3142 bfa_trc(fcport->bfa, trunk->attr.state);
3143 bfa_trc(fcport->bfa, scn->trunk_state);
3144 bfa_trc(fcport->bfa, scn->trunk_speed);
3147 * Save off new state for trunk attribute query
3149 state_prev = trunk->attr.state;
/* Only track firmware trunk state while trunking is configured */
3150 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3151 trunk->attr.state = scn->trunk_state;
3152 trunk->attr.speed = scn->trunk_speed;
3153 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3154 lattr = &trunk->attr.link_attr[i];
3155 tlink = &scn->tlink[i];
3157 lattr->link_state = tlink->state;
3158 lattr->trunk_wwn = tlink->trunk_wwn;
3159 lattr->fctl = tlink->fctl;
3160 lattr->speed = tlink->speed;
/* deskew is the only big-endian field in the per-link attributes */
3161 lattr->deskew = be32_to_cpu(tlink->deskew);
3163 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
/* An up trunk link implies point-to-point topology */
3164 fcport->speed = tlink->speed;
3165 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3169 bfa_trc(fcport->bfa, lattr->link_state);
3170 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3171 bfa_trc(fcport->bfa, lattr->fctl);
3172 bfa_trc(fcport->bfa, lattr->speed);
3173 bfa_trc(fcport->bfa, lattr->deskew);
/*
 * Plog entries below were presumably selected by a switch/if on which
 * combination of the two trunk links is up -- the selection statements
 * were dropped from this extract; confirm against the full source.
 */
3178 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3179 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3182 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3183 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3186 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3187 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3190 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3191 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3195 * Notify upper layers if trunk state changed.
3197 if ((state_prev != trunk->attr.state) ||
3198 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3199 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3200 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
/*
 * IOC disable while trunked: report link down (if previously online) and
 * reset all cached trunk/link attributes to their offline defaults.
 */
3205 bfa_trunk_iocdisable(struct bfa_s *bfa)
3207 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3211 * In trunked mode, notify upper layers that link is down
3213 if (fcport->cfg.trunked) {
3214 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3215 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3217 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3218 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3219 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3220 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3221 fcport->trunk.attr.link_attr[i].fctl =
3222 BFA_TRUNK_LINK_FCTL_NORMAL;
3223 fcport->trunk.attr.link_attr[i].link_state =
3224 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3225 fcport->trunk.attr.link_attr[i].speed =
3226 BFA_PORT_SPEED_UNKNOWN;
3227 fcport->trunk.attr.link_attr[i].deskew = 0;
3239 * Called to initialize port attributes
/*
 * Seed port attributes (WWNs, max frame size, RX BB-credit, supported
 * speed) from IOC hardware data, then sanity-assert they are non-zero.
 */
3242 bfa_fcport_init(struct bfa_s *bfa)
3244 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3247 * Initialize port attributes from IOC hardware data.
3249 bfa_fcport_set_wwns(fcport);
/* Preserve a previously-configured max frame size */
3250 if (fcport->cfg.maxfrsize == 0)
3251 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3252 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3253 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3255 bfa_assert(fcport->cfg.maxfrsize);
3256 bfa_assert(fcport->cfg.rx_bbcredit);
3257 bfa_assert(fcport->speed_sup);
3261 * Firmware message handler.
/*
 * Dispatch fcport firmware (I2H) messages: enable/disable responses feed
 * the port state machine, link events feed linkup/linkdown, trunk SCNs go
 * to bfa_trunk_scn(), and stats responses complete pending stats ops.
 */
3264 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3266 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3267 union bfi_fcport_i2h_msg_u i2hmsg;
3270 fcport->event_arg.i2hmsg = i2hmsg;
3272 bfa_trc(bfa, msg->mhdr.msg_id);
3273 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3275 switch (msg->mhdr.msg_id) {
3276 case BFI_FCPORT_I2H_ENABLE_RSP:
/* Ignore stale responses from a superseded request (msgtag check) */
3277 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3278 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3281 case BFI_FCPORT_I2H_DISABLE_RSP:
/*
 * NOTE(review): the disable response is read through the penable_rsp
 * union view -- presumably both rsp layouts share the msgtag offset;
 * confirm against the bfi message definitions.
 */
3282 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3283 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3286 case BFI_FCPORT_I2H_EVENT:
3287 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3288 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3290 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3293 case BFI_FCPORT_I2H_TRUNK_SCN:
3294 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3297 case BFI_FCPORT_I2H_STATS_GET_RSP:
3299 * check for timer pop before processing the rsp
/* Drop the response if no request is pending or it already timed out */
3301 if (fcport->stats_busy == BFA_FALSE ||
3302 fcport->stats_status == BFA_STATUS_ETIMER)
3305 bfa_timer_stop(&fcport->timer);
3306 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3307 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3308 __bfa_cb_fcport_stats_get, fcport);
3311 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3313 * check for timer pop before processing the rsp
3315 if (fcport->stats_busy == BFA_FALSE ||
3316 fcport->stats_status == BFA_STATUS_ETIMER)
3319 bfa_timer_stop(&fcport->timer);
3320 fcport->stats_status = BFA_STATUS_OK;
3321 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3322 __bfa_cb_fcport_stats_clr, fcport);
3325 case BFI_FCPORT_I2H_ENABLE_AEN:
3326 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3329 case BFI_FCPORT_I2H_DISABLE_AEN:
3330 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3346 * Registered callback for port events.
/* Register the upper-layer callback invoked on port link-state events. */
3349 bfa_fcport_event_register(struct bfa_s *bfa,
3350 void (*cbfn) (void *cbarg,
3351 enum bfa_port_linkstate event),
3354 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3356 fcport->event_cbfn = cbfn;
3357 fcport->event_cbarg = cbarg;
/*
 * Request port enable.  Rejected while the IOC is disabled or a
 * diagnostic is in progress; otherwise drives the port state machine.
 */
3361 bfa_fcport_enable(struct bfa_s *bfa)
3363 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3365 if (bfa_ioc_is_disabled(&bfa->ioc))
3366 return BFA_STATUS_IOC_DISABLED;
3368 if (fcport->diag_busy)
3369 return BFA_STATUS_DIAG_BUSY;
3371 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3372 return BFA_STATUS_OK;
/* Request port disable (no diag-busy check, unlike enable). */
3376 bfa_fcport_disable(struct bfa_s *bfa)
3379 if (bfa_ioc_is_disabled(&bfa->ioc))
3380 return BFA_STATUS_IOC_DISABLED;
3382 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3383 return BFA_STATUS_OK;
3387 * Configure port speed.
/*
 * Rejected while trunking is enabled; a fixed speed above the hardware's
 * supported maximum returns UNSUPP_SPEED (AUTO always accepted).
 */
3390 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3392 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3394 bfa_trc(bfa, speed);
3396 if (fcport->cfg.trunked == BFA_TRUE)
3397 return BFA_STATUS_TRUNK_ENABLED;
3398 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3399 bfa_trc(bfa, fcport->speed_sup);
3400 return BFA_STATUS_UNSUPP_SPEED;
3403 fcport->cfg.speed = speed;
3405 return BFA_STATUS_OK;
3409 * Get current speed.
3412 bfa_fcport_get_speed(struct bfa_s *bfa)
3414 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3416 return fcport->speed;
3420 * Configure port topology.
/* Only P2P, LOOP and AUTO are accepted; anything else is EINVAL. */
3423 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3425 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3427 bfa_trc(bfa, topology);
3428 bfa_trc(bfa, fcport->cfg.topology);
3431 case BFA_PORT_TOPOLOGY_P2P:
3432 case BFA_PORT_TOPOLOGY_LOOP:
3433 case BFA_PORT_TOPOLOGY_AUTO:
3437 return BFA_STATUS_EINVAL;
3440 fcport->cfg.topology = topology;
3441 return BFA_STATUS_OK;
3445 * Get current topology.
3447 enum bfa_port_topology
3448 bfa_fcport_get_topology(struct bfa_s *bfa)
3450 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3452 return fcport->topology;
/* Configure a hard AL_PA for loop operation. */
3456 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3458 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3461 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3462 bfa_trc(bfa, fcport->cfg.hardalpa);
3464 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3465 fcport->cfg.hardalpa = alpa;
3467 return BFA_STATUS_OK;
/* Clear the hard AL_PA configuration flag (value itself is kept). */
3471 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3473 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3475 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3476 bfa_trc(bfa, fcport->cfg.hardalpa);
3478 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3479 return BFA_STATUS_OK;
/* Out-param the configured hard AL_PA; returns whether it is enabled. */
3483 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3485 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3487 *alpa = fcport->cfg.hardalpa;
3488 return fcport->cfg.cfg_hardalpa;
/* Return the AL_PA the port actually acquired. */
3492 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3494 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3496 return fcport->myalpa;
/*
 * Configure max receive frame size: must be within [FC_MIN_PDUSZ,
 * FC_MAX_PDUSZ] and a power of two unless it equals FC_MAX_PDUSZ (2112).
 */
3500 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3502 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3504 bfa_trc(bfa, maxfrsize);
3505 bfa_trc(bfa, fcport->cfg.maxfrsize);
3508 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3509 return BFA_STATUS_INVLD_DFSZ;
3511 /* power of 2, if not the max frame size of 2112 */
3512 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3513 return BFA_STATUS_INVLD_DFSZ;
3515 fcport->cfg.maxfrsize = maxfrsize;
3516 return BFA_STATUS_OK;
3520 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3522 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3524 return fcport->cfg.maxfrsize;
3528 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3530 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3532 return fcport->cfg.rx_bbcredit;
/* Set TX BB-credit and push it to firmware immediately. */
3536 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3538 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
/* cfg field is u8: values above 255 are silently truncated here */
3540 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3541 bfa_fcport_send_txcredit(fcport);
3545 * Get port attributes.
/* Return the node WWN when 'node' is true, otherwise the port WWN. */
3549 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3551 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3553 return fcport->nwwn;
3555 return fcport->pwwn;
/*
 * Fill *attr with a zeroed-then-populated snapshot of the port: WWNs,
 * factory WWNs, full port configuration, speed/topology/trunk state,
 * beacon and plog flags, FCPIM tunables, SM-derived port state and the
 * FCoE VLAN.  IOC-disabled / firmware-mismatch override the port state.
 */
3559 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3563 memset(attr, 0, sizeof(struct bfa_port_attr_s));
3565 attr->nwwn = fcport->nwwn;
3566 attr->pwwn = fcport->pwwn;
3568 attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3569 attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
3571 memcpy(&attr->pport_cfg, &fcport->cfg,
3572 sizeof(struct bfa_port_cfg_s));
3573 /* speed attributes */
3574 attr->pport_cfg.speed = fcport->cfg.speed;
3575 attr->speed_supported = fcport->speed_sup;
3576 attr->speed = fcport->speed;
3577 attr->cos_supported = FC_CLASS_3;
3579 /* topology attributes */
3580 attr->pport_cfg.topology = fcport->cfg.topology;
3581 attr->topology = fcport->topology;
3582 attr->pport_cfg.trunked = fcport->cfg.trunked;
3584 /* beacon attributes */
3585 attr->beacon = fcport->beacon;
3586 attr->link_e2e_beacon = fcport->link_e2e_beacon;
3587 attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
3588 attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
3590 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
3591 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
/* Derive reported state from the SM, unless the IOC overrides it */
3592 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3593 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3594 attr->port_state = BFA_PORT_ST_IOCDIS;
3595 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3596 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3599 attr->fcoe_vlan = fcport->fcoe_vlan;
/* Stats request timeout, in the units bfa_timer_start() expects (ms) */
3602 #define BFA_FCPORT_STATS_TOV 1000
3605 * Fetch port statistics (FCQoS or FCoE).
/*
 * Asynchronous: the result is delivered via cbfn(cbarg, status) once the
 * firmware responds (or the BFA_FCPORT_STATS_TOV timer pops).  Only one
 * stats operation (get or clear) may be outstanding at a time.
 */
3608 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3609 bfa_cb_port_t cbfn, void *cbarg)
3611 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3613 if (fcport->stats_busy) {
3614 bfa_trc(bfa, fcport->stats_busy);
3615 return BFA_STATUS_DEVBUSY;
3618 fcport->stats_busy = BFA_TRUE;
3619 fcport->stats_ret = stats;
3620 fcport->stats_cbfn = cbfn;
3621 fcport->stats_cbarg = cbarg;
3623 bfa_fcport_send_stats_get(fcport);
/* Guard against a lost firmware response */
3625 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3626 fcport, BFA_FCPORT_STATS_TOV);
3627 return BFA_STATUS_OK;
3631 * Reset port statistics (FCQoS or FCoE).
/* Mirrors bfa_fcport_get_stats(): same busy gating and timeout guard. */
3634 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3636 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3638 if (fcport->stats_busy) {
3639 bfa_trc(bfa, fcport->stats_busy);
3640 return BFA_STATUS_DEVBUSY;
3643 fcport->stats_busy = BFA_TRUE;
3644 fcport->stats_cbfn = cbfn;
3645 fcport->stats_cbarg = cbarg;
3647 bfa_fcport_send_stats_clear(fcport);
3649 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3650 fcport, BFA_FCPORT_STATS_TOV);
3651 return BFA_STATUS_OK;
3656 * Fetch port attributes.
/* True when the port state machine is in the DISABLED state. */
3659 bfa_fcport_is_disabled(struct bfa_s *bfa)
3661 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3663 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3664 BFA_PORT_ST_DISABLED;
/* True when target rate limiting is configured on this port. */
3669 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3671 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3673 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3679 * Get default minimum ratelim speed
3682 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3684 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3686 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3687 return fcport->cfg.trl_def_speed;
/*
 * Link is up when either the (non-trunked) port SM is in linkup, or the
 * port is trunked and the trunk is online.
 */
3692 bfa_fcport_is_linkup(struct bfa_s *bfa)
3694 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3696 return (!fcport->cfg.trunked &&
3697 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3698 (fcport->cfg.trunked &&
3699 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
/* True when QoS is enabled in the port configuration. */
3703 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3705 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3707 return fcport->cfg.qos_enabled;
3711 * Rport State machine functions
3714 * Beginning state, only online event expected.
/* uninit: CREATE moves to created; anything else is a state-machine fault. */
3717 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
3719 bfa_trc(rp->bfa, rp->rport_tag);
3720 bfa_trc(rp->bfa, event);
3723 case BFA_RPORT_SM_CREATE:
3724 bfa_stats(rp, sm_un_cr);
3725 bfa_sm_set_state(rp, bfa_rport_sm_created);
3729 bfa_stats(rp, sm_un_unexp);
3730 bfa_sm_fault(rp->bfa, event);
/*
 * created: ONLINE sends fwcreate (qfull variant if the request queue is
 * full), DELETE returns to uninit, HWFAIL parks in iocdisable.
 */
3735 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
3737 bfa_trc(rp->bfa, rp->rport_tag);
3738 bfa_trc(rp->bfa, event);
3741 case BFA_RPORT_SM_ONLINE:
3742 bfa_stats(rp, sm_cr_on);
3743 if (bfa_rport_send_fwcreate(rp))
3744 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
3746 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
3749 case BFA_RPORT_SM_DELETE:
3750 bfa_stats(rp, sm_cr_del);
3751 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
3755 case BFA_RPORT_SM_HWFAIL:
3756 bfa_stats(rp, sm_cr_hwf);
3757 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3761 bfa_stats(rp, sm_cr_unexp);
3762 bfa_sm_fault(rp->bfa, event);
3767 * Waiting for rport create response from firmware.
/* DELETE/OFFLINE during fwcreate are deferred via *_pending states. */
3770 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
3772 bfa_trc(rp->bfa, rp->rport_tag);
3773 bfa_trc(rp->bfa, event);
3776 case BFA_RPORT_SM_FWRSP:
3777 bfa_stats(rp, sm_fwc_rsp);
3778 bfa_sm_set_state(rp, bfa_rport_sm_online);
3779 bfa_rport_online_cb(rp);
3782 case BFA_RPORT_SM_DELETE:
3783 bfa_stats(rp, sm_fwc_del);
3784 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
3787 case BFA_RPORT_SM_OFFLINE:
3788 bfa_stats(rp, sm_fwc_off);
3789 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
3792 case BFA_RPORT_SM_HWFAIL:
3793 bfa_stats(rp, sm_fwc_hwf);
3794 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3798 bfa_stats(rp, sm_fwc_unexp);
3799 bfa_sm_fault(rp->bfa, event);
3804 * Request queue is full, awaiting queue resume to send create request.
/* Exits via QRESUME (retry), or cancels the wait on DELETE/OFFLINE/HWFAIL. */
3807 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
3809 bfa_trc(rp->bfa, rp->rport_tag);
3810 bfa_trc(rp->bfa, event);
3813 case BFA_RPORT_SM_QRESUME:
3814 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
3815 bfa_rport_send_fwcreate(rp);
3818 case BFA_RPORT_SM_DELETE:
3819 bfa_stats(rp, sm_fwc_del);
3820 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
3821 bfa_reqq_wcancel(&rp->reqq_wait);
3825 case BFA_RPORT_SM_OFFLINE:
3826 bfa_stats(rp, sm_fwc_off);
3827 bfa_sm_set_state(rp, bfa_rport_sm_offline);
3828 bfa_reqq_wcancel(&rp->reqq_wait);
3829 bfa_rport_offline_cb(rp);
3832 case BFA_RPORT_SM_HWFAIL:
3833 bfa_stats(rp, sm_fwc_hwf);
3834 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3835 bfa_reqq_wcancel(&rp->reqq_wait);
3839 bfa_stats(rp, sm_fwc_unexp);
3840 bfa_sm_fault(rp->bfa, event);
3845 * Online state - normal parking state.
/*
 * OFFLINE/DELETE both issue fwdelete (qfull variants on a full queue);
 * QOS_SCN byte-swaps the flow ids and notifies the rport driver when the
 * flow id or priority changed.
 */
3848 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
3850 struct bfi_rport_qos_scn_s *qos_scn;
3852 bfa_trc(rp->bfa, rp->rport_tag);
3853 bfa_trc(rp->bfa, event);
3856 case BFA_RPORT_SM_OFFLINE:
3857 bfa_stats(rp, sm_on_off);
3858 if (bfa_rport_send_fwdelete(rp))
3859 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
3861 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
3864 case BFA_RPORT_SM_DELETE:
3865 bfa_stats(rp, sm_on_del);
3866 if (bfa_rport_send_fwdelete(rp))
3867 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
3869 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
3872 case BFA_RPORT_SM_HWFAIL:
3873 bfa_stats(rp, sm_on_hwf);
3874 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3877 case BFA_RPORT_SM_SET_SPEED:
3878 bfa_rport_send_fwspeed(rp);
3881 case BFA_RPORT_SM_QOS_SCN:
3882 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
3883 rp->qos_attr = qos_scn->new_qos_attr;
3884 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
3885 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
3886 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
3887 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
/* Flow ids arrive big-endian; convert in place before comparing */
3889 qos_scn->old_qos_attr.qos_flow_id =
3890 be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
3891 qos_scn->new_qos_attr.qos_flow_id =
3892 be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
3894 if (qos_scn->old_qos_attr.qos_flow_id !=
3895 qos_scn->new_qos_attr.qos_flow_id)
3896 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
3897 qos_scn->old_qos_attr,
3898 qos_scn->new_qos_attr);
3899 if (qos_scn->old_qos_attr.qos_priority !=
3900 qos_scn->new_qos_attr.qos_priority)
3901 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
3902 qos_scn->old_qos_attr,
3903 qos_scn->new_qos_attr);
3907 bfa_stats(rp, sm_on_unexp);
3908 bfa_sm_fault(rp->bfa, event);
3913 * Firmware rport is being deleted - awaiting f/w response.
/* FWRSP -> offline (with callback); DELETE upgrades to deleting. */
3916 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
3918 bfa_trc(rp->bfa, rp->rport_tag);
3919 bfa_trc(rp->bfa, event);
3922 case BFA_RPORT_SM_FWRSP:
3923 bfa_stats(rp, sm_fwd_rsp);
3924 bfa_sm_set_state(rp, bfa_rport_sm_offline);
3925 bfa_rport_offline_cb(rp);
3928 case BFA_RPORT_SM_DELETE:
3929 bfa_stats(rp, sm_fwd_del);
3930 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
3933 case BFA_RPORT_SM_HWFAIL:
3934 bfa_stats(rp, sm_fwd_hwf);
3935 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3936 bfa_rport_offline_cb(rp);
3940 bfa_stats(rp, sm_fwd_unexp);
3941 bfa_sm_fault(rp->bfa, event);
/* fwdelete with the delete request still parked on a full queue. */
3946 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
3948 bfa_trc(rp->bfa, rp->rport_tag);
3949 bfa_trc(rp->bfa, event);
3952 case BFA_RPORT_SM_QRESUME:
3953 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
3954 bfa_rport_send_fwdelete(rp);
3957 case BFA_RPORT_SM_DELETE:
3958 bfa_stats(rp, sm_fwd_del);
3959 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
3962 case BFA_RPORT_SM_HWFAIL:
3963 bfa_stats(rp, sm_fwd_hwf);
3964 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3965 bfa_reqq_wcancel(&rp->reqq_wait);
3966 bfa_rport_offline_cb(rp);
3970 bfa_stats(rp, sm_fwd_unexp);
3971 bfa_sm_fault(rp->bfa, event);
/* offline: DELETE frees back to uninit; ONLINE restarts fwcreate. */
3979 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
3981 bfa_trc(rp->bfa, rp->rport_tag);
3982 bfa_trc(rp->bfa, event);
3985 case BFA_RPORT_SM_DELETE:
3986 bfa_stats(rp, sm_off_del);
3987 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
3991 case BFA_RPORT_SM_ONLINE:
3992 bfa_stats(rp, sm_off_on);
3993 if (bfa_rport_send_fwcreate(rp))
3994 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
3996 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
3999 case BFA_RPORT_SM_HWFAIL:
4000 bfa_stats(rp, sm_off_hwf);
4001 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4005 bfa_stats(rp, sm_off_unexp);
4006 bfa_sm_fault(rp->bfa, event);
4011 * Rport is deleted, waiting for firmware response to delete.
/* Note: no stats bump on the default fault path, unlike sibling states. */
4014 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4016 bfa_trc(rp->bfa, rp->rport_tag);
4017 bfa_trc(rp->bfa, event);
4020 case BFA_RPORT_SM_FWRSP:
4021 bfa_stats(rp, sm_del_fwrsp);
4022 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4026 case BFA_RPORT_SM_HWFAIL:
4027 bfa_stats(rp, sm_del_hwf);
4028 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4033 bfa_sm_fault(rp->bfa, event);
/* deleting with the delete request parked on a full queue. */
4038 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4040 bfa_trc(rp->bfa, rp->rport_tag);
4041 bfa_trc(rp->bfa, event);
4044 case BFA_RPORT_SM_QRESUME:
4045 bfa_stats(rp, sm_del_fwrsp);
4046 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4047 bfa_rport_send_fwdelete(rp);
4050 case BFA_RPORT_SM_HWFAIL:
4051 bfa_stats(rp, sm_del_hwf);
4052 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4053 bfa_reqq_wcancel(&rp->reqq_wait);
4058 bfa_sm_fault(rp->bfa, event);
4063 * Waiting for rport create response from firmware. A delete is pending.
/* On the create response, immediately chase it with a fwdelete. */
4066 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4067 enum bfa_rport_event event)
4069 bfa_trc(rp->bfa, rp->rport_tag);
4070 bfa_trc(rp->bfa, event);
4073 case BFA_RPORT_SM_FWRSP:
4074 bfa_stats(rp, sm_delp_fwrsp);
4075 if (bfa_rport_send_fwdelete(rp))
4076 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4078 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4081 case BFA_RPORT_SM_HWFAIL:
4082 bfa_stats(rp, sm_delp_hwf);
4083 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4088 bfa_stats(rp, sm_delp_unexp);
4089 bfa_sm_fault(rp->bfa, event);
4094 * Waiting for rport create response from firmware. Rport offline is pending.
/* Like delete_pending, but ends in offline; DELETE upgrades the intent. */
4097 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4098 enum bfa_rport_event event)
4100 bfa_trc(rp->bfa, rp->rport_tag);
4101 bfa_trc(rp->bfa, event);
4104 case BFA_RPORT_SM_FWRSP:
4105 bfa_stats(rp, sm_offp_fwrsp);
4106 if (bfa_rport_send_fwdelete(rp))
4107 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4109 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4112 case BFA_RPORT_SM_DELETE:
4113 bfa_stats(rp, sm_offp_del);
4114 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4117 case BFA_RPORT_SM_HWFAIL:
4118 bfa_stats(rp, sm_offp_hwf);
4119 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4123 bfa_stats(rp, sm_offp_unexp);
4124 bfa_sm_fault(rp->bfa, event);
/*
 * IOC is down: firmware has no rport state, so OFFLINE just runs the
 * callback, DELETE frees to uninit, ONLINE re-creates in firmware, and
 * HWFAIL is absorbed (already here).
 */
4132 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4134 bfa_trc(rp->bfa, rp->rport_tag);
4135 bfa_trc(rp->bfa, event);
4138 case BFA_RPORT_SM_OFFLINE:
4139 bfa_stats(rp, sm_iocd_off);
4140 bfa_rport_offline_cb(rp);
4143 case BFA_RPORT_SM_DELETE:
4144 bfa_stats(rp, sm_iocd_del);
4145 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4149 case BFA_RPORT_SM_ONLINE:
4150 bfa_stats(rp, sm_iocd_on);
4151 if (bfa_rport_send_fwcreate(rp))
4152 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4154 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4157 case BFA_RPORT_SM_HWFAIL:
4161 bfa_stats(rp, sm_iocd_unexp);
4162 bfa_sm_fault(rp->bfa, event);
4169 * bfa_rport_private BFA rport private functions
/* Deferred-callback trampoline: notify the driver the rport is online. */
4173 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4175 struct bfa_rport_s *rp = cbarg;
4178 bfa_cb_rport_online(rp->rport_drv);
/* Deferred-callback trampoline: notify the driver the rport is offline. */
4182 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4184 struct bfa_rport_s *rp = cbarg;
4187 bfa_cb_rport_offline(rp->rport_drv);
/* Request-queue resume callback: re-drive the rport state machine. */
4191 bfa_rport_qresume(void *cbarg)
4193 struct bfa_rport_s *rp = cbarg;
4195 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
/*
 * Report kernel memory needed for the rport module: one bfa_rport_s per
 * configured rport (clamped up to BFA_RPORT_MIN).
 */
4199 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4202 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4203 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4205 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
/*
 * Module attach: carve the rport array out of the meminfo KVA, put every
 * rport on the free list in the uninit state, and init its reqq wait
 * element.  num_rports must be a non-zero power of two.
 */
4209 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4210 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4212 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4213 struct bfa_rport_s *rp;
4216 INIT_LIST_HEAD(&mod->rp_free_q);
4217 INIT_LIST_HEAD(&mod->rp_active_q);
4219 rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
4221 mod->num_rports = cfg->fwcfg.num_rports;
4223 bfa_assert(mod->num_rports &&
4224 !(mod->num_rports & (mod->num_rports - 1)));
4226 for (i = 0; i < mod->num_rports; i++, rp++) {
4227 memset(rp, 0, sizeof(struct bfa_rport_s));
4230 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4236 list_add_tail(&rp->qe, &mod->rp_free_q);
4238 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
/* Advance the KVA cursor past the consumed rport array */
4244 bfa_meminfo_kva(meminfo) = (u8 *) rp;
/* Module lifecycle stubs: nothing to do on detach/start/stop. */
4248 bfa_rport_detach(struct bfa_s *bfa)
4253 bfa_rport_start(struct bfa_s *bfa)
4258 bfa_rport_stop(struct bfa_s *bfa)
/* IOC disable: fan HWFAIL out to every active rport. */
4263 bfa_rport_iocdisable(struct bfa_s *bfa)
4265 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4266 struct bfa_rport_s *rport;
4267 struct list_head *qe, *qen;
/* _safe iteration: handlers may move rports off the active list */
4269 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4270 rport = (struct bfa_rport_s *) qe;
4271 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
/* Pop an rport off the free list onto the active list (NULL if empty --
 * the dequeue/NULL-check detail is not fully visible in this extract). */
4275 static struct bfa_rport_s *
4276 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4278 struct bfa_rport_s *rport;
4280 bfa_q_deq(&mod->rp_free_q, &rport);
4282 list_add_tail(&rport->qe, &mod->rp_active_q);
/* Return an rport from the active list to the free list. */
4288 bfa_rport_free(struct bfa_rport_s *rport)
4290 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4292 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4293 list_del(&rport->qe);
4294 list_add_tail(&rport->qe, &mod->rp_free_q);
/*
 * Send an rport-create request to firmware.  Returns a bfa_boolean_t --
 * presumably BFA_FALSE on queue-full (request parked on reqq_wait) and
 * BFA_TRUE once queued; return statements were dropped from this extract.
 */
4297 static bfa_boolean_t
4298 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4300 struct bfi_rport_create_req_s *m;
4303 * check for room in queue to send request now
4305 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4307 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4311 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4312 bfa_lpuid(rp->bfa));
4313 m->bfa_handle = rp->rport_tag;
/* max_frmsz is the only field converted to big-endian here */
4314 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4315 m->pid = rp->rport_info.pid;
4316 m->lp_tag = rp->rport_info.lp_tag;
4317 m->local_pid = rp->rport_info.local_pid;
4318 m->fc_class = rp->rport_info.fc_class;
4319 m->vf_en = rp->rport_info.vf_en;
4320 m->vf_id = rp->rport_info.vf_id;
4321 m->cisc = rp->rport_info.cisc;
4324 * queue I/O message to firmware
4326 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
/* Send an rport-delete request; same queue-full contract as fwcreate. */
4330 static bfa_boolean_t
4331 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4333 struct bfi_rport_delete_req_s *m;
4336 * check for room in queue to send request now
4338 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4340 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4344 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4345 bfa_lpuid(rp->bfa));
4346 m->fw_handle = rp->fw_handle;
4349 * queue I/O message to firmware
4351 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
/*
 * Push the rport speed to firmware.  NOTE(review): no reqq_wait fallback
 * is visible on the queue-full path here -- presumably a speed update is
 * best-effort and simply dropped; confirm against the full source.
 */
4355 static bfa_boolean_t
4356 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4358 struct bfa_rport_speed_req_s *m;
4361 * check for room in queue to send request now
4363 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4365 bfa_trc(rp->bfa, rp->rport_info.speed);
4369 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4370 bfa_lpuid(rp->bfa));
4371 m->fw_handle = rp->fw_handle;
4372 m->speed = (u8)rp->rport_info.speed;
4375 * queue I/O message to firmware
4377 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4388 * Rport interrupt processing.
/*
 * Demultiplex firmware-to-host rport messages: resolve the bfa_handle
 * back to a bfa_rport_s and feed the matching event into its state
 * machine. create/delete responses assert firmware returned
 * BFA_STATUS_OK; QOS SCN events stash the raw message in event_arg
 * for the state machine to consume.
 */
4391 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4393 union bfi_rport_i2h_msg_u msg;
4394 struct bfa_rport_s *rp;
4396 bfa_trc(bfa, m->mhdr.msg_id);
4400 switch (m->mhdr.msg_id) {
4401 case BFI_RPORT_I2H_CREATE_RSP:
4402 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4403 rp->fw_handle = msg.create_rsp->fw_handle;
4404 rp->qos_attr = msg.create_rsp->qos_attr;
4405 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
4406 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4409 case BFI_RPORT_I2H_DELETE_RSP:
4410 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4411 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
4412 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4415 case BFI_RPORT_I2H_QOS_SCN:
4416 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4417 rp->event_arg.fw_msg = msg.qos_scn_evt;
4418 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
/* Unknown message id: trace it (default-case label elided in this view). */
4422 bfa_trc(bfa, m->mhdr.msg_id);
/*
 * Allocate an rport from the free pool, bind it to the driver-level
 * cookie @rport_drv, zero its statistics, and start its state machine
 * with BFA_RPORT_SM_CREATE. The NULL-alloc guard and final return of
 * rp are elided in this view.
 */
4433 struct bfa_rport_s *
4434 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4436 struct bfa_rport_s *rp;
4438 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4444 rp->rport_drv = rport_drv;
4445 memset(&rp->stats, 0, sizeof(rp->stats));
/* New rports must come out of the pool in the uninit state. */
4447 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4448 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
/*
 * Bring an rport online: cache the caller's rport_info and drive the
 * state machine with BFA_RPORT_SM_ONLINE (which triggers the firmware
 * create request).
 */
4454 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4456 bfa_assert(rport_info->max_frmsz != 0);
/*
 * NOTE(review): the assert above trips exactly when max_frmsz == 0,
 * yet the branch below deliberately defaults a zero max_frmsz to
 * FC_MIN_PDUSZ as a buggy-JBOD workaround. In assert-enabled builds
 * the workaround may never be exercised — confirm whether bfa_assert
 * warns or aborts, and whether the assert should be dropped.
 */
4459 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4460 * responses. Default to minimum size.
4462 if (rport_info->max_frmsz == 0) {
4463 bfa_trc(rport->bfa, rport->rport_tag);
4464 rport_info->max_frmsz = FC_MIN_PDUSZ;
4467 rport->rport_info = *rport_info;
4468 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
/*
 * Set a fixed operating speed on the rport and notify the state machine.
 * A concrete speed is required: zero and BFA_PORT_SPEED_AUTO are rejected
 * by assertion.
 */
4472 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4474 bfa_assert(speed != 0);
4475 bfa_assert(speed != BFA_PORT_SPEED_AUTO);
4477 rport->rport_info.speed = speed;
4478 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4483 * SGPG related functions
4487 * Compute and return memory needed by FCP(im) module.
/*
 * Clamp the configured SGPG count to at least BFA_SGPG_MIN, then add the
 * kernel-memory (host bfa_sgpg_s array) and DMA-memory (firmware
 * bfi_sgpg_s array) requirements. One extra element each is reserved —
 * presumably alignment slack consumed in bfa_sgpg_attach; confirm.
 */
4490 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4493 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4494 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4496 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4497 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
/*
 * Module attach: carve the host SGPG array (kernel memory) and firmware
 * SGPG array (DMA memory) out of @minfo, aligning the DMA base with
 * BFA_SGPG_ROUNDUP, then link every host SGPG onto the free queue with
 * its little-endian DMA address pre-computed. Finally the meminfo
 * cursors are advanced past the consumed memory.
 */
4502 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4503 struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
4505 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4507 struct bfa_sgpg_s *hsgpg;
4508 struct bfi_sgpg_s *sgpg;
/* Union lets the physical address be viewed as a bfi_addr_u for firmware. */
4513 union bfi_addr_u addr;
4514 } sgpg_pa, sgpg_pa_tmp;
4516 INIT_LIST_HEAD(&mod->sgpg_q);
4517 INIT_LIST_HEAD(&mod->sgpg_wait_q);
4519 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4521 mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
4522 mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
/* Round the DMA base up to an SGPG boundary; align_len is the slack. */
4523 align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
4524 mod->sgpg_arr_pa += align_len;
4525 mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
4527 mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
4530 hsgpg = mod->hsgpg_arr;
4531 sgpg = mod->sgpg_arr;
4532 sgpg_pa.pa = mod->sgpg_arr_pa;
4533 mod->free_sgpgs = mod->num_sgpgs;
/* DMA base must be naturally aligned to the SGPG size. */
4535 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
4537 for (i = 0; i < mod->num_sgpgs; i++) {
4538 memset(hsgpg, 0, sizeof(*hsgpg));
4539 memset(sgpg, 0, sizeof(*sgpg));
/* Store the firmware-visible (little-endian) address in the host SGPG. */
4542 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4543 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4544 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
/* Pointer increments for hsgpg/sgpg are elided in this view. */
4548 sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
/* Record how much kernel/DMA memory this module consumed. */
4551 bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
4552 bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
4553 bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
/* Module lifecycle hooks — empty placeholders (bodies are empty/elided). */
4557 bfa_sgpg_detach(struct bfa_s *bfa)
4562 bfa_sgpg_start(struct bfa_s *bfa)
4567 bfa_sgpg_stop(struct bfa_s *bfa)
4572 bfa_sgpg_iocdisable(struct bfa_s *bfa)
4579 * hal_sgpg_public BFA SGPG public functions
/*
 * Allocate @nsgpgs scatter-gather pages onto the caller's @sgpg_q.
 * All-or-nothing: returns BFA_STATUS_ENOMEM without allocating anything
 * if fewer than @nsgpgs are free, else moves them from the module free
 * queue and returns BFA_STATUS_OK.
 */
4583 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4585 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4586 struct bfa_sgpg_s *hsgpg;
4589 bfa_trc_fp(bfa, nsgpgs);
4591 if (mod->free_sgpgs < nsgpgs)
4592 return BFA_STATUS_ENOMEM;
4594 for (i = 0; i < nsgpgs; i++) {
4595 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4597 list_add_tail(&hsgpg->qe, sgpg_q);
4600 mod->free_sgpgs -= nsgpgs;
4601 return BFA_STATUS_OK;
/*
 * Return @nsgpg pages from @sgpg_q to the module free queue, then drain
 * the wait queue: each waiter gets as many pages as are available, is
 * dequeued and called back once its request is fully satisfied. The
 * do/while head, waiter dequeue and reset of nsgpg per iteration are
 * elided in this view.
 */
4605 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
4607 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4608 struct bfa_sgpg_wqe_s *wqe;
4610 bfa_trc_fp(bfa, nsgpg);
4612 mod->free_sgpgs += nsgpg;
4613 bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
4615 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
/* Fast path: nobody waiting, nothing more to do. */
4617 if (list_empty(&mod->sgpg_wait_q))
4621 * satisfy as many waiting requests as possible
4624 wqe = bfa_q_first(&mod->sgpg_wait_q);
/* Cap this waiter's grant at what is actually free. */
4625 if (mod->free_sgpgs < wqe->nsgpg)
4626 nsgpg = mod->free_sgpgs;
4629 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
4630 wqe->nsgpg -= nsgpg;
/* Fully satisfied: remove from wait queue and notify the owner. */
4631 if (wqe->nsgpg == 0) {
4633 wqe->cbfn(wqe->cbarg);
4635 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
/*
 * Queue a wait element for @nsgpg pages. Only called when the request
 * cannot be fully satisfied now (asserted below). Any pages that ARE
 * free are granted to this waiter immediately; the remainder is filled
 * in later by bfa_sgpg_mfree when pages are returned.
 */
4639 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
4641 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4643 bfa_assert(nsgpg > 0);
4644 bfa_assert(nsgpg > mod->free_sgpgs);
4646 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
4649 * allocate any left to this one first
4651 if (mod->free_sgpgs) {
4653 * no one else is waiting for SGPG
/* FIFO invariant: free pages can only coexist with an empty wait queue. */
4655 bfa_assert(list_empty(&mod->sgpg_wait_q));
4656 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
4657 wqe->nsgpg -= mod->free_sgpgs;
4658 mod->free_sgpgs = 0;
4661 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
/*
 * Cancel a pending SGPG wait: assert the element is queued, and hand any
 * partially-granted pages (nsgpg_total - nsgpg) back to the free pool.
 * The list_del of @wqe itself is elided in this view.
 */
4665 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4667 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4669 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4672 if (wqe->nsgpg_total != wqe->nsgpg)
4673 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4674 wqe->nsgpg_total - wqe->nsgpg);
/*
 * Initialize a wait element before use: empty its page list.
 * NOTE(review): the assignments storing @cbfn/@cbarg into @wqe are
 * elided in this view — presumably they follow the list init.
 */
4678 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4681 INIT_LIST_HEAD(&wqe->sgpg_q);
4687 * UF related functions
4690 *****************************************************************************
4691 * Internal functions
4692 *****************************************************************************
/*
 * Deferred-callback trampoline: deliver a received unsolicited frame to
 * the registered receive handler. The @complete guard line is elided in
 * this view — presumably the handler runs only when complete is true.
 */
4695 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4697 struct bfa_uf_s *uf = cbarg;
4698 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4701 ufm->ufrecv(ufm->cbarg, uf);
/*
 * Claim the DMA-able block that holds the posted UF buffers: record its
 * kernel-virtual and physical base, advance the meminfo cursors past the
 * rounded total size, and zero the whole region.
 */
4705 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4709 ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
4710 ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
4711 uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
4714 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
4715 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
4717 memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
/*
 * Pre-build one BFI_UF_H2I_BUF_POST message per UF buffer in kernel
 * memory, so posting a buffer later is a plain memcpy into the request
 * queue (see bfa_uf_post). Each message carries the buffer tag, length,
 * and a two-entry SGE: sge[0] points at the buffer's DMA address,
 * sge[1] is a zero-address PGDLEN terminator.
 */
4721 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4723 struct bfi_uf_buf_post_s *uf_bp_msg;
4724 struct bfi_sge_s *sge;
4725 union bfi_addr_u sga_zero = { {0} };
4729 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
4730 uf_bp_msg = ufm->uf_buf_posts;
4732 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4734 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4736 uf_bp_msg->buf_tag = i;
4737 buf_len = sizeof(struct bfa_uf_buf_s);
4738 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4739 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4740 bfa_lpuid(ufm->bfa));
4742 sge = uf_bp_msg->sge;
4743 sge[0].sg_len = buf_len;
4744 sge[0].flags = BFI_SGE_DATA_LAST;
4745 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
/* NOTE(review): the bfa_sge_to_be(&sge[0]) call appears elided here. */
4748 sge[1].sg_len = buf_len;
4749 sge[1].flags = BFI_SGE_PGDLEN;
4750 sge[1].sga = sga_zero;
4751 bfa_sge_to_be(&sge[1]);
4755 * advance pointer beyond consumed memory
4757 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
/*
 * Claim kernel memory for the UF descriptor array and initialize each
 * descriptor: buffer length, kernel-virtual and DMA addresses of its
 * posted buffer, then queue it on the free list. The uf->bfa / uf_tag
 * assignments are elided in this view.
 */
4761 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4764 struct bfa_uf_s *uf;
4767 * Claim block of memory for UF list
4769 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
4772 * Initialize UFs and queue it in UF free queue
4774 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
4775 memset(uf, 0, sizeof(struct bfa_uf_s));
4778 uf->pb_len = sizeof(struct bfa_uf_buf_s);
4779 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
4780 uf->buf_pa = ufm_pbs_pa(ufm, i);
4781 list_add_tail(&uf->qe, &ufm->uf_free_q);
4785 * advance memory pointer
4787 bfa_meminfo_kva(mi) = (u8 *) uf;
/*
 * Claim all UF module memory in order: DMA buffers first, then the
 * pre-built buffer-post messages (descriptor claim, claim_ufs, sits
 * between these in the original but is elided in this view).
 */
4791 uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4793 claim_uf_pbs(ufm, mi);
4795 claim_uf_post_msgs(ufm, mi);
/*
 * Report the UF module's memory needs: DMA space for the posted buffers
 * (rounded up), plus kernel memory for the UF descriptors and the
 * pre-built buffer-post message copies.
 */
4799 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4801 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4804 * dma-able memory for UF posted bufs
4806 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4810 * kernel Virtual memory for UFs and UF buf post msg copies
4812 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4813 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
/*
 * Module attach: zero the module state, record the configured buffer
 * count, initialize the free and posted queues, and claim memory for
 * buffers, descriptors and post messages. (ufm->bfa assignment elided.)
 */
4817 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4818 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4820 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4822 memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
4824 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
4825 INIT_LIST_HEAD(&ufm->uf_free_q);
4826 INIT_LIST_HEAD(&ufm->uf_posted_q);
4828 uf_mem_claim(ufm, meminfo);
/* Module detach hook — empty placeholder (body is empty/elided). */
4832 bfa_uf_detach(struct bfa_s *bfa)
/*
 * Pop one UF descriptor off the module free queue; NULL when empty.
 * (The return statement is elided in this view.)
 */
4836 static struct bfa_uf_s *
4837 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4839 struct bfa_uf_s *uf;
4841 bfa_q_deq(&uf_mod->uf_free_q, &uf);
/* Return a UF descriptor to the module free queue. */
4846 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
4848 list_add_tail(&uf->qe, &uf_mod->uf_free_q);
/*
 * Post a receive buffer to firmware by copying the pre-built buf-post
 * message for uf->uf_tag into the FCXP request queue. On success the
 * descriptor moves to the posted queue; BFA_STATUS_FAILED when there is
 * no queue space (the "if (!uf_post_msg)" guard line is elided).
 */
4852 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
4854 struct bfi_uf_buf_post_s *uf_post_msg;
4856 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
4858 return BFA_STATUS_FAILED;
4860 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
4861 sizeof(struct bfi_uf_buf_post_s));
4862 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
4864 bfa_trc(ufm->bfa, uf->uf_tag);
4866 list_add_tail(&uf->qe, &ufm->uf_posted_q);
4867 return BFA_STATUS_OK;
/*
 * Drain the free queue, posting every available buffer to firmware.
 * Stops early if a post fails for lack of request-queue space; the
 * put-back of the failed descriptor is elided in this view.
 */
4871 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4873 struct bfa_uf_s *uf;
4875 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4876 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
/*
 * Handle a firmware frame-received event: locate the buffer and
 * descriptor by buf_tag, byte-swap the length fields in place, dequeue
 * the descriptor from the posted queue, log the frame header (plus the
 * first payload word when there is one), and deliver to the registered
 * handler — immediately if the IOC is operational, otherwise via the
 * deferred callback queue (the if/else around 4914/4916 is elided).
 */
4882 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4884 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4885 u16 uf_tag = m->buf_tag;
4886 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4887 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4888 u8 *buf = &uf_buf->d[0];
4889 struct fchs_s *fchs;
/* Lengths arrive big-endian from firmware; convert in place. */
4891 m->frm_len = be16_to_cpu(m->frm_len);
4892 m->xfr_len = be16_to_cpu(m->xfr_len);
4894 fchs = (struct fchs_s *)uf_buf;
4896 list_del(&uf->qe); /* dequeue from posted queue */
4899 uf->data_len = m->xfr_len;
/* Every frame must carry at least a full FC header. */
4901 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
4903 if (uf->data_len == sizeof(struct fchs_s)) {
4904 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
4905 uf->data_len, (struct fchs_s *)buf);
/* Header plus payload: also log the first payload word. */
4907 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
4908 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
4909 BFA_PL_EID_RX, uf->data_len,
4910 (struct fchs_s *)buf, pld_w0);
4914 __bfa_cb_uf_recv(uf, BFA_TRUE);
4916 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
/* Module stop hook — empty placeholder (body is empty/elided). */
4920 bfa_uf_stop(struct bfa_s *bfa)
/*
 * IOC-disable cleanup: firmware will never complete outstanding posts,
 * so walk the posted queue (safe iteration, entries are removed) and
 * reclaim every descriptor onto the free queue. The list_del before
 * bfa_uf_put is elided in this view.
 */
4925 bfa_uf_iocdisable(struct bfa_s *bfa)
4927 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4928 struct bfa_uf_s *uf;
4929 struct list_head *qe, *qen;
4931 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
4932 uf = (struct bfa_uf_s *) qe;
4934 bfa_uf_put(ufm, uf);
/* Module start hook: hand all free receive buffers to firmware. */
4939 bfa_uf_start(struct bfa_s *bfa)
4941 bfa_uf_post_all(BFA_UF_MOD(bfa));
4945 * Register handler for all unsolicited receive frames.
4947 * @param[in] bfa BFA instance
4948 * @param[in] ufrecv receive handler function
4949 * @param[in] cbarg receive handler arg
/*
 * Stores the handler in module state; the matching cbarg assignment
 * (ufm->cbarg = cbarg) is elided in this view.
 */
4952 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
4954 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4956 ufm->ufrecv = ufrecv;
4961 * Free an unsolicited frame back to BFA.
4963 * @param[in] uf unsolicited frame to be freed
/*
 * Returns the descriptor to the free pool, then immediately tries to
 * repost all free buffers so firmware always has receive buffers.
 */
4968 bfa_uf_free(struct bfa_uf_s *uf)
4970 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
4971 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
4977 * uf_pub BFA uf module public functions
4980 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
4982 bfa_trc(bfa, msg->mhdr.msg_id);
4984 switch (msg->mhdr.msg_id) {
4985 case BFI_UF_I2H_FRM_RCVD:
4986 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
4990 bfa_trc(bfa, msg->mhdr.msg_id);