2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
18 #include "bfa_os_inc.h"
21 #include "bfa_modules.h"
24 BFA_TRC_FILE(HAL, FCXP);
33 * LPS related definitions
35 #define BFA_LPS_MIN_LPORTS (1)
36 #define BFA_LPS_MAX_LPORTS (256)
39 * Maximum Vports supported per physical port or vf.
41 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
45 * lps_pvt BFA LPS private functions
/*
 * Events driving the logical-port (LPS) login state machine.
 */
enum bfa_lps_event {
	BFA_LPS_SM_LOGIN	= 1,	/* login request from user	*/
	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user	*/
	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout	*/
	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue	*/
	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user		*/
	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline		*/
	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link	*/
};
59 * FC PORT related definitions
62 * The port is considered disabled if corresponding physical port or IOC are
65 #define BFA_PORT_IS_DISABLED(bfa) \
66 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
71 * BFA port state machine events
/*
 * Events driving the FC port state machine.
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/* start port state machine	*/
	BFA_FCPORT_SM_STOP	= 2,	/* stop port state machine	*/
	BFA_FCPORT_SM_ENABLE	= 3,	/* enable port			*/
	BFA_FCPORT_SM_DISABLE	= 4,	/* disable port state machine	*/
	BFA_FCPORT_SM_FWRSP	= 5,	/* firmware enable/disable rsp	*/
	BFA_FCPORT_SM_LINKUP	= 6,	/* firmware linkup event	*/
	BFA_FCPORT_SM_LINKDOWN	= 7,	/* firmware linkup down		*/
	BFA_FCPORT_SM_QRESUME	= 8,	/* CQ space available		*/
	BFA_FCPORT_SM_HWFAIL	= 9,	/* IOC h/w failure		*/
};
86 * BFA port link notification state machine events
/*
 * Events driving the FC port link-notification state machine.
 */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/* linkup event		*/
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/* linkdown event	*/
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/* done notification	*/
};
96 * RPORT related definitions
/*
 * Deliver an rport offline notification: directly to the FCS when it is
 * attached, otherwise deferred through the BFA callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

/*
 * Same delivery policy as bfa_rport_offline_cb, for the online event.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
/*
 * Events driving the remote-port (rport) state machine.
 */
enum bfa_rport_event {
	BFA_RPORT_SM_CREATE	= 1,	/* rport create event		*/
	BFA_RPORT_SM_DELETE	= 2,	/* deleting an existing rport	*/
	BFA_RPORT_SM_ONLINE	= 3,	/* rport is online		*/
	BFA_RPORT_SM_OFFLINE	= 4,	/* rport is offline		*/
	BFA_RPORT_SM_FWRSP	= 5,	/* firmware response		*/
	BFA_RPORT_SM_HWFAIL	= 6,	/* IOC h/w failure		*/
	BFA_RPORT_SM_QOS_SCN	= 7,	/* QoS SCN from firmware	*/
	BFA_RPORT_SM_SET_SPEED	= 8,	/* Set Rport Speed		*/
	BFA_RPORT_SM_QRESUME	= 9,	/* space in requeue queue	*/
};
130 * forward declarations FCXP related functions
132 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
133 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
134 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
135 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
136 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
137 static void bfa_fcxp_qresume(void *cbarg);
138 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req);
142 * forward declarations for LPS functions
144 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
146 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
147 struct bfa_iocfc_cfg_s *cfg,
148 struct bfa_meminfo_s *meminfo,
149 struct bfa_pcidev_s *pcidev);
150 static void bfa_lps_detach(struct bfa_s *bfa);
151 static void bfa_lps_start(struct bfa_s *bfa);
152 static void bfa_lps_stop(struct bfa_s *bfa);
153 static void bfa_lps_iocdisable(struct bfa_s *bfa);
154 static void bfa_lps_login_rsp(struct bfa_s *bfa,
155 struct bfi_lps_login_rsp_s *rsp);
156 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
157 struct bfi_lps_logout_rsp_s *rsp);
158 static void bfa_lps_reqq_resume(void *lps_arg);
159 static void bfa_lps_free(struct bfa_lps_s *lps);
160 static void bfa_lps_send_login(struct bfa_lps_s *lps);
161 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
162 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
167 * forward declaration for LPS state machine
169 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
170 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
171 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
173 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
174 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
179 * forward declaration for FC Port functions
181 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
182 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
183 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
184 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
185 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
186 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
187 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
188 enum bfa_port_linkstate event, bfa_boolean_t trunk);
189 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
190 enum bfa_port_linkstate event);
191 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
192 static void bfa_fcport_stats_get_timeout(void *cbarg);
193 static void bfa_fcport_stats_clr_timeout(void *cbarg);
194 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
197 * forward declaration for FC PORT state machine
199 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
200 enum bfa_fcport_sm_event event);
201 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
202 enum bfa_fcport_sm_event event);
203 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
207 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
208 enum bfa_fcport_sm_event event);
209 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
210 enum bfa_fcport_sm_event event);
211 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
212 enum bfa_fcport_sm_event event);
213 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
214 enum bfa_fcport_sm_event event);
215 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
216 enum bfa_fcport_sm_event event);
217 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
218 enum bfa_fcport_sm_event event);
219 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
220 enum bfa_fcport_sm_event event);
221 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
222 enum bfa_fcport_sm_event event);
224 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
225 enum bfa_fcport_ln_sm_event event);
226 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
227 enum bfa_fcport_ln_sm_event event);
228 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
229 enum bfa_fcport_ln_sm_event event);
230 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
231 enum bfa_fcport_ln_sm_event event);
232 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
233 enum bfa_fcport_ln_sm_event event);
234 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
235 enum bfa_fcport_ln_sm_event event);
236 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
237 enum bfa_fcport_ln_sm_event event);
239 static struct bfa_sm_table_s hal_port_sm_table[] = {
240 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
241 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
242 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
243 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
244 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
245 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
246 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
247 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
248 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
249 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
250 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
251 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
256 * forward declaration for RPORT related functions
258 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
259 static void bfa_rport_free(struct bfa_rport_s *rport);
260 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
261 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
262 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
263 static void __bfa_cb_rport_online(void *cbarg,
264 bfa_boolean_t complete);
265 static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete);
269 * forward declaration for RPORT state machine
271 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
273 static void bfa_rport_sm_created(struct bfa_rport_s *rp,
274 enum bfa_rport_event event);
275 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
276 enum bfa_rport_event event);
277 static void bfa_rport_sm_online(struct bfa_rport_s *rp,
278 enum bfa_rport_event event);
279 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
280 enum bfa_rport_event event);
281 static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
282 enum bfa_rport_event event);
283 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
284 enum bfa_rport_event event);
285 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
286 enum bfa_rport_event event);
287 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
288 enum bfa_rport_event event);
289 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
290 enum bfa_rport_event event);
291 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
292 enum bfa_rport_event event);
293 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
294 enum bfa_rport_event event);
295 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event);
299 * PLOG related definitions
302 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
304 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
305 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
308 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
309 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
316 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
319 struct bfa_plog_rec_s *pl_recp;
321 if (plog->plog_enabled == 0)
324 if (plkd_validate_logrec(pl_rec)) {
331 pl_recp = &(plog->plog_recs[tail]);
333 bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
335 pl_recp->tv = bfa_os_get_log_time();
336 BFA_PL_LOG_REC_INCR(plog->tail);
338 if (plog->head == plog->tail)
339 BFA_PL_LOG_REC_INCR(plog->head);
343 bfa_plog_init(struct bfa_plog_s *plog)
345 bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
347 bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
348 plog->head = plog->tail = 0;
349 plog->plog_enabled = 1;
353 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
354 enum bfa_plog_eid event,
355 u16 misc, char *log_str)
357 struct bfa_plog_rec_s lp;
359 if (plog->plog_enabled) {
360 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
363 lp.log_type = BFA_PL_LOG_TYPE_STRING;
365 strncpy(lp.log_entry.string_log, log_str,
366 BFA_PL_STRING_LOG_SZ - 1);
367 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
368 bfa_plog_add(plog, &lp);
373 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
374 enum bfa_plog_eid event,
375 u16 misc, u32 *intarr, u32 num_ints)
377 struct bfa_plog_rec_s lp;
380 if (num_ints > BFA_PL_INT_LOG_SZ)
381 num_ints = BFA_PL_INT_LOG_SZ;
383 if (plog->plog_enabled) {
384 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
387 lp.log_type = BFA_PL_LOG_TYPE_INT;
390 for (i = 0; i < num_ints; i++)
391 bfa_os_assign(lp.log_entry.int_log[i],
394 lp.log_num_ints = (u8) num_ints;
396 bfa_plog_add(plog, &lp);
401 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
402 enum bfa_plog_eid event,
403 u16 misc, struct fchs_s *fchdr)
405 struct bfa_plog_rec_s lp;
406 u32 *tmp_int = (u32 *) fchdr;
407 u32 ints[BFA_PL_INT_LOG_SZ];
409 if (plog->plog_enabled) {
410 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
412 ints[0] = tmp_int[0];
413 ints[1] = tmp_int[1];
414 ints[2] = tmp_int[4];
416 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
421 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
422 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
425 struct bfa_plog_rec_s lp;
426 u32 *tmp_int = (u32 *) fchdr;
427 u32 ints[BFA_PL_INT_LOG_SZ];
429 if (plog->plog_enabled) {
430 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
432 ints[0] = tmp_int[0];
433 ints[1] = tmp_int[1];
434 ints[2] = tmp_int[4];
437 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
442 bfa_plog_clear(struct bfa_plog_s *plog)
444 plog->head = plog->tail = 0;
448 bfa_plog_enable(struct bfa_plog_s *plog)
450 plog->plog_enabled = 1;
454 bfa_plog_disable(struct bfa_plog_s *plog)
456 plog->plog_enabled = 0;
460 bfa_plog_get_setting(struct bfa_plog_s *plog)
462 return (bfa_boolean_t)plog->plog_enabled;
466 * fcxp_pvt BFA FCXP private functions
470 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
476 dm_kva = bfa_meminfo_dma_virt(mi);
477 dm_pa = bfa_meminfo_dma_phys(mi);
479 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
482 * Initialize the fcxp req payload list
484 mod->req_pld_list_kva = dm_kva;
485 mod->req_pld_list_pa = dm_pa;
486 dm_kva += buf_pool_sz;
487 dm_pa += buf_pool_sz;
488 bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
491 * Initialize the fcxp rsp payload list
493 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
494 mod->rsp_pld_list_kva = dm_kva;
495 mod->rsp_pld_list_pa = dm_pa;
496 dm_kva += buf_pool_sz;
497 dm_pa += buf_pool_sz;
498 bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
500 bfa_meminfo_dma_virt(mi) = dm_kva;
501 bfa_meminfo_dma_phys(mi) = dm_pa;
505 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
508 struct bfa_fcxp_s *fcxp;
510 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
511 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
513 INIT_LIST_HEAD(&mod->fcxp_free_q);
514 INIT_LIST_HEAD(&mod->fcxp_active_q);
516 mod->fcxp_list = fcxp;
518 for (i = 0; i < mod->num_fcxps; i++) {
519 fcxp->fcxp_mod = mod;
522 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
523 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
524 fcxp->reqq_waiting = BFA_FALSE;
529 bfa_meminfo_kva(mi) = (void *)fcxp;
533 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
536 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
538 if (num_fcxp_reqs == 0)
542 * Account for req/rsp payload
544 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
545 if (cfg->drvcfg.min_cfg)
546 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
548 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
551 * Account for fcxp structs
553 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
557 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
558 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
560 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
562 bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
564 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
567 * Initialize FCXP request and response payload sizes.
569 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
570 if (!cfg->drvcfg.min_cfg)
571 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
573 INIT_LIST_HEAD(&mod->wait_q);
575 claim_fcxp_req_rsp_mem(mod, meminfo);
576 claim_fcxps_mem(mod, meminfo);
/*
 * The FCXP module has no work to do at detach/start/stop time; these
 * are the mandatory BFA module-vector stubs.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
595 bfa_fcxp_iocdisable(struct bfa_s *bfa)
597 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
598 struct bfa_fcxp_s *fcxp;
599 struct list_head *qe, *qen;
601 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
602 fcxp = (struct bfa_fcxp_s *) qe;
603 if (fcxp->caller == NULL) {
604 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
605 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
608 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
609 bfa_cb_queue(bfa, &fcxp->hcb_qe,
610 __bfa_fcxp_send_cbfn, fcxp);
615 static struct bfa_fcxp_s *
616 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
618 struct bfa_fcxp_s *fcxp;
620 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
623 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
629 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
633 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
634 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
635 struct list_head *r_sgpg_q,
637 bfa_fcxp_get_sgaddr_t sga_cbfn,
638 bfa_fcxp_get_sglen_t sglen_cbfn)
641 bfa_assert(bfa != NULL);
643 bfa_trc(bfa, fcxp->fcxp_tag);
648 bfa_assert(*sga_cbfn != NULL);
649 bfa_assert(*sglen_cbfn != NULL);
652 *r_sga_cbfn = sga_cbfn;
653 *r_sglen_cbfn = sglen_cbfn;
658 * alloc required sgpgs
660 if (n_sgles > BFI_SGE_INLINE)
667 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
668 void *caller, struct bfa_s *bfa, int nreq_sgles,
669 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
670 bfa_fcxp_get_sglen_t req_sglen_cbfn,
671 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
672 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
675 bfa_assert(bfa != NULL);
677 bfa_trc(bfa, fcxp->fcxp_tag);
679 fcxp->caller = caller;
681 bfa_fcxp_init_reqrsp(fcxp, bfa,
682 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
683 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
684 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
686 bfa_fcxp_init_reqrsp(fcxp, bfa,
687 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
688 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
689 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
694 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
696 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
697 struct bfa_fcxp_wqe_s *wqe;
699 bfa_q_deq(&mod->wait_q, &wqe);
701 bfa_trc(mod->bfa, fcxp->fcxp_tag);
703 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
704 wqe->nrsp_sgles, wqe->req_sga_cbfn,
705 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
706 wqe->rsp_sglen_cbfn);
708 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
712 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
714 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
718 bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
719 bfa_status_t req_status, u32 rsp_len,
720 u32 resid_len, struct fchs_s *rsp_fchs)
722 /* discarded fcxp completion */
726 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
728 struct bfa_fcxp_s *fcxp = cbarg;
731 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
732 fcxp->rsp_status, fcxp->rsp_len,
733 fcxp->residue_len, &fcxp->rsp_fchs);
740 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
742 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
743 struct bfa_fcxp_s *fcxp;
744 u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
746 bfa_trc(bfa, fcxp_tag);
748 fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
751 * @todo f/w should not set residue to non-0 when everything
754 if (fcxp_rsp->req_status == BFA_STATUS_OK)
755 fcxp_rsp->residue_len = 0;
757 fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
759 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
761 bfa_assert(fcxp->send_cbfn != NULL);
763 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
765 if (fcxp->send_cbfn != NULL) {
766 bfa_trc(mod->bfa, (NULL == fcxp->caller));
767 if (fcxp->caller == NULL) {
768 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
769 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
770 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
772 * fcxp automatically freed on return from the callback
776 fcxp->rsp_status = fcxp_rsp->req_status;
777 fcxp->rsp_len = fcxp_rsp->rsp_len;
778 fcxp->residue_len = fcxp_rsp->residue_len;
779 fcxp->rsp_fchs = fcxp_rsp->fchs;
781 bfa_cb_queue(bfa, &fcxp->hcb_qe,
782 __bfa_fcxp_send_cbfn, fcxp);
785 bfa_trc(bfa, (NULL == fcxp->send_cbfn));
790 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
792 union bfi_addr_u sga_zero = { {0} };
794 sge->sg_len = reqlen;
795 sge->flags = BFI_SGE_DATA_LAST;
796 bfa_dma_addr_set(sge[0].sga, req_pa);
801 sge->sg_len = reqlen;
802 sge->flags = BFI_SGE_PGDLEN;
807 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
814 if (fcxp->use_ireqbuf) {
816 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
818 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
820 reqlen + sizeof(struct fchs_s), fchs,
823 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
825 reqlen + sizeof(struct fchs_s),
829 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
830 reqlen + sizeof(struct fchs_s), fchs);
835 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
836 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
838 if (fcxp_rsp->rsp_len > 0) {
839 if (fcxp->use_irspbuf) {
841 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
843 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
845 (u16) fcxp_rsp->rsp_len,
846 &fcxp_rsp->fchs, pld_w0);
848 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
850 (u16) fcxp_rsp->rsp_len,
854 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
855 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
860 * Handler to resume sending fcxp when space is available in cpe queue.
863 bfa_fcxp_qresume(void *cbarg)
865 struct bfa_fcxp_s *fcxp = cbarg;
866 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
867 struct bfi_fcxp_send_req_s *send_req;
869 fcxp->reqq_waiting = BFA_FALSE;
870 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
871 bfa_fcxp_queue(fcxp, send_req);
875 * Queue fcxp send request to firmware.
878 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
880 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
881 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
882 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
883 struct bfa_rport_s *rport = reqi->bfa_rport;
885 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
888 send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
890 send_req->rport_fw_hndl = rport->fw_handle;
891 send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
892 if (send_req->max_frmsz == 0)
893 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
895 send_req->rport_fw_hndl = 0;
896 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
899 send_req->vf_id = bfa_os_htons(reqi->vf_id);
900 send_req->lp_tag = reqi->lp_tag;
901 send_req->class = reqi->class;
902 send_req->rsp_timeout = rspi->rsp_timeout;
903 send_req->cts = reqi->cts;
904 send_req->fchs = reqi->fchs;
906 send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
907 send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
912 if (fcxp->use_ireqbuf == 1) {
913 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
914 BFA_FCXP_REQ_PLD_PA(fcxp));
916 if (fcxp->nreq_sgles > 0) {
917 bfa_assert(fcxp->nreq_sgles == 1);
918 hal_fcxp_set_local_sges(send_req->req_sge,
920 fcxp->req_sga_cbfn(fcxp->caller,
923 bfa_assert(reqi->req_tot_len == 0);
924 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
931 if (fcxp->use_irspbuf == 1) {
932 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
934 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
935 BFA_FCXP_RSP_PLD_PA(fcxp));
938 if (fcxp->nrsp_sgles > 0) {
939 bfa_assert(fcxp->nrsp_sgles == 1);
940 hal_fcxp_set_local_sges(send_req->rsp_sge,
942 fcxp->rsp_sga_cbfn(fcxp->caller,
945 bfa_assert(rspi->rsp_maxlen == 0);
946 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
950 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
952 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
954 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
955 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
959 * hal_fcxp_api BFA FCXP API
963 * Allocate an FCXP instance to send a response or to send a request
964 * that has a response. Request/response buffers are allocated by caller.
966 * @param[in] bfa BFA bfa instance
967 * @param[in] nreq_sgles Number of SG elements required for request
968 * buffer. 0, if fcxp internal buffers are used.
969 * Use bfa_fcxp_get_reqbuf() to get the
970 * internal req buffer.
971 * @param[in] req_sgles SG elements describing request buffer. Will be
972 * copied in by BFA and hence can be freed on
973 * return from this function.
974 * @param[in] get_req_sga function ptr to be called to get a request SG
975 * Address (given the sge index).
976 * @param[in] get_req_sglen function ptr to be called to get a request SG
977 * len (given the sge index).
978 * @param[in] get_rsp_sga function ptr to be called to get a response SG
979 * Address (given the sge index).
980 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
981 * len (given the sge index).
983 * @return FCXP instance. NULL on failure.
986 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
987 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
988 bfa_fcxp_get_sglen_t req_sglen_cbfn,
989 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
990 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
992 struct bfa_fcxp_s *fcxp = NULL;
994 bfa_assert(bfa != NULL);
996 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
1000 bfa_trc(bfa, fcxp->fcxp_tag);
1002 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
1003 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
1009 * Get the internal request buffer pointer
1011 * @param[in] fcxp BFA fcxp pointer
1013 * @return pointer to the internal request buffer
1016 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
1018 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1021 bfa_assert(fcxp->use_ireqbuf == 1);
1022 reqbuf = ((u8 *)mod->req_pld_list_kva) +
1023 fcxp->fcxp_tag * mod->req_pld_sz;
1028 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1030 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1032 return mod->req_pld_sz;
1036 * Get the internal response buffer pointer
1038 * @param[in] fcxp BFA fcxp pointer
1040 * @return pointer to the internal request buffer
1043 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1045 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1048 bfa_assert(fcxp->use_irspbuf == 1);
1050 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1051 fcxp->fcxp_tag * mod->rsp_pld_sz;
1058 * @param[in] fcxp BFA fcxp pointer
1063 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1065 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1067 bfa_assert(fcxp != NULL);
1068 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1073 * Send a FCXP request
1075 * @param[in] fcxp BFA fcxp pointer
1076 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1077 * @param[in] vf_id virtual Fabric ID
1078 * @param[in] lp_tag lport tag
1079 * @param[in] cts use Continous sequence
1080 * @param[in] cos fc Class of Service
1081 * @param[in] reqlen request length, does not include FCHS length
1082 * @param[in] fchs fc Header Pointer. The header content will be copied
1085 * @param[in] cbfn call back function to be called on receiving
1087 * @param[in] cbarg arg for cbfn
1088 * @param[in] rsp_timeout
1091 * @return bfa_status_t
1094 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1095 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1096 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1097 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1099 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1100 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1101 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1102 struct bfi_fcxp_send_req_s *send_req;
1104 bfa_trc(bfa, fcxp->fcxp_tag);
1107 * setup request/response info
1109 reqi->bfa_rport = rport;
1110 reqi->vf_id = vf_id;
1111 reqi->lp_tag = lp_tag;
1113 rspi->rsp_timeout = rsp_timeout;
1116 reqi->req_tot_len = reqlen;
1117 rspi->rsp_maxlen = rsp_maxlen;
1118 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1119 fcxp->send_cbarg = cbarg;
1122 * If no room in CPE queue, wait for space in request queue
1124 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1126 bfa_trc(bfa, fcxp->fcxp_tag);
1127 fcxp->reqq_waiting = BFA_TRUE;
1128 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1132 bfa_fcxp_queue(fcxp, send_req);
1138 * @param[in] fcxp BFA fcxp pointer
1143 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1145 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1147 return BFA_STATUS_OK;
1151 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1152 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1153 void *caller, int nreq_sgles,
1154 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1155 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1156 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1157 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1159 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1161 bfa_assert(list_empty(&mod->fcxp_free_q));
1163 wqe->alloc_cbfn = alloc_cbfn;
1164 wqe->alloc_cbarg = alloc_cbarg;
1165 wqe->caller = caller;
1167 wqe->nreq_sgles = nreq_sgles;
1168 wqe->nrsp_sgles = nrsp_sgles;
1169 wqe->req_sga_cbfn = req_sga_cbfn;
1170 wqe->req_sglen_cbfn = req_sglen_cbfn;
1171 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1172 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1174 list_add_tail(&wqe->qe, &mod->wait_q);
1178 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1180 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1182 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
1187 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1190 * If waiting for room in request queue, cancel reqq wait
1193 if (fcxp->reqq_waiting) {
1194 fcxp->reqq_waiting = BFA_FALSE;
1195 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1196 bfa_fcxp_free(fcxp);
1200 fcxp->send_cbfn = bfa_fcxp_null_comp;
1206 * hal_fcxp_public BFA FCXP public functions
1210 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1212 switch (msg->mhdr.msg_id) {
1213 case BFI_FCXP_I2H_SEND_RSP:
1214 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1218 bfa_trc(bfa, msg->mhdr.msg_id);
1224 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1226 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1228 return mod->rsp_pld_sz;
1233 * BFA LPS state machine functions
1237 * Init state -- no login
1240 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1242 bfa_trc(lps->bfa, lps->lp_tag);
1243 bfa_trc(lps->bfa, event);
1246 case BFA_LPS_SM_LOGIN:
1247 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1248 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1249 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1251 bfa_sm_set_state(lps, bfa_lps_sm_login);
1252 bfa_lps_send_login(lps);
1256 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1257 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1259 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1260 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1263 case BFA_LPS_SM_LOGOUT:
1264 bfa_lps_logout_comp(lps);
1267 case BFA_LPS_SM_DELETE:
1271 case BFA_LPS_SM_RX_CVL:
1272 case BFA_LPS_SM_OFFLINE:
1275 case BFA_LPS_SM_FWRSP:
1277 * Could happen when fabric detects loopback and discards
1278 * the lps request. Fw will eventually sent out the timeout
1284 bfa_sm_fault(lps->bfa, event);
1289 * login is in progress -- awaiting response from firmware
1292 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1294 bfa_trc(lps->bfa, lps->lp_tag);
1295 bfa_trc(lps->bfa, event);
1298 case BFA_LPS_SM_FWRSP:
1299 if (lps->status == BFA_STATUS_OK) {
1300 bfa_sm_set_state(lps, bfa_lps_sm_online);
1302 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1303 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1305 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1306 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1308 bfa_sm_set_state(lps, bfa_lps_sm_init);
1310 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1311 BFA_PL_EID_LOGIN, 0,
1312 "FDISC Fail (RJT or timeout)");
1314 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1315 BFA_PL_EID_LOGIN, 0,
1316 "FLOGI Fail (RJT or timeout)");
1318 bfa_lps_login_comp(lps);
1321 case BFA_LPS_SM_OFFLINE:
1322 bfa_sm_set_state(lps, bfa_lps_sm_init);
1326 bfa_sm_fault(lps->bfa, event);
1331 * login pending - awaiting space in request queue
1334 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1336 bfa_trc(lps->bfa, lps->lp_tag);
1337 bfa_trc(lps->bfa, event);
1340 case BFA_LPS_SM_RESUME:
1341 bfa_sm_set_state(lps, bfa_lps_sm_login);
1344 case BFA_LPS_SM_OFFLINE:
1345 bfa_sm_set_state(lps, bfa_lps_sm_init);
1346 bfa_reqq_wcancel(&lps->wqe);
1349 case BFA_LPS_SM_RX_CVL:
1351 * Login was not even sent out; so when getting out
1352 * of this state, it will appear like a login retry
1353 * after Clear virtual link
1358 bfa_sm_fault(lps->bfa, event);
1366 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1368 bfa_trc(lps->bfa, lps->lp_tag);
1369 bfa_trc(lps->bfa, event);
1372 case BFA_LPS_SM_LOGOUT:
1373 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1374 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1375 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1377 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1378 bfa_lps_send_logout(lps);
1380 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1381 BFA_PL_EID_LOGO, 0, "Logout");
1384 case BFA_LPS_SM_RX_CVL:
1385 bfa_sm_set_state(lps, bfa_lps_sm_init);
1387 /* Let the vport module know about this event */
1388 bfa_lps_cvl_event(lps);
1389 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1390 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1393 case BFA_LPS_SM_OFFLINE:
1394 case BFA_LPS_SM_DELETE:
1395 bfa_sm_set_state(lps, bfa_lps_sm_init);
1399 bfa_sm_fault(lps->bfa, event);
1404 * logout in progress - awaiting firmware response
1407 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1409 bfa_trc(lps->bfa, lps->lp_tag);
1410 bfa_trc(lps->bfa, event);
1413 case BFA_LPS_SM_FWRSP:
1414 bfa_sm_set_state(lps, bfa_lps_sm_init);
1415 bfa_lps_logout_comp(lps);
1418 case BFA_LPS_SM_OFFLINE:
1419 bfa_sm_set_state(lps, bfa_lps_sm_init);
1423 bfa_sm_fault(lps->bfa, event);
1428 * logout pending -- awaiting space in request queue
1431 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1433 bfa_trc(lps->bfa, lps->lp_tag);
1434 bfa_trc(lps->bfa, event);
1437 case BFA_LPS_SM_RESUME:
1438 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1439 bfa_lps_send_logout(lps);
1442 case BFA_LPS_SM_OFFLINE:
1443 bfa_sm_set_state(lps, bfa_lps_sm_init);
1444 bfa_reqq_wcancel(&lps->wqe);
1448 bfa_sm_fault(lps->bfa, event);
1455 * lps_pvt BFA LPS private functions
1459 * return memory requirement
1462 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1465 if (cfg->drvcfg.min_cfg)
1466 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1468 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1472 * bfa module attach at initialization time
1475 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1476 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1478 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1479 struct bfa_lps_s *lps;
1482 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484 if (cfg->drvcfg.min_cfg)
1485 mod->num_lps = BFA_LPS_MIN_LPORTS;
1487 mod->num_lps = BFA_LPS_MAX_LPORTS;
1488 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1490 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1492 INIT_LIST_HEAD(&mod->lps_free_q);
1493 INIT_LIST_HEAD(&mod->lps_active_q);
1495 for (i = 0; i < mod->num_lps; i++, lps++) {
1497 lps->lp_tag = (u8) i;
1498 lps->reqq = BFA_REQQ_LPS;
1499 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1500 list_add_tail(&lps->qe, &mod->lps_free_q);
/* Module detach -- nothing to tear down for LPS. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
/* Module start -- no per-start work for LPS. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
/* Module stop -- no per-stop work for LPS. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1520 * IOC in disabled state -- consider all lps offline
1523 bfa_lps_iocdisable(struct bfa_s *bfa)
1525 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1526 struct bfa_lps_s *lps;
1527 struct list_head *qe, *qen;
1529 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1530 lps = (struct bfa_lps_s *) qe;
1531 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1536 * Firmware login response
1539 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1541 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1542 struct bfa_lps_s *lps;
1544 bfa_assert(rsp->lp_tag < mod->num_lps);
1545 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1547 lps->status = rsp->status;
1548 switch (rsp->status) {
1550 lps->fport = rsp->f_port;
1551 lps->npiv_en = rsp->npiv_en;
1552 lps->lp_pid = rsp->lp_pid;
1553 lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
1554 lps->pr_pwwn = rsp->port_name;
1555 lps->pr_nwwn = rsp->node_name;
1556 lps->auth_req = rsp->auth_req;
1557 lps->lp_mac = rsp->lp_mac;
1558 lps->brcd_switch = rsp->brcd_switch;
1559 lps->fcf_mac = rsp->fcf_mac;
1563 case BFA_STATUS_FABRIC_RJT:
1564 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1565 lps->lsrjt_expl = rsp->lsrjt_expl;
1569 case BFA_STATUS_EPROTOCOL:
1570 lps->ext_status = rsp->ext_status;
1575 /* Nothing to do with other status */
1579 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1583 * Firmware logout response
1586 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1588 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1589 struct bfa_lps_s *lps;
1591 bfa_assert(rsp->lp_tag < mod->num_lps);
1592 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1598 * Firmware received a Clear virtual link request (for FCoE)
1601 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1604 struct bfa_lps_s *lps;
1606 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1608 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1612 * Space is available in request queue, resume queueing request to firmware.
1615 bfa_lps_reqq_resume(void *lps_arg)
1617 struct bfa_lps_s *lps = lps_arg;
1619 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1623 * lps is freed -- triggered by vport delete
1626 bfa_lps_free(struct bfa_lps_s *lps)
1628 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1632 list_add_tail(&lps->qe, &mod->lps_free_q);
1636 * send login request to firmware
1639 bfa_lps_send_login(struct bfa_lps_s *lps)
1641 struct bfi_lps_login_req_s *m;
1643 m = bfa_reqq_next(lps->bfa, lps->reqq);
1646 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1647 bfa_lpuid(lps->bfa));
1649 m->lp_tag = lps->lp_tag;
1650 m->alpa = lps->alpa;
1651 m->pdu_size = bfa_os_htons(lps->pdusz);
1652 m->pwwn = lps->pwwn;
1653 m->nwwn = lps->nwwn;
1654 m->fdisc = lps->fdisc;
1655 m->auth_en = lps->auth_en;
1657 bfa_reqq_produce(lps->bfa, lps->reqq);
1661 * send logout request to firmware
1664 bfa_lps_send_logout(struct bfa_lps_s *lps)
1666 struct bfi_lps_logout_req_s *m;
1668 m = bfa_reqq_next(lps->bfa, lps->reqq);
1671 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1672 bfa_lpuid(lps->bfa));
1674 m->lp_tag = lps->lp_tag;
1675 m->port_name = lps->pwwn;
1676 bfa_reqq_produce(lps->bfa, lps->reqq);
1680 * Indirect login completion handler for non-fcs
1683 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1685 struct bfa_lps_s *lps = arg;
1691 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1697 * Login completion handler -- direct call for fcs, queue for others
1700 bfa_lps_login_comp(struct bfa_lps_s *lps)
1702 if (!lps->bfa->fcs) {
1703 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1709 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1711 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1715 * Indirect logout completion handler for non-fcs
1718 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1720 struct bfa_lps_s *lps = arg;
1726 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1730 * Logout completion handler -- direct call for fcs, queue for others
1733 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1735 if (!lps->bfa->fcs) {
1736 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1741 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1745 * Clear virtual link completion handler for non-fcs
1748 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1750 struct bfa_lps_s *lps = arg;
1755 /* Clear virtual link to base port will result in link down */
1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1761 * Received Clear virtual link event --direct call for fcs,
1765 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1767 if (!lps->bfa->fcs) {
1768 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1773 /* Clear virtual link to base port will result in link down */
1775 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1781 * lps_public BFA LPS public functions
1785 bfa_lps_get_max_vport(struct bfa_s *bfa)
1787 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1788 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1790 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1794 * Allocate a lport srvice tag.
1797 bfa_lps_alloc(struct bfa_s *bfa)
1799 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1800 struct bfa_lps_s *lps = NULL;
1802 bfa_q_deq(&mod->lps_free_q, &lps);
1807 list_add_tail(&lps->qe, &mod->lps_active_q);
1809 bfa_sm_set_state(lps, bfa_lps_sm_init);
1814 * Free lport service tag. This can be called anytime after an alloc.
1815 * No need to wait for any pending login/logout completions.
1818 bfa_lps_delete(struct bfa_lps_s *lps)
1820 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1824 * Initiate a lport login.
1827 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1828 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1835 lps->fdisc = BFA_FALSE;
1836 lps->auth_en = auth_en;
1837 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1841 * Initiate a lport fdisc login.
1844 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1852 lps->fdisc = BFA_TRUE;
1853 lps->auth_en = BFA_FALSE;
1854 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1858 * Initiate a lport logout (flogi).
1861 bfa_lps_flogo(struct bfa_lps_s *lps)
1863 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1867 * Initiate a lport FDSIC logout.
1870 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1872 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1876 * Discard a pending login request -- should be called only for
1877 * link down handling.
1880 bfa_lps_discard(struct bfa_lps_s *lps)
1882 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1886 * Return lport services tag
1889 bfa_lps_get_tag(struct bfa_lps_s *lps)
1895 * Return lport services tag given the pid
1898 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1900 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1901 struct bfa_lps_s *lps;
1904 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1905 if (lps->lp_pid == pid)
1909 /* Return base port tag anyway */
1914 * return if fabric login indicates support for NPIV
1917 bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1919 return lps->npiv_en;
1923 * Return TRUE if attached to F-Port, else return FALSE
1926 bfa_lps_is_fport(struct bfa_lps_s *lps)
1932 * Return TRUE if attached to a Brocade Fabric
1935 bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1937 return lps->brcd_switch;
1940 * return TRUE if authentication is required
1943 bfa_lps_is_authreq(struct bfa_lps_s *lps)
1945 return lps->auth_req;
1949 bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1951 return lps->ext_status;
1955 * return port id assigned to the lport
1958 bfa_lps_get_pid(struct bfa_lps_s *lps)
1964 * return port id assigned to the base lport
1967 bfa_lps_get_base_pid(struct bfa_s *bfa)
1969 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1971 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1975 * Return bb_credit assigned in FLOGI response
1978 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1980 return lps->pr_bbcred;
1984 * Return peer port name
1987 bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1989 return lps->pr_pwwn;
1993 * Return peer node name
1996 bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1998 return lps->pr_nwwn;
2002 * return reason code if login request is rejected
2005 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2007 return lps->lsrjt_rsn;
2011 * return explanation code if login request is rejected
2014 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
2016 return lps->lsrjt_expl;
2020 * Return fpma/spma MAC for lport
2023 bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2029 * LPS firmware message class handler.
2032 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2034 union bfi_lps_i2h_msg_u msg;
2036 bfa_trc(bfa, m->mhdr.msg_id);
2039 switch (m->mhdr.msg_id) {
2040 case BFI_LPS_H2I_LOGIN_RSP:
2041 bfa_lps_login_rsp(bfa, msg.login_rsp);
2044 case BFI_LPS_H2I_LOGOUT_RSP:
2045 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2048 case BFI_LPS_H2I_CVL_EVENT:
2049 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2053 bfa_trc(bfa, m->mhdr.msg_id);
2059 * FC PORT state machine functions
2062 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2063 enum bfa_fcport_sm_event event)
2065 bfa_trc(fcport->bfa, event);
2068 case BFA_FCPORT_SM_START:
2070 * Start event after IOC is configured and BFA is started.
2072 if (bfa_fcport_send_enable(fcport)) {
2073 bfa_trc(fcport->bfa, BFA_TRUE);
2074 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2076 bfa_trc(fcport->bfa, BFA_FALSE);
2077 bfa_sm_set_state(fcport,
2078 bfa_fcport_sm_enabling_qwait);
2082 case BFA_FCPORT_SM_ENABLE:
2084 * Port is persistently configured to be in enabled state. Do
2085 * not change state. Port enabling is done when START event is
2090 case BFA_FCPORT_SM_DISABLE:
2092 * If a port is persistently configured to be disabled, the
2093 * first event will a port disable request.
2095 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2098 case BFA_FCPORT_SM_HWFAIL:
2099 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2103 bfa_sm_fault(fcport->bfa, event);
2108 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2109 enum bfa_fcport_sm_event event)
2111 char pwwn_buf[BFA_STRING_32];
2112 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2113 bfa_trc(fcport->bfa, event);
2116 case BFA_FCPORT_SM_QRESUME:
2117 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2118 bfa_fcport_send_enable(fcport);
2121 case BFA_FCPORT_SM_STOP:
2122 bfa_reqq_wcancel(&fcport->reqq_wait);
2123 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2126 case BFA_FCPORT_SM_ENABLE:
2128 * Already enable is in progress.
2132 case BFA_FCPORT_SM_DISABLE:
2134 * Just send disable request to firmware when room becomes
2135 * available in request queue.
2137 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2138 bfa_reqq_wcancel(&fcport->reqq_wait);
2139 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2140 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2141 wwn2str(pwwn_buf, fcport->pwwn);
2142 BFA_LOG(KERN_INFO, bfad, log_level,
2143 "Base port disabled: WWN = %s\n", pwwn_buf);
2146 case BFA_FCPORT_SM_LINKUP:
2147 case BFA_FCPORT_SM_LINKDOWN:
2149 * Possible to get link events when doing back-to-back
2154 case BFA_FCPORT_SM_HWFAIL:
2155 bfa_reqq_wcancel(&fcport->reqq_wait);
2156 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2160 bfa_sm_fault(fcport->bfa, event);
2165 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2166 enum bfa_fcport_sm_event event)
2168 char pwwn_buf[BFA_STRING_32];
2169 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2170 bfa_trc(fcport->bfa, event);
2173 case BFA_FCPORT_SM_FWRSP:
2174 case BFA_FCPORT_SM_LINKDOWN:
2175 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2178 case BFA_FCPORT_SM_LINKUP:
2179 bfa_fcport_update_linkinfo(fcport);
2180 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2182 bfa_assert(fcport->event_cbfn);
2183 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2186 case BFA_FCPORT_SM_ENABLE:
2188 * Already being enabled.
2192 case BFA_FCPORT_SM_DISABLE:
2193 if (bfa_fcport_send_disable(fcport))
2194 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2196 bfa_sm_set_state(fcport,
2197 bfa_fcport_sm_disabling_qwait);
2199 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2200 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2201 wwn2str(pwwn_buf, fcport->pwwn);
2202 BFA_LOG(KERN_INFO, bfad, log_level,
2203 "Base port disabled: WWN = %s\n", pwwn_buf);
2206 case BFA_FCPORT_SM_STOP:
2207 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2210 case BFA_FCPORT_SM_HWFAIL:
2211 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2215 bfa_sm_fault(fcport->bfa, event);
2220 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2221 enum bfa_fcport_sm_event event)
2223 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2224 char pwwn_buf[BFA_STRING_32];
2225 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2227 bfa_trc(fcport->bfa, event);
2230 case BFA_FCPORT_SM_LINKUP:
2231 bfa_fcport_update_linkinfo(fcport);
2232 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2233 bfa_assert(fcport->event_cbfn);
2234 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2235 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2236 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2238 bfa_trc(fcport->bfa,
2239 pevent->link_state.vc_fcf.fcf.fipenabled);
2240 bfa_trc(fcport->bfa,
2241 pevent->link_state.vc_fcf.fcf.fipfailed);
2243 if (pevent->link_state.vc_fcf.fcf.fipfailed)
2244 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2245 BFA_PL_EID_FIP_FCF_DISC, 0,
2246 "FIP FCF Discovery Failed");
2248 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2249 BFA_PL_EID_FIP_FCF_DISC, 0,
2250 "FIP FCF Discovered");
2253 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2254 wwn2str(pwwn_buf, fcport->pwwn);
2255 BFA_LOG(KERN_INFO, bfad, log_level,
2256 "Base port online: WWN = %s\n", pwwn_buf);
2259 case BFA_FCPORT_SM_LINKDOWN:
2261 * Possible to get link down event.
2265 case BFA_FCPORT_SM_ENABLE:
2271 case BFA_FCPORT_SM_DISABLE:
2272 if (bfa_fcport_send_disable(fcport))
2273 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2275 bfa_sm_set_state(fcport,
2276 bfa_fcport_sm_disabling_qwait);
2278 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2279 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2280 wwn2str(pwwn_buf, fcport->pwwn);
2281 BFA_LOG(KERN_INFO, bfad, log_level,
2282 "Base port disabled: WWN = %s\n", pwwn_buf);
2285 case BFA_FCPORT_SM_STOP:
2286 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2289 case BFA_FCPORT_SM_HWFAIL:
2290 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2294 bfa_sm_fault(fcport->bfa, event);
2299 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2300 enum bfa_fcport_sm_event event)
2302 char pwwn_buf[BFA_STRING_32];
2303 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2305 bfa_trc(fcport->bfa, event);
2308 case BFA_FCPORT_SM_ENABLE:
2314 case BFA_FCPORT_SM_DISABLE:
2315 if (bfa_fcport_send_disable(fcport))
2316 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2318 bfa_sm_set_state(fcport,
2319 bfa_fcport_sm_disabling_qwait);
2321 bfa_fcport_reset_linkinfo(fcport);
2322 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2323 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2324 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2325 wwn2str(pwwn_buf, fcport->pwwn);
2326 BFA_LOG(KERN_INFO, bfad, log_level,
2327 "Base port offline: WWN = %s\n", pwwn_buf);
2328 BFA_LOG(KERN_INFO, bfad, log_level,
2329 "Base port disabled: WWN = %s\n", pwwn_buf);
2332 case BFA_FCPORT_SM_LINKDOWN:
2333 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2334 bfa_fcport_reset_linkinfo(fcport);
2335 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2336 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2337 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2338 wwn2str(pwwn_buf, fcport->pwwn);
2339 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2340 BFA_LOG(KERN_INFO, bfad, log_level,
2341 "Base port offline: WWN = %s\n", pwwn_buf);
2343 BFA_LOG(KERN_ERR, bfad, log_level,
2344 "Base port (WWN = %s) "
2345 "lost fabric connectivity\n", pwwn_buf);
2348 case BFA_FCPORT_SM_STOP:
2349 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2350 bfa_fcport_reset_linkinfo(fcport);
2351 wwn2str(pwwn_buf, fcport->pwwn);
2352 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2353 BFA_LOG(KERN_INFO, bfad, log_level,
2354 "Base port offline: WWN = %s\n", pwwn_buf);
2356 BFA_LOG(KERN_ERR, bfad, log_level,
2357 "Base port (WWN = %s) "
2358 "lost fabric connectivity\n", pwwn_buf);
2361 case BFA_FCPORT_SM_HWFAIL:
2362 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2363 bfa_fcport_reset_linkinfo(fcport);
2364 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2365 wwn2str(pwwn_buf, fcport->pwwn);
2366 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2367 BFA_LOG(KERN_INFO, bfad, log_level,
2368 "Base port offline: WWN = %s\n", pwwn_buf);
2370 BFA_LOG(KERN_ERR, bfad, log_level,
2371 "Base port (WWN = %s) "
2372 "lost fabric connectivity\n", pwwn_buf);
2376 bfa_sm_fault(fcport->bfa, event);
2381 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2382 enum bfa_fcport_sm_event event)
2384 bfa_trc(fcport->bfa, event);
2387 case BFA_FCPORT_SM_QRESUME:
2388 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2389 bfa_fcport_send_disable(fcport);
2392 case BFA_FCPORT_SM_STOP:
2393 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2394 bfa_reqq_wcancel(&fcport->reqq_wait);
2397 case BFA_FCPORT_SM_ENABLE:
2398 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2401 case BFA_FCPORT_SM_DISABLE:
2403 * Already being disabled.
2407 case BFA_FCPORT_SM_LINKUP:
2408 case BFA_FCPORT_SM_LINKDOWN:
2410 * Possible to get link events when doing back-to-back
2415 case BFA_FCPORT_SM_HWFAIL:
2416 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2417 bfa_reqq_wcancel(&fcport->reqq_wait);
2421 bfa_sm_fault(fcport->bfa, event);
2426 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2427 enum bfa_fcport_sm_event event)
2429 bfa_trc(fcport->bfa, event);
2432 case BFA_FCPORT_SM_QRESUME:
2433 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2434 bfa_fcport_send_disable(fcport);
2435 if (bfa_fcport_send_enable(fcport))
2436 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2438 bfa_sm_set_state(fcport,
2439 bfa_fcport_sm_enabling_qwait);
2442 case BFA_FCPORT_SM_STOP:
2443 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2444 bfa_reqq_wcancel(&fcport->reqq_wait);
2447 case BFA_FCPORT_SM_ENABLE:
2450 case BFA_FCPORT_SM_DISABLE:
2451 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2454 case BFA_FCPORT_SM_LINKUP:
2455 case BFA_FCPORT_SM_LINKDOWN:
2457 * Possible to get link events when doing back-to-back
2462 case BFA_FCPORT_SM_HWFAIL:
2463 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2464 bfa_reqq_wcancel(&fcport->reqq_wait);
2468 bfa_sm_fault(fcport->bfa, event);
2473 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2474 enum bfa_fcport_sm_event event)
2476 char pwwn_buf[BFA_STRING_32];
2477 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2478 bfa_trc(fcport->bfa, event);
2481 case BFA_FCPORT_SM_FWRSP:
2482 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2485 case BFA_FCPORT_SM_DISABLE:
2487 * Already being disabled.
2491 case BFA_FCPORT_SM_ENABLE:
2492 if (bfa_fcport_send_enable(fcport))
2493 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2495 bfa_sm_set_state(fcport,
2496 bfa_fcport_sm_enabling_qwait);
2498 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2499 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2500 wwn2str(pwwn_buf, fcport->pwwn);
2501 BFA_LOG(KERN_INFO, bfad, log_level,
2502 "Base port enabled: WWN = %s\n", pwwn_buf);
2505 case BFA_FCPORT_SM_STOP:
2506 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2509 case BFA_FCPORT_SM_LINKUP:
2510 case BFA_FCPORT_SM_LINKDOWN:
2512 * Possible to get link events when doing back-to-back
2517 case BFA_FCPORT_SM_HWFAIL:
2518 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2522 bfa_sm_fault(fcport->bfa, event);
2527 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2528 enum bfa_fcport_sm_event event)
2530 char pwwn_buf[BFA_STRING_32];
2531 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2532 bfa_trc(fcport->bfa, event);
2535 case BFA_FCPORT_SM_START:
2537 * Ignore start event for a port that is disabled.
2541 case BFA_FCPORT_SM_STOP:
2542 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2545 case BFA_FCPORT_SM_ENABLE:
2546 if (bfa_fcport_send_enable(fcport))
2547 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2549 bfa_sm_set_state(fcport,
2550 bfa_fcport_sm_enabling_qwait);
2552 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2553 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2554 wwn2str(pwwn_buf, fcport->pwwn);
2555 BFA_LOG(KERN_INFO, bfad, log_level,
2556 "Base port enabled: WWN = %s\n", pwwn_buf);
2559 case BFA_FCPORT_SM_DISABLE:
2565 case BFA_FCPORT_SM_HWFAIL:
2566 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2570 bfa_sm_fault(fcport->bfa, event);
2575 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2576 enum bfa_fcport_sm_event event)
2578 bfa_trc(fcport->bfa, event);
2581 case BFA_FCPORT_SM_START:
2582 if (bfa_fcport_send_enable(fcport))
2583 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2585 bfa_sm_set_state(fcport,
2586 bfa_fcport_sm_enabling_qwait);
2591 * Ignore all other events.
2598 * Port is enabled. IOC is down/failed.
2601 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2602 enum bfa_fcport_sm_event event)
2604 bfa_trc(fcport->bfa, event);
2607 case BFA_FCPORT_SM_START:
2608 if (bfa_fcport_send_enable(fcport))
2609 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2611 bfa_sm_set_state(fcport,
2612 bfa_fcport_sm_enabling_qwait);
2617 * Ignore all events.
2624 * Port is disabled. IOC is down/failed.
2627 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2628 enum bfa_fcport_sm_event event)
2630 bfa_trc(fcport->bfa, event);
2633 case BFA_FCPORT_SM_START:
2634 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2637 case BFA_FCPORT_SM_ENABLE:
2638 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2643 * Ignore all events.
2650 * Link state is down
2653 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2654 enum bfa_fcport_ln_sm_event event)
2656 bfa_trc(ln->fcport->bfa, event);
2659 case BFA_FCPORT_LN_SM_LINKUP:
2660 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2661 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2665 bfa_sm_fault(ln->fcport->bfa, event);
2670 * Link state is waiting for down notification
2673 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2674 enum bfa_fcport_ln_sm_event event)
2676 bfa_trc(ln->fcport->bfa, event);
2679 case BFA_FCPORT_LN_SM_LINKUP:
2680 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2683 case BFA_FCPORT_LN_SM_NOTIFICATION:
2684 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2688 bfa_sm_fault(ln->fcport->bfa, event);
2693 * Link state is waiting for down notification and there is a pending up
2696 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2697 enum bfa_fcport_ln_sm_event event)
2699 bfa_trc(ln->fcport->bfa, event);
2702 case BFA_FCPORT_LN_SM_LINKDOWN:
2703 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2706 case BFA_FCPORT_LN_SM_NOTIFICATION:
2707 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2708 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2712 bfa_sm_fault(ln->fcport->bfa, event);
2720 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2721 enum bfa_fcport_ln_sm_event event)
2723 bfa_trc(ln->fcport->bfa, event);
2726 case BFA_FCPORT_LN_SM_LINKDOWN:
2727 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2728 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2732 bfa_sm_fault(ln->fcport->bfa, event);
2737 * Link state is waiting for up notification
2740 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2741 enum bfa_fcport_ln_sm_event event)
2743 bfa_trc(ln->fcport->bfa, event);
2746 case BFA_FCPORT_LN_SM_LINKDOWN:
2747 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2750 case BFA_FCPORT_LN_SM_NOTIFICATION:
2751 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2755 bfa_sm_fault(ln->fcport->bfa, event);
2760 * Link state is waiting for up notification and there is a pending down
2763 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2764 enum bfa_fcport_ln_sm_event event)
2766 bfa_trc(ln->fcport->bfa, event);
2769 case BFA_FCPORT_LN_SM_LINKUP:
2770 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2773 case BFA_FCPORT_LN_SM_NOTIFICATION:
2774 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2775 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2779 bfa_sm_fault(ln->fcport->bfa, event);
2784 * Link state is waiting for up notification and there are pending down and up
2787 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2788 enum bfa_fcport_ln_sm_event event)
2790 bfa_trc(ln->fcport->bfa, event);
2793 case BFA_FCPORT_LN_SM_LINKDOWN:
2794 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2797 case BFA_FCPORT_LN_SM_NOTIFICATION:
2798 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2799 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2803 bfa_sm_fault(ln->fcport->bfa, event);
2814 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2816 struct bfa_fcport_ln_s *ln = cbarg;
2819 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2821 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2825 * Send SCN notification to upper layers.
2826 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2829 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2830 bfa_boolean_t trunk)
2832 if (fcport->cfg.trunked && !trunk)
2836 case BFA_PORT_LINKUP:
2837 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2839 case BFA_PORT_LINKDOWN:
2840 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2848 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2850 struct bfa_fcport_s *fcport = ln->fcport;
2852 if (fcport->bfa->fcs) {
2853 fcport->event_cbfn(fcport->event_cbarg, event);
2854 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2856 ln->ln_event = event;
2857 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2858 __bfa_cb_fcport_event, ln);
2862 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2866 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2869 *dm_len += FCPORT_STATS_DMA_SZ;
2873 bfa_fcport_qresume(void *cbarg)
2875 struct bfa_fcport_s *fcport = cbarg;
2877 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2881 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2886 dm_kva = bfa_meminfo_dma_virt(meminfo);
2887 dm_pa = bfa_meminfo_dma_phys(meminfo);
2889 fcport->stats_kva = dm_kva;
2890 fcport->stats_pa = dm_pa;
2891 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2893 dm_kva += FCPORT_STATS_DMA_SZ;
2894 dm_pa += FCPORT_STATS_DMA_SZ;
2896 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2897 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2901 * Memory initialization.
/*
 * Module attach: zero the fcport instance, claim its DMA memory, set the
 * initial state machines, record the stats-reset timestamp, and install
 * default port configuration (P2P, auto speed, trunking off).
 */
2904 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2905 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2907 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2908 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2909 struct bfa_fcport_ln_s *ln = &fcport->ln;
2910 struct bfa_timeval_s tv;
2912 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
2914 ln->fcport = fcport;
2916 bfa_fcport_mem_claim(fcport, meminfo);
2918 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2919 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2922 * initialize time stamp for stats reset
2924 bfa_os_gettimeofday(&tv);
2925 fcport->stats_reset_time = tv.tv_sec;
2928 * initialize and set default configuration
2930 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2931 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2932 port_cfg->trunked = BFA_FALSE;
/* maxfrsize 0 means "take the IOC's value" later in bfa_fcport_init() */
2933 port_cfg->maxfrsize = 0;
2935 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2937 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
/* Module detach: nothing to tear down (empty body in the original). */
2941 bfa_fcport_detach(struct bfa_s *bfa)
2946 * Called when IOC is ready.
/* Kick the fcport state machine with a START event. */
2949 bfa_fcport_start(struct bfa_s *bfa)
2951 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2955 * Called before IOC is stopped.
/* Stop the port state machine and take down trunking state. */
2958 bfa_fcport_stop(struct bfa_s *bfa)
2960 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2961 bfa_trunk_iocdisable(bfa);
2965 * Called when IOC failure is detected.
/* Propagate hardware failure to the port SM and the trunk bookkeeping. */
2968 bfa_fcport_iocdisable(struct bfa_s *bfa)
2970 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2972 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2973 bfa_trunk_iocdisable(bfa);
/*
 * Cache link attributes from the firmware link-state event: speed,
 * topology, QoS attributes, trunk state and the FCoE VLAN.
 * NOTE(review): elided chunk — the LOOP-topology branch body is missing here.
 */
2977 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2979 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2980 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2982 fcport->speed = pevent->link_state.speed;
2983 fcport->topology = pevent->link_state.topology;
2985 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2989 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
2990 bfa_os_assign(fcport->qos_vc_attr,
2991 pevent->link_state.vc_fcf.qos_vc_attr);
2994 * update trunk state if applicable
2996 if (!fcport->cfg.trunked)
2997 trunk->attr.state = BFA_TRUNK_DISABLED;
2999 /* update FCoE specific */
3000 fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);
3002 bfa_trc(fcport->bfa, fcport->speed);
3003 bfa_trc(fcport->bfa, fcport->topology);
/* Clear cached link attributes back to the "link down" defaults. */
3007 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3009 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3010 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3014 * Send port enable message to firmware.
/*
 * Returns BFA_TRUE on send; on a full request queue it registers a wait
 * callback and (per the elided original) returns BFA_FALSE.
 */
3016 static bfa_boolean_t
3017 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3019 struct bfi_fcport_enable_req_s *m;
3022 * Increment message tag before queue check, so that responses to old
3023 * requests are discarded.
3028 * check for room in queue to send request now
3030 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3032 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3033 &fcport->reqq_wait);
/* build the enable request in the queue slot */
3037 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3038 bfa_lpuid(fcport->bfa));
3039 m->nwwn = fcport->nwwn;
3040 m->pwwn = fcport->pwwn;
3041 m->port_cfg = fcport->cfg;
3042 m->msgtag = fcport->msgtag;
3043 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
3044 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3045 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3046 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3049 * queue I/O message to firmware
3051 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3056 * Send port disable message to firmware.
3058 static bfa_boolean_t
3059 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3061 struct bfi_fcport_req_s *m;
3064 * Increment message tag before queue check, so that responses to old
3065 * requests are discarded.
3070 * check for room in queue to send request now
3072 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3074 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3075 &fcport->reqq_wait);
3079 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3080 bfa_lpuid(fcport->bfa));
3081 m->msgtag = fcport->msgtag;
3084 * queue I/O message to firmware
3086 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/* Copy the port/node WWNs from the IOC into the fcport instance. */
3092 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3094 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
3095 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
3097 bfa_trc(fcport->bfa, fcport->pwwn);
3098 bfa_trc(fcport->bfa, fcport->nwwn);
/*
 * Push the configured TX BB-credit to firmware via a set-svc-params
 * request. NOTE(review): the queue-full early-return path is elided here.
 */
3102 bfa_fcport_send_txcredit(void *port_cbarg)
3105 struct bfa_fcport_s *fcport = port_cbarg;
3106 struct bfi_fcport_set_svc_params_req_s *m;
3109 * check for room in queue to send request now
3111 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3113 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3117 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3118 bfa_lpuid(fcport->bfa));
3119 m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);
3122 * queue I/O message to firmware
3124 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Byte-swap FC QoS stats: copy s -> d as an array of 32-bit big-endian
 * words converted to host order.
 */
3128 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3129 struct bfa_qos_stats_s *s)
3131 u32 *dip = (u32 *) d;
3132 u32 *sip = (u32 *) s;
3135 /* Now swap the 32 bit fields */
3136 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3137 dip[i] = bfa_os_ntohl(sip[i]);
/*
 * Byte-swap FCoE stats, which firmware lays out as 64-bit counters: each
 * pair of 32-bit words is ntohl'd, and on one endianness the two halves of
 * the pair are also exchanged (the selecting #ifdef is elided in this view).
 */
3141 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3142 struct bfa_fcoe_stats_s *s)
3144 u32 *dip = (u32 *) d;
3145 u32 *sip = (u32 *) s;
3148 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3151 dip[i] = bfa_os_ntohl(sip[i]);
3152 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
/* opposite-endian branch: swap word order within each 64-bit counter */
3154 dip[i] = bfa_os_ntohl(sip[i + 1]);
3155 dip[i + 1] = bfa_os_ntohl(sip[i]);
/*
 * Deferred completion for a stats-get request: on success, byte-swap the
 * FC-QoS or FCoE stats into the caller's buffer and stamp secs_reset, then
 * invoke the caller's callback and release the busy flag.
 */
3161 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3163 struct bfa_fcport_s *fcport = cbarg;
3166 if (fcport->stats_status == BFA_STATUS_OK) {
3167 struct bfa_timeval_s tv;
3169 /* Swap FC QoS or FCoE stats */
3170 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
3171 bfa_fcport_qos_stats_swap(
3172 &fcport->stats_ret->fcqos,
3173 &fcport->stats->fcqos);
3175 bfa_fcport_fcoe_stats_swap(
3176 &fcport->stats_ret->fcoe,
3177 &fcport->stats->fcoe);
/* seconds since the last stats reset */
3179 bfa_os_gettimeofday(&tv);
3180 fcport->stats_ret->fcoe.secs_reset =
3181 tv.tv_sec - fcport->stats_reset_time;
3184 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3186 fcport->stats_busy = BFA_FALSE;
3187 fcport->stats_status = BFA_STATUS_OK;
/*
 * Timer callback when a stats-get request times out: cancel any pending
 * reqq wait, mark the request ETIMER and queue the completion callback.
 */
3192 bfa_fcport_stats_get_timeout(void *cbarg)
3194 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3196 bfa_trc(fcport->bfa, fcport->stats_qfull);
3198 if (fcport->stats_qfull) {
3199 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3200 fcport->stats_qfull = BFA_FALSE;
3203 fcport->stats_status = BFA_STATUS_ETIMER;
3204 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
/*
 * Send a stats-get request to firmware; if the request queue is full,
 * register itself as the resume callback and retry when space frees up.
 */
3209 bfa_fcport_send_stats_get(void *cbarg)
3211 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3212 struct bfi_fcport_req_s *msg;
3214 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3217 fcport->stats_qfull = BFA_TRUE;
3218 bfa_reqq_winit(&fcport->stats_reqq_wait,
3219 bfa_fcport_send_stats_get, fcport);
3220 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3221 &fcport->stats_reqq_wait);
3224 fcport->stats_qfull = BFA_FALSE;
3226 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3227 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3228 bfa_lpuid(fcport->bfa));
3229 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Deferred completion for a stats-clear request: restamp the reset time,
 * notify the caller and release the busy flag.
 */
3233 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3235 struct bfa_fcport_s *fcport = cbarg;
3238 struct bfa_timeval_s tv;
3241 * re-initialize time stamp for stats reset
3243 bfa_os_gettimeofday(&tv);
3244 fcport->stats_reset_time = tv.tv_sec;
3246 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3248 fcport->stats_busy = BFA_FALSE;
3249 fcport->stats_status = BFA_STATUS_OK;
/*
 * Timer callback when a stats-clear request times out; mirrors
 * bfa_fcport_stats_get_timeout() but queues the clear-completion.
 */
3254 bfa_fcport_stats_clr_timeout(void *cbarg)
3256 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3258 bfa_trc(fcport->bfa, fcport->stats_qfull);
3260 if (fcport->stats_qfull) {
3261 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3262 fcport->stats_qfull = BFA_FALSE;
3265 fcport->stats_status = BFA_STATUS_ETIMER;
3266 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3267 __bfa_cb_fcport_stats_clr, fcport);
/*
 * Send a stats-clear request to firmware; same queue-full retry protocol
 * as bfa_fcport_send_stats_get().
 */
3271 bfa_fcport_send_stats_clear(void *cbarg)
3273 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3274 struct bfi_fcport_req_s *msg;
3276 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3279 fcport->stats_qfull = BFA_TRUE;
3280 bfa_reqq_winit(&fcport->stats_reqq_wait,
3281 bfa_fcport_send_stats_clear, fcport);
3282 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3283 &fcport->stats_reqq_wait);
3286 fcport->stats_qfull = BFA_FALSE;
3288 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3289 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3290 bfa_lpuid(fcport->bfa));
3291 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3295 * Handle trunk SCN event from firmware.
/*
 * Copy trunk state/speed/per-link attributes from the firmware state-change
 * notification into the cached trunk attributes, log the up/down transition,
 * and notify upper layers when the trunk state actually changed.
 * NOTE(review): elided chunk — the link-state tallies driving the
 * "Trunk up(0,1)/(−,1)/(0,−)" plog selection are not visible here.
 */
3298 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3300 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3301 struct bfi_fcport_trunk_link_s *tlink;
3302 struct bfa_trunk_link_attr_s *lattr;
3303 enum bfa_trunk_state state_prev;
3307 bfa_trc(fcport->bfa, fcport->cfg.trunked);
3308 bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
3309 scn->trunk_state == BFA_TRUNK_OFFLINE);
3311 bfa_trc(fcport->bfa, trunk->attr.state);
3312 bfa_trc(fcport->bfa, scn->trunk_state);
3313 bfa_trc(fcport->bfa, scn->trunk_speed);
3316 * Save off new state for trunk attribute query
3318 state_prev = trunk->attr.state;
3319 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3320 trunk->attr.state = scn->trunk_state;
3321 trunk->attr.speed = scn->trunk_speed;
3322 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3323 lattr = &trunk->attr.link_attr[i];
3324 tlink = &scn->tlink[i];
3326 lattr->link_state = tlink->state;
3327 lattr->trunk_wwn = tlink->trunk_wwn;
3328 lattr->fctl = tlink->fctl;
3329 lattr->speed = tlink->speed;
3330 lattr->deskew = bfa_os_ntohl(tlink->deskew);
3332 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
/* an up trunk link forces P2P topology at the link's speed */
3333 fcport->speed = tlink->speed;
3334 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3338 bfa_trc(fcport->bfa, lattr->link_state);
3339 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3340 bfa_trc(fcport->bfa, lattr->fctl);
3341 bfa_trc(fcport->bfa, lattr->speed);
3342 bfa_trc(fcport->bfa, lattr->deskew);
3347 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3348 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3351 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3352 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3355 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3356 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3359 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3360 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3364 * Notify upper layers if trunk state changed.
3366 if ((state_prev != trunk->attr.state) ||
3367 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3368 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3369 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
/*
 * On IOC disable/failure: if trunking is configured, report link-down to
 * upper layers and reset all cached trunk/link attributes to their
 * offline defaults.
 */
3374 bfa_trunk_iocdisable(struct bfa_s *bfa)
3376 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3380 * In trunked mode, notify upper layers that link is down
3382 if (fcport->cfg.trunked) {
3383 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3384 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3386 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3387 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3388 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3389 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3390 fcport->trunk.attr.link_attr[i].fctl =
3391 BFA_TRUNK_LINK_FCTL_NORMAL;
3392 fcport->trunk.attr.link_attr[i].link_state =
3393 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3394 fcport->trunk.attr.link_attr[i].speed =
3395 BFA_PORT_SPEED_UNKNOWN;
3396 fcport->trunk.attr.link_attr[i].deskew = 0;
3408 * Called to initialize port attributes
/*
 * Pull WWNs, max frame size (only if not user-configured), RX BB-credit
 * and supported speed from IOC hardware data, then sanity-assert them.
 */
3411 bfa_fcport_init(struct bfa_s *bfa)
3413 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3416 * Initialize port attributes from IOC hardware data.
3418 bfa_fcport_set_wwns(fcport);
3419 if (fcport->cfg.maxfrsize == 0)
3420 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3421 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3422 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3424 bfa_assert(fcport->cfg.maxfrsize);
3425 bfa_assert(fcport->cfg.rx_bbcredit);
3426 bfa_assert(fcport->speed_sup);
3430 * Firmware message handler.
/*
 * Dispatch firmware-to-host fcport messages: enable/disable responses
 * (matched by msgtag), link events, trunk SCNs, stats get/clear responses
 * (ignored if the timeout already fired), and enable/disable AENs.
 */
3433 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3435 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3436 union bfi_fcport_i2h_msg_u i2hmsg;
3439 fcport->event_arg.i2hmsg = i2hmsg;
3441 bfa_trc(bfa, msg->mhdr.msg_id);
3442 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3444 switch (msg->mhdr.msg_id) {
3445 case BFI_FCPORT_I2H_ENABLE_RSP:
3446 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3447 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP)
3450 case BFI_FCPORT_I2H_DISABLE_RSP:
/*
 * NOTE(review): reads msgtag via the penable_rsp union member even for
 * the disable response — presumably both responses share the same
 * layout; confirm against union bfi_fcport_i2h_msg_u.
 */
3451 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3452 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3455 case BFI_FCPORT_I2H_EVENT:
3456 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3457 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3459 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3462 case BFI_FCPORT_I2H_TRUNK_SCN:
3463 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3466 case BFI_FCPORT_I2H_STATS_GET_RSP:
3468 * check for timer pop before processing the rsp
3470 if (fcport->stats_busy == BFA_FALSE ||
3471 fcport->stats_status == BFA_STATUS_ETIMER)
3474 bfa_timer_stop(&fcport->timer);
3475 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3476 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3477 __bfa_cb_fcport_stats_get, fcport);
3480 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3482 * check for timer pop before processing the rsp
3484 if (fcport->stats_busy == BFA_FALSE ||
3485 fcport->stats_status == BFA_STATUS_ETIMER)
3488 bfa_timer_stop(&fcport->timer);
3489 fcport->stats_status = BFA_STATUS_OK;
3490 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3491 __bfa_cb_fcport_stats_clr, fcport);
3494 case BFI_FCPORT_I2H_ENABLE_AEN:
3495 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3498 case BFI_FCPORT_I2H_DISABLE_AEN:
3499 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3515 * Registered callback for port events.
/* Store the upper layer's link-event callback and its argument. */
3518 bfa_fcport_event_register(struct bfa_s *bfa,
3519 void (*cbfn) (void *cbarg,
3520 enum bfa_port_linkstate event),
3523 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3525 fcport->event_cbfn = cbfn;
3526 fcport->event_cbarg = cbarg;
/*
 * Enable the port. Fails with IOC_DISABLED while the IOC is down, or
 * DIAG_BUSY while diagnostics hold the port.
 */
3530 bfa_fcport_enable(struct bfa_s *bfa)
3532 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3534 if (bfa_ioc_is_disabled(&bfa->ioc))
3535 return BFA_STATUS_IOC_DISABLED;
3537 if (fcport->diag_busy)
3538 return BFA_STATUS_DIAG_BUSY;
3540 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3541 return BFA_STATUS_OK;
/* Disable the port; rejected while the IOC itself is disabled. */
3545 bfa_fcport_disable(struct bfa_s *bfa)
3548 if (bfa_ioc_is_disabled(&bfa->ioc))
3549 return BFA_STATUS_IOC_DISABLED;
3551 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3552 return BFA_STATUS_OK;
3556 * Configure port speed.
/*
 * Rejected while trunking is enabled, or when a fixed speed exceeds the
 * hardware-supported maximum; AUTO is always acceptable.
 */
3559 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3563 bfa_trc(bfa, speed);
3565 if (fcport->cfg.trunked == BFA_TRUE)
3566 return BFA_STATUS_TRUNK_ENABLED;
3567 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3568 bfa_trc(bfa, fcport->speed_sup);
3569 return BFA_STATUS_UNSUPP_SPEED;
3572 fcport->cfg.speed = speed;
3574 return BFA_STATUS_OK;
3578 * Get current speed.
3581 bfa_fcport_get_speed(struct bfa_s *bfa)
3583 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3585 return fcport->speed;
3589 * Configure port topology.
/* Accepts only P2P, LOOP or AUTO; anything else is EINVAL. */
3592 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3594 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3596 bfa_trc(bfa, topology);
3597 bfa_trc(bfa, fcport->cfg.topology);
3600 case BFA_PORT_TOPOLOGY_P2P:
3601 case BFA_PORT_TOPOLOGY_LOOP:
3602 case BFA_PORT_TOPOLOGY_AUTO:
3606 return BFA_STATUS_EINVAL;
3609 fcport->cfg.topology = topology;
3610 return BFA_STATUS_OK;
3614 * Get current topology.
3616 enum bfa_port_topology
3617 bfa_fcport_get_topology(struct bfa_s *bfa)
3619 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3621 return fcport->topology;
/* Enable hard-ALPA addressing with the given ALPA value. */
3625 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3627 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3630 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3631 bfa_trc(bfa, fcport->cfg.hardalpa);
3633 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3634 fcport->cfg.hardalpa = alpa;
3636 return BFA_STATUS_OK;
/* Disable hard-ALPA addressing (the stored alpa value is left as-is). */
3640 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3644 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3645 bfa_trc(bfa, fcport->cfg.hardalpa);
3647 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3648 return BFA_STATUS_OK;
/* Return whether hard-ALPA is configured; writes the value to *alpa. */
3652 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3654 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3656 *alpa = fcport->cfg.hardalpa;
3657 return fcport->cfg.cfg_hardalpa;
/* Return the ALPA currently assigned to this port. */
3661 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3663 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3665 return fcport->myalpa;
/*
 * Configure the maximum receive frame size. Must be within
 * [FC_MIN_PDUSZ, FC_MAX_PDUSZ] and a power of two, except the FC maximum
 * of 2112 which is allowed as-is.
 */
3669 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3671 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3673 bfa_trc(bfa, maxfrsize);
3674 bfa_trc(bfa, fcport->cfg.maxfrsize);
3677 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3678 return BFA_STATUS_INVLD_DFSZ;
3680 /* power of 2, if not the max frame size of 2112 */
3681 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3682 return BFA_STATUS_INVLD_DFSZ;
3684 fcport->cfg.maxfrsize = maxfrsize;
3685 return BFA_STATUS_OK;
/* Return the configured maximum frame size. */
3689 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3691 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3693 return fcport->cfg.maxfrsize;
/* Return the receive buffer-to-buffer credit. */
3697 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3699 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3701 return fcport->cfg.rx_bbcredit;
/*
 * Set the transmit BB-credit and push it to firmware.
 * NOTE(review): the u16 argument is narrowed to a u8 config field.
 */
3705 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3707 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3709 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3710 bfa_fcport_send_txcredit(fcport);
3714 * Get port attributes.
/* Return the node WWN when 'node' is true, otherwise the port WWN. */
3718 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3720 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3722 return fcport->nwwn;
3724 return fcport->pwwn;
/*
 * Fill *attr with a snapshot of port attributes: WWNs, configuration copy,
 * speed/topology/trunking, beacon flags, fcpim-derived settings, the
 * state-machine-derived port state (overridden when the IOC is disabled or
 * firmware mismatches) and the FCoE VLAN.
 */
3728 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3730 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3732 bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));
3734 attr->nwwn = fcport->nwwn;
3735 attr->pwwn = fcport->pwwn;
3737 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
3738 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
3740 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
3741 sizeof(struct bfa_port_cfg_s));
3742 /* speed attributes */
3743 attr->pport_cfg.speed = fcport->cfg.speed;
3744 attr->speed_supported = fcport->speed_sup;
3745 attr->speed = fcport->speed;
3746 attr->cos_supported = FC_CLASS_3;
3748 /* topology attributes */
3749 attr->pport_cfg.topology = fcport->cfg.topology;
3750 attr->topology = fcport->topology;
3751 attr->pport_cfg.trunked = fcport->cfg.trunked;
3753 /* beacon attributes */
3754 attr->beacon = fcport->beacon;
3755 attr->link_e2e_beacon = fcport->link_e2e_beacon;
3756 attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
3757 attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
3759 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
3760 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
/* IOC-level conditions take precedence over the SM-derived state */
3761 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3762 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3763 attr->port_state = BFA_PORT_ST_IOCDIS;
3764 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3765 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3768 attr->fcoe_vlan = fcport->fcoe_vlan;
/* Timeout (ms) guarding firmware stats get/clear requests. */
3771 #define BFA_FCPORT_STATS_TOV 1000
3774 * Fetch port statistics (FCQoS or FCoE).
/*
 * Asynchronous: marks the port stats engine busy, sends the request and
 * arms the timeout; cbfn is invoked on completion or timer expiry.
 * Returns DEVBUSY if a stats operation is already in flight.
 */
3777 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3778 bfa_cb_port_t cbfn, void *cbarg)
3780 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3782 if (fcport->stats_busy) {
3783 bfa_trc(bfa, fcport->stats_busy);
3784 return BFA_STATUS_DEVBUSY;
3787 fcport->stats_busy = BFA_TRUE;
3788 fcport->stats_ret = stats;
3789 fcport->stats_cbfn = cbfn;
3790 fcport->stats_cbarg = cbarg;
3792 bfa_fcport_send_stats_get(fcport);
3794 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3795 fcport, BFA_FCPORT_STATS_TOV);
3796 return BFA_STATUS_OK;
3800 * Reset port statistics (FCQoS or FCoE).
3803 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3805 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3807 if (fcport->stats_busy) {
3808 bfa_trc(bfa, fcport->stats_busy);
3809 return BFA_STATUS_DEVBUSY;
3812 fcport->stats_busy = BFA_TRUE;
3813 fcport->stats_cbfn = cbfn;
3814 fcport->stats_cbarg = cbarg;
3816 bfa_fcport_send_stats_clear(fcport);
3818 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3819 fcport, BFA_FCPORT_STATS_TOV);
3820 return BFA_STATUS_OK;
3824 * Fetch FCQoS port statistics
3827 bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3828 bfa_cb_port_t cbfn, void *cbarg)
3830 /* Meaningful only for FC mode */
3831 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3833 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3837 * Reset FCQoS port statistics
3840 bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3842 /* Meaningful only for FC mode */
3843 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3845 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3849 * Fetch FCoE port statistics
3852 bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3853 bfa_cb_port_t cbfn, void *cbarg)
3855 /* Meaningful only for FCoE mode */
3856 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3858 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3862 * Reset FCoE port statistics
3865 bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3867 /* Meaningful only for FCoE mode */
3868 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc))
3870 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
/* Copy out the QoS attributes, converting total_bb_cr to host order. */
3874 bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3878 qos_attr->state = fcport->qos_attr.state;
3879 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
/*
 * Copy out the per-VC QoS attributes: counts/flags are converted from
 * network to host order, then each VC's credit/priority info is copied.
 * NOTE(review): loop increment of i is elided in this view.
 */
3883 bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3884 struct bfa_qos_vc_attr_s *qos_vc_attr)
3886 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3887 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3890 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
3891 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
3892 qos_vc_attr->elp_opmode_flags =
3893 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
3895 /* Individual VC info */
3896 while (i < qos_vc_attr->total_vc_count) {
3897 qos_vc_attr->vc_info[i].vc_credit =
3898 bfa_vc_attr->vc_info[i].vc_credit;
3899 qos_vc_attr->vc_info[i].borrow_credit =
3900 bfa_vc_attr->vc_info[i].borrow_credit;
3901 qos_vc_attr->vc_info[i].priority =
3902 bfa_vc_attr->vc_info[i].priority;
3908 * Fetch port attributes.
/* True when the port state machine is in the DISABLED state. */
3911 bfa_fcport_is_disabled(struct bfa_s *bfa)
3913 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3915 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3916 BFA_PORT_ST_DISABLED;
/* True when rate limiting is configured on the port. */
3921 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3923 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3925 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
/*
 * Enable/disable QoS. Only takes effect on native FC IOCs; fcpim is
 * notified so IO redirection can be re-evaluated.
 */
3930 bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3932 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3933 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
3935 bfa_trc(bfa, on_off);
3936 bfa_trc(bfa, fcport->cfg.qos_enabled);
3938 bfa_trc(bfa, ioc_type);
3940 if (ioc_type == BFA_IOC_TYPE_FC) {
3941 fcport->cfg.qos_enabled = on_off;
3943 * Notify fcpim of the change in QoS state
3945 bfa_fcpim_update_ioredirect(bfa);
/*
 * Enable/disable rate limiting; ensures a sane default minimum speed
 * (1G) when none was configured.
 */
3950 bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3952 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3954 bfa_trc(bfa, on_off);
3955 bfa_trc(bfa, fcport->cfg.ratelimit);
3957 fcport->cfg.ratelimit = on_off;
3958 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
3959 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3963 * Configure default minimum ratelim speed
/* AUTO or speeds above the supported maximum are rejected. */
3966 bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3968 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3970 bfa_trc(bfa, speed);
3972 /* Auto and speeds greater than the supported speed, are invalid */
3973 if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
3974 bfa_trc(bfa, fcport->speed_sup);
3975 return BFA_STATUS_UNSUPP_SPEED;
3978 fcport->cfg.trl_def_speed = speed;
3980 return BFA_STATUS_OK;
3984 * Get default minimum ratelim speed
3987 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3989 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3991 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3992 return fcport->cfg.trl_def_speed;
/* Mark the port busy/free for diagnostics (checked by bfa_fcport_enable). */
3996 bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
3998 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4000 bfa_trc(bfa, status);
4001 bfa_trc(bfa, fcport->diag_busy);
4003 fcport->diag_busy = status;
/* Record the port-beacon and end-to-end link-beacon settings. */
4007 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4008 bfa_boolean_t link_e2e_beacon)
4010 struct bfa_s *bfa = dev;
4011 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4013 bfa_trc(bfa, beacon);
4014 bfa_trc(bfa, link_e2e_beacon);
4015 bfa_trc(bfa, fcport->beacon);
4016 bfa_trc(bfa, fcport->link_e2e_beacon);
4018 fcport->beacon = beacon;
4019 fcport->link_e2e_beacon = link_e2e_beacon;
/*
 * Link is up when either: not trunked and the SM is in linkup, or
 * trunked and the trunk is ONLINE.
 */
4023 bfa_fcport_is_linkup(struct bfa_s *bfa)
4025 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4027 return (!fcport->cfg.trunked &&
4028 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4029 (fcport->cfg.trunked &&
4030 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
/* True when QoS is enabled in the port configuration. */
4034 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4036 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4038 return fcport->cfg.qos_enabled;
/* Copy the cached trunk attributes out, plus the base LPS port id. */
4042 bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
4045 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4046 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4048 bfa_trc(bfa, fcport->cfg.trunked);
4049 bfa_trc(bfa, trunk->attr.state);
4050 *attr = trunk->attr;
4051 attr->port_id = bfa_lps_get_base_pid(bfa);
4053 return BFA_STATUS_OK;
/* Set trunking in the configuration only (no port bounce). */
4057 bfa_trunk_enable_cfg(struct bfa_s *bfa)
4059 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4060 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4063 trunk->attr.state = BFA_TRUNK_OFFLINE;
4064 fcport->cfg.trunked = BFA_TRUE;
/* Enable trunking: bounce the port (disable -> reconfigure -> enable). */
4068 bfa_trunk_enable(struct bfa_s *bfa)
4070 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4071 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4075 trunk->attr.state = BFA_TRUNK_OFFLINE;
4076 bfa_fcport_disable(bfa);
4077 fcport->cfg.trunked = BFA_TRUE;
4078 bfa_fcport_enable(bfa);
4080 return BFA_STATUS_OK;
/* Disable trunking: bounce the port with trunking turned off. */
4084 bfa_trunk_disable(struct bfa_s *bfa)
4086 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4087 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4090 trunk->attr.state = BFA_TRUNK_DISABLED;
4091 bfa_fcport_disable(bfa);
4092 fcport->cfg.trunked = BFA_FALSE;
4093 bfa_fcport_enable(bfa);
4094 return BFA_STATUS_OK;
4099 * Rport State machine functions
4102 * Beginning state, only online event expected.
/* Only CREATE is legal here; anything else is a state-machine fault. */
4105 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4107 bfa_trc(rp->bfa, rp->rport_tag);
4108 bfa_trc(rp->bfa, event);
4111 case BFA_RPORT_SM_CREATE:
4112 bfa_stats(rp, sm_un_cr);
4113 bfa_sm_set_state(rp, bfa_rport_sm_created);
4117 bfa_stats(rp, sm_un_unexp);
4118 bfa_sm_fault(rp->bfa, event);
/*
 * Created state: ONLINE kicks off fwcreate (or qfull wait when the reqq is
 * full), DELETE returns to uninit, HWFAIL parks in iocdisable.
 */
4123 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4125 bfa_trc(rp->bfa, rp->rport_tag);
4126 bfa_trc(rp->bfa, event);
4129 case BFA_RPORT_SM_ONLINE:
4130 bfa_stats(rp, sm_cr_on);
4131 if (bfa_rport_send_fwcreate(rp))
4132 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4134 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4137 case BFA_RPORT_SM_DELETE:
4138 bfa_stats(rp, sm_cr_del);
4139 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4143 case BFA_RPORT_SM_HWFAIL:
4144 bfa_stats(rp, sm_cr_hwf);
4145 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4149 bfa_stats(rp, sm_cr_unexp);
4150 bfa_sm_fault(rp->bfa, event);
4155 * Waiting for rport create response from firmware.
4158 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4160 bfa_trc(rp->bfa, rp->rport_tag);
4161 bfa_trc(rp->bfa, event);
4164 case BFA_RPORT_SM_FWRSP:
4165 bfa_stats(rp, sm_fwc_rsp);
4166 bfa_sm_set_state(rp, bfa_rport_sm_online);
4167 bfa_rport_online_cb(rp);
4170 case BFA_RPORT_SM_DELETE:
4171 bfa_stats(rp, sm_fwc_del);
4172 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4175 case BFA_RPORT_SM_OFFLINE:
4176 bfa_stats(rp, sm_fwc_off);
4177 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4180 case BFA_RPORT_SM_HWFAIL:
4181 bfa_stats(rp, sm_fwc_hwf);
4182 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4186 bfa_stats(rp, sm_fwc_unexp);
4187 bfa_sm_fault(rp->bfa, event);
4192 * Request queue is full, awaiting queue resume to send create request.
4195 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4197 bfa_trc(rp->bfa, rp->rport_tag);
4198 bfa_trc(rp->bfa, event);
4201 case BFA_RPORT_SM_QRESUME:
4202 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4203 bfa_rport_send_fwcreate(rp);
4206 case BFA_RPORT_SM_DELETE:
4207 bfa_stats(rp, sm_fwc_del);
4208 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4209 bfa_reqq_wcancel(&rp->reqq_wait);
4213 case BFA_RPORT_SM_OFFLINE:
4214 bfa_stats(rp, sm_fwc_off);
4215 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4216 bfa_reqq_wcancel(&rp->reqq_wait);
4217 bfa_rport_offline_cb(rp);
4220 case BFA_RPORT_SM_HWFAIL:
4221 bfa_stats(rp, sm_fwc_hwf);
4222 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4223 bfa_reqq_wcancel(&rp->reqq_wait);
4227 bfa_stats(rp, sm_fwc_unexp);
4228 bfa_sm_fault(rp->bfa, event);
4233 * Online state - normal parking state.
4236 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4238 struct bfi_rport_qos_scn_s *qos_scn;
4240 bfa_trc(rp->bfa, rp->rport_tag);
4241 bfa_trc(rp->bfa, event);
4244 case BFA_RPORT_SM_OFFLINE:
4245 bfa_stats(rp, sm_on_off);
4246 if (bfa_rport_send_fwdelete(rp))
4247 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4249 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4252 case BFA_RPORT_SM_DELETE:
4253 bfa_stats(rp, sm_on_del);
4254 if (bfa_rport_send_fwdelete(rp))
4255 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4257 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4260 case BFA_RPORT_SM_HWFAIL:
4261 bfa_stats(rp, sm_on_hwf);
4262 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4265 case BFA_RPORT_SM_SET_SPEED:
4266 bfa_rport_send_fwspeed(rp);
4269 case BFA_RPORT_SM_QOS_SCN:
4270 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4271 rp->qos_attr = qos_scn->new_qos_attr;
4272 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4273 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4274 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4275 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4277 qos_scn->old_qos_attr.qos_flow_id =
4278 bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
4279 qos_scn->new_qos_attr.qos_flow_id =
4280 bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
4282 if (qos_scn->old_qos_attr.qos_flow_id !=
4283 qos_scn->new_qos_attr.qos_flow_id)
4284 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4285 qos_scn->old_qos_attr,
4286 qos_scn->new_qos_attr);
4287 if (qos_scn->old_qos_attr.qos_priority !=
4288 qos_scn->new_qos_attr.qos_priority)
4289 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4290 qos_scn->old_qos_attr,
4291 qos_scn->new_qos_attr);
4295 bfa_stats(rp, sm_on_unexp);
4296 bfa_sm_fault(rp->bfa, event);
4301 * Firmware rport is being deleted - awaiting f/w response.
4304 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4306 bfa_trc(rp->bfa, rp->rport_tag);
4307 bfa_trc(rp->bfa, event);
4310 case BFA_RPORT_SM_FWRSP:
4311 bfa_stats(rp, sm_fwd_rsp);
4312 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4313 bfa_rport_offline_cb(rp);
4316 case BFA_RPORT_SM_DELETE:
4317 bfa_stats(rp, sm_fwd_del);
4318 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4321 case BFA_RPORT_SM_HWFAIL:
4322 bfa_stats(rp, sm_fwd_hwf);
4323 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4324 bfa_rport_offline_cb(rp);
4328 bfa_stats(rp, sm_fwd_unexp);
4329 bfa_sm_fault(rp->bfa, event);
/*
 * Firmware delete is wanted but the request queue was full; parked on
 * the reqq wait list until space frees up.
 */
4334 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4336 bfa_trc(rp->bfa, rp->rport_tag);
4337 bfa_trc(rp->bfa, event);
/* queue space available: send the deferred f/w delete now */
4340 case BFA_RPORT_SM_QRESUME:
4341 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4342 bfa_rport_send_fwdelete(rp);
/* user delete arrived while still queue-full */
4345 case BFA_RPORT_SM_DELETE:
4346 bfa_stats(rp, sm_fwd_del);
4347 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
/* IOC failure: cancel the pending reqq wait and report offline */
4350 case BFA_RPORT_SM_HWFAIL:
4351 bfa_stats(rp, sm_fwd_hwf);
4352 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4353 bfa_reqq_wcancel(&rp->reqq_wait);
4354 bfa_rport_offline_cb(rp);
4358 bfa_stats(rp, sm_fwd_unexp);
4359 bfa_sm_fault(rp->bfa, event);
/*
 * Rport is offline (no f/w instance exists). From here it can be
 * deleted immediately or brought back online via a new f/w create.
 */
4367 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4369 bfa_trc(rp->bfa, rp->rport_tag);
4370 bfa_trc(rp->bfa, event);
/* nothing in f/w to tear down: free straight back to uninit */
4373 case BFA_RPORT_SM_DELETE:
4374 bfa_stats(rp, sm_off_del);
4375 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
/* (re)create in f/w; fall to qfull state if no reqq space */
4379 case BFA_RPORT_SM_ONLINE:
4380 bfa_stats(rp, sm_off_on);
4381 if (bfa_rport_send_fwcreate(rp))
4382 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4384 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4387 case BFA_RPORT_SM_HWFAIL:
4388 bfa_stats(rp, sm_off_hwf);
4389 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4393 bfa_stats(rp, sm_off_unexp);
4394 bfa_sm_fault(rp->bfa, event);
/*
 * Rport is deleted, waiting for firmware response to delete.
 * Once the f/w responds (or the IOC dies) the rport returns to uninit.
 */
4402 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4404 bfa_trc(rp->bfa, rp->rport_tag);
4405 bfa_trc(rp->bfa, event);
4408 case BFA_RPORT_SM_FWRSP:
4409 bfa_stats(rp, sm_del_fwrsp);
4410 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4414 case BFA_RPORT_SM_HWFAIL:
4415 bfa_stats(rp, sm_del_hwf);
4416 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4421 bfa_sm_fault(rp->bfa, event);
/*
 * Delete is pending but the f/w delete request could not be queued
 * (request queue full); waiting for queue space.
 */
4426 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4428 bfa_trc(rp->bfa, rp->rport_tag);
4429 bfa_trc(rp->bfa, event);
/* queue space available: issue the deferred delete */
4432 case BFA_RPORT_SM_QRESUME:
4433 bfa_stats(rp, sm_del_fwrsp);
4434 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4435 bfa_rport_send_fwdelete(rp);
/* IOC failure: cancel the queue wait and free the rport */
4438 case BFA_RPORT_SM_HWFAIL:
4439 bfa_stats(rp, sm_del_hwf);
4440 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4441 bfa_reqq_wcancel(&rp->reqq_wait);
4446 bfa_sm_fault(rp->bfa, event);
/*
 * Waiting for rport create response from firmware. A delete is pending.
 * The create must complete before the matching delete can be sent.
 */
4454 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4455 enum bfa_rport_event event)
4457 bfa_trc(rp->bfa, rp->rport_tag);
4458 bfa_trc(rp->bfa, event);
/* create finished: immediately turn around and delete it */
4461 case BFA_RPORT_SM_FWRSP:
4462 bfa_stats(rp, sm_delp_fwrsp);
4463 if (bfa_rport_send_fwdelete(rp))
4464 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4466 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
/* IOC failure: f/w state is gone; free the rport */
4469 case BFA_RPORT_SM_HWFAIL:
4470 bfa_stats(rp, sm_delp_hwf);
4471 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4476 bfa_stats(rp, sm_delp_unexp);
4477 bfa_sm_fault(rp->bfa, event);
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 * The create must complete before the offlining delete can be issued.
 */
4485 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4486 enum bfa_rport_event event)
4488 bfa_trc(rp->bfa, rp->rport_tag);
4489 bfa_trc(rp->bfa, event);
/* create finished: now send the delete that takes it offline */
4492 case BFA_RPORT_SM_FWRSP:
4493 bfa_stats(rp, sm_offp_fwrsp);
4494 if (bfa_rport_send_fwdelete(rp))
4495 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4497 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
/* delete request upgrades a pending offline to a pending delete */
4500 case BFA_RPORT_SM_DELETE:
4501 bfa_stats(rp, sm_offp_del);
4502 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4505 case BFA_RPORT_SM_HWFAIL:
4506 bfa_stats(rp, sm_offp_hwf);
4507 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4511 bfa_stats(rp, sm_offp_unexp);
4512 bfa_sm_fault(rp->bfa, event);
/*
 * IOC is disabled: no firmware communication is possible. Events are
 * handled locally until the rport is freed or brought back online.
 */
4520 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4522 bfa_trc(rp->bfa, rp->rport_tag);
4523 bfa_trc(rp->bfa, event);
/* offline request: just notify the owner, stay in this state */
4526 case BFA_RPORT_SM_OFFLINE:
4527 bfa_stats(rp, sm_iocd_off);
4528 bfa_rport_offline_cb(rp);
/* delete: nothing in f/w to clean up; free directly */
4531 case BFA_RPORT_SM_DELETE:
4532 bfa_stats(rp, sm_iocd_del);
4533 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
/* IOC came back and the rport is going online again */
4537 case BFA_RPORT_SM_ONLINE:
4538 bfa_stats(rp, sm_iocd_on);
4539 if (bfa_rport_send_fwcreate(rp))
4540 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4542 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
/* NOTE(review): HWFAIL appears to be a no-op here (already disabled)
 * — the case body is not visible in this view; confirm in full source. */
4545 case BFA_RPORT_SM_HWFAIL:
4549 bfa_stats(rp, sm_iocd_unexp);
4550 bfa_sm_fault(rp->bfa, event);
4557 * bfa_rport_private BFA rport private functions
/* Deferred-callback trampoline: report rport-online to the driver layer. */
4561 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4563 struct bfa_rport_s *rp = cbarg;
4566 bfa_cb_rport_online(rp->rport_drv);
/* Deferred-callback trampoline: report rport-offline to the driver layer. */
4570 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4572 struct bfa_rport_s *rp = cbarg;
4575 bfa_cb_rport_offline(rp->rport_drv);
/* Request-queue-space-available callback: resume the rport state machine. */
4579 bfa_rport_qresume(void *cbarg)
4581 struct bfa_rport_s *rp = cbarg;
4583 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
/*
 * Report kernel memory needed by the rport module: one bfa_rport_s per
 * configured rport (clamped up to BFA_RPORT_MIN).
 */
4587 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4590 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4591 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4593 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
/*
 * Module attach: carve the rport array out of the pre-claimed kva
 * region, initialize each rport and put it on the free queue.
 */
4597 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4598 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4600 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4601 struct bfa_rport_s *rp;
4604 INIT_LIST_HEAD(&mod->rp_free_q);
4605 INIT_LIST_HEAD(&mod->rp_active_q);
4607 rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
4609 mod->num_rports = cfg->fwcfg.num_rports;
/* num_rports must be a non-zero power of two (tag masking relies on it) */
4611 bfa_assert(mod->num_rports &&
4612 !(mod->num_rports & (mod->num_rports - 1)));
4614 for (i = 0; i < mod->num_rports; i++, rp++) {
4615 bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
4618 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4624 list_add_tail(&rp->qe, &mod->rp_free_q);
/* arm the queue-resume callback used when the reqq is full */
4626 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
/* advance the kva cursor past the memory consumed above */
4632 bfa_meminfo_kva(meminfo) = (u8 *) rp;
/* Module detach hook — no per-module teardown needed for rports. */
4636 bfa_rport_detach(struct bfa_s *bfa)
/* Module start hook — intentionally empty. */
4641 bfa_rport_start(struct bfa_s *bfa)
/* Module stop hook — intentionally empty. */
4646 bfa_rport_stop(struct bfa_s *bfa)
/*
 * IOC-disable hook: push HWFAIL into every active rport's state machine.
 * Safe iteration is required because HWFAIL may move entries off the list.
 */
4651 bfa_rport_iocdisable(struct bfa_s *bfa)
4653 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4654 struct bfa_rport_s *rport;
4655 struct list_head *qe, *qen;
4657 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4658 rport = (struct bfa_rport_s *) qe;
4659 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
/*
 * Pop an rport from the free queue and move it to the active queue.
 * May return NULL when the free queue is empty (bfa_q_deq semantics).
 */
4663 static struct bfa_rport_s *
4664 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4666 struct bfa_rport_s *rport;
4668 bfa_q_deq(&mod->rp_free_q, &rport);
4670 list_add_tail(&rport->qe, &mod->rp_active_q);
/* Return an rport from the active queue back to the free queue. */
4676 bfa_rport_free(struct bfa_rport_s *rport)
4678 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
/* catch double-free: the rport must currently be on the active queue */
4680 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4681 list_del(&rport->qe);
4682 list_add_tail(&rport->qe, &mod->rp_free_q);
/*
 * Build and post a BFI rport-create request to firmware.
 * Returns false (after queuing a reqq wait) when the request queue is
 * full, so the state machine can park in a *_qfull state.
 */
4685 static bfa_boolean_t
4686 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4688 struct bfi_rport_create_req_s *m;
4691 * check for room in queue to send request now
4693 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4695 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4699 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4700 bfa_lpuid(rp->bfa));
/* bfa_handle echoes back in the create response to locate this rport */
4701 m->bfa_handle = rp->rport_tag;
/* max frame size travels on the wire in big-endian */
4702 m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
4703 m->pid = rp->rport_info.pid;
4704 m->lp_tag = rp->rport_info.lp_tag;
4705 m->local_pid = rp->rport_info.local_pid;
4706 m->fc_class = rp->rport_info.fc_class;
4707 m->vf_en = rp->rport_info.vf_en;
4708 m->vf_id = rp->rport_info.vf_id;
4709 m->cisc = rp->rport_info.cisc;
4712 * queue I/O message to firmware
4714 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
/*
 * Build and post a BFI rport-delete request to firmware.
 * Returns false (after queuing a reqq wait) when the request queue is full.
 */
4718 static bfa_boolean_t
4719 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4721 struct bfi_rport_delete_req_s *m;
4724 * check for room in queue to send request now
4726 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4728 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4732 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4733 bfa_lpuid(rp->bfa));
/* fw_handle was returned by firmware in the create response */
4734 m->fw_handle = rp->fw_handle;
4737 * queue I/O message to firmware
4739 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
/*
 * Build and post a BFI set-speed request to firmware. Unlike the
 * create/delete senders, a full queue is only traced — no reqq wait is
 * queued, so the speed update is best-effort.
 */
4743 static bfa_boolean_t
4744 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4746 struct bfa_rport_speed_req_s *m;
4749 * check for room in queue to send request now
4751 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4753 bfa_trc(rp->bfa, rp->rport_info.speed);
4757 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4758 bfa_lpuid(rp->bfa));
4759 m->fw_handle = rp->fw_handle;
4760 m->speed = (u8)rp->rport_info.speed;
4763 * queue I/O message to firmware
4765 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * (create/delete responses, QoS state-change notifications) to the
 * owning rport's state machine, located via the bfa_handle tag.
 */
4779 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4781 union bfi_rport_i2h_msg_u msg;
4782 struct bfa_rport_s *rp;
4784 bfa_trc(bfa, m->mhdr.msg_id);
4788 switch (m->mhdr.msg_id) {
4789 case BFI_RPORT_I2H_CREATE_RSP:
4790 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
/* capture the firmware-side handle for later delete/speed requests */
4791 rp->fw_handle = msg.create_rsp->fw_handle;
4792 rp->qos_attr = msg.create_rsp->qos_attr;
4793 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
4794 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4797 case BFI_RPORT_I2H_DELETE_RSP:
4798 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4799 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
4800 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4803 case BFI_RPORT_I2H_QOS_SCN:
4804 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
/* stash the raw f/w message; the SM handler parses it */
4805 rp->event_arg.fw_msg = msg.qos_scn_evt;
4806 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4810 bfa_trc(bfa, m->mhdr.msg_id);
/*
 * Allocate an rport for the driver layer and kick off its state
 * machine with a CREATE event. rport_drv is the caller's opaque
 * context echoed back in online/offline callbacks.
 */
4821 struct bfa_rport_s *
4822 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4824 struct bfa_rport_s *rp;
4826 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4832 rp->rport_drv = rport_drv;
4833 bfa_rport_clear_stats(rp);
/* a freshly allocated rport must be in the uninit state */
4835 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4836 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
/* Request deletion of an rport; actual teardown is driven by the SM. */
4842 bfa_rport_delete(struct bfa_rport_s *rport)
4844 bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
/*
 * Bring an rport online with the given login parameters, then fire the
 * ONLINE event so the SM issues the firmware create.
 */
4848 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
/* NOTE(review): this assert contradicts the fallback just below — with
 * asserts enabled a zero max_frmsz trips here and the FC_MIN_PDUSZ
 * default is dead code; confirm which behavior is intended. */
4850 bfa_assert(rport_info->max_frmsz != 0);
4853 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4854 * responses. Default to minimum size.
4856 if (rport_info->max_frmsz == 0) {
4857 bfa_trc(rport->bfa, rport->rport_tag);
4858 rport_info->max_frmsz = FC_MIN_PDUSZ;
/* struct-copy the caller's rport_info into the rport */
4861 bfa_os_assign(rport->rport_info, *rport_info);
4862 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
/* Take an rport offline; teardown is driven by the state machine. */
4866 bfa_rport_offline(struct bfa_rport_s *rport)
4868 bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
/*
 * Set an explicit rport speed and notify firmware via the SM.
 * AUTO is not a valid value here — a concrete speed is required.
 */
4872 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4874 bfa_assert(speed != 0);
4875 bfa_assert(speed != BFA_PORT_SPEED_AUTO);
4877 rport->rport_info.speed = speed;
4878 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
/* Copy out the rport's HAL statistics (struct copy). */
4882 bfa_rport_get_stats(struct bfa_rport_s *rport,
4883 struct bfa_rport_hal_stats_s *stats)
4885 *stats = rport->stats;
/*
 * Copy out the rport's QoS attributes; flow id is stored in network
 * byte order and converted to host order for the caller.
 */
4889 bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
4890 struct bfa_rport_qos_attr_s *qos_attr)
4892 qos_attr->qos_priority = rport->qos_attr.qos_priority;
4893 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
/* Zero the rport's HAL statistics. */
4898 bfa_rport_clear_stats(struct bfa_rport_s *rport)
4900 bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
/*
 * SGPG related functions
 */
/*
 * Report kernel and DMA memory needed by the SGPG module: one extra
 * element is reserved in each region for alignment slack.
 */
4912 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4915 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4916 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4918 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4919 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
/*
 * Module attach: align the DMA region to the SGPG size, then build the
 * host-side sgpg array and matching DMA-visible array, pairing each
 * host entry with its DMA address and queueing it on the free list.
 */
4924 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4925 struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
4927 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4929 struct bfa_sgpg_s *hsgpg;
4930 struct bfi_sgpg_s *sgpg;
/* union lets the same 64-bit PA be viewed as a BFI address */
4935 union bfi_addr_u addr;
4936 } sgpg_pa, sgpg_pa_tmp;
4938 INIT_LIST_HEAD(&mod->sgpg_q);
4939 INIT_LIST_HEAD(&mod->sgpg_wait_q);
4941 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4943 mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
/* round the DMA base up to an SGPG boundary; align_len is the slack */
4944 mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
4945 align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
4946 mod->sgpg_arr_pa += align_len;
4947 mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
4949 mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
4952 hsgpg = mod->hsgpg_arr;
4953 sgpg = mod->sgpg_arr;
4954 sgpg_pa.pa = mod->sgpg_arr_pa;
4955 mod->free_sgpgs = mod->num_sgpgs;
/* the aligned PA must be a multiple of the SGPG size */
4957 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
4959 for (i = 0; i < mod->num_sgpgs; i++) {
4960 bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
4961 bfa_os_memset(sgpg, 0, sizeof(*sgpg));
/* store the PA in the byte order firmware expects for SG addresses */
4964 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4965 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4966 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4970 sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
/* advance the meminfo cursors past everything consumed above */
4973 bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
4974 bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
4975 bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
/* Module detach hook — no per-module teardown needed for SGPGs. */
4979 bfa_sgpg_detach(struct bfa_s *bfa)
/* Module start hook — intentionally empty. */
4984 bfa_sgpg_start(struct bfa_s *bfa)
/* Module stop hook — intentionally empty. */
4989 bfa_sgpg_stop(struct bfa_s *bfa)
/* IOC-disable hook — nothing to do; SGPGs hold no firmware state. */
4994 bfa_sgpg_iocdisable(struct bfa_s *bfa)
5001 * hal_sgpg_public BFA SGPG public functions
/*
 * Allocate nsgpgs scatter-gather pages onto the caller's sgpg_q.
 * All-or-nothing: returns BFA_STATUS_ENOMEM without allocating anything
 * when fewer than nsgpgs pages are free.
 */
5005 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5007 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5008 struct bfa_sgpg_s *hsgpg;
5011 bfa_trc_fp(bfa, nsgpgs);
5013 if (mod->free_sgpgs < nsgpgs)
5014 return BFA_STATUS_ENOMEM;
5016 for (i = 0; i < nsgpgs; i++) {
5017 bfa_q_deq(&mod->sgpg_q, &hsgpg);
5019 list_add_tail(&hsgpg->qe, sgpg_q);
5022 mod->free_sgpgs -= nsgpgs;
5023 return BFA_STATUS_OK;
/*
 * Return nsgpg pages to the free pool, then service as many queued
 * waiters as the replenished pool allows, invoking each waiter's
 * callback once its full request is satisfied.
 */
5027 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5029 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5030 struct bfa_sgpg_wqe_s *wqe;
5032 bfa_trc_fp(bfa, nsgpg);
5034 mod->free_sgpgs += nsgpg;
5035 bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
/* move the whole returned list onto the free queue in one splice */
5037 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
5039 if (list_empty(&mod->sgpg_wait_q))
5043 * satisfy as many waiting requests as possible
5046 wqe = bfa_q_first(&mod->sgpg_wait_q);
/* partial grant: give the waiter whatever is currently free */
5047 if (mod->free_sgpgs < wqe->nsgpg)
5048 nsgpg = mod->free_sgpgs;
5051 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
5052 wqe->nsgpg -= nsgpg;
/* waiter fully satisfied: complete it via its callback */
5053 if (wqe->nsgpg == 0) {
5055 wqe->cbfn(wqe->cbarg);
5057 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
/*
 * Queue a wait for nsgpg pages when the pool cannot satisfy the request
 * (caller must only call this when free_sgpgs < nsgpg). Any currently
 * free pages are handed over immediately; the remainder is delivered by
 * bfa_sgpg_mfree() as pages are returned.
 */
5061 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5063 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5065 bfa_assert(nsgpg > 0);
5066 bfa_assert(nsgpg > mod->free_sgpgs);
5068 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5071 * allocate any left to this one first
5073 if (mod->free_sgpgs) {
5075 * no one else is waiting for SGPG
5077 bfa_assert(list_empty(&mod->sgpg_wait_q));
5078 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
5079 wqe->nsgpg -= mod->free_sgpgs;
5080 mod->free_sgpgs = 0;
5083 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
/*
 * Cancel a pending SGPG wait, returning any partially granted pages
 * (nsgpg_total - nsgpg) back to the free pool.
 */
5087 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5089 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5091 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5094 if (wqe->nsgpg_total != wqe->nsgpg)
5095 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5096 wqe->nsgpg_total - wqe->nsgpg);
/* Initialize an SGPG wait element with its completion callback. */
5100 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5103 INIT_LIST_HEAD(&wqe->sgpg_q);
5109 * UF related functions
5112 *****************************************************************************
5113 * Internal functions
5114 *****************************************************************************
/* Deferred-callback trampoline: hand a received UF to the registered handler. */
5117 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5119 struct bfa_uf_s *uf = cbarg;
5120 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5123 ufm->ufrecv(ufm->cbarg, uf);
/*
 * Claim and zero the DMA region holding the UF posted buffers, then
 * advance the meminfo DMA cursors past it.
 */
5127 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5131 ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
5132 ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
5133 uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
5136 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
5137 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
5139 bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
/*
 * Pre-build one BFI buf-post message per UF so posting at runtime is a
 * plain memcpy into the request queue. Each message carries two SGEs:
 * the data SGE pointing at the UF's DMA buffer, and a zero-address
 * page-length terminator SGE.
 */
5143 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5145 struct bfi_uf_buf_post_s *uf_bp_msg;
5146 struct bfi_sge_s *sge;
5147 union bfi_addr_u sga_zero = { {0} };
5151 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
5152 uf_bp_msg = ufm->uf_buf_posts;
5154 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5156 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
/* buf_tag is the index firmware echoes back in FRM_RCVD */
5158 uf_bp_msg->buf_tag = i;
5159 buf_len = sizeof(struct bfa_uf_buf_s);
5160 uf_bp_msg->buf_len = bfa_os_htons(buf_len);
5161 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5162 bfa_lpuid(ufm->bfa));
/* SGE 0: the actual receive buffer */
5164 sge = uf_bp_msg->sge;
5165 sge[0].sg_len = buf_len;
5166 sge[0].flags = BFI_SGE_DATA_LAST;
5167 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
/* SGE 1: zero-address terminator, converted to big-endian for f/w */
5170 sge[1].sg_len = buf_len;
5171 sge[1].flags = BFI_SGE_PGDLEN;
5172 sge[1].sga = sga_zero;
5173 bfa_sge_to_be(&sge[1]);
5177 * advance pointer beyond consumed memory
5179 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
/*
 * Claim the UF descriptor array from kernel memory, wire each UF to its
 * DMA buffer (claimed earlier by claim_uf_pbs), and queue all UFs free.
 */
5183 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5186 struct bfa_uf_s *uf;
5189 * Claim block of memory for UF list
5191 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
5194 * Initialize UFs and queue it in UF free queue
5196 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5197 bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
/* each UF owns the i-th posted buffer (kva + matching PA) */
5200 uf->pb_len = sizeof(struct bfa_uf_buf_s);
5201 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
5202 uf->buf_pa = ufm_pbs_pa(ufm, i);
5203 list_add_tail(&uf->qe, &ufm->uf_free_q);
5207 * advance memory pointer
5209 bfa_meminfo_kva(mi) = (u8 *) uf;
/* Claim all UF memory: DMA buffers first, then pre-built post messages.
 * (claim_ufs depends on the buffer addresses set up by claim_uf_pbs.) */
5213 uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5215 claim_uf_pbs(ufm, mi);
5217 claim_uf_post_msgs(ufm, mi);
/*
 * Report DMA and kernel memory needed by the UF module for the
 * configured number of unsolicited-frame buffers.
 */
5221 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
5223 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
5226 * dma-able memory for UF posted bufs
5228 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
5232 * kernel Virtual memory for UFs and UF buf post msg copies
5234 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
5235 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
/*
 * Module attach: reset module state, initialize the free/posted queues
 * and claim all UF memory from the meminfo regions.
 */
5239 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5240 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
5242 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5244 bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
5246 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5247 INIT_LIST_HEAD(&ufm->uf_free_q);
5248 INIT_LIST_HEAD(&ufm->uf_posted_q);
5250 uf_mem_claim(ufm, meminfo);
/* Module detach hook — no per-module teardown needed for UFs. */
5254 bfa_uf_detach(struct bfa_s *bfa)
/* Pop a UF from the free queue; NULL when empty (bfa_q_deq semantics). */
5258 static struct bfa_uf_s *
5259 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5261 struct bfa_uf_s *uf;
5263 bfa_q_deq(&uf_mod->uf_free_q, &uf);
/* Return a UF to the free queue. */
5268 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5270 list_add_tail(&uf->qe, &uf_mod->uf_free_q);
/*
 * Post one UF buffer to firmware by copying its pre-built buf-post
 * message into the request queue, then track it on the posted queue.
 * Returns BFA_STATUS_FAILED when the request queue is full.
 */
5274 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5276 struct bfi_uf_buf_post_s *uf_post_msg;
5278 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5280 return BFA_STATUS_FAILED;
/* message was pre-built at attach time by claim_uf_post_msgs() */
5282 bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5283 sizeof(struct bfi_uf_buf_post_s));
5284 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
5286 bfa_trc(ufm->bfa, uf->uf_tag);
5288 list_add_tail(&uf->qe, &ufm->uf_posted_q);
5289 return BFA_STATUS_OK;
/*
 * Post every free UF to firmware; stop early if the request queue
 * fills up (remaining UFs are reposted later via bfa_uf_free).
 */
5293 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5295 struct bfa_uf_s *uf;
5297 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5298 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
/*
 * Handle a firmware frame-received notification: locate the UF by its
 * buffer tag, record the received length, log the FC header (plus first
 * payload word when present), and deliver the UF to the registered
 * receive handler — immediately or via the deferred-callback queue.
 */
5304 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5306 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5307 u16 uf_tag = m->buf_tag;
5308 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
5309 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5310 u8 *buf = &uf_buf->d[0];
5311 struct fchs_s *fchs;
/* lengths arrive in network byte order */
5313 m->frm_len = bfa_os_ntohs(m->frm_len);
5314 m->xfr_len = bfa_os_ntohs(m->xfr_len);
/* the received buffer starts with the FC frame header */
5316 fchs = (struct fchs_s *)uf_buf;
5318 list_del(&uf->qe); /* dequeue from posted queue */
5321 uf->data_len = m->xfr_len;
/* a frame must at least contain a full FC header */
5323 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
5325 if (uf->data_len == sizeof(struct fchs_s)) {
5326 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5327 uf->data_len, (struct fchs_s *)buf);
/* payload present: also log its first 32-bit word */
5329 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5330 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5331 BFA_PL_EID_RX, uf->data_len,
5332 (struct fchs_s *)buf, pld_w0);
5336 __bfa_cb_uf_recv(uf, BFA_TRUE);
5338 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
/* Module stop hook — intentionally empty. */
5342 bfa_uf_stop(struct bfa_s *bfa)
/*
 * IOC-disable hook: firmware will never complete the posted UFs, so
 * reclaim everything on the posted queue back to the free queue.
 */
5347 bfa_uf_iocdisable(struct bfa_s *bfa)
5349 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5350 struct bfa_uf_s *uf;
5351 struct list_head *qe, *qen;
5353 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5354 uf = (struct bfa_uf_s *) qe;
5356 bfa_uf_put(ufm, uf);
/* Module start hook: hand all free UF buffers to firmware. */
5361 bfa_uf_start(struct bfa_s *bfa)
5363 bfa_uf_post_all(BFA_UF_MOD(bfa));
5373 * Register handler for all unsolicited receive frames.
5375 * @param[in] bfa BFA instance
5376 * @param[in] ufrecv receive handler function
5377 * @param[in] cbarg receive handler arg
/* Register the (single) receive handler and its context for this BFA. */
5380 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5382 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5384 ufm->ufrecv = ufrecv;
5389 * Free an unsolicited frame back to BFA.
5391 * @param[in] uf unsolicited frame to be freed
/* Return a consumed UF to the pool and repost all free UFs to firmware. */
5396 bfa_uf_free(struct bfa_uf_s *uf)
5398 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5399 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5405 * uf_pub BFA uf module public functions
5408 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5410 bfa_trc(bfa, msg->mhdr.msg_id);
5412 switch (msg->mhdr.msg_id) {
5413 case BFI_UF_I2H_FRM_RCVD:
5414 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5418 bfa_trc(bfa, msg->mhdr.msg_id);