/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *conn_destroy);
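
/*
 * Note on the control path: requests to the 57712 firmware are sent as
 * kernel work queue entries (KWQEs) through cnic->submit_kwqes(), and the
 * firmware answers asynchronously with kernel completion queue entries
 * (KCQEs) delivered to bnx2fc_indicate_kcqe() below. Per-connection fast
 * path traffic uses the SQ/CQ/RQ rings owned by each bnx2fc_rport.
 */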

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiates the initial handshake
 * with the f/w.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_hba *hba = port->priv;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;
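	/*
	 * rq_prod carries a wrap counter in bit 15: it starts at 0x8000,
	 * and bnx2fc_return_rqe() below preserves that parity across RQ
	 * wrap-arounds.
	 */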

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
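	/*
	 * The 24-bit FC IDs are handed to the firmware least significant
	 * byte first: a port_id of 0x0A0B0C lands in d_id[] as
	 * {0x0C, 0x0B, 0x0A}.
	 */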

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/* vlan flag */
	ofld_req3.flags |= (hba->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
	enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];

	enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = hba->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
	disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
	disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
	disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
	disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
	disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];

	disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = hba->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
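
/*
 * Session lifecycle: a target is first offloaded (the CONN1-4 KWQEs
 * above), then enabled once the offload KCQE arrives (see
 * bnx2fc_process_ofld_cmpl below). Teardown is a disable followed by a
 * destroy, each acknowledged through its own KCQE handler.
 */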

static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_hba *hba = tgt->port->priv;
	int task_idx, index;
	int rc = 0;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			/* RQ data spans the ring boundary - copy it out */
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->err_warn_bitmap_hi,
			err_entry->err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->tx_buf_off, err_entry->rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
					hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req) {
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &io_req->req_flags)) {
			/*
			 * Cancel the timeout_work, as we received IO
			 * completion with FW error.
			 */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */

			rc = bnx2fc_initiate_abts(io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
					"failed. issue cleanup\n");
				rc = bnx2fc_initiate_cleanup(io_req);
				BUG_ON(rc);
			}
		} else
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->err_warn_bitmap_hi,
			err_entry->err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->tx_buf_off, err_entry->rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}
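
/*
 * Task context lookup, used above and in bnx2fc_process_cq_compl(): task
 * contexts live in an array of pages holding BNX2FC_TASKS_PER_PAGE
 * entries each. With 32 tasks per page (a representative value), xid 100
 * maps to task_idx 3, index 4.
 */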

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ALERT PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rx_wr_tx_rd.rx_flags &
		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	/* optimized completion path */
	if (cmd_type == BNX2FC_SCSI_CMD) {
		rx_state = ((task->rx_wr_tx_rd.rx_flags &
			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);

		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}
	}

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
		bnx2fc_process_els_compl(io_req, task, num_rq);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u16 wqe;
	bool more_cqes_found = false;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	do {
		more_cqes_found ^= true;

		while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
		       (tgt->cq_curr_toggle_bit <<
		       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

			/* new entry on the cq */
			if (wqe & FCOE_CQE_CQE_TYPE) {
				/* Unsolicited event notification */
				bnx2fc_process_unsol_compl(tgt, wqe);
			} else {
				struct bnx2fc_work *work = NULL;
				struct bnx2fc_percpu_s *fps = NULL;
				unsigned int cpu = wqe % num_possible_cpus();

				fps = &per_cpu(bnx2fc_percpu, cpu);
				spin_lock_bh(&fps->fp_work_lock);
				if (unlikely(!fps->iothread))
					goto unlock;

				work = bnx2fc_alloc_work(tgt, wqe);
				if (work)
					list_add_tail(&work->list,
						      &fps->work_list);
unlock:
				spin_unlock_bh(&fps->fp_work_lock);

				/* Pending work request completion */
				if (fps->iothread && work)
					wake_up_process(fps->iothread);
				else
					bnx2fc_process_cq_compl(tgt, wqe);
			}
			cqe++;
			tgt->cq_cons_idx++;

			if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
				tgt->cq_cons_idx = 0;
				cqe = cq;
				tgt->cq_curr_toggle_bit =
					1 - tgt->cq_curr_toggle_bit;
			}
		}
		/* Re-arm CQ */
		if (more_cqes_found) {
			tgt->conn_db->cq_arm.lo = -1;
			wmb();
		}
	} while (more_cqes_found);

	/*
	 * Commit tgt->cq_cons_idx change to the memory
	 * spin_lock implies full memory barrier, no need to smp_wmb
	 */

	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}
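
/*
 * CQE validity is tracked with a toggle bit rather than by zeroing
 * consumed entries: the firmware writes FCOE_CQE_TOGGLE_BIT with
 * alternating polarity on each lap around the CQ, and the driver flips
 * cq_curr_toggle_bit whenever its consumer index wraps. An entry whose
 * toggle bit does not match the driver's current polarity is stale and
 * terminates the polling loop above.
 */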

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport	*tgt;
	struct fcoe_port	*port;
	u32			conn_id;
	u32			context_id;
	int			rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {
		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ALERT PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;
	u32			context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ALERT PFX "context id mis-match\n");
		return;
	}
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status) {
		goto enbl_cmpl_err;
	} else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		return;
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;

	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @hba:	adapter structure pointer
 * @kcqe:	kcqe pointer
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
							kcqe->op_code);
		}
	}
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}
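
/*
 * The SQ uses the same toggle-bit convention as the CQ: every WQE carries
 * the current producer polarity, which flips when sq_prod_idx wraps. With
 * BNX2FC_SQ_WQES_MAX of 256 (a representative value), posting the 256th
 * WQE resets sq_prod_idx to 0 and flips sq_curr_toggle_bit; the doorbell
 * below then publishes both as (sq_prod_idx | toggle << 15).
 */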

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod ev_doorbell;
	u32 msg;

	wmb();

	memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
	ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;

	ev_doorbell.prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	msg = *((u32 *)&ev_doorbell);
	writel(cpu_to_le32(msg), tgt->ctx_base);
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_hba *hba = port->priv;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
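
/*
 * The RQ producer is published to the firmware as a 15-bit index plus a
 * wrap counter in bit 15 (hence the initial rq_prod of 0x8000 in the
 * offload request): once the low bits reach BNX2FC_RQ_WQES_MAX, adding
 * 0x8000 - BNX2FC_RQ_WQES_MAX zeroes the index and flips bit 15.
 */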

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.general.cleanup_info.task_id = orig_xid;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
		BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
			      (unsigned long long)mp_req->mp_req_bd_dma);
	}

	/* Tx Write Rx Read */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.data_2_trns = io_req->data_xfer_len;
	context_id = tgt->context_id;
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.common_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
	task->cmn.common_flags |= 1 <<
			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;

	/* Rx Write Tx Read */
	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rx_wr_tx_rd.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {

		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)mp_req->mp_resp_bd_dma;
		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
	}
}
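
/*
 * The 24-byte FC frame header above is packed into the task context as
 * three 64-bit big-endian words: the header fields are already big-endian
 * on the wire, and the chip presumably expects each 8-byte word of the
 * context in big-endian order as well, hence the memcpy followed by
 * cpu_to_be64().
 */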

void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)bd_tbl->bd_tbl_dma;
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
				bd_tbl->bd_valid;
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.data_2_trns = io_req->data_xfer_len;
	context_id = tgt->context_id;
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.common_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
	task->cmn.common_flags |= 1 <<
			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;

	/* Set initiative ownership */
	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;

	/* Set initial seq counter */
	task->cmn.tx_low_seq_cnt = 1;

	/* Set state to "waiting for the first packet" */
	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		    task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rx_wr_tx_rd.rx_id = 0xffff;

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_READ) {

		bd_count = bd_tbl->bd_valid;
		if (bd_count == 1) {

			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
					fcoe_bd_tbl->buf_len;
			task->tx_wr_rx_rd.init_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
		} else {

			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
					bd_tbl->bd_valid;
		}
	}
}
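
/*
 * Note the single-SGE optimization for reads above: when the I/O has
 * exactly one scatter-gather element, its buffer address is written
 * directly into the task context (single_sge) instead of pointing the
 * firmware at the BD table, which presumably saves the chip one BD-table
 * fetch per I/O. Multi-SGE reads and all writes go through the BD table
 * as a multiple-SGE list.
 */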

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -1;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -1;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -1;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -1;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {

			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				hba->task_ctx[i], hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}
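
/*
 * The task context BD table built above is a page list: a single page of
 * struct regpair entries, each holding the DMA address of one page of
 * task contexts split into little-endian lo/hi halves. Its base address
 * is handed to the firmware in the INIT1 KWQE via task_list_pbl_addr_lo/hi.
 */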

void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->task_ctx_bd_tbl,
				    hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						    hba->task_ctx[i],
						    hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	int hash_table_size;
	u32 *pbl;

	segment_count = hba->hash_tbl_segment_count;
	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		dma_addr_t dma_address;

		dma_address = le32_to_cpu(*pbl);
		++pbl;
		dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
		++pbl;
		dma_free_coherent(&hba->pcidev->dev,
				  BNX2FC_HASH_TBL_CHUNK_SIZE,
				  hba->hash_tbl_segments[i],
				  dma_address);
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}

static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			while (--i >= 0) {
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_segment_array[i]);
				hba->hash_tbl_segments[i] = NULL;
			}
			kfree(dma_segment_array);
			return -ENOMEM;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		kfree(dma_segment_array);
		return -ENOMEM;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	pbl = hba->hash_tbl_pbl;
	while (*pbl && *(pbl + 1)) {
		u32 lo;
		u32 hi;
		lo = *pbl;
		++pbl;
		hi = *pbl;
		++pbl;
		++i;
	}
	kfree(dma_segment_array);
	return 0;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
			sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}
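
	/*
	 * Each t2 hash entry's next pointer is pre-linked to the DMA
	 * address of the entry that follows it, so the firmware can treat
	 * the table as a chain of free entries to carve per-session state
	 * from (an assumption based on the initialization pattern above).
	 */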

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			    sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}