/* drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c */
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
#include <linux/types.h>

#define QLC_BC_COMMAND  0
#define QLC_BC_RESPONSE 1

#define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)

#define QLC_BC_MSG              0
#define QLC_BC_CFREE            1
#define QLC_BC_FLR              2
#define QLC_BC_HDR_SZ           16
#define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)

#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512

#define QLC_83XX_VF_RESET_FAIL_THRESH   8
#define QLC_BC_CMD_MAX_RETRY_CNT        5

static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
                                  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);

static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .read_crb                       = qlcnic_83xx_read_crb,
        .write_crb                      = qlcnic_83xx_write_crb,
        .read_reg                       = qlcnic_83xx_rd_reg_indirect,
        .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
        .get_mac_address                = qlcnic_83xx_get_mac_address,
        .setup_intr                     = qlcnic_83xx_setup_intr,
        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
        .mbx_cmd                        = qlcnic_sriov_issue_cmd,
        .get_func_no                    = qlcnic_83xx_get_func_no,
        .api_lock                       = qlcnic_83xx_cam_lock,
        .api_unlock                     = qlcnic_83xx_cam_unlock,
        .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
        .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
        .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
        .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
        .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
        .setup_link_event               = qlcnic_83xx_setup_link_event,
        .get_nic_info                   = qlcnic_83xx_get_nic_info,
        .get_pci_info                   = qlcnic_83xx_get_pci_info,
        .set_nic_info                   = qlcnic_83xx_set_nic_info,
        .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
        .napi_enable                    = qlcnic_83xx_napi_enable,
        .napi_disable                   = qlcnic_83xx_napi_disable,
        .config_intr_coal               = qlcnic_83xx_config_intr_coal,
        .config_rss                     = qlcnic_83xx_config_rss,
        .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
        .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
        .change_l2_filter               = qlcnic_83xx_change_l2_filter,
        .get_board_info                 = qlcnic_83xx_get_port_info,
        .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
        .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
        .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
        .config_bridged_mode    = qlcnic_config_bridged_mode,
        .config_led             = qlcnic_config_led,
        .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
        .napi_add               = qlcnic_83xx_napi_add,
        .napi_del               = qlcnic_83xx_napi_del,
        .shutdown               = qlcnic_sriov_vf_shutdown,
        .resume                 = qlcnic_sriov_vf_resume,
        .config_ipaddr          = qlcnic_83xx_config_ipaddr,
        .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
};

static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
        {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
        {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
        {QLCNIC_BC_CMD_GET_ACL, 3, 14},
        {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};

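/*
 * Back-channel event word layout, as consumed by the helpers below:
 * bit QLC_BC_MSG   - a back-channel message fragment is pending,
 * bit QLC_BC_CFREE - the peer has released the channel,
 * bit QLC_BC_FLR   - a Function Level Reset notification,
 * bits 4..11       - PCI function id of the target VF.
 */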
static inline bool qlcnic_sriov_bc_msg_check(u32 val)
{
        return (val & (1 << QLC_BC_MSG)) ? true : false;
}

static inline bool qlcnic_sriov_channel_free_check(u32 val)
{
        return (val & (1 << QLC_BC_CFREE)) ? true : false;
}

static inline bool qlcnic_sriov_flr_check(u32 val)
{
        return (val & (1 << QLC_BC_FLR)) ? true : false;
}

static inline u8 qlcnic_sriov_target_func_id(u32 val)
{
        return (val >> 4) & 0xff;
}

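/*
 * Map a VF index to its virtual PCI function number using the PF's SR-IOV
 * capability: devfn = PF devfn + First VF Offset + VF Stride * vf_id.
 * On a VF this is a no-op and simply returns 0.
 */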
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
        struct pci_dev *dev = adapter->pdev;
        int pos;
        u16 stride, offset;

        if (qlcnic_sriov_vf_check(adapter))
                return 0;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        return (dev->devfn + offset + stride * vf_id) & 0xff;
}

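/*
 * Allocate the SR-IOV context: the per-VF info array, the "bc-trans"
 * workqueue that processes back-channel transactions and the "async"
 * workqueue for deferred requests. On the PF a vport with a random MAC
 * address is additionally allocated for every VF.
 */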
int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
{
        struct qlcnic_sriov *sriov;
        struct qlcnic_back_channel *bc;
        struct workqueue_struct *wq;
        struct qlcnic_vport *vp;
        struct qlcnic_vf_info *vf;
        int err, i;

        if (!qlcnic_sriov_enable_check(adapter))
                return -EIO;

        sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
        if (!sriov)
                return -ENOMEM;

        adapter->ahw->sriov = sriov;
        sriov->num_vfs = num_vfs;
        bc = &sriov->bc;
        sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
                                 num_vfs, GFP_KERNEL);
        if (!sriov->vf_info) {
                err = -ENOMEM;
                goto qlcnic_free_sriov;
        }

        wq = create_singlethread_workqueue("bc-trans");
        if (wq == NULL) {
                err = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Cannot create bc-trans workqueue\n");
                goto qlcnic_free_vf_info;
        }

        bc->bc_trans_wq = wq;

        wq = create_singlethread_workqueue("async");
        if (wq == NULL) {
                err = -ENOMEM;
                dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
                goto qlcnic_destroy_trans_wq;
        }

        bc->bc_async_wq =  wq;
        INIT_LIST_HEAD(&bc->async_list);

        for (i = 0; i < num_vfs; i++) {
                vf = &sriov->vf_info[i];
                vf->adapter = adapter;
                vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
                mutex_init(&vf->send_cmd_lock);
                mutex_init(&vf->vlan_list_lock);
                INIT_LIST_HEAD(&vf->rcv_act.wait_list);
                INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
                spin_lock_init(&vf->rcv_act.lock);
                spin_lock_init(&vf->rcv_pend.lock);
                init_completion(&vf->ch_free_cmpl);

                INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);

                if (qlcnic_sriov_pf_check(adapter)) {
                        vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
                        if (!vp) {
                                err = -ENOMEM;
                                goto qlcnic_destroy_async_wq;
                        }
                        sriov->vf_info[i].vp = vp;
                        vp->max_tx_bw = MAX_BW;
                        vp->spoofchk = true;
                        random_ether_addr(vp->mac);
                        dev_info(&adapter->pdev->dev,
                                 "MAC Address %pM is configured for VF %d\n",
                                 vp->mac, i);
                }
        }

        return 0;

qlcnic_destroy_async_wq:
        destroy_workqueue(bc->bc_async_wq);

qlcnic_destroy_trans_wq:
        destroy_workqueue(bc->bc_trans_wq);

qlcnic_free_vf_info:
        kfree(sriov->vf_info);

qlcnic_free_sriov:
        kfree(adapter->ahw->sriov);
        return err;
}

void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
        struct qlcnic_bc_trans *trans;
        struct qlcnic_cmd_args cmd;
        unsigned long flags;

        spin_lock_irqsave(&t_list->lock, flags);

        while (!list_empty(&t_list->wait_list)) {
                trans = list_first_entry(&t_list->wait_list,
                                         struct qlcnic_bc_trans, list);
                list_del(&trans->list);
                t_list->count--;
                cmd.req.arg = (u32 *)trans->req_pay;
                cmd.rsp.arg = (u32 *)trans->rsp_pay;
                qlcnic_free_mbx_args(&cmd);
                qlcnic_sriov_cleanup_transaction(trans);
        }

        spin_unlock_irqrestore(&t_list->lock, flags);
}

void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_back_channel *bc = &sriov->bc;
        struct qlcnic_vf_info *vf;
        int i;

        if (!qlcnic_sriov_enable_check(adapter))
                return;

        qlcnic_sriov_cleanup_async_list(bc);
        destroy_workqueue(bc->bc_async_wq);

        for (i = 0; i < sriov->num_vfs; i++) {
                vf = &sriov->vf_info[i];
                qlcnic_sriov_cleanup_list(&vf->rcv_pend);
                cancel_work_sync(&vf->trans_work);
                qlcnic_sriov_cleanup_list(&vf->rcv_act);
        }

        destroy_workqueue(bc->bc_trans_wq);

        for (i = 0; i < sriov->num_vfs; i++)
                kfree(sriov->vf_info[i].vp);

        kfree(sriov->vf_info);
        kfree(adapter->ahw->sriov);
}

static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
        qlcnic_sriov_cfg_bc_intr(adapter, 0);
        __qlcnic_sriov_cleanup(adapter);
}

void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
        if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
                return;

        qlcnic_sriov_free_vlans(adapter);

        if (qlcnic_sriov_pf_check(adapter))
                qlcnic_sriov_pf_cleanup(adapter);

        if (qlcnic_sriov_vf_check(adapter))
                qlcnic_sriov_vf_cleanup(adapter);
}

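/*
 * Post a single back-channel fragment (header + payload) to the firmware
 * mailbox as a QLC_83XX_MBX_POST_BC_OP command and wait for the mailbox
 * worker to complete it; returns the mailbox response opcode.
 */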
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
                                    u32 *pay, u8 pci_func, u8 size)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_mailbox *mbx = ahw->mailbox;
        struct qlcnic_cmd_args cmd;
        unsigned long timeout;
        int err;

        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        cmd.hdr = hdr;
        cmd.pay = pay;
        cmd.pay_size = size;
        cmd.func_num = pci_func;
        cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
        cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

        err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
                        __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
                        ahw->op_mode);
                return err;
        }

        if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
                dev_err(&adapter->pdev->dev,
                        "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
                        __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
                        ahw->op_mode);
                flush_workqueue(mbx->work_q);
        }

        return cmd.rsp_opcode;
}

static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
        adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
        adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
        adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
        adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
        adapter->num_txd = MAX_CMD_DESCRIPTORS;
        adapter->max_rds_rings = MAX_RDS_RINGS;
}

int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
                                   struct qlcnic_info *npar_info, u16 vport_id)
{
        struct device *dev = &adapter->pdev->dev;
        struct qlcnic_cmd_args cmd;
        int err;
        u32 status;

        err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
        if (err)
                return err;

        cmd.req.arg[1] = vport_id << 16 | 0x1;
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Failed to get vport info, err=%d\n", err);
                qlcnic_free_mbx_args(&cmd);
                return err;
        }

        status = cmd.rsp.arg[2] & 0xffff;
        if (status & BIT_0)
                npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
        if (status & BIT_1)
                npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
        if (status & BIT_2)
                npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
        if (status & BIT_3)
                npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
        if (status & BIT_4)
                npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
        if (status & BIT_5)
                npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
        if (status & BIT_6)
                npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
        if (status & BIT_7)
                npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
        if (status & BIT_8)
                npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
        if (status & BIT_9)
                npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

        npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
        npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
        npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
        npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

        dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
                 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
                 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
                 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
                 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
                 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
                 npar_info->min_tx_bw, npar_info->max_tx_bw,
                 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
                 npar_info->max_rx_mcast_mac_filters,
                 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
                 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
                 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
                 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
                 npar_info->max_remote_ipv6_addrs);

        qlcnic_free_mbx_args(&cmd);
        return err;
}

static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
                                      struct qlcnic_cmd_args *cmd)
{
        adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
        adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
        return 0;
}

static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
                                            struct qlcnic_cmd_args *cmd)
{
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        int i, num_vlans;
        u16 *vlans;

        if (sriov->allowed_vlans)
                return 0;

        sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
        sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
        dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
                 sriov->num_allowed_vlans);

        qlcnic_sriov_alloc_vlans(adapter);

        if (!sriov->any_vlan)
                return 0;

        num_vlans = sriov->num_allowed_vlans;
        sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
        if (!sriov->allowed_vlans)
                return -ENOMEM;

        vlans = (u16 *)&cmd->rsp.arg[3];
        for (i = 0; i < num_vlans; i++)
                sriov->allowed_vlans[i] = vlans[i];

        return 0;
}

static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_cmd_args cmd;
        int ret = 0;

        ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
        if (ret)
                return ret;

        ret = qlcnic_issue_cmd(adapter, &cmd);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
                        ret);
        } else {
                sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
                switch (sriov->vlan_mode) {
                case QLC_GUEST_VLAN_MODE:
                        ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
                        break;
                case QLC_PVID_MODE:
                        ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
                        break;
                }
        }

        qlcnic_free_mbx_args(&cmd);
        return ret;
}

static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_info nic_info;
        int err;

        err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
        if (err)
                return err;

        ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

        err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
        if (err)
                return -EIO;

        if (qlcnic_83xx_get_port_info(adapter))
                return -EIO;

        qlcnic_sriov_vf_cfg_buff_desc(adapter);
        adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
        dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
                 adapter->ahw->fw_hal_version);

        ahw->physical_port = (u8) nic_info.phys_port;
        ahw->switch_mode = nic_info.switch_mode;
        ahw->max_mtu = nic_info.max_mtu;
        ahw->op_mode = nic_info.op_mode;
        ahw->capabilities = nic_info.capabilities;
        return 0;
}

static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
                                 int pci_using_dac)
{
        int err;

        INIT_LIST_HEAD(&adapter->vf_mc_list);
        if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
                dev_warn(&adapter->pdev->dev,
                         "Device does not support MSI interrupts\n");

        /* compute and set default and max tx/sds rings */
        qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
        qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

        err = qlcnic_setup_intr(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
                goto err_out_disable_msi;
        }

        err = qlcnic_83xx_setup_mbx_intr(adapter);
        if (err)
                goto err_out_disable_msi;

        err = qlcnic_sriov_init(adapter, 1);
        if (err)
                goto err_out_disable_mbx_intr;

        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
        if (err)
                goto err_out_cleanup_sriov;

        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
        if (err)
                goto err_out_disable_bc_intr;

        err = qlcnic_sriov_vf_init_driver(adapter);
        if (err)
                goto err_out_send_channel_term;

        err = qlcnic_sriov_get_vf_acl(adapter);
        if (err)
                goto err_out_send_channel_term;

        err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
        if (err)
                goto err_out_send_channel_term;

        pci_set_drvdata(adapter->pdev, adapter);
        dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
                 adapter->netdev->name);

        qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
                             adapter->ahw->idc.delay);
        return 0;

err_out_send_channel_term:
        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
        qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
        __qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
        qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
        qlcnic_teardown_intr(adapter);
        return err;
}

static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
        u32 state;

        do {
                msleep(20);
                if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
                        return -EIO;
                state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
        } while (state != QLC_83XX_IDC_DEV_READY);

        return 0;
}

int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        int err;

        set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
        ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
        ahw->reset_context = 0;
        adapter->fw_fail_cnt = 0;
        ahw->msix_supported = 1;
        adapter->need_fw_reset = 0;
        adapter->flags |= QLCNIC_TX_INTR_SHARED;

        err = qlcnic_sriov_check_dev_ready(adapter);
        if (err)
                return err;

        err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
        if (err)
                return err;

        if (qlcnic_read_mac_addr(adapter))
                dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

        INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return 0;
}

void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;

        ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
        dev_info(&adapter->pdev->dev,
                 "HAL Version: %d Non Privileged SRIOV function\n",
                 ahw->fw_hal_version);
        adapter->nic_ops = &qlcnic_sriov_vf_ops;
        set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
        return;
}

void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
        ahw->hw_ops             = &qlcnic_sriov_vf_hw_ops;
        ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
        ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
}

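/*
 * Return the number of payload bytes carried by fragment 'curr_frag' of a
 * message whose total payload is 'real_pay_size': every fragment except the
 * last carries a full QLC_BC_PAYLOAD_SZ, the last carries the remainder.
 */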
static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
{
        u32 pay_size;

        pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);

        if (pay_size)
                pay_size = QLC_BC_PAYLOAD_SZ;
        else
                pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;

        return pay_size;
}

int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
{
        struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
        u8 i;

        if (qlcnic_sriov_vf_check(adapter))
                return 0;

        for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
                if (vf_info[i].pci_func == pci_func)
                        return i;
        }

        return -EINVAL;
}

static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
{
        *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
        if (!*trans)
                return -ENOMEM;

        init_completion(&(*trans)->resp_cmpl);
        return 0;
}

static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
                                            u32 size)
{
        *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
        if (!*hdr)
                return -ENOMEM;

        return 0;
}

static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
        const struct qlcnic_mailbox_metadata *mbx_tbl;
        int i, size;

        mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
        size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

        for (i = 0; i < size; i++) {
                if (type == mbx_tbl[i].cmd) {
                        mbx->op_type = QLC_BC_CMD;
                        mbx->req.num = mbx_tbl[i].in_args;
                        mbx->rsp.num = mbx_tbl[i].out_args;
                        mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
                                               GFP_ATOMIC);
                        if (!mbx->req.arg)
                                return -ENOMEM;
                        mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
                                               GFP_ATOMIC);
                        if (!mbx->rsp.arg) {
                                kfree(mbx->req.arg);
                                mbx->req.arg = NULL;
                                return -ENOMEM;
                        }
                        memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) |
                                           (3 << 29));
                        mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
                        return 0;
                }
        }
        return -EINVAL;
}

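/*
 * Build the per-fragment headers for a back-channel transaction. For an
 * outgoing command the request/response payloads are taken from the mailbox
 * args and header arrays are allocated for both directions; for a response
 * the buffers already attached to the transaction are reused. Every fragment
 * header carries the message type, cmd_op, total fragment count, its own
 * 1-based fragment number and the transaction sequence id.
 */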
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
                                       struct qlcnic_cmd_args *cmd,
                                       u16 seq, u8 msg_type)
{
        struct qlcnic_bc_hdr *hdr;
        int i;
        u32 num_regs, bc_pay_sz;
        u16 remainder;
        u8 cmd_op, num_frags, t_num_frags;

        bc_pay_sz = QLC_BC_PAYLOAD_SZ;
        if (msg_type == QLC_BC_COMMAND) {
                trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
                trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
                num_regs = cmd->req.num;
                trans->req_pay_size = (num_regs * 4);
                num_regs = cmd->rsp.num;
                trans->rsp_pay_size = (num_regs * 4);
                cmd_op = cmd->req.arg[0] & 0xff;
                remainder = (trans->req_pay_size) % (bc_pay_sz);
                num_frags = (trans->req_pay_size) / (bc_pay_sz);
                if (remainder)
                        num_frags++;
                t_num_frags = num_frags;
                if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
                        return -ENOMEM;
                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
                if (remainder)
                        num_frags++;
                if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
                        return -ENOMEM;
                num_frags  = t_num_frags;
                hdr = trans->req_hdr;
        }  else {
                cmd->req.arg = (u32 *)trans->req_pay;
                cmd->rsp.arg = (u32 *)trans->rsp_pay;
                cmd_op = cmd->req.arg[0] & 0xff;
                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
                if (remainder)
                        num_frags++;
                cmd->req.num = trans->req_pay_size / 4;
                cmd->rsp.num = trans->rsp_pay_size / 4;
                hdr = trans->rsp_hdr;
                cmd->op_type = trans->req_hdr->op_type;
        }

        trans->trans_id = seq;
        trans->cmd_id = cmd_op;
        for (i = 0; i < num_frags; i++) {
                hdr[i].version = 2;
                hdr[i].msg_type = msg_type;
                hdr[i].op_type = cmd->op_type;
                hdr[i].num_cmds = 1;
                hdr[i].num_frags = num_frags;
                hdr[i].frag_num = i + 1;
                hdr[i].cmd_op = cmd_op;
                hdr[i].seq_id = seq;
        }
        return 0;
}

static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
        if (!trans)
                return;
        kfree(trans->req_hdr);
        kfree(trans->rsp_hdr);
        kfree(trans);
}

static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
                                    struct qlcnic_bc_trans *trans, u8 type)
{
        struct qlcnic_trans_list *t_list;
        unsigned long flags;
        int ret = 0;

        if (type == QLC_BC_RESPONSE) {
                t_list = &vf->rcv_act;
                spin_lock_irqsave(&t_list->lock, flags);
                t_list->count--;
                list_del(&trans->list);
                if (t_list->count > 0)
                        ret = 1;
                spin_unlock_irqrestore(&t_list->lock, flags);
        }
        if (type == QLC_BC_COMMAND) {
                while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
                        msleep(100);
                vf->send_cmd = NULL;
                clear_bit(QLC_BC_VF_SEND, &vf->state);
        }
        return ret;
}

static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
                                         struct qlcnic_vf_info *vf,
                                         work_func_t func)
{
        if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
            vf->adapter->need_fw_reset)
                return;

        queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}

static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
{
        struct completion *cmpl = &trans->resp_cmpl;

        if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
                trans->trans_state = QLC_END;
        else
                trans->trans_state = QLC_ABORT;

        return;
}

static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
                                            u8 type)
{
        if (type == QLC_BC_RESPONSE) {
                trans->curr_rsp_frag++;
                if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
                        trans->trans_state = QLC_INIT;
                else
                        trans->trans_state = QLC_END;
        } else {
                trans->curr_req_frag++;
                if (trans->curr_req_frag < trans->req_hdr->num_frags)
                        trans->trans_state = QLC_INIT;
                else
                        trans->trans_state = QLC_WAIT_FOR_RESP;
        }
}

static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
                                               u8 type)
{
        struct qlcnic_vf_info *vf = trans->vf;
        struct completion *cmpl = &vf->ch_free_cmpl;

        if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
                trans->trans_state = QLC_ABORT;
                return;
        }

        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
        qlcnic_sriov_handle_multi_frags(trans, type);
}

static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
                                     u32 *hdr, u32 *pay, u32 size)
{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 fw_mbx;
        u8 i, max = 2, hdr_size, j;

        hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
        max = (size / sizeof(u32)) + hdr_size;

        fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
        for (i = 2, j = 0; j < hdr_size; i++, j++)
                *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
        for (; j < max; i++, j++)
                *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}

static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
{
        int ret = -EBUSY;
        u32 timeout = 10000;

        do {
                if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
                        ret = 0;
                        break;
                }
                mdelay(1);
        } while (--timeout);

        return ret;
}

static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
        struct qlcnic_vf_info *vf = trans->vf;
        u32 pay_size, hdr_size;
        u32 *hdr, *pay;
        int ret;
        u8 pci_func = trans->func_id;

        if (__qlcnic_sriov_issue_bc_post(vf))
                return -EBUSY;

        if (type == QLC_BC_COMMAND) {
                hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
                pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
                hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
                                                       trans->curr_req_frag);
                pay_size = (pay_size / sizeof(u32));
        } else {
                hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
                pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
                hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
                pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
                                                       trans->curr_rsp_frag);
                pay_size = (pay_size / sizeof(u32));
        }

        ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
                                       pci_func, pay_size);
        return ret;
}

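/*
 * Drive the back-channel transaction state machine: QLC_INIT posts the next
 * fragment, QLC_WAIT_FOR_CHANNEL_FREE waits for the peer to release the
 * channel before moving on to the next fragment, QLC_WAIT_FOR_RESP waits for
 * the response completion, and QLC_END/QLC_ABORT finish with 0 or -EIO.
 * An FLR or a pending firmware reset aborts the transaction.
 */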
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
                                      struct qlcnic_vf_info *vf, u8 type)
{
        bool flag = true;
        int err = -EIO;

        while (flag) {
                if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
                    vf->adapter->need_fw_reset)
                        trans->trans_state = QLC_ABORT;

                switch (trans->trans_state) {
                case QLC_INIT:
                        trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
                        if (qlcnic_sriov_issue_bc_post(trans, type))
                                trans->trans_state = QLC_ABORT;
                        break;
                case QLC_WAIT_FOR_CHANNEL_FREE:
                        qlcnic_sriov_wait_for_channel_free(trans, type);
                        break;
                case QLC_WAIT_FOR_RESP:
                        qlcnic_sriov_wait_for_resp(trans);
                        break;
                case QLC_END:
                        err = 0;
                        flag = false;
                        break;
                case QLC_ABORT:
                        err = -EIO;
                        flag = false;
                        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
                        break;
                default:
                        err = -EIO;
                        flag = false;
                }
        }
        return err;
}

static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
                                    struct qlcnic_bc_trans *trans, int pci_func)
{
        struct qlcnic_vf_info *vf;
        int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

        if (index < 0)
                return -EIO;

        vf = &adapter->ahw->sriov->vf_info[index];
        trans->vf = vf;
        trans->func_id = pci_func;

        if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
                if (qlcnic_sriov_pf_check(adapter))
                        return -EIO;
                if (qlcnic_sriov_vf_check(adapter) &&
                    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
                        return -EIO;
        }

        mutex_lock(&vf->send_cmd_lock);
        vf->send_cmd = trans;
        err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
        qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
        mutex_unlock(&vf->send_cmd_lock);
        return err;
}

static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
                                          struct qlcnic_bc_trans *trans,
                                          struct qlcnic_cmd_args *cmd)
{
#ifdef CONFIG_QLCNIC_SRIOV
        if (qlcnic_sriov_pf_check(adapter)) {
                qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
                return;
        }
#endif
        cmd->rsp.arg[0] |= (0x9 << 25);
        return;
}

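/*
 * Work handler for the "bc-trans" workqueue: pick the first transaction off
 * the VF's active receive list, build and send the response over the back
 * channel, then reschedule itself if more transactions are still queued.
 */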
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
        struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
                                                 trans_work);
        struct qlcnic_bc_trans *trans = NULL;
        struct qlcnic_adapter *adapter  = vf->adapter;
        struct qlcnic_cmd_args cmd;
        u8 req;

        if (adapter->need_fw_reset)
                return;

        if (test_bit(QLC_BC_VF_FLR, &vf->state))
                return;

        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        trans = list_first_entry(&vf->rcv_act.wait_list,
                                 struct qlcnic_bc_trans, list);
        adapter = vf->adapter;

        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
                                        QLC_BC_RESPONSE))
                goto cleanup_trans;

        __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
        trans->trans_state = QLC_INIT;
        __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
        qlcnic_free_mbx_args(&cmd);
        req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
        qlcnic_sriov_cleanup_transaction(trans);
        if (req)
                qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
                                             qlcnic_sriov_process_bc_cmd);
}

static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
                                        struct qlcnic_vf_info *vf)
{
        struct qlcnic_bc_trans *trans;
        u32 pay_size;

        if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
                return;

        trans = vf->send_cmd;

        if (trans == NULL)
                goto clear_send;

        if (trans->trans_id != hdr->seq_id)
                goto clear_send;

        pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
                                               trans->curr_rsp_frag);
        qlcnic_sriov_pull_bc_msg(vf->adapter,
                                 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
                                 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
                                 pay_size);
        if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
                goto clear_send;

        complete(&trans->resp_cmpl);

clear_send:
        clear_bit(QLC_BC_VF_SEND, &vf->state);
}

int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
                                struct qlcnic_vf_info *vf,
                                struct qlcnic_bc_trans *trans)
{
        struct qlcnic_trans_list *t_list = &vf->rcv_act;

        t_list->count++;
        list_add_tail(&trans->list, &t_list->wait_list);
        if (t_list->count == 1)
                qlcnic_sriov_schedule_bc_cmd(sriov, vf,
                                             qlcnic_sriov_process_bc_cmd);
        return 0;
}

static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
                                     struct qlcnic_vf_info *vf,
                                     struct qlcnic_bc_trans *trans)
{
        struct qlcnic_trans_list *t_list = &vf->rcv_act;

        spin_lock(&t_list->lock);

        __qlcnic_sriov_add_act_list(sriov, vf, trans);

        spin_unlock(&t_list->lock);
        return 0;
}

static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
                                              struct qlcnic_vf_info *vf,
                                              struct qlcnic_bc_hdr *hdr)
{
        struct qlcnic_bc_trans *trans = NULL;
        struct list_head *node;
        u32 pay_size, curr_frag;
        u8 found = 0, active = 0;

        spin_lock(&vf->rcv_pend.lock);
        if (vf->rcv_pend.count > 0) {
                list_for_each(node, &vf->rcv_pend.wait_list) {
                        trans = list_entry(node, struct qlcnic_bc_trans, list);
                        if (trans->trans_id == hdr->seq_id) {
                                found = 1;
                                break;
                        }
                }
        }

        if (found) {
                curr_frag = trans->curr_req_frag;
                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
                                                       curr_frag);
                qlcnic_sriov_pull_bc_msg(vf->adapter,
                                         (u32 *)(trans->req_hdr + curr_frag),
                                         (u32 *)(trans->req_pay + curr_frag),
                                         pay_size);
                trans->curr_req_frag++;
                if (trans->curr_req_frag >= hdr->num_frags) {
                        vf->rcv_pend.count--;
                        list_del(&trans->list);
                        active = 1;
                }
        }
        spin_unlock(&vf->rcv_pend.lock);

        if (active)
                if (qlcnic_sriov_add_act_list(sriov, vf, trans))
                        qlcnic_sriov_cleanup_transaction(trans);

        return;
}

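/*
 * Handle an incoming back-channel command fragment: continuation fragments
 * (frag_num > 1) are matched against the pending list, while the first
 * fragment allocates a fresh transaction. A fully received command is moved
 * to the active list for processing; otherwise it is parked on rcv_pend.
 */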
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
                                       struct qlcnic_bc_hdr *hdr,
                                       struct qlcnic_vf_info *vf)
{
        struct qlcnic_bc_trans *trans;
        struct qlcnic_adapter *adapter = vf->adapter;
        struct qlcnic_cmd_args cmd;
        u32 pay_size;
        int err;
        u8 cmd_op;

        if (adapter->need_fw_reset)
                return;

        if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
            hdr->op_type != QLC_BC_CMD &&
            hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
                return;

        if (hdr->frag_num > 1) {
                qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
                return;
        }

        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        cmd_op = hdr->cmd_op;
        if (qlcnic_sriov_alloc_bc_trans(&trans))
                return;

        if (hdr->op_type == QLC_BC_CMD)
                err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
        else
                err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

        if (err) {
                qlcnic_sriov_cleanup_transaction(trans);
                return;
        }

        cmd.op_type = hdr->op_type;
        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
                                        QLC_BC_COMMAND)) {
                qlcnic_free_mbx_args(&cmd);
                qlcnic_sriov_cleanup_transaction(trans);
                return;
        }

        pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
                                         trans->curr_req_frag);
        qlcnic_sriov_pull_bc_msg(vf->adapter,
                                 (u32 *)(trans->req_hdr + trans->curr_req_frag),
                                 (u32 *)(trans->req_pay + trans->curr_req_frag),
                                 pay_size);
        trans->func_id = vf->pci_func;
        trans->vf = vf;
        trans->trans_id = hdr->seq_id;
        trans->curr_req_frag++;

        if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
                return;

        if (trans->curr_req_frag == trans->req_hdr->num_frags) {
                if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
                        qlcnic_free_mbx_args(&cmd);
                        qlcnic_sriov_cleanup_transaction(trans);
                }
        } else {
                spin_lock(&vf->rcv_pend.lock);
                list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
                vf->rcv_pend.count++;
                spin_unlock(&vf->rcv_pend.lock);
        }
}

static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
                                          struct qlcnic_vf_info *vf)
{
        struct qlcnic_bc_hdr hdr;
        u32 *ptr = (u32 *)&hdr;
        u8 msg_type, i;

        for (i = 2; i < 6; i++)
                ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
        msg_type = hdr.msg_type;

        switch (msg_type) {
        case QLC_BC_COMMAND:
                qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
                break;
        case QLC_BC_RESPONSE:
                qlcnic_sriov_handle_bc_resp(&hdr, vf);
                break;
        }
}

static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
                                          struct qlcnic_vf_info *vf)
{
        struct qlcnic_adapter *adapter = vf->adapter;

        if (qlcnic_sriov_pf_check(adapter))
                qlcnic_sriov_pf_handle_flr(sriov, vf);
        else
                dev_err(&adapter->pdev->dev,
                        "Invalid event to VF. VF should not get FLR event\n");
}

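/*
 * Entry point for back-channel AEN events: decode the target PCI function
 * from the event word and dispatch channel-free completions, FLR handling
 * (PF only) and message notifications to the owning VF.
 */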
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
        struct qlcnic_vf_info *vf;
        struct qlcnic_sriov *sriov;
        int index;
        u8 pci_func;

        sriov = adapter->ahw->sriov;
        pci_func = qlcnic_sriov_target_func_id(event);
        index = qlcnic_sriov_func_to_index(adapter, pci_func);

        if (index < 0)
                return;

        vf = &sriov->vf_info[index];
        vf->pci_func = pci_func;

        if (qlcnic_sriov_channel_free_check(event))
                complete(&vf->ch_free_cmpl);

        if (qlcnic_sriov_flr_check(event)) {
                qlcnic_sriov_handle_flr_event(sriov, vf);
                return;
        }

        if (qlcnic_sriov_bc_msg_check(event))
                qlcnic_sriov_handle_msg_event(sriov, vf);
}

int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
{
        struct qlcnic_cmd_args cmd;
        int err;

        if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
                return 0;

        if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
                return -ENOMEM;

        if (enable)
                cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);

        err = qlcnic_83xx_issue_cmd(adapter, &cmd);

        if (err != QLCNIC_RCODE_SUCCESS) {
                dev_err(&adapter->pdev->dev,
                        "Failed to %s bc events, err=%d\n",
                        (enable ? "enable" : "disable"), err);
        }

        qlcnic_free_mbx_args(&cmd);
        return err;
}

static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
                                     struct qlcnic_bc_trans *trans)
{
        u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
        u32 state;

        state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
        if (state == QLC_83XX_IDC_DEV_READY) {
                msleep(20);
                clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
                trans->trans_state = QLC_INIT;
                if (++adapter->fw_fail_cnt > max)
                        return -EIO;
                else
                        return 0;
        }

        return -EIO;
}

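/*
 * VF mailbox command path: every mailbox command is wrapped in a back-channel
 * transaction and sent to the PF. A timed-out CHANNEL_INIT is retried up to
 * QLC_BC_CMD_MAX_RETRY_CNT times (the PF may be slow to respond after an
 * adapter reset); any other timeout marks the mailbox not ready and requests
 * a context reset.
 */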
1359 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1360                                   struct qlcnic_cmd_args *cmd)
1361 {
1362         struct qlcnic_hardware_context *ahw = adapter->ahw;
1363         struct qlcnic_mailbox *mbx = ahw->mailbox;
1364         struct device *dev = &adapter->pdev->dev;
1365         struct qlcnic_bc_trans *trans;
1366         int err;
1367         u32 rsp_data, opcode, mbx_err_code, rsp;
1368         u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
1369         u8 func = ahw->pci_func;
1370
1371         rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1372         if (rsp)
1373                 goto free_cmd;
1374
1375         rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1376         if (rsp)
1377                 goto cleanup_transaction;
1378
1379 retry:
1380         if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
1381                 rsp = -EIO;
1382                 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1383                       QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
1384                 goto err_out;
1385         }
1386
1387         err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
1388         if (err) {
1389                 dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
1390                         (cmd->req.arg[0] & 0xffff), func);
1391                 rsp = QLCNIC_RCODE_TIMEOUT;
1392
1393                 /* After adapter reset PF driver may take some time to
1394                  * respond to VF's request. Retry request till maximum retries.
1395                  */
1396                 if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1397                     !qlcnic_sriov_retry_bc_cmd(adapter, trans))
1398                         goto retry;
1399
1400                 goto err_out;
1401         }
1402
1403         rsp_data = cmd->rsp.arg[0];
1404         mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1405         opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1406
1407         if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1408             (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1409                 rsp = QLCNIC_RCODE_SUCCESS;
1410         } else {
1411                 rsp = mbx_err_code;
1412                 if (!rsp)
1413                         rsp = 1;
1414                 dev_err(dev,
1415                         "MBX command 0x%x failed with err:0x%x for VF %d\n",
1416                         opcode, mbx_err_code, func);
1417         }
1418
1419 err_out:
1420         if (rsp == QLCNIC_RCODE_TIMEOUT) {
1421                 ahw->reset_context = 1;
1422                 adapter->need_fw_reset = 1;
1423                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1424         }
1425
1426 cleanup_transaction:
1427         qlcnic_sriov_cleanup_transaction(trans);
1428
1429 free_cmd:
1430         if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1431                 qlcnic_free_mbx_args(cmd);
1432                 kfree(cmd);
1433         }
1434
1435         return rsp;
1436 }
1437
1438 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1439 {
1440         struct qlcnic_cmd_args cmd;
1441         struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1442         int ret;
1443
1444         if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1445                 return -ENOMEM;
1446
1447         ret = qlcnic_issue_cmd(adapter, &cmd);
1448         if (ret) {
1449                 dev_err(&adapter->pdev->dev,
1450                         "Failed bc channel %s, err=%d\n",
1451                         cmd_op ? "term" : "init", ret);
1452                 goto out;
1453         }
1454
1455         cmd_op = (cmd.rsp.arg[0] & 0xff);
1456         if (cmd.rsp.arg[0] >> 25 == 2) {
1457                 ret = 2;
                     goto out;
             }
1458         if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1459                 set_bit(QLC_BC_VF_STATE, &vf->state);
1460         else
1461                 clear_bit(QLC_BC_VF_STATE, &vf->state);
1462
1463 out:
1464         qlcnic_free_mbx_args(&cmd);
1465         return ret;
1466 }
1467
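/* Move the multicast addresses queued on adapter->vf_mc_list into the
 * firmware filter.  When the VF has no VLAN configured, the entries (plus
 * the broadcast address) are added untagged; otherwise they are added once
 * for every VLAN in vf->sriov_vlans, and additionally untagged on 84xx
 * adapters.
 */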
1468 static void qlcnic_vf_add_mc_list(struct net_device *netdev)
1469 {
1470         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1471         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1472         struct qlcnic_mac_vlan_list *cur;
1473         struct list_head *head, tmp_list;
1474         struct qlcnic_vf_info *vf;
1475         u16 vlan_id;
1476         int i;
1477
1478         static const u8 bcast_addr[ETH_ALEN] = {
1479                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1480         };
1481
1482         vf = &adapter->ahw->sriov->vf_info[0];
1483         INIT_LIST_HEAD(&tmp_list);
1484         head = &adapter->vf_mc_list;
1485         netif_addr_lock_bh(netdev);
1486
1487         while (!list_empty(head)) {
1488                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
1489                 list_move(&cur->list, &tmp_list);
1490         }
1491
1492         netif_addr_unlock_bh(netdev);
1493
1494         while (!list_empty(&tmp_list)) {
1495                 cur = list_entry((&tmp_list)->next,
1496                                  struct qlcnic_mac_vlan_list, list);
1497                 if (!qlcnic_sriov_check_any_vlan(vf)) {
1498                         qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1499                         qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1500                 } else {
1501                         mutex_lock(&vf->vlan_list_lock);
1502                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1503                                 vlan_id = vf->sriov_vlans[i];
1504                                 if (vlan_id) {
1505                                         qlcnic_nic_add_mac(adapter, bcast_addr,
1506                                                            vlan_id);
1507                                         qlcnic_nic_add_mac(adapter,
1508                                                            cur->mac_addr,
1509                                                            vlan_id);
1510                                 }
1511                         }
1512                         mutex_unlock(&vf->vlan_list_lock);
1513                         if (qlcnic_84xx_check(adapter)) {
1514                                 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1515                                 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1516                         }
1517                 }
1518                 list_del(&cur->list);
1519                 kfree(cur);
1520         }
1521 }
1522
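/* Cancel any back-channel async work that is still queued and free the
 * work-list entries.
 */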
1523 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1524 {
1525         struct list_head *head = &bc->async_list;
1526         struct qlcnic_async_work_list *entry;
1527
1528         while (!list_empty(head)) {
1529                 entry = list_entry(head->next, struct qlcnic_async_work_list,
1530                                    list);
1531                 cancel_work_sync(&entry->work);
1532                 list_del(&entry->list);
1533                 kfree(entry);
1534         }
1535 }
1536
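/* Apply the VF's rx mode: derive the VPORT miss mode from the netdev flags
 * (promiscuous unless the PF disabled it, accept-multi when IFF_ALLMULTI is
 * set or the multicast count exceeds ahw->max_mc_count), refresh the VF
 * multicast filters and program the chosen mode.  Does nothing unless
 * __QLCNIC_FW_ATTACHED is set.
 */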
1537 static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1538 {
1539         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1540         struct qlcnic_hardware_context *ahw = adapter->ahw;
1541         u32 mode = VPORT_MISS_MODE_DROP;
1542
1543         if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1544                 return;
1545
1546         if (netdev->flags & IFF_PROMISC) {
1547                 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
1548                         mode = VPORT_MISS_MODE_ACCEPT_ALL;
1549         } else if ((netdev->flags & IFF_ALLMULTI) ||
1550                    (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1551                 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1552         }
1553
1554         if (qlcnic_sriov_vf_check(adapter))
1555                 qlcnic_vf_add_mc_list(netdev);
1556
1557         qlcnic_nic_set_promisc(adapter, mode);
1558 }
1559
1560 static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1561 {
1562         struct qlcnic_async_work_list *entry;
1563         struct net_device *netdev;
1564
1565         entry = container_of(work, struct qlcnic_async_work_list, work);
1566         netdev = (struct net_device *)entry->ptr;
1567
1568         qlcnic_sriov_vf_set_multi(netdev);
1569         return;
1570 }
1571
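/* Reuse an async work-list entry whose work is no longer pending, or
 * allocate a new one with GFP_ATOMIC and append it to bc->async_list.
 */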
1572 static struct qlcnic_async_work_list *
1573 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1574 {
1575         struct list_head *node;
1576         struct qlcnic_async_work_list *entry = NULL;
1577         u8 empty = 0;
1578
1579         list_for_each(node, &bc->async_list) {
1580                 entry = list_entry(node, struct qlcnic_async_work_list, list);
1581                 if (!work_pending(&entry->work)) {
1582                         empty = 1;
1583                         break;
1584                 }
1585         }
1586
1587         if (!empty) {
1588                 entry = kzalloc(sizeof(struct qlcnic_async_work_list),
1589                                 GFP_ATOMIC);
1590                 if (entry == NULL)
1591                         return NULL;
1592                 list_add_tail(&entry->list, &bc->async_list);
1593         }
1594
1595         return entry;
1596 }
1597
1598 static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1599                                                 work_func_t func, void *data)
1600 {
1601         struct qlcnic_async_work_list *entry = NULL;
1602
1603         entry = qlcnic_sriov_get_free_node_async_work(bc);
1604         if (!entry)
1605                 return;
1606
1607         entry->ptr = data;
1608         INIT_WORK(&entry->work, func);
1609         queue_work(bc->bc_async_wq, &entry->work);
1610 }
1611
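/* Defer the rx-mode/multicast update to the back-channel async workqueue so
 * that the mailbox traffic it generates can run in process context.
 * Skipped while a firmware reset is pending.
 */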
1612 void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1613 {
1615         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1616         struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1617
1618         if (adapter->need_fw_reset)
1619                 return;
1620
1621         qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1622                                             netdev);
1623 }
1624
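/* Re-establish VF operation after a firmware or adapter reset: re-arm the
 * mailbox work and interrupt, re-enable the back-channel interrupt, re-open
 * the channel with CHANNEL_INIT and re-initialize the driver state.  Each
 * step is unwound if a later one fails.
 */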
1625 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1626 {
1627         int err;
1628
1629         adapter->need_fw_reset = 0;
1630         qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1631         qlcnic_83xx_enable_mbx_interrupt(adapter);
1632
1633         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1634         if (err)
1635                 return err;
1636
1637         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1638         if (err)
1639                 goto err_out_cleanup_bc_intr;
1640
1641         err = qlcnic_sriov_vf_init_driver(adapter);
1642         if (err)
1643                 goto err_out_term_channel;
1644
1645         return 0;
1646
1647 err_out_term_channel:
1648         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1649
1650 err_out_cleanup_bc_intr:
1651         qlcnic_sriov_cfg_bc_intr(adapter, 0);
1652         return err;
1653 }
1654
1655 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1656 {
1657         struct net_device *netdev = adapter->netdev;
1658
1659         if (netif_running(netdev)) {
1660                 if (!qlcnic_up(adapter, netdev))
1661                         qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1662         }
1663
1664         netif_device_attach(netdev);
1665 }
1666
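/* Take the VF offline: detach the netdev, stop mailbox processing and its
 * interrupt, bring the interface down and reset the MSI-X interrupt table
 * entries along with the pending context-reset request.
 */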
1667 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1668 {
1669         struct qlcnic_hardware_context *ahw = adapter->ahw;
1670         struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1671         struct net_device *netdev = adapter->netdev;
1672         u8 i, max_ints = ahw->num_msix - 1;
1673
1674         netif_device_detach(netdev);
1675         qlcnic_83xx_detach_mailbox_work(adapter);
1676         qlcnic_83xx_disable_mbx_intr(adapter);
1677
1678         if (netif_running(netdev))
1679                 qlcnic_down(adapter, netdev);
1680
1681         for (i = 0; i < max_ints; i++) {
1682                 intr_tbl[i].id = i;
1683                 intr_tbl[i].enabled = 0;
1684                 intr_tbl[i].src = 0;
1685         }
1686         ahw->reset_context = 0;
1687 }
1688
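/* Called when the IDC state machine returns to DEV_READY.  If the previous
 * state was NEED_RESET or INIT (i.e. a firmware reset just completed),
 * reinitialize the VF driver and re-attach the interface; on failure only
 * the current device state is logged.
 */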
1689 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1690 {
1691         struct qlcnic_hardware_context *ahw = adapter->ahw;
1692         struct device *dev = &adapter->pdev->dev;
1693         struct qlc_83xx_idc *idc = &ahw->idc;
1694         u8 func = ahw->pci_func;
1695         u32 state;
1696
1697         if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1698             (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1699                 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1700                         qlcnic_sriov_vf_attach(adapter);
1701                         adapter->fw_fail_cnt = 0;
1702                         dev_info(dev,
1703                                  "%s: Reinitialization of VF 0x%x done after FW reset\n",
1704                                  __func__, func);
1705                 } else {
1706                         dev_err(dev,
1707                                 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1708                                 __func__, func);
1709                         state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1710                         dev_info(dev, "Current state 0x%x after FW reset\n",
1711                                  state);
1712                 }
1713         }
1714
1715         return 0;
1716 }
1717
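/* Handle a request to reset the VF's HW context (ahw->reset_context).  The
 * first two attempts merely flag need_fw_reset and wait to see whether the
 * firmware itself has failed; once QLC_83XX_VF_RESET_FAIL_THRESH resets are
 * exceeded the VF is detached and the interface shut down; otherwise the VF
 * is detached and fully reinitialized.
 */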
1718 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1719 {
1720         struct qlcnic_hardware_context *ahw = adapter->ahw;
1721         struct qlcnic_mailbox *mbx = ahw->mailbox;
1722         struct device *dev = &adapter->pdev->dev;
1723         struct qlc_83xx_idc *idc = &ahw->idc;
1724         u8 func = ahw->pci_func;
1725         u32 state;
1726
1727         adapter->reset_ctx_cnt++;
1728
1729         /* Skip the context reset and check if FW is hung */
1730         if (adapter->reset_ctx_cnt < 3) {
1731                 adapter->need_fw_reset = 1;
1732                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1733                 dev_info(dev,
1734                          "Resetting context, wait here to check if FW is in failed state\n");
1735                 return 0;
1736         }
1737
1738         /* Check if the number of resets exceeds the threshold.
1739          * If it does, just fail the VF.
1740          */
1741         if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
1742                 clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1743                 adapter->tx_timeo_cnt = 0;
1744                 adapter->fw_fail_cnt = 0;
1745                 adapter->reset_ctx_cnt = 0;
1746                 qlcnic_sriov_vf_detach(adapter);
1747                 dev_err(dev,
1748                         "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1749                 return -EIO;
1750         }
1751
1752         dev_info(dev, "Resetting context of VF 0x%x\n", func);
1753         dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
1754                  __func__, adapter->reset_ctx_cnt, func);
1755         set_bit(__QLCNIC_RESETTING, &adapter->state);
1756         adapter->need_fw_reset = 1;
1757         clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1758         qlcnic_sriov_vf_detach(adapter);
1759         adapter->need_fw_reset = 0;
1760
1761         if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1762                 qlcnic_sriov_vf_attach(adapter);
1763                 adapter->tx_timeo_cnt = 0;
1764                 adapter->reset_ctx_cnt = 0;
1765                 adapter->fw_fail_cnt = 0;
1766                 dev_info(dev, "Done resetting context for VF 0x%x\n", func);
1767         } else {
1768                 dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
1769                         __func__, func);
1770                 state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1771                 dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
1772         }
1773
1774         return 0;
1775 }
1776
1777 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1778 {
1779         struct qlcnic_hardware_context *ahw = adapter->ahw;
1780         int ret = 0;
1781
1782         if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1783                 ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1784         else if (ahw->reset_context)
1785                 ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1786
1787         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1788         return ret;
1789 }
1790
1791 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1792 {
1793         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1794
1795         dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1796         if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1797                 qlcnic_sriov_vf_detach(adapter);
1798
1799         clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1800         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1801         return -EIO;
1802 }
1803
1804 static int
1805 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1806 {
1807         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1808         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1809
1810         dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1811         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1812                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1813                 adapter->tx_timeo_cnt = 0;
1814                 adapter->reset_ctx_cnt = 0;
1815                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1816                 qlcnic_sriov_vf_detach(adapter);
1817         }
1818
1819         return 0;
1820 }
1821
1822 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1823 {
1824         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1825         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1826         u8 func = adapter->ahw->pci_func;
1827
1828         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1829                 dev_err(&adapter->pdev->dev,
1830                         "Firmware hang detected by VF 0x%x\n", func);
1831                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1832                 adapter->tx_timeo_cnt = 0;
1833                 adapter->reset_ctx_cnt = 0;
1834                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1835                 qlcnic_sriov_vf_detach(adapter);
1836         }
1837         return 0;
1838 }
1839
1840 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1841 {
1842         dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1843         return 0;
1844 }
1845
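/* Delayed work bound to adapter->fw_work: read the 83xx IDC device-state
 * register, dispatch to the matching handler above and, as long as the
 * module is still loaded and the handler did not fail the device,
 * reschedule itself after idc->delay.
 */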
1846 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1847 {
1848         struct qlcnic_adapter *adapter;
1849         struct qlc_83xx_idc *idc;
1850         int ret = 0;
1851
1852         adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1853         idc = &adapter->ahw->idc;
1854         idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1855
1856         switch (idc->curr_state) {
1857         case QLC_83XX_IDC_DEV_READY:
1858                 ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1859                 break;
1860         case QLC_83XX_IDC_DEV_NEED_RESET:
1861         case QLC_83XX_IDC_DEV_INIT:
1862                 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1863                 break;
1864         case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1865                 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1866                 break;
1867         case QLC_83XX_IDC_DEV_FAILED:
1868                 ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1869                 break;
1870         case QLC_83XX_IDC_DEV_QUISCENT:
1871                 break;
1872         default:
1873                 ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1874         }
1875
1876         idc->prev_state = idc->curr_state;
1877         if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1878                 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1879                                      idc->delay);
1880 }
1881
1882 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1883 {
1884         while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1885                 msleep(20);
1886
1887         clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1888         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1889         cancel_delayed_work_sync(&adapter->fw_work);
1890 }
1891
1892 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1893                                       struct qlcnic_vf_info *vf, u16 vlan_id)
1894 {
1895         int i, err = -EINVAL;
1896
1897         if (!vf->sriov_vlans)
1898                 return err;
1899
1900         mutex_lock(&vf->vlan_list_lock);
1901
1902         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1903                 if (vf->sriov_vlans[i] == vlan_id) {
1904                         err = 0;
1905                         break;
1906                 }
1907         }
1908
1909         mutex_unlock(&vf->vlan_list_lock);
1910         return err;
1911 }
1912
1913 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1914                                            struct qlcnic_vf_info *vf)
1915 {
1916         int err = 0;
1917
1918         mutex_lock(&vf->vlan_list_lock);
1919
1920         if (vf->num_vlan >= sriov->num_allowed_vlans)
1921                 err = -EINVAL;
1922
1923         mutex_unlock(&vf->vlan_list_lock);
1924         return err;
1925 }
1926
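/* Sanity-check a guest VLAN request.  Only valid in QLC_GUEST_VLAN_MODE;
 * adding requires a free slot (and, on 83xx VFs, that no VLAN is configured
 * yet) plus, when sriov->any_vlan is set, membership in sriov->allowed_vlans;
 * deleting requires the VLAN to be present on the VF.
 */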
1927 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
1928                                           u16 vid, u8 enable)
1929 {
1930         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1931         struct qlcnic_vf_info *vf;
1932         bool vlan_exist;
1933         u8 allowed = 0;
1934         int i;
1935
1936         vf = &adapter->ahw->sriov->vf_info[0];
1937         vlan_exist = qlcnic_sriov_check_any_vlan(vf);
1938         if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1939                 return -EINVAL;
1940
1941         if (enable) {
1942                 if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
1943                         return -EINVAL;
1944
1945                 if (qlcnic_sriov_validate_num_vlans(sriov, vf))
1946                         return -EINVAL;
1947
1948                 if (sriov->any_vlan) {
1949                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1950                                 if (sriov->allowed_vlans[i] == vid)
1951                                         allowed = 1;
1952                         }
1953
1954                         if (!allowed)
1955                                 return -EINVAL;
1956                 }
1957         } else {
1958                 if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
1959                         return -EINVAL;
1960         }
1961
1962         return 0;
1963 }
1964
1965 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1966                                         enum qlcnic_vlan_operations opcode)
1967 {
1968         struct qlcnic_adapter *adapter = vf->adapter;
1969         struct qlcnic_sriov *sriov;
1970
1971         sriov = adapter->ahw->sriov;
1972
1973         if (!vf->sriov_vlans)
1974                 return;
1975
1976         mutex_lock(&vf->vlan_list_lock);
1977
1978         switch (opcode) {
1979         case QLC_VLAN_ADD:
1980                 qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
1981                 break;
1982         case QLC_VLAN_DELETE:
1983                 qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
1984                 break;
1985         default:
1986                 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1987         }
1988
1989         mutex_unlock(&vf->vlan_list_lock);
1990         return;
1991 }
1992
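/* Configure (enable != 0) or remove a guest VLAN on the VF; VLAN 0 is
 * ignored.  After validation the request is sent to the PF as
 * QLCNIC_BC_CMD_CFG_GUEST_VLAN with arg[1] = enable bit | vid << 16; pending
 * async rx-mode work is cancelled first, and on success the stale MAC list
 * is dropped, vf->sriov_vlans is updated and the rx mode is re-programmed.
 */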
1993 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1994                                    u16 vid, u8 enable)
1995 {
1996         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1997         struct qlcnic_vf_info *vf;
1998         struct qlcnic_cmd_args cmd;
1999         int ret;
2000
2001         if (vid == 0)
2002                 return 0;
2003
2004         vf = &adapter->ahw->sriov->vf_info[0];
2005         ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
2006         if (ret)
2007                 return ret;
2008
2009         ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
2010                                              QLCNIC_BC_CMD_CFG_GUEST_VLAN);
2011         if (ret)
2012                 return ret;
2013
2014         cmd.req.arg[1] = (enable & 1) | vid << 16;
2015
2016         qlcnic_sriov_cleanup_async_list(&sriov->bc);
2017         ret = qlcnic_issue_cmd(adapter, &cmd);
2018         if (ret) {
2019                 dev_err(&adapter->pdev->dev,
2020                         "Failed to configure guest VLAN, err=%d\n", ret);
2021         } else {
2022                 qlcnic_free_mac_list(adapter);
2023
2024                 if (enable)
2025                         qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2026                 else
2027                         qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2028
2029                 qlcnic_set_multi(adapter->netdev);
2030         }
2031
2032         qlcnic_free_mbx_args(&cmd);
2033         return ret;
2034 }
2035
2036 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2037 {
2038         struct list_head *head = &adapter->mac_list;
2039         struct qlcnic_mac_vlan_list *cur;
2040
2041         while (!list_empty(head)) {
2042                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2043                 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2044                                           QLCNIC_MAC_DEL);
2045                 list_del(&cur->list);
2046                 kfree(cur);
2047         }
2048 }
2049
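/* Quiesce the VF for PCI shutdown: detach the netdev, cancel the IDC poll
 * and AEN work, bring the interface down, terminate the back channel and
 * disable its interrupt, disable the mailbox interrupt and save PCI state.
 */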
2051 static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2052 {
2053         struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2054         struct net_device *netdev = adapter->netdev;
2055         int retval;
2056
2057         netif_device_detach(netdev);
2058         qlcnic_cancel_idc_work(adapter);
2059
2060         if (netif_running(netdev))
2061                 qlcnic_down(adapter, netdev);
2062
2063         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2064         qlcnic_sriov_cfg_bc_intr(adapter, 0);
2065         qlcnic_83xx_disable_mbx_intr(adapter);
2066         cancel_delayed_work_sync(&adapter->idc_aen_work);
2067
2068         retval = pci_save_state(pdev);
2069         if (retval)
2070                 return retval;
2071
2072         return 0;
2073 }
2074
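/* Resume the VF: re-enable the mailbox and back-channel interrupts, re-open
 * the channel with CHANNEL_INIT, bring the interface back up if it was
 * running and restart the IDC device-state poll work.
 */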
2075 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2076 {
2077         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2078         struct net_device *netdev = adapter->netdev;
2079         int err;
2080
2081         set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
2082         qlcnic_83xx_enable_mbx_interrupt(adapter);
2083         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2084         if (err)
2085                 return err;
2086
2087         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2088         if (!err) {
2089                 if (netif_running(netdev)) {
2090                         err = qlcnic_up(adapter, netdev);
2091                         if (!err)
2092                                 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2093                 }
2094         }
2095
2096         netif_device_attach(netdev);
2097         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2098                              idc->delay);
2099         return err;
2100 }
2101
2102 void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2103 {
2104         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2105         struct qlcnic_vf_info *vf;
2106         int i;
2107
2108         for (i = 0; i < sriov->num_vfs; i++) {
2109                 vf = &sriov->vf_info[i];
2110                 vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2111                                           sizeof(*vf->sriov_vlans), GFP_KERNEL);
2112         }
2113 }
2114
2115 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2116 {
2117         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2118         struct qlcnic_vf_info *vf;
2119         int i;
2120
2121         for (i = 0; i < sriov->num_vfs; i++) {
2122                 vf = &sriov->vf_info[i];
2123                 kfree(vf->sriov_vlans);
2124                 vf->sriov_vlans = NULL;
2125         }
2126 }
2127
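/* Record vlan_id in the first free slot of vf->sriov_vlans.  Callers (e.g.
 * qlcnic_sriov_vlan_operation()) are expected to hold vf->vlan_list_lock.
 */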
2128 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2129                               struct qlcnic_vf_info *vf, u16 vlan_id)
2130 {
2131         int i;
2132
2133         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2134                 if (!vf->sriov_vlans[i]) {
2135                         vf->sriov_vlans[i] = vlan_id;
2136                         vf->num_vlan++;
2137                         return;
2138                 }
2139         }
2140 }
2141
2142 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2143                               struct qlcnic_vf_info *vf, u16 vlan_id)
2144 {
2145         int i;
2146
2147         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2148                 if (vf->sriov_vlans[i] == vlan_id) {
2149                         vf->sriov_vlans[i] = 0;
2150                         vf->num_vlan--;
2151                         return;
2152                 }
2153         }
2154 }
2155
2156 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2157 {
2158         bool err = false;
2159
2160         mutex_lock(&vf->vlan_list_lock);
2161
2162         if (vf->num_vlan)
2163                 err = true;
2164
2165         mutex_unlock(&vf->vlan_list_lock);
2166         return err;
2167 }