drivers/scsi/be2iscsi/be_cmds.c
1 /**
2  * Copyright (C) 2005 - 2012 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <scsi/iscsi_proto.h>
19
20 #include "be.h"
21 #include "be_mgmt.h"
22 #include "be_main.h"
23
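/*
 * beiscsi_pci_soft_reset()- Issue a PCI soft reset to the adapter
 * @phba: driver private structure
 *
 * Sets the soft reset bit and polls for it to deassert, then brings
 * the MPU IRAM online and repeats the reset handshake.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/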
24 int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
25 {
26         u32 sreset;
27         u8 *pci_reset_offset = 0;
28         u8 *pci_online0_offset = 0;
29         u8 *pci_online1_offset = 0;
30         u32 pconline0 = 0;
31         u32 pconline1 = 0;
32         u32 i;
33
34         pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
35         pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
36         pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
37         sreset = readl((void *)pci_reset_offset);
38         sreset |= BE2_SET_RESET;
39         writel(sreset, (void *)pci_reset_offset);
40
41         i = 0;
42         while (sreset & BE2_SET_RESET) {
43                 if (i > 64)
44                         break;
45                 msleep(100);
46                 sreset = readl((void *)pci_reset_offset);
47                 i++;
48         }
49
50         if (sreset & BE2_SET_RESET) {
51                 printk(KERN_ERR DRV_NAME
52                        " Soft Reset did not deassert\n");
53                 return -EIO;
54         }
55         pconline1 = BE2_MPU_IRAM_ONLINE;
56         writel(pconline0, (void *)pci_online0_offset);
57         writel(pconline1, (void *)pci_online1_offset);
58
59         sreset |= BE2_SET_RESET;
60         writel(sreset, (void *)pci_reset_offset);
61
62         i = 0;
63         while (sreset & BE2_SET_RESET) {
64                 if (i > 64)
65                         break;
66                 msleep(1);
67                 sreset = readl((void *)pci_reset_offset);
68                 i++;
69         }
70         if (sreset & BE2_SET_RESET) {
71                 printk(KERN_ERR DRV_NAME
72                        " MPU Online Soft Reset did not deassert\n");
73                 return -EIO;
74         }
75         return 0;
76 }
77
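/*
 * be_chk_reset_complete()- Wait for the adapter reset to complete
 * @phba: driver private structure
 *
 * Polls the MPU_EP_SEMAPHORE register until the low 16 bits read
 * 0xC000 or the error bit (bit 31) gets set.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/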
78 int be_chk_reset_complete(struct beiscsi_hba *phba)
79 {
80         unsigned int num_loop;
81         u8 *mpu_sem = 0;
82         u32 status;
83
84         num_loop = 1000;
85         mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
86         msleep(5000);
87
88         while (num_loop) {
89                 status = readl((void *)mpu_sem);
90
91                 if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
92                         break;
93                 msleep(60);
94                 num_loop--;
95         }
96
97         if ((status & 0x80000000) || (!num_loop)) {
98                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
99                             "BC_%d : Failed in be_chk_reset_complete "
100                             "status = 0x%x\n", status);
101                 return -EIO;
102         }
103
104         return 0;
105 }
106
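/*
 * be_mcc_notify()- Ring the MCC queue doorbell
 * @phba: driver private structure
 *
 * Informs the adapter that one new WRB has been posted on the MCC queue.
 **/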
107 void be_mcc_notify(struct beiscsi_hba *phba)
108 {
109         struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
110         u32 val = 0;
111
112         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
113         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
114         iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
115 }
116
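/*
 * alloc_mcc_tag()- Allocate a tag for an MCC command
 * @phba: driver private structure
 *
 * Takes the next tag from the circular mcc_tag[] pool and clears its
 * completion word in mcc_numtag[].
 *
 * return
 * Success: non-zero tag
 * Failure: 0, no tag available
 **/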
117 unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
118 {
119         unsigned int tag = 0;
120
121         if (phba->ctrl.mcc_tag_available) {
122                 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
123                 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
124                 phba->ctrl.mcc_numtag[tag] = 0;
125         }
126         if (tag) {
127                 phba->ctrl.mcc_tag_available--;
128                 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
129                         phba->ctrl.mcc_alloc_index = 0;
130                 else
131                         phba->ctrl.mcc_alloc_index++;
132         }
133         return tag;
134 }
135
136 /*
137  * beiscsi_mccq_compl()- Wait for completion of MBX
138  * @phba: Driver private structure
139  * @tag: Tag for the MBX Command
140  * @wrb: the WRB used for the MBX Command
141  * @cmd_hdr: IOCTL Hdr for the MBX Cmd
142  *
143  * Waits for MBX completion with the passed TAG.
144  *
145  * return
146  * Success: 0
147  * Failure: Non-Zero
148  **/
149 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
150                 uint32_t tag, struct be_mcc_wrb **wrb,
151                 void *cmd_hdr)
152 {
153         int rc = 0;
154         uint32_t mcc_tag_response;
155         uint16_t status = 0, addl_status = 0, wrb_num = 0;
156         struct be_mcc_wrb *temp_wrb;
157         struct be_cmd_req_hdr *ioctl_hdr;
158         struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
159
160         if (beiscsi_error(phba))
161                 return -EIO;
162
163         /* wait for the mccq completion */
164         rc = wait_event_interruptible_timeout(
165                                 phba->ctrl.mcc_wait[tag],
166                                 phba->ctrl.mcc_numtag[tag],
167                                 msecs_to_jiffies(
168                                 BEISCSI_HOST_MBX_TIMEOUT));
169
170         if (rc <= 0) {
171                 beiscsi_log(phba, KERN_ERR,
172                             BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
173                             BEISCSI_LOG_CONFIG,
174                             "BC_%d : MBX Cmd Completion timed out\n");
175                 rc = -EAGAIN;
176                 goto release_mcc_tag;
177         } else
178                 rc = 0;
179
180         mcc_tag_response = phba->ctrl.mcc_numtag[tag];
181         status = (mcc_tag_response & CQE_STATUS_MASK);
182         addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
183                         CQE_STATUS_ADDL_SHIFT);
184
185         if (cmd_hdr) {
186                 ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
187         } else {
188                 wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
189                            CQE_STATUS_WRB_SHIFT;
190                 temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
191                 ioctl_hdr = embedded_payload(temp_wrb);
192
193                 if (wrb)
194                         *wrb = temp_wrb;
195         }
196
197         if (status || addl_status) {
198                 beiscsi_log(phba, KERN_ERR,
199                             BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
200                             BEISCSI_LOG_CONFIG,
201                             "BC_%d : MBX Cmd Failed for "
202                             "Subsys : %d Opcode : %d with "
203                             "Status : %d and Extd_Status : %d\n",
204                             ioctl_hdr->subsystem,
205                             ioctl_hdr->opcode,
206                             status, addl_status);
207                 rc = -EAGAIN;
208         }
209
210 release_mcc_tag:
211         /* Release the MCC entry */
212         free_mcc_tag(&phba->ctrl, tag);
213
214         return rc;
215 }
216
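/*
 * free_mcc_tag()- Return an MCC tag to the pool
 * @ctrl: Function specific MBX data structure
 * @tag: tag to be released
 *
 * Puts the tag back at mcc_free_index under mbox_lock so that
 * alloc_mcc_tag() can hand it out again.
 **/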
217 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
218 {
219         spin_lock(&ctrl->mbox_lock);
220         tag = tag & 0x000000FF;
221         ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
222         if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
223                 ctrl->mcc_free_index = 0;
224         else
225                 ctrl->mcc_free_index++;
226         ctrl->mcc_tag_available++;
227         spin_unlock(&ctrl->mbox_lock);
228 }
229
230 bool is_link_state_evt(u32 trailer)
231 {
232         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
233                   ASYNC_TRAILER_EVENT_CODE_MASK) ==
234                   ASYNC_EVENT_CODE_LINK_STATE);
235 }
236
237 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
238 {
239         if (compl->flags != 0) {
240                 compl->flags = le32_to_cpu(compl->flags);
241                 WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
242                 return true;
243         } else
244                 return false;
245 }
246
247 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
248 {
249         compl->flags = 0;
250 }
251
252 /*
253  * be_mcc_compl_process()- Check the MBX completion status
254  * @ctrl: Function specific MBX data structure
255  * @compl: Completion status of MBX Command
256  *
257  * Check the MBX completion status when the BMBX method is used
258  *
259  * return
260  * Success: Zero
261  * Failure: Non-Zero
262  **/
263 static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
264                                 struct be_mcc_compl *compl)
265 {
266         u16 compl_status, extd_status;
267         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
268         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
269         struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
270
271         be_dws_le_to_cpu(compl, 4);
272
273         compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
274                                         CQE_STATUS_COMPL_MASK;
275         if (compl_status != MCC_STATUS_SUCCESS) {
276                 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
277                                                 CQE_STATUS_EXTD_MASK;
278
279                 beiscsi_log(phba, KERN_ERR,
280                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
281                             "BC_%d : error in cmd completion: "
282                             "Subsystem : %d Opcode : %d "
283                             "status(compl/extd)=%d/%d\n",
284                             hdr->subsystem, hdr->opcode,
285                             compl_status, extd_status);
286
287                 return -EBUSY;
288         }
289         return 0;
290 }
291
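/*
 * be_mcc_compl_process_isr()- Record an MCC completion for the waiter
 * @ctrl: Function specific MBX data structure
 * @compl: completion entry taken from the MCC CQ
 *
 * Packs the valid bit, WRB index, extended status and completion status
 * into ctrl->mcc_numtag[tag] and wakes up the task waiting on that tag.
 **/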
292 int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
293                                     struct be_mcc_compl *compl)
294 {
295         u16 compl_status, extd_status;
296         unsigned short tag;
297
298         be_dws_le_to_cpu(compl, 4);
299
300         compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
301                                         CQE_STATUS_COMPL_MASK;
302         /* The ctrl.mcc_numtag[tag] is filled with
303          * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
304          * [7:0] = compl_status
305          */
306         tag = (compl->tag0 & 0x000000FF);
307         extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
308                                         CQE_STATUS_EXTD_MASK;
309
310         ctrl->mcc_numtag[tag]  = 0x80000000;
311         ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
312         ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
313         ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
314         wake_up_interruptible(&ctrl->mcc_wait[tag]);
315         return 0;
316 }
317
318 static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
319 {
320         struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
321         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
322
323         if (be_mcc_compl_is_new(compl)) {
324                 queue_tail_inc(mcc_cq);
325                 return compl;
326         }
327         return NULL;
328 }
329
330 static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
331 {
332         iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
333 }
334
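/*
 * beiscsi_async_link_state_process()- Handle an async link state event
 * @phba: driver private structure
 * @evt: link state event posted by the adapter
 *
 * Marks the adapter link up or down; on link down every active iSCSI
 * session on the host is failed.
 **/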
335 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
336                 struct be_async_event_link_state *evt)
337 {
338         switch (evt->port_link_status) {
339         case ASYNC_EVENT_LINK_DOWN:
340                 beiscsi_log(phba, KERN_ERR,
341                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
342                             "BC_%d : Link Down on Physical Port %d\n",
343                             evt->physical_port);
344
345                 phba->state |= BE_ADAPTER_LINK_DOWN;
346                 iscsi_host_for_each_session(phba->shost,
347                                             be2iscsi_fail_session);
348                 break;
349         case ASYNC_EVENT_LINK_UP:
350                 phba->state = BE_ADAPTER_UP;
351                 beiscsi_log(phba, KERN_ERR,
352                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
353                             "BC_%d : Link UP on Physical Port %d\n",
354                             evt->physical_port);
355                 break;
356         default:
357                 beiscsi_log(phba, KERN_ERR,
358                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
359                             "BC_%d : Unexpected Async Notification %d on "
360                             "Physical Port %d\n",
361                             evt->port_link_status,
362                             evt->physical_port);
363         }
364 }
365
366 static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
367                        u16 num_popped)
368 {
369         u32 val = 0;
370         val |= qid & DB_CQ_RING_ID_MASK;
371         if (arm)
372                 val |= 1 << DB_CQ_REARM_SHIFT;
373         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
374         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
375 }
376
377
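/*
 * beiscsi_process_mcc()- Drain the MCC completion queue
 * @phba: driver private structure
 *
 * Consumes pending MCC CQ entries, dispatching async link state events
 * and command completions, then re-arms the CQ with the number of
 * entries processed.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/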
378 int beiscsi_process_mcc(struct beiscsi_hba *phba)
379 {
380         struct be_mcc_compl *compl;
381         int num = 0, status = 0;
382         struct be_ctrl_info *ctrl = &phba->ctrl;
383
384         spin_lock_bh(&phba->ctrl.mcc_cq_lock);
385         while ((compl = be_mcc_compl_get(phba))) {
386                 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
387                         /* Interpret flags as an async trailer */
388                         if (is_link_state_evt(compl->flags))
389                                 /* Interpret compl as an async link evt */
390                                 beiscsi_async_link_state_process(phba,
391                                    (struct be_async_event_link_state *) compl);
392                         else
393                                 beiscsi_log(phba, KERN_ERR,
394                                             BEISCSI_LOG_CONFIG |
395                                             BEISCSI_LOG_MBOX,
396                                             "BC_%d : Unsupported Async Event, flags"
397                                             " = 0x%08x\n", compl->flags);
398
399                 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
400                                 status = be_mcc_compl_process(ctrl, compl);
401                                 atomic_dec(&phba->ctrl.mcc_obj.q.used);
402                 }
403                 be_mcc_compl_use(compl);
404                 num++;
405         }
406
407         if (num)
408                 beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
409
410         spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
411         return status;
412 }
413
414 /*
415  * be_mcc_wait_compl()- Wait for MBX completion
416  * @phba: driver private structure
417  *
418  * Wait until no pending MCC requests remain
419  *
420  * return
421  * Success: 0
422  * Failure: Non-Zero
423  *
424  **/
425 static int be_mcc_wait_compl(struct beiscsi_hba *phba)
426 {
427         int i, status;
428         for (i = 0; i < mcc_timeout; i++) {
429                 if (beiscsi_error(phba))
430                         return -EIO;
431
432                 status = beiscsi_process_mcc(phba);
433                 if (status)
434                         return status;
435
436                 if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
437                         break;
438                 udelay(100);
439         }
440         if (i == mcc_timeout) {
441                 beiscsi_log(phba, KERN_ERR,
442                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
443                             "BC_%d : FW Timed Out\n");
444                 phba->fw_timeout = true;
445                 beiscsi_ue_detect(phba);
446                 return -EBUSY;
447         }
448         return 0;
449 }
450
451 /*
452  * be_mcc_notify_wait()- Notify and wait for Compl
453  * @phba: driver private structure
454  *
455  * Notify the adapter of posted MCC requests and wait for completion
456  *
457  * return
458  * Success: 0
459  * Failure: Non-Zero
460  **/
461 int be_mcc_notify_wait(struct beiscsi_hba *phba)
462 {
463         be_mcc_notify(phba);
464         return be_mcc_wait_compl(phba);
465 }
466
467 /*
468  * be_mbox_db_ready_wait()- Check ready status
469  * @ctrl: Function specific MBX data structure
470  *
471  * Check that the FW is ready for the driver to send
472  * BMBX commands to the adapter.
473  *
474  * return
475  * Success: 0
476  * Failure: Non-Zero
477  **/
478 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
479 {
480         void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
481         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
482         int wait = 0;
483         u32 ready;
484
485         do {
486
487                 if (beiscsi_error(phba))
488                         return -EIO;
489
490                 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
491                 if (ready)
492                         break;
493
494                 if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
495                         beiscsi_log(phba, KERN_ERR,
496                                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
497                                     "BC_%d : FW Timed Out\n");
498                         phba->fw_timeout = true;
499                         beiscsi_ue_detect(phba);
500                         return -EBUSY;
501                 }
502
503                 mdelay(1);
504                 wait++;
505         } while (true);
506         return 0;
507 }
508
509 /*
510  * be_mbox_notify()- Notify adapter of new BMBX command
511  * @ctrl: Function specific MBX data structure
512  *
513  * Ring doorbell to inform adapter of a BMBX command
514  * to process
515  *
516  * return
517  * Success: 0
518  * Failure: Non-Zero
519  **/
520 int be_mbox_notify(struct be_ctrl_info *ctrl)
521 {
522         int status;
523         u32 val = 0;
524         void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
525         struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
526         struct be_mcc_mailbox *mbox = mbox_mem->va;
527         struct be_mcc_compl *compl = &mbox->compl;
528         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
529
530         val &= ~MPU_MAILBOX_DB_RDY_MASK;
531         val |= MPU_MAILBOX_DB_HI_MASK;
532         val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
533         iowrite32(val, db);
534
535         status = be_mbox_db_ready_wait(ctrl);
536         if (status)
537                 return status;
538
539         val = 0;
540         val &= ~MPU_MAILBOX_DB_RDY_MASK;
541         val &= ~MPU_MAILBOX_DB_HI_MASK;
542         val |= (u32) (mbox_mem->dma >> 4) << 2;
543         iowrite32(val, db);
544
545         status = be_mbox_db_ready_wait(ctrl);
546         if (status)
547                 return status;
548
549         if (be_mcc_compl_is_new(compl)) {
550                 status = be_mcc_compl_process(ctrl, &mbox->compl);
551                 be_mcc_compl_use(compl);
552                 if (status) {
553                         beiscsi_log(phba, KERN_ERR,
554                                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
555                                     "BC_%d : After be_mcc_compl_process\n");
556
557                         return status;
558                 }
559         } else {
560                 beiscsi_log(phba, KERN_ERR,
561                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
562                             "BC_%d : Invalid Mailbox Completion\n");
563
564                 return -EBUSY;
565         }
566         return 0;
567 }
568
569 /*
570  * Insert the mailbox address into the doorbell in two steps
571  * Polls the mbox doorbell until a command completion (or a timeout) occurs
572  */
573 static int be_mbox_notify_wait(struct beiscsi_hba *phba)
574 {
575         int status;
576         u32 val = 0;
577         void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
578         struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
579         struct be_mcc_mailbox *mbox = mbox_mem->va;
580         struct be_mcc_compl *compl = &mbox->compl;
581         struct be_ctrl_info *ctrl = &phba->ctrl;
582
583         val |= MPU_MAILBOX_DB_HI_MASK;
584         /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
585         val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
586         iowrite32(val, db);
587
588         /* wait for ready to be set */
589         status = be_mbox_db_ready_wait(ctrl);
590         if (status != 0)
591                 return status;
592
593         val = 0;
594         /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
595         val |= (u32)(mbox_mem->dma >> 4) << 2;
596         iowrite32(val, db);
597
598         status = be_mbox_db_ready_wait(ctrl);
599         if (status != 0)
600                 return status;
601
602         /* A cq entry has been made now */
603         if (be_mcc_compl_is_new(compl)) {
604                 status = be_mcc_compl_process(ctrl, &mbox->compl);
605                 be_mcc_compl_use(compl);
606                 if (status)
607                         return status;
608         } else {
609                 beiscsi_log(phba, KERN_ERR,
610                             BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
611                             "BC_%d : invalid mailbox completion\n");
612
613                 return -EBUSY;
614         }
615         return 0;
616 }
617
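/*
 * be_wrb_hdr_prepare()- Fill the common WRB header
 * @wrb: WRB being built
 * @payload_len: length of the command payload
 * @embedded: true if the payload is embedded in the WRB itself
 * @sge_cnt: number of SGEs used when the payload is not embedded
 **/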
618 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
619                                 bool embedded, u8 sge_cnt)
620 {
621         if (embedded)
622                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
623         else
624                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
625                                                 MCC_WRB_SGE_CNT_SHIFT;
626         wrb->payload_length = payload_len;
627         be_dws_cpu_to_le(wrb, 8);
628 }
629
630 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
631                         u8 subsystem, u8 opcode, int cmd_len)
632 {
633         req_hdr->opcode = opcode;
634         req_hdr->subsystem = subsystem;
635         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
636         req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
637 }
638
639 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
640                                                         struct be_dma_mem *mem)
641 {
642         int i, buf_pages;
643         u64 dma = (u64) mem->dma;
644
645         buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
646         for (i = 0; i < buf_pages; i++) {
647                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
648                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
649                 dma += PAGE_SIZE_4K;
650         }
651 }
652
653 static u32 eq_delay_to_mult(u32 usec_delay)
654 {
655 #define MAX_INTR_RATE 651042
656         const u32 round = 10;
657         u32 multiplier;
658
659         if (usec_delay == 0)
660                 multiplier = 0;
661         else {
662                 u32 interrupt_rate = 1000000 / usec_delay;
663                 if (interrupt_rate == 0)
664                         multiplier = 1023;
665                 else {
666                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
667                         multiplier /= interrupt_rate;
668                         multiplier = (multiplier + round / 2) / round;
669                         multiplier = min(multiplier, (u32) 1023);
670                 }
671         }
672         return multiplier;
673 }
674
675 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
676 {
677         return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
678 }
679
680 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
681 {
682         struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
683         struct be_mcc_wrb *wrb;
684
685         BUG_ON(atomic_read(&mccq->used) >= mccq->len);
686         wrb = queue_head_node(mccq);
687         memset(wrb, 0, sizeof(*wrb));
688         wrb->tag0 = (mccq->head & 0x000000FF) << 16;
689         queue_head_inc(mccq);
690         atomic_inc(&mccq->used);
691         return wrb;
692 }
693
694
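/*
 * beiscsi_cmd_eq_create()- Create an event queue on the adapter
 * @ctrl: Function specific MBX data structure
 * @eq: event queue to be created
 * @eq_delay: interrupt delay in usecs, converted to a delay multiplier
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/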
695 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
696                           struct be_queue_info *eq, int eq_delay)
697 {
698         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
699         struct be_cmd_req_eq_create *req = embedded_payload(wrb);
700         struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
701         struct be_dma_mem *q_mem = &eq->dma_mem;
702         int status;
703
704         spin_lock(&ctrl->mbox_lock);
705         memset(wrb, 0, sizeof(*wrb));
706
707         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
708
709         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
710                         OPCODE_COMMON_EQ_CREATE, sizeof(*req));
711
712         req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
713
714         AMAP_SET_BITS(struct amap_eq_context, func, req->context,
715                                                 PCI_FUNC(ctrl->pdev->devfn));
716         AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
717         AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
718         AMAP_SET_BITS(struct amap_eq_context, count, req->context,
719                                         __ilog2_u32(eq->len / 256));
720         AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
721                                         eq_delay_to_mult(eq_delay));
722         be_dws_cpu_to_le(req->context, sizeof(req->context));
723
724         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
725
726         status = be_mbox_notify(ctrl);
727         if (!status) {
728                 eq->id = le16_to_cpu(resp->eq_id);
729                 eq->created = true;
730         }
731         spin_unlock(&ctrl->mbox_lock);
732         return status;
733 }
734
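/*
 * be_cmd_fw_initialize()- Post the FW initialization pattern
 * @ctrl: Function specific MBX data structure
 *
 * Writes the fixed endian-check byte pattern (0xFF 0x12 0x34 0xFF
 * 0xFF 0x56 0x78 0xFF) into the bootstrap mailbox so the firmware can
 * verify the host byte order before any other command is issued.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/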
735 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
736 {
737         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
738         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
739         int status;
740         u8 *endian_check;
741
742         spin_lock(&ctrl->mbox_lock);
743         memset(wrb, 0, sizeof(*wrb));
744
745         endian_check = (u8 *) wrb;
746         *endian_check++ = 0xFF;
747         *endian_check++ = 0x12;
748         *endian_check++ = 0x34;
749         *endian_check++ = 0xFF;
750         *endian_check++ = 0xFF;
751         *endian_check++ = 0x56;
752         *endian_check++ = 0x78;
753         *endian_check++ = 0xFF;
754         be_dws_cpu_to_le(wrb, sizeof(*wrb));
755
756         status = be_mbox_notify(ctrl);
757         if (status)
758                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
759                             "BC_%d : be_cmd_fw_initialize Failed\n");
760
761         spin_unlock(&ctrl->mbox_lock);
762         return status;
763 }
764
765 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
766                           struct be_queue_info *cq, struct be_queue_info *eq,
767                           bool sol_evts, bool no_delay, int coalesce_wm)
768 {
769         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
770         struct be_cmd_req_cq_create *req = embedded_payload(wrb);
771         struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
772         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
773         struct be_dma_mem *q_mem = &cq->dma_mem;
774         void *ctxt = &req->context;
775         int status;
776
777         spin_lock(&ctrl->mbox_lock);
778         memset(wrb, 0, sizeof(*wrb));
779
780         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
781
782         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
783                         OPCODE_COMMON_CQ_CREATE, sizeof(*req));
784
785         req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
786         if (chip_skh_r(ctrl->pdev)) {
787                 req->hdr.version = MBX_CMD_VER2;
788                 req->page_size = 1;
789                 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
790                               ctxt, coalesce_wm);
791                 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
792                               ctxt, no_delay);
793                 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
794                               __ilog2_u32(cq->len / 256));
795                 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
796                 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
797                 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
798                 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
799         } else {
800                 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
801                               ctxt, coalesce_wm);
802                 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
803                 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
804                               __ilog2_u32(cq->len / 256));
805                 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
806                 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
807                 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
808                 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
809                 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
810                 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
811                               PCI_FUNC(ctrl->pdev->devfn));
812         }
813
814         be_dws_cpu_to_le(ctxt, sizeof(req->context));
815
816         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
817
818         status = be_mbox_notify(ctrl);
819         if (!status) {
820                 cq->id = le16_to_cpu(resp->cq_id);
821                 cq->created = true;
822         } else
823                 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
824                             "BC_%d : In beiscsi_cmd_cq_create, status=0x%08x\n",
825                             status);
826
827         spin_unlock(&ctrl->mbox_lock);
828
829         return status;
830 }
831
832 static u32 be_encoded_q_len(int q_len)
833 {
834         u32 len_encoded = fls(q_len);   /* log2(len) + 1 */
835         if (len_encoded == 16)
836                 len_encoded = 0;
837         return len_encoded;
838 }
839
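/*
 * beiscsi_cmd_mccq_create()- Create the MCC queue
 * @phba: driver private structure
 * @mccq: MCC queue to be created
 * @cq: completion queue the MCCQ is bound to
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/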
840 int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
841                         struct be_queue_info *mccq,
842                         struct be_queue_info *cq)
843 {
844         struct be_mcc_wrb *wrb;
845         struct be_cmd_req_mcc_create *req;
846         struct be_dma_mem *q_mem = &mccq->dma_mem;
847         struct be_ctrl_info *ctrl;
848         void *ctxt;
849         int status;
850
851         spin_lock(&phba->ctrl.mbox_lock);
852         ctrl = &phba->ctrl;
853         wrb = wrb_from_mbox(&ctrl->mbox_mem);
854         memset(wrb, 0, sizeof(*wrb));
855         req = embedded_payload(wrb);
856         ctxt = &req->context;
857
858         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
859
860         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
861                         OPCODE_COMMON_MCC_CREATE, sizeof(*req));
862
863         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
864
865         AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
866                       PCI_FUNC(phba->pcidev->devfn));
867         AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
868         AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
869                 be_encoded_q_len(mccq->len));
870         AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
871
872         be_dws_cpu_to_le(ctxt, sizeof(req->context));
873
874         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
875
876         status = be_mbox_notify_wait(phba);
877         if (!status) {
878                 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
879                 mccq->id = le16_to_cpu(resp->id);
880                 mccq->created = true;
881         }
882         spin_unlock(&phba->ctrl.mbox_lock);
883
884         return status;
885 }
886
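/*
 * beiscsi_cmd_q_destroy()- Destroy an adapter queue
 * @ctrl: Function specific MBX data structure
 * @q: queue to be destroyed, NULL for QTYPE_SGL
 * @queue_type: one of QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/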
887 int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
888                           int queue_type)
889 {
890         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
891         struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
892         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
893         u8 subsys = 0, opcode = 0;
894         int status;
895
896         beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
897                     "BC_%d : In beiscsi_cmd_q_destroy "
898                     "queue_type : %d\n", queue_type);
899
900         spin_lock(&ctrl->mbox_lock);
901         memset(wrb, 0, sizeof(*wrb));
902         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
903
904         switch (queue_type) {
905         case QTYPE_EQ:
906                 subsys = CMD_SUBSYSTEM_COMMON;
907                 opcode = OPCODE_COMMON_EQ_DESTROY;
908                 break;
909         case QTYPE_CQ:
910                 subsys = CMD_SUBSYSTEM_COMMON;
911                 opcode = OPCODE_COMMON_CQ_DESTROY;
912                 break;
913         case QTYPE_MCCQ:
914                 subsys = CMD_SUBSYSTEM_COMMON;
915                 opcode = OPCODE_COMMON_MCC_DESTROY;
916                 break;
917         case QTYPE_WRBQ:
918                 subsys = CMD_SUBSYSTEM_ISCSI;
919                 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
920                 break;
921         case QTYPE_DPDUQ:
922                 subsys = CMD_SUBSYSTEM_ISCSI;
923                 opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
924                 break;
925         case QTYPE_SGL:
926                 subsys = CMD_SUBSYSTEM_ISCSI;
927                 opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
928                 break;
929         default:
930                 spin_unlock(&ctrl->mbox_lock);
931                 BUG();
932                 return -ENXIO;
933         }
934         be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
935         if (queue_type != QTYPE_SGL)
936                 req->id = cpu_to_le16(q->id);
937
938         status = be_mbox_notify(ctrl);
939
940         spin_unlock(&ctrl->mbox_lock);
941         return status;
942 }
943
944 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
945                                     struct be_queue_info *cq,
946                                     struct be_queue_info *dq, int length,
947                                     int entry_size)
948 {
949         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
950         struct be_defq_create_req *req = embedded_payload(wrb);
951         struct be_dma_mem *q_mem = &dq->dma_mem;
952         void *ctxt = &req->context;
953         int status;
954
955         spin_lock(&ctrl->mbox_lock);
956         memset(wrb, 0, sizeof(*wrb));
957
958         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
959
960         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
961                            OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
962
963         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
964         AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
965         AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
966                       1);
967         AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
968                       PCI_FUNC(ctrl->pdev->devfn));
969         AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
970                       be_encoded_q_len(length / sizeof(struct phys_addr)));
971         AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
972                       ctxt, entry_size);
973         AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
974                       cq->id);
975
976         be_dws_cpu_to_le(ctxt, sizeof(req->context));
977
978         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
979
980         status = be_mbox_notify(ctrl);
981         if (!status) {
982                 struct be_defq_create_resp *resp = embedded_payload(wrb);
983
984                 dq->id = le16_to_cpu(resp->id);
985                 dq->created = true;
986         }
987         spin_unlock(&ctrl->mbox_lock);
988
989         return status;
990 }
991
992 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
993                        struct be_queue_info *wrbq)
994 {
995         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
996         struct be_wrbq_create_req *req = embedded_payload(wrb);
997         struct be_wrbq_create_resp *resp = embedded_payload(wrb);
998         int status;
999
1000         spin_lock(&ctrl->mbox_lock);
1001         memset(wrb, 0, sizeof(*wrb));
1002
1003         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1004
1005         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1006                 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
1007         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1008         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1009
1010         status = be_mbox_notify(ctrl);
1011         if (!status) {
1012                 wrbq->id = le16_to_cpu(resp->cid);
1013                 wrbq->created = true;
1014         }
1015         spin_unlock(&ctrl->mbox_lock);
1016         return status;
1017 }
1018
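/*
 * be_cmd_iscsi_post_sgl_pages()- Post SGL pages to the adapter
 * @ctrl: Function specific MBX data structure
 * @q_mem: DMA memory describing the SGL pages
 * @page_offset: offset of the first page being posted
 * @num_pages: number of pages to post (0xff is treated as a special
 *             value and passed through to the FW)
 *
 * Pages are posted in chunks limited by the number of page entries a
 * single request can carry; on failure the posted SGL pages are removed.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/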
1019 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
1020                                 struct be_dma_mem *q_mem,
1021                                 u32 page_offset, u32 num_pages)
1022 {
1023         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1024         struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1025         struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1026         int status;
1027         unsigned int curr_pages;
1028         u32 internal_page_offset = 0;
1029         u32 temp_num_pages = num_pages;
1030
1031         if (num_pages == 0xff)
1032                 num_pages = 1;
1033
1034         spin_lock(&ctrl->mbox_lock);
1035         do {
1036                 memset(wrb, 0, sizeof(*wrb));
1037                 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1038                 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1039                                    OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
1040                                    sizeof(*req));
1041                 curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
1042                                                 pages);
1043                 req->num_pages = min(num_pages, curr_pages);
1044                 req->page_offset = page_offset;
1045                 be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
1046                 q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
1047                 internal_page_offset += req->num_pages;
1048                 page_offset += req->num_pages;
1049                 num_pages -= req->num_pages;
1050
1051                 if (temp_num_pages == 0xff)
1052                         req->num_pages = temp_num_pages;
1053
1054                 status = be_mbox_notify(ctrl);
1055                 if (status) {
1056                         beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1057                                     "BC_%d : FW CMD to map iscsi frags failed.\n");
1058
1059                         goto error;
1060                 }
1061         } while (num_pages > 0);
1062 error:
1063         spin_unlock(&ctrl->mbox_lock);
1064         if (status != 0)
1065                 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
1066         return status;
1067 }
1068
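/*
 * beiscsi_cmd_reset_function()- Issue a function reset to the adapter
 * @phba: driver private structure
 *
 * Sends OPCODE_COMMON_FUNCTION_RESET through the bootstrap mailbox.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/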
1069 int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
1070 {
1071         struct be_ctrl_info *ctrl = &phba->ctrl;
1072         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1073         struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1074         int status;
1075
1076         spin_lock(&ctrl->mbox_lock);
1077
1078         req = embedded_payload(wrb);
1079         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1080         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1081                            OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1082         status = be_mbox_notify_wait(phba);
1083
1084         spin_unlock(&ctrl->mbox_lock);
1085         return status;
1086 }
1087
1088 /**
1089  * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
1090  * @phba: device priv structure instance
1091  * @vlan_tag: TAG to be set
1092  *
1093  * Set the VLAN_TAG for the adapter or disable VLAN on the adapter
1094  *
1095  * returns
1096  *      TAG for the MBX Cmd
1097  **/
1098 int be_cmd_set_vlan(struct beiscsi_hba *phba,
1099                      uint16_t vlan_tag)
1100 {
1101         unsigned int tag = 0;
1102         struct be_mcc_wrb *wrb;
1103         struct be_cmd_set_vlan_req *req;
1104         struct be_ctrl_info *ctrl = &phba->ctrl;
1105
1106         spin_lock(&ctrl->mbox_lock);
1107         tag = alloc_mcc_tag(phba);
1108         if (!tag) {
1109                 spin_unlock(&ctrl->mbox_lock);
1110                 return tag;
1111         }
1112
1113         wrb = wrb_from_mccq(phba);
1114         req = embedded_payload(wrb);
1115         wrb->tag0 |= tag;
1116         be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
1117         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1118                            OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
1119                            sizeof(*req));
1120
1121         req->interface_hndl = phba->interface_handle;
1122         req->vlan_priority = vlan_tag;
1123
1124         be_mcc_notify(phba);
1125         spin_unlock(&ctrl->mbox_lock);
1126
1127         return tag;
1128 }