/* Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};
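/* Returns false when the privileges granted to this function (relevant
 * mainly for VFs) do not permit issuing the given opcode/subsystem pair.
 */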
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}
70 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
	return wrb->payload.embedded_payload;
}
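/* Ring the MCC doorbell: posts one more wrb to the MCC queue identified
 * by mccq->id so that the FW starts processing it.
 */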
75 static void be_mcc_notify(struct be_adapter *adapter)
77 struct be_queue_info *mccq = &adapter->mcc_obj.q;
80 if (be_error(adapter))
83 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
84 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
87 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
93 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
97 if (compl->flags != 0) {
98 flags = le32_to_cpu(compl->flags);
99 if (flags & CQE_FLAGS_VALID_MASK) {
100 compl->flags = flags;
107 /* Need to reset the entire word that houses the valid bit */
108 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
113 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
118 addr = ((addr << 16) << 16) | tag0;
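/* Failures that are expected in normal operation (unsupported/illegal
 * requests, too many interfaces, or a flash write reporting an illegal
 * field or CRC mismatch) are not logged as errors.
 */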
122 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
124 if (base_status == MCC_STATUS_NOT_SUPPORTED ||
125 base_status == MCC_STATUS_ILLEGAL_REQUEST ||
126 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
127 (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
128 (base_status == MCC_STATUS_ILLEGAL_FIELD ||
129 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
138 static void be_async_cmd_process(struct be_adapter *adapter,
139 struct be_mcc_compl *compl,
140 struct be_cmd_resp_hdr *resp_hdr)
142 enum mcc_base_status base_status = base_status(compl->status);
143 u8 opcode = 0, subsystem = 0;
146 opcode = resp_hdr->opcode;
147 subsystem = resp_hdr->subsystem;
150 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
151 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
152 complete(&adapter->et_cmd_compl);
156 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
157 opcode == OPCODE_COMMON_WRITE_OBJECT) &&
158 subsystem == CMD_SUBSYSTEM_COMMON) {
159 adapter->flash_status = compl->status;
160 complete(&adapter->et_cmd_compl);
164 if ((opcode == OPCODE_ETH_GET_STATISTICS ||
165 opcode == OPCODE_ETH_GET_PPORT_STATS) &&
166 subsystem == CMD_SUBSYSTEM_ETH &&
167 base_status == MCC_STATUS_SUCCESS) {
168 be_parse_stats(adapter);
169 adapter->stats_cmd_sent = false;
173 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
174 subsystem == CMD_SUBSYSTEM_COMMON) {
175 if (base_status == MCC_STATUS_SUCCESS) {
176 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
178 adapter->drv_stats.be_on_die_temperature =
179 resp->on_die_temperature;
181 adapter->be_get_temp_freq = 0;
187 static int be_mcc_compl_process(struct be_adapter *adapter,
188 struct be_mcc_compl *compl)
190 enum mcc_base_status base_status;
191 enum mcc_addl_status addl_status;
192 struct be_cmd_resp_hdr *resp_hdr;
193 u8 opcode = 0, subsystem = 0;
	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
197 be_dws_le_to_cpu(compl, 4);
199 base_status = base_status(compl->status);
200 addl_status = addl_status(compl->status);
202 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
204 opcode = resp_hdr->opcode;
205 subsystem = resp_hdr->subsystem;
208 be_async_cmd_process(adapter, compl, resp_hdr);
210 if (base_status != MCC_STATUS_SUCCESS &&
211 !be_skip_err_log(opcode, base_status, addl_status)) {
213 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
214 dev_warn(&adapter->pdev->dev,
215 "VF is not privileged to issue opcode %d-%d\n",
218 dev_err(&adapter->pdev->dev,
219 "opcode %d-%d failed:status %d-%d\n",
220 opcode, subsystem, base_status, addl_status);
223 return compl->status;
226 /* Link state evt is a string of bytes; no need for endian swapping */
227 static void be_async_link_state_process(struct be_adapter *adapter,
228 struct be_mcc_compl *compl)
230 struct be_async_event_link_state *evt =
231 (struct be_async_event_link_state *)compl;
233 /* When link status changes, link speed must be re-queried from FW */
234 adapter->phy.link_speed = -1;
	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
241 if (!BEx_chip(adapter) &&
242 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
248 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
249 be_link_status_update(adapter,
250 evt->port_link_status & LINK_STATUS_MASK);
253 /* Grp5 CoS Priority evt */
254 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
255 struct be_mcc_compl *compl)
257 struct be_async_event_grp5_cos_priority *evt =
258 (struct be_async_event_grp5_cos_priority *)compl;
261 adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
268 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
269 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
270 struct be_mcc_compl *compl)
272 struct be_async_event_grp5_qos_link_speed *evt =
273 (struct be_async_event_grp5_qos_link_speed *)compl;
275 if (adapter->phy.link_speed >= 0 &&
276 evt->physical_port == adapter->port_num)
277 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
281 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
282 struct be_mcc_compl *compl)
284 struct be_async_event_grp5_pvid_state *evt =
285 (struct be_async_event_grp5_pvid_state *)compl;
288 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
289 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
295 static void be_async_grp5_evt_process(struct be_adapter *adapter,
296 struct be_mcc_compl *compl)
298 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
299 ASYNC_EVENT_TYPE_MASK;
301 switch (event_type) {
302 case ASYNC_EVENT_COS_PRIORITY:
303 be_async_grp5_cos_priority_process(adapter, compl);
305 case ASYNC_EVENT_QOS_SPEED:
306 be_async_grp5_qos_speed_process(adapter, compl);
308 case ASYNC_EVENT_PVID_STATE:
309 be_async_grp5_pvid_state_process(adapter, compl);
312 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
318 static void be_async_dbg_evt_process(struct be_adapter *adapter,
319 struct be_mcc_compl *cmp)
322 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
324 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
325 ASYNC_EVENT_TYPE_MASK;
327 switch (event_type) {
328 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
330 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
331 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
334 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
340 static inline bool is_link_state_evt(u32 flags)
342 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
343 ASYNC_EVENT_CODE_LINK_STATE;
346 static inline bool is_grp5_evt(u32 flags)
348 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
349 ASYNC_EVENT_CODE_GRP_5;
352 static inline bool is_dbg_evt(u32 flags)
354 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
355 ASYNC_EVENT_CODE_QNQ;
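/* Dispatch an async completion entry to the handler that matches its
 * event code: link-state, grp5 or debug (QnQ).
 */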
358 static void be_mcc_event_process(struct be_adapter *adapter,
359 struct be_mcc_compl *compl)
361 if (is_link_state_evt(compl->flags))
362 be_async_link_state_process(adapter, compl);
363 else if (is_grp5_evt(compl->flags))
364 be_async_grp5_evt_process(adapter, compl);
365 else if (is_dbg_evt(compl->flags))
366 be_async_dbg_evt_process(adapter, compl);
369 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
371 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
372 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
374 if (be_mcc_compl_is_new(compl)) {
375 queue_tail_inc(mcc_cq);
381 void be_async_mcc_enable(struct be_adapter *adapter)
383 spin_lock_bh(&adapter->mcc_cq_lock);
385 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
386 adapter->mcc_obj.rearm_cq = true;
388 spin_unlock_bh(&adapter->mcc_cq_lock);
391 void be_async_mcc_disable(struct be_adapter *adapter)
393 spin_lock_bh(&adapter->mcc_cq_lock);
395 adapter->mcc_obj.rearm_cq = false;
396 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
398 spin_unlock_bh(&adapter->mcc_cq_lock);
401 int be_process_mcc(struct be_adapter *adapter)
403 struct be_mcc_compl *compl;
404 int num = 0, status = 0;
405 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
407 spin_lock(&adapter->mcc_cq_lock);
409 while ((compl = be_mcc_compl_get(adapter))) {
410 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
411 be_mcc_event_process(adapter, compl);
412 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
413 status = be_mcc_compl_process(adapter, compl);
414 atomic_dec(&mcc_obj->q.used);
416 be_mcc_compl_use(compl);
421 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
423 spin_unlock(&adapter->mcc_cq_lock);
427 /* Wait till no more pending mcc requests are present */
428 static int be_mcc_wait_compl(struct be_adapter *adapter)
430 #define mcc_timeout 120000 /* 12s timeout */
432 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
434 for (i = 0; i < mcc_timeout; i++) {
435 if (be_error(adapter))
439 status = be_process_mcc(adapter);
442 if (atomic_read(&mcc_obj->q.used) == 0)
446 if (i == mcc_timeout) {
447 dev_err(&adapter->pdev->dev, "FW not responding\n");
448 adapter->fw_timeout = true;
454 /* Notify MCC requests and wait for completion */
455 static int be_mcc_notify_wait(struct be_adapter *adapter)
458 struct be_mcc_wrb *wrb;
459 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
460 u16 index = mcc_obj->q.head;
461 struct be_cmd_resp_hdr *resp;
463 index_dec(&index, mcc_obj->q.len);
464 wrb = queue_index_node(&mcc_obj->q, index);
466 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
468 be_mcc_notify(adapter);
470 status = be_mcc_wait_compl(adapter);
474 status = (resp->base_status |
475 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
476 CQE_ADDL_STATUS_SHIFT));
481 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
487 if (be_error(adapter))
490 ready = ioread32(db);
491 if (ready == 0xffffffff)
494 ready &= MPU_MAILBOX_DB_RDY_MASK;
499 dev_err(&adapter->pdev->dev, "FW not responding\n");
500 adapter->fw_timeout = true;
501 be_detect_error(adapter);
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
516 static int be_mbox_notify_wait(struct be_adapter *adapter)
520 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
521 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
522 struct be_mcc_mailbox *mbox = mbox_mem->va;
523 struct be_mcc_compl *compl = &mbox->compl;
525 /* wait for ready to be set */
526 status = be_mbox_db_ready_wait(adapter, db);
530 val |= MPU_MAILBOX_DB_HI_MASK;
531 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
532 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
535 /* wait for ready to be set */
536 status = be_mbox_db_ready_wait(adapter, db);
541 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
542 val |= (u32)(mbox_mem->dma >> 4) << 2;
545 status = be_mbox_db_ready_wait(adapter, db);
549 /* A cq entry has been made now */
550 if (be_mcc_compl_is_new(compl)) {
551 status = be_mcc_compl_process(adapter, &mbox->compl);
552 be_mcc_compl_use(compl);
556 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
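/* On BE2/BE3 the POST stage is read from a CSR semaphore register; on
 * other chips it is read from PCI config space.
 */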
562 static u16 be_POST_stage_get(struct be_adapter *adapter)
566 if (BEx_chip(adapter))
567 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
569 pci_read_config_dword(adapter->pdev,
570 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
572 return sem & POST_STAGE_MASK;
575 static int lancer_wait_ready(struct be_adapter *adapter)
577 #define SLIPORT_READY_TIMEOUT 30
581 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
582 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
583 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
589 if (i == SLIPORT_READY_TIMEOUT)
595 static bool lancer_provisioning_error(struct be_adapter *adapter)
597 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
598 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
599 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
600 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
601 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
603 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
604 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
610 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
613 u32 sliport_status, err, reset_needed;
616 resource_error = lancer_provisioning_error(adapter);
620 status = lancer_wait_ready(adapter);
622 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
623 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
624 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
625 if (err && reset_needed) {
626 iowrite32(SLI_PORT_CONTROL_IP_MASK,
627 adapter->db + SLIPORT_CONTROL_OFFSET);
		/* check if the adapter has corrected the error */
630 status = lancer_wait_ready(adapter);
631 sliport_status = ioread32(adapter->db +
632 SLIPORT_STATUS_OFFSET);
633 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
634 SLIPORT_STATUS_RN_MASK);
635 if (status || sliport_status)
637 } else if (err || reset_needed) {
	/* Stop error recovery if the error is not recoverable.
	 * A "no resource" error is temporary and will go away
	 * once the PF provisions resources.
	 */
645 resource_error = lancer_provisioning_error(adapter);
652 int be_fw_wait_ready(struct be_adapter *adapter)
655 int status, timeout = 0;
656 struct device *dev = &adapter->pdev->dev;
658 if (lancer_chip(adapter)) {
659 status = lancer_wait_ready(adapter);
664 stage = be_POST_stage_get(adapter);
665 if (stage == POST_STAGE_ARMFW_RDY)
668 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
669 if (msleep_interruptible(2000)) {
670 dev_err(dev, "Waiting for POST aborted\n");
674 } while (timeout < 60);
676 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
681 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
	return &wrb->payload.sgl[0];
}
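/* Stash the host address of the request header in the wrb tags;
 * be_decode_resp_hdr() later rebuilds the pointer from tag0/tag1.
 */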
686 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
688 wrb->tag0 = addr & 0xFFFFFFFF;
689 wrb->tag1 = upper_32_bits(addr);
692 /* Don't touch the hdr after it's prepared */
693 /* mem will be NULL for embedded commands */
694 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
695 u8 subsystem, u8 opcode, int cmd_len,
696 struct be_mcc_wrb *wrb,
697 struct be_dma_mem *mem)
701 req_hdr->opcode = opcode;
702 req_hdr->subsystem = subsystem;
703 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
704 req_hdr->version = 0;
705 fill_wrb_tags(wrb, (ulong) req_hdr);
706 wrb->payload_length = cmd_len;
708 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
709 MCC_WRB_SGE_CNT_SHIFT;
710 sge = nonembedded_sgl(wrb);
711 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
712 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
713 sge->len = cpu_to_le32(mem->size);
715 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
716 be_dws_cpu_to_le(wrb, 8);
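/* Convert a contiguous DMA buffer into the 4K page-address list format
 * expected by queue-create commands (at most max_pages entries).
 */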
719 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
720 struct be_dma_mem *mem)
722 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
723 u64 dma = (u64)mem->dma;
725 for (i = 0; i < buf_pages; i++) {
726 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
727 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
732 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
734 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
735 struct be_mcc_wrb *wrb
736 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
737 memset(wrb, 0, sizeof(*wrb));
741 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
743 struct be_queue_info *mccq = &adapter->mcc_obj.q;
744 struct be_mcc_wrb *wrb;
749 if (atomic_read(&mccq->used) >= mccq->len)
752 wrb = queue_head_node(mccq);
753 queue_head_inc(mccq);
754 atomic_inc(&mccq->used);
755 memset(wrb, 0, sizeof(*wrb));
759 static bool use_mcc(struct be_adapter *adapter)
761 return adapter->mcc_obj.q.created;
764 /* Must be used only in process context */
765 static int be_cmd_lock(struct be_adapter *adapter)
767 if (use_mcc(adapter)) {
768 spin_lock_bh(&adapter->mcc_lock);
771 return mutex_lock_interruptible(&adapter->mbox_lock);
775 /* Must be used only in process context */
776 static void be_cmd_unlock(struct be_adapter *adapter)
778 if (use_mcc(adapter))
779 spin_unlock_bh(&adapter->mcc_lock);
781 return mutex_unlock(&adapter->mbox_lock);
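/* Copy the caller-built wrb into a free slot on the MCC queue, or into
 * the mailbox wrb when the MCC queue has not been created yet.
 */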
784 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
785 struct be_mcc_wrb *wrb)
787 struct be_mcc_wrb *dest_wrb;
789 if (use_mcc(adapter)) {
790 dest_wrb = wrb_from_mccq(adapter);
794 dest_wrb = wrb_from_mbox(adapter);
797 memcpy(dest_wrb, wrb, sizeof(*wrb));
798 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
799 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
804 /* Must be used only in process context */
805 static int be_cmd_notify_wait(struct be_adapter *adapter,
806 struct be_mcc_wrb *wrb)
808 struct be_mcc_wrb *dest_wrb;
811 status = be_cmd_lock(adapter);
815 dest_wrb = be_cmd_copy(adapter, wrb);
819 if (use_mcc(adapter))
820 status = be_mcc_notify_wait(adapter);
822 status = be_mbox_notify_wait(adapter);
825 memcpy(wrb, dest_wrb, sizeof(*wrb));
827 be_cmd_unlock(adapter);
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
834 int be_cmd_fw_init(struct be_adapter *adapter)
839 if (lancer_chip(adapter))
842 if (mutex_lock_interruptible(&adapter->mbox_lock))
845 wrb = (u8 *)wrb_from_mbox(adapter);
855 status = be_mbox_notify_wait(adapter);
857 mutex_unlock(&adapter->mbox_lock);
/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
864 int be_cmd_fw_clean(struct be_adapter *adapter)
869 if (lancer_chip(adapter))
872 if (mutex_lock_interruptible(&adapter->mbox_lock))
875 wrb = (u8 *)wrb_from_mbox(adapter);
885 status = be_mbox_notify_wait(adapter);
887 mutex_unlock(&adapter->mbox_lock);
891 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
893 struct be_mcc_wrb *wrb;
894 struct be_cmd_req_eq_create *req;
895 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
898 if (mutex_lock_interruptible(&adapter->mbox_lock))
901 wrb = wrb_from_mbox(adapter);
902 req = embedded_payload(wrb);
904 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
905 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
	/* Support for EQ_CREATE v2 is available only from SH-R onwards */
909 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
912 req->hdr.version = ver;
913 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
915 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
917 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
918 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
919 __ilog2_u32(eqo->q.len / 256));
920 be_dws_cpu_to_le(req->context, sizeof(req->context));
922 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
924 status = be_mbox_notify_wait(adapter);
926 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
927 eqo->q.id = le16_to_cpu(resp->eq_id);
929 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
930 eqo->q.created = true;
933 mutex_unlock(&adapter->mbox_lock);
938 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
939 bool permanent, u32 if_handle, u32 pmac_id)
941 struct be_mcc_wrb *wrb;
942 struct be_cmd_req_mac_query *req;
945 spin_lock_bh(&adapter->mcc_lock);
947 wrb = wrb_from_mccq(adapter);
952 req = embedded_payload(wrb);
954 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
955 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
957 req->type = MAC_ADDRESS_TYPE_NETWORK;
961 req->if_id = cpu_to_le16((u16) if_handle);
962 req->pmac_id = cpu_to_le32(pmac_id);
966 status = be_mcc_notify_wait(adapter);
968 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
969 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
973 spin_unlock_bh(&adapter->mcc_lock);
977 /* Uses synchronous MCCQ */
978 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
979 u32 if_id, u32 *pmac_id, u32 domain)
981 struct be_mcc_wrb *wrb;
982 struct be_cmd_req_pmac_add *req;
985 spin_lock_bh(&adapter->mcc_lock);
987 wrb = wrb_from_mccq(adapter);
992 req = embedded_payload(wrb);
994 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
995 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
998 req->hdr.domain = domain;
999 req->if_id = cpu_to_le32(if_id);
1000 memcpy(req->mac_address, mac_addr, ETH_ALEN);
1002 status = be_mcc_notify_wait(adapter);
1004 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1005 *pmac_id = le32_to_cpu(resp->pmac_id);
1009 spin_unlock_bh(&adapter->mcc_lock);
1011 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1017 /* Uses synchronous MCCQ */
1018 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1020 struct be_mcc_wrb *wrb;
1021 struct be_cmd_req_pmac_del *req;
1027 spin_lock_bh(&adapter->mcc_lock);
1029 wrb = wrb_from_mccq(adapter);
1034 req = embedded_payload(wrb);
1036 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1037 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
1039 req->hdr.domain = dom;
1040 req->if_id = cpu_to_le32(if_id);
1041 req->pmac_id = cpu_to_le32(pmac_id);
1043 status = be_mcc_notify_wait(adapter);
1046 spin_unlock_bh(&adapter->mcc_lock);
1051 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1052 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1054 struct be_mcc_wrb *wrb;
1055 struct be_cmd_req_cq_create *req;
1056 struct be_dma_mem *q_mem = &cq->dma_mem;
1060 if (mutex_lock_interruptible(&adapter->mbox_lock))
1063 wrb = wrb_from_mbox(adapter);
1064 req = embedded_payload(wrb);
1065 ctxt = &req->context;
1067 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1068 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1071 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1073 if (BEx_chip(adapter)) {
1074 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1076 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1078 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1079 __ilog2_u32(cq->len / 256));
1080 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1081 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1082 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1084 req->hdr.version = 2;
1085 req->page_size = 1; /* 1 for 4K */
1087 /* coalesce-wm field in this cmd is not relevant to Lancer.
1088 * Lancer uses COMMON_MODIFY_CQ to set this field
1090 if (!lancer_chip(adapter))
1091 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1093 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1095 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1096 __ilog2_u32(cq->len / 256));
1097 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1098 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1099 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1102 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1104 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1106 status = be_mbox_notify_wait(adapter);
1108 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1109 cq->id = le16_to_cpu(resp->cq_id);
1113 mutex_unlock(&adapter->mbox_lock);
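/* Ring lengths are passed to the FW encoded as log2(len) + 1; the
 * largest encoding (16) is represented as 0.
 */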
1118 static u32 be_encoded_q_len(int q_len)
1120 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1121 if (len_encoded == 16)
1126 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1127 struct be_queue_info *mccq,
1128 struct be_queue_info *cq)
1130 struct be_mcc_wrb *wrb;
1131 struct be_cmd_req_mcc_ext_create *req;
1132 struct be_dma_mem *q_mem = &mccq->dma_mem;
1136 if (mutex_lock_interruptible(&adapter->mbox_lock))
1139 wrb = wrb_from_mbox(adapter);
1140 req = embedded_payload(wrb);
1141 ctxt = &req->context;
1143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1144 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1147 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1148 if (BEx_chip(adapter)) {
1149 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1150 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1151 be_encoded_q_len(mccq->len));
1152 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1154 req->hdr.version = 1;
1155 req->cq_id = cpu_to_le16(cq->id);
1157 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1158 be_encoded_q_len(mccq->len));
1159 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1160 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1162 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
1167 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1168 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
1169 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1171 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1173 status = be_mbox_notify_wait(adapter);
1175 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1176 mccq->id = le16_to_cpu(resp->id);
1177 mccq->created = true;
1179 mutex_unlock(&adapter->mbox_lock);
1184 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1185 struct be_queue_info *mccq,
1186 struct be_queue_info *cq)
1188 struct be_mcc_wrb *wrb;
1189 struct be_cmd_req_mcc_create *req;
1190 struct be_dma_mem *q_mem = &mccq->dma_mem;
1194 if (mutex_lock_interruptible(&adapter->mbox_lock))
1197 wrb = wrb_from_mbox(adapter);
1198 req = embedded_payload(wrb);
1199 ctxt = &req->context;
1201 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1202 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1205 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1207 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1208 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1209 be_encoded_q_len(mccq->len));
1210 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1212 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1214 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1216 status = be_mbox_notify_wait(adapter);
1218 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1219 mccq->id = le16_to_cpu(resp->id);
1220 mccq->created = true;
1223 mutex_unlock(&adapter->mbox_lock);
1227 int be_cmd_mccq_create(struct be_adapter *adapter,
1228 struct be_queue_info *mccq, struct be_queue_info *cq)
1232 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1233 if (status && BEx_chip(adapter)) {
1234 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1235 "or newer to avoid conflicting priorities between NIC "
1236 "and FCoE traffic");
1237 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1242 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1244 struct be_mcc_wrb wrb = {0};
1245 struct be_cmd_req_eth_tx_create *req;
1246 struct be_queue_info *txq = &txo->q;
1247 struct be_queue_info *cq = &txo->cq;
1248 struct be_dma_mem *q_mem = &txq->dma_mem;
1249 int status, ver = 0;
1251 req = embedded_payload(&wrb);
1252 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1253 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1255 if (lancer_chip(adapter)) {
1256 req->hdr.version = 1;
1257 } else if (BEx_chip(adapter)) {
1258 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1259 req->hdr.version = 2;
1260 } else { /* For SH */
1261 req->hdr.version = 2;
1264 if (req->hdr.version > 0)
1265 req->if_id = cpu_to_le16(adapter->if_handle);
1266 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1267 req->ulp_num = BE_ULP1_NUM;
1268 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1269 req->cq_id = cpu_to_le16(cq->id);
1270 req->queue_size = be_encoded_q_len(txq->len);
1271 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1272 ver = req->hdr.version;
1274 status = be_cmd_notify_wait(adapter, &wrb);
1276 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1277 txq->id = le16_to_cpu(resp->cid);
1279 txo->db_offset = le32_to_cpu(resp->db_offset);
1281 txo->db_offset = DB_TXULP1_OFFSET;
1282 txq->created = true;
1289 int be_cmd_rxq_create(struct be_adapter *adapter,
1290 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1291 u32 if_id, u32 rss, u8 *rss_id)
1293 struct be_mcc_wrb *wrb;
1294 struct be_cmd_req_eth_rx_create *req;
1295 struct be_dma_mem *q_mem = &rxq->dma_mem;
1298 spin_lock_bh(&adapter->mcc_lock);
1300 wrb = wrb_from_mccq(adapter);
1305 req = embedded_payload(wrb);
1307 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1308 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1310 req->cq_id = cpu_to_le16(cq_id);
1311 req->frag_size = fls(frag_size) - 1;
1313 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1314 req->interface_id = cpu_to_le32(if_id);
1315 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1316 req->rss_queue = cpu_to_le32(rss);
1318 status = be_mcc_notify_wait(adapter);
1320 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1321 rxq->id = le16_to_cpu(resp->id);
1322 rxq->created = true;
1323 *rss_id = resp->rss_id;
1327 spin_unlock_bh(&adapter->mcc_lock);
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
1334 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1337 struct be_mcc_wrb *wrb;
1338 struct be_cmd_req_q_destroy *req;
1339 u8 subsys = 0, opcode = 0;
1342 if (mutex_lock_interruptible(&adapter->mbox_lock))
1345 wrb = wrb_from_mbox(adapter);
1346 req = embedded_payload(wrb);
1348 switch (queue_type) {
1350 subsys = CMD_SUBSYSTEM_COMMON;
1351 opcode = OPCODE_COMMON_EQ_DESTROY;
1354 subsys = CMD_SUBSYSTEM_COMMON;
1355 opcode = OPCODE_COMMON_CQ_DESTROY;
1358 subsys = CMD_SUBSYSTEM_ETH;
1359 opcode = OPCODE_ETH_TX_DESTROY;
1362 subsys = CMD_SUBSYSTEM_ETH;
1363 opcode = OPCODE_ETH_RX_DESTROY;
1366 subsys = CMD_SUBSYSTEM_COMMON;
1367 opcode = OPCODE_COMMON_MCC_DESTROY;
1373 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1375 req->id = cpu_to_le16(q->id);
1377 status = be_mbox_notify_wait(adapter);
1380 mutex_unlock(&adapter->mbox_lock);
1385 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1387 struct be_mcc_wrb *wrb;
1388 struct be_cmd_req_q_destroy *req;
1391 spin_lock_bh(&adapter->mcc_lock);
1393 wrb = wrb_from_mccq(adapter);
1398 req = embedded_payload(wrb);
1400 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1401 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1402 req->id = cpu_to_le16(q->id);
1404 status = be_mcc_notify_wait(adapter);
1408 spin_unlock_bh(&adapter->mcc_lock);
/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
1415 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1416 u32 *if_handle, u32 domain)
1418 struct be_mcc_wrb wrb = {0};
1419 struct be_cmd_req_if_create *req;
1422 req = embedded_payload(&wrb);
1423 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1424 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1425 sizeof(*req), &wrb, NULL);
1426 req->hdr.domain = domain;
1427 req->capability_flags = cpu_to_le32(cap_flags);
1428 req->enable_flags = cpu_to_le32(en_flags);
1429 req->pmac_invalid = true;
1431 status = be_cmd_notify_wait(adapter, &wrb);
1433 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1434 *if_handle = le32_to_cpu(resp->interface_id);
1436 /* Hack to retrieve VF's pmac-id on BE3 */
1437 if (BE3_chip(adapter) && !be_physfn(adapter))
1438 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1444 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1446 struct be_mcc_wrb *wrb;
1447 struct be_cmd_req_if_destroy *req;
1450 if (interface_id == -1)
1453 spin_lock_bh(&adapter->mcc_lock);
1455 wrb = wrb_from_mccq(adapter);
1460 req = embedded_payload(wrb);
1462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1463 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1464 sizeof(*req), wrb, NULL);
1465 req->hdr.domain = domain;
1466 req->interface_id = cpu_to_le32(interface_id);
1468 status = be_mcc_notify_wait(adapter);
1470 spin_unlock_bh(&adapter->mcc_lock);
/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
1478 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1480 struct be_mcc_wrb *wrb;
1481 struct be_cmd_req_hdr *hdr;
1484 spin_lock_bh(&adapter->mcc_lock);
1486 wrb = wrb_from_mccq(adapter);
1491 hdr = nonemb_cmd->va;
1493 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1494 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
	/* version 1 of the cmd is supported by all chips except BE2 */
1498 if (BE2_chip(adapter))
1500 if (BE3_chip(adapter) || lancer_chip(adapter))
1505 be_mcc_notify(adapter);
1506 adapter->stats_cmd_sent = true;
1509 spin_unlock_bh(&adapter->mcc_lock);
1514 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1515 struct be_dma_mem *nonemb_cmd)
1518 struct be_mcc_wrb *wrb;
1519 struct lancer_cmd_req_pport_stats *req;
1522 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1526 spin_lock_bh(&adapter->mcc_lock);
1528 wrb = wrb_from_mccq(adapter);
1533 req = nonemb_cmd->va;
1535 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1536 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1539 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1540 req->cmd_params.params.reset_stats = 0;
1542 be_mcc_notify(adapter);
1543 adapter->stats_cmd_sent = true;
1546 spin_unlock_bh(&adapter->mcc_lock);
1550 static int be_mac_to_link_speed(int mac_speed)
1552 switch (mac_speed) {
1553 case PHY_LINK_SPEED_ZERO:
1555 case PHY_LINK_SPEED_10MBPS:
1557 case PHY_LINK_SPEED_100MBPS:
1559 case PHY_LINK_SPEED_1GBPS:
1561 case PHY_LINK_SPEED_10GBPS:
1563 case PHY_LINK_SPEED_20GBPS:
1565 case PHY_LINK_SPEED_25GBPS:
1567 case PHY_LINK_SPEED_40GBPS:
/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
1576 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1577 u8 *link_status, u32 dom)
1579 struct be_mcc_wrb *wrb;
1580 struct be_cmd_req_link_status *req;
1583 spin_lock_bh(&adapter->mcc_lock);
1586 *link_status = LINK_DOWN;
1588 wrb = wrb_from_mccq(adapter);
1593 req = embedded_payload(wrb);
1595 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1596 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1597 sizeof(*req), wrb, NULL);
	/* version 1 of the cmd is supported by all chips except BE2 */
1600 if (!BE2_chip(adapter))
1601 req->hdr.version = 1;
1603 req->hdr.domain = dom;
1605 status = be_mcc_notify_wait(adapter);
1607 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1609 *link_speed = resp->link_speed ?
1610 le16_to_cpu(resp->link_speed) * 10 :
1611 be_mac_to_link_speed(resp->mac_speed);
1613 if (!resp->logical_link_status)
1617 *link_status = resp->logical_link_status;
1621 spin_unlock_bh(&adapter->mcc_lock);
1625 /* Uses synchronous mcc */
1626 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1628 struct be_mcc_wrb *wrb;
1629 struct be_cmd_req_get_cntl_addnl_attribs *req;
1632 spin_lock_bh(&adapter->mcc_lock);
1634 wrb = wrb_from_mccq(adapter);
1639 req = embedded_payload(wrb);
1641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1642 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1643 sizeof(*req), wrb, NULL);
1645 be_mcc_notify(adapter);
1648 spin_unlock_bh(&adapter->mcc_lock);
1652 /* Uses synchronous mcc */
1653 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1655 struct be_mcc_wrb *wrb;
1656 struct be_cmd_req_get_fat *req;
1659 spin_lock_bh(&adapter->mcc_lock);
1661 wrb = wrb_from_mccq(adapter);
1666 req = embedded_payload(wrb);
1668 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1669 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1671 req->fat_operation = cpu_to_le32(QUERY_FAT);
1672 status = be_mcc_notify_wait(adapter);
1674 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1675 if (log_size && resp->log_size)
1676 *log_size = le32_to_cpu(resp->log_size) -
1680 spin_unlock_bh(&adapter->mcc_lock);
1684 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1686 struct be_dma_mem get_fat_cmd;
1687 struct be_mcc_wrb *wrb;
1688 struct be_cmd_req_get_fat *req;
1689 u32 offset = 0, total_size, buf_size,
1690 log_offset = sizeof(u32), payload_len;
1696 total_size = buf_len;
1698 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1699 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1702 if (!get_fat_cmd.va) {
1704 dev_err(&adapter->pdev->dev,
1705 "Memory allocation failure while retrieving FAT data\n");
1709 spin_lock_bh(&adapter->mcc_lock);
1711 while (total_size) {
1712 buf_size = min(total_size, (u32)60*1024);
1713 total_size -= buf_size;
1715 wrb = wrb_from_mccq(adapter);
1720 req = get_fat_cmd.va;
1722 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1723 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1724 OPCODE_COMMON_MANAGE_FAT, payload_len,
1727 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1728 req->read_log_offset = cpu_to_le32(log_offset);
1729 req->read_log_length = cpu_to_le32(buf_size);
1730 req->data_buffer_size = cpu_to_le32(buf_size);
1732 status = be_mcc_notify_wait(adapter);
1734 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1735 memcpy(buf + offset,
1737 le32_to_cpu(resp->read_log_length));
1739 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1743 log_offset += buf_size;
1746 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1747 get_fat_cmd.va, get_fat_cmd.dma);
1748 spin_unlock_bh(&adapter->mcc_lock);
1751 /* Uses synchronous mcc */
1752 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1754 struct be_mcc_wrb *wrb;
1755 struct be_cmd_req_get_fw_version *req;
1758 spin_lock_bh(&adapter->mcc_lock);
1760 wrb = wrb_from_mccq(adapter);
1766 req = embedded_payload(wrb);
1768 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1769 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1771 status = be_mcc_notify_wait(adapter);
1773 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1774 strcpy(adapter->fw_ver, resp->firmware_version_string);
1775 strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string);
1778 spin_unlock_bh(&adapter->mcc_lock);
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
1785 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1788 struct be_mcc_wrb *wrb;
1789 struct be_cmd_req_modify_eq_delay *req;
1792 spin_lock_bh(&adapter->mcc_lock);
1794 wrb = wrb_from_mccq(adapter);
1799 req = embedded_payload(wrb);
1801 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1802 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1805 req->num_eq = cpu_to_le32(num);
1806 for (i = 0; i < num; i++) {
1807 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1808 req->set_eqd[i].phase = 0;
1809 req->set_eqd[i].delay_multiplier =
1810 cpu_to_le32(set_eqd[i].delay_multiplier);
1813 be_mcc_notify(adapter);
1815 spin_unlock_bh(&adapter->mcc_lock);
/* Uses synchronous mcc */
1820 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1823 struct be_mcc_wrb *wrb;
1824 struct be_cmd_req_vlan_config *req;
1827 spin_lock_bh(&adapter->mcc_lock);
1829 wrb = wrb_from_mccq(adapter);
1834 req = embedded_payload(wrb);
1836 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1837 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1840 req->interface_id = if_id;
1841 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1842 req->num_vlan = num;
1843 memcpy(req->normal_vlan, vtag_array,
1844 req->num_vlan * sizeof(vtag_array[0]));
1846 status = be_mcc_notify_wait(adapter);
1848 spin_unlock_bh(&adapter->mcc_lock);
1852 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1854 struct be_mcc_wrb *wrb;
1855 struct be_dma_mem *mem = &adapter->rx_filter;
1856 struct be_cmd_req_rx_filter *req = mem->va;
1859 spin_lock_bh(&adapter->mcc_lock);
1861 wrb = wrb_from_mccq(adapter);
1866 memset(req, 0, sizeof(*req));
1867 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1868 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1871 req->if_id = cpu_to_le32(adapter->if_handle);
1872 if (flags & IFF_PROMISC) {
1873 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1874 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1875 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1878 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1879 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1880 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1881 } else if (flags & IFF_ALLMULTI) {
1882 req->if_flags_mask = req->if_flags =
1883 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1884 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1885 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1889 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1891 struct netdev_hw_addr *ha;
1894 req->if_flags_mask = req->if_flags =
1895 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
1900 req->if_flags_mask |=
1901 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1902 be_if_cap_flags(adapter));
1903 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1904 netdev_for_each_mc_addr(ha, adapter->netdev)
1905 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1908 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1909 req->if_flags_mask) {
1910 dev_warn(&adapter->pdev->dev,
1911 "Cannot set rx filter flags 0x%x\n",
1912 req->if_flags_mask);
1913 dev_warn(&adapter->pdev->dev,
1914 "Interface is capable of 0x%x flags only\n",
1915 be_if_cap_flags(adapter));
1917 req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
1919 status = be_mcc_notify_wait(adapter);
1922 spin_unlock_bh(&adapter->mcc_lock);
/* Uses synchronous mcc */
1927 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1929 struct be_mcc_wrb *wrb;
1930 struct be_cmd_req_set_flow_control *req;
1933 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1934 CMD_SUBSYSTEM_COMMON))
1937 spin_lock_bh(&adapter->mcc_lock);
1939 wrb = wrb_from_mccq(adapter);
1944 req = embedded_payload(wrb);
1946 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1947 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1950 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1951 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1953 status = be_mcc_notify_wait(adapter);
1956 spin_unlock_bh(&adapter->mcc_lock);
1961 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1963 struct be_mcc_wrb *wrb;
1964 struct be_cmd_req_get_flow_control *req;
1967 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1968 CMD_SUBSYSTEM_COMMON))
1971 spin_lock_bh(&adapter->mcc_lock);
1973 wrb = wrb_from_mccq(adapter);
1978 req = embedded_payload(wrb);
1980 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1981 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
1984 status = be_mcc_notify_wait(adapter);
1986 struct be_cmd_resp_get_flow_control *resp =
1987 embedded_payload(wrb);
1988 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1989 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1993 spin_unlock_bh(&adapter->mcc_lock);
1998 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2000 struct be_mcc_wrb *wrb;
2001 struct be_cmd_req_query_fw_cfg *req;
2004 if (mutex_lock_interruptible(&adapter->mbox_lock))
2007 wrb = wrb_from_mbox(adapter);
2008 req = embedded_payload(wrb);
2010 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2011 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2012 sizeof(*req), wrb, NULL);
2014 status = be_mbox_notify_wait(adapter);
2016 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2017 adapter->port_num = le32_to_cpu(resp->phys_port);
2018 adapter->function_mode = le32_to_cpu(resp->function_mode);
2019 adapter->function_caps = le32_to_cpu(resp->function_caps);
2020 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2023 mutex_unlock(&adapter->mbox_lock);
2028 int be_cmd_reset_function(struct be_adapter *adapter)
2030 struct be_mcc_wrb *wrb;
2031 struct be_cmd_req_hdr *req;
2034 if (lancer_chip(adapter)) {
2035 status = lancer_wait_ready(adapter);
2037 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2038 adapter->db + SLIPORT_CONTROL_OFFSET);
2039 status = lancer_test_and_set_rdy_state(adapter);
2042 dev_err(&adapter->pdev->dev,
2043 "Adapter in non recoverable error\n");
2048 if (mutex_lock_interruptible(&adapter->mbox_lock))
2051 wrb = wrb_from_mbox(adapter);
2052 req = embedded_payload(wrb);
2054 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2055 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2058 status = be_mbox_notify_wait(adapter);
2060 mutex_unlock(&adapter->mbox_lock);
2064 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2065 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2067 struct be_mcc_wrb *wrb;
2068 struct be_cmd_req_rss_config *req;
2071 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2074 spin_lock_bh(&adapter->mcc_lock);
2076 wrb = wrb_from_mccq(adapter);
2081 req = embedded_payload(wrb);
2083 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2084 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2086 req->if_id = cpu_to_le32(adapter->if_handle);
2087 req->enable_rss = cpu_to_le16(rss_hash_opts);
2088 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2090 if (!BEx_chip(adapter))
2091 req->hdr.version = 1;
2093 memcpy(req->cpu_table, rsstable, table_size);
2094 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2095 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2097 status = be_mcc_notify_wait(adapter);
2099 spin_unlock_bh(&adapter->mcc_lock);
2104 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2105 u8 bcn, u8 sts, u8 state)
2107 struct be_mcc_wrb *wrb;
2108 struct be_cmd_req_enable_disable_beacon *req;
2111 spin_lock_bh(&adapter->mcc_lock);
2113 wrb = wrb_from_mccq(adapter);
2118 req = embedded_payload(wrb);
2120 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2121 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2122 sizeof(*req), wrb, NULL);
2124 req->port_num = port_num;
2125 req->beacon_state = state;
2126 req->beacon_duration = bcn;
2127 req->status_duration = sts;
2129 status = be_mcc_notify_wait(adapter);
2132 spin_unlock_bh(&adapter->mcc_lock);
2137 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2139 struct be_mcc_wrb *wrb;
2140 struct be_cmd_req_get_beacon_state *req;
2143 spin_lock_bh(&adapter->mcc_lock);
2145 wrb = wrb_from_mccq(adapter);
2150 req = embedded_payload(wrb);
2152 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2153 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2156 req->port_num = port_num;
2158 status = be_mcc_notify_wait(adapter);
2160 struct be_cmd_resp_get_beacon_state *resp =
2161 embedded_payload(wrb);
2162 *state = resp->beacon_state;
2166 spin_unlock_bh(&adapter->mcc_lock);
2170 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2171 u32 data_size, u32 data_offset,
2172 const char *obj_name, u32 *data_written,
2173 u8 *change_status, u8 *addn_status)
2175 struct be_mcc_wrb *wrb;
2176 struct lancer_cmd_req_write_object *req;
2177 struct lancer_cmd_resp_write_object *resp;
2181 spin_lock_bh(&adapter->mcc_lock);
2182 adapter->flash_status = 0;
2184 wrb = wrb_from_mccq(adapter);
2190 req = embedded_payload(wrb);
2192 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2193 OPCODE_COMMON_WRITE_OBJECT,
2194 sizeof(struct lancer_cmd_req_write_object), wrb,
2197 ctxt = &req->context;
2198 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2199 write_length, ctxt, data_size);
2202 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2205 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2208 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2209 req->write_offset = cpu_to_le32(data_offset);
2210 strcpy(req->object_name, obj_name);
2211 req->descriptor_count = cpu_to_le32(1);
2212 req->buf_len = cpu_to_le32(data_size);
2213 req->addr_low = cpu_to_le32((cmd->dma +
2214 sizeof(struct lancer_cmd_req_write_object))
2216 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2217 sizeof(struct lancer_cmd_req_write_object)));
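	/* WRITE_OBJECT completes asynchronously: be_async_cmd_process()
	 * records the status and signals et_cmd_compl.
	 */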
2219 be_mcc_notify(adapter);
2220 spin_unlock_bh(&adapter->mcc_lock);
2222 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2223 msecs_to_jiffies(60000)))
2224 status = -ETIMEDOUT;
2226 status = adapter->flash_status;
2228 resp = embedded_payload(wrb);
2230 *data_written = le32_to_cpu(resp->actual_write_len);
2231 *change_status = resp->change_status;
2233 *addn_status = resp->additional_status;
2239 spin_unlock_bh(&adapter->mcc_lock);
2243 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2245 struct lancer_cmd_req_delete_object *req;
2246 struct be_mcc_wrb *wrb;
2249 spin_lock_bh(&adapter->mcc_lock);
2251 wrb = wrb_from_mccq(adapter);
2257 req = embedded_payload(wrb);
2259 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2260 OPCODE_COMMON_DELETE_OBJECT,
2261 sizeof(*req), wrb, NULL);
2263 strcpy(req->object_name, obj_name);
2265 status = be_mcc_notify_wait(adapter);
2267 spin_unlock_bh(&adapter->mcc_lock);
2271 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2272 u32 data_size, u32 data_offset, const char *obj_name,
2273 u32 *data_read, u32 *eof, u8 *addn_status)
2275 struct be_mcc_wrb *wrb;
2276 struct lancer_cmd_req_read_object *req;
2277 struct lancer_cmd_resp_read_object *resp;
2280 spin_lock_bh(&adapter->mcc_lock);
2282 wrb = wrb_from_mccq(adapter);
2288 req = embedded_payload(wrb);
2290 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2291 OPCODE_COMMON_READ_OBJECT,
2292 sizeof(struct lancer_cmd_req_read_object), wrb,
2295 req->desired_read_len = cpu_to_le32(data_size);
2296 req->read_offset = cpu_to_le32(data_offset);
2297 strcpy(req->object_name, obj_name);
2298 req->descriptor_count = cpu_to_le32(1);
2299 req->buf_len = cpu_to_le32(data_size);
2300 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2301 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2303 status = be_mcc_notify_wait(adapter);
2305 resp = embedded_payload(wrb);
2307 *data_read = le32_to_cpu(resp->actual_read_len);
2308 *eof = le32_to_cpu(resp->eof);
2310 *addn_status = resp->additional_status;
2314 spin_unlock_bh(&adapter->mcc_lock);
2318 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2319 u32 flash_type, u32 flash_opcode, u32 buf_size)
2321 struct be_mcc_wrb *wrb;
2322 struct be_cmd_write_flashrom *req;
2325 spin_lock_bh(&adapter->mcc_lock);
2326 adapter->flash_status = 0;
2328 wrb = wrb_from_mccq(adapter);
2335 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2336 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2339 req->params.op_type = cpu_to_le32(flash_type);
2340 req->params.op_code = cpu_to_le32(flash_opcode);
2341 req->params.data_buf_size = cpu_to_le32(buf_size);
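	/* WRITE_FLASHROM also completes asynchronously via et_cmd_compl
	 * (see be_async_cmd_process()).
	 */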
2343 be_mcc_notify(adapter);
2344 spin_unlock_bh(&adapter->mcc_lock);
2346 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2347 msecs_to_jiffies(40000)))
2348 status = -ETIMEDOUT;
2350 status = adapter->flash_status;
2355 spin_unlock_bh(&adapter->mcc_lock);
2359 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2360 u16 optype, int offset)
2362 struct be_mcc_wrb *wrb;
2363 struct be_cmd_read_flash_crc *req;
2366 spin_lock_bh(&adapter->mcc_lock);
2368 wrb = wrb_from_mccq(adapter);
2373 req = embedded_payload(wrb);
2375 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2376 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2379 req->params.op_type = cpu_to_le32(optype);
2380 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2381 req->params.offset = cpu_to_le32(offset);
2382 req->params.data_buf_size = cpu_to_le32(0x4);
2384 status = be_mcc_notify_wait(adapter);
2386 memcpy(flashed_crc, req->crc, 4);
2389 spin_unlock_bh(&adapter->mcc_lock);
2393 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2394 struct be_dma_mem *nonemb_cmd)
2396 struct be_mcc_wrb *wrb;
2397 struct be_cmd_req_acpi_wol_magic_config *req;
2400 spin_lock_bh(&adapter->mcc_lock);
2402 wrb = wrb_from_mccq(adapter);
2407 req = nonemb_cmd->va;
2409 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2410 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2412 memcpy(req->magic_mac, mac, ETH_ALEN);
2414 status = be_mcc_notify_wait(adapter);
2417 spin_unlock_bh(&adapter->mcc_lock);
2421 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2422 u8 loopback_type, u8 enable)
2424 struct be_mcc_wrb *wrb;
2425 struct be_cmd_req_set_lmode *req;
2428 spin_lock_bh(&adapter->mcc_lock);
2430 wrb = wrb_from_mccq(adapter);
2436 req = embedded_payload(wrb);
2438 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2439 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2442 req->src_port = port_num;
2443 req->dest_port = port_num;
2444 req->loopback_type = loopback_type;
2445 req->loopback_state = enable;
2447 status = be_mcc_notify_wait(adapter);
2449 spin_unlock_bh(&adapter->mcc_lock);
2453 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2454 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2457 struct be_mcc_wrb *wrb;
2458 struct be_cmd_req_loopback_test *req;
2459 struct be_cmd_resp_loopback_test *resp;
2462 spin_lock_bh(&adapter->mcc_lock);
2464 wrb = wrb_from_mccq(adapter);
2470 req = embedded_payload(wrb);
2472 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2473 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2476 req->hdr.timeout = cpu_to_le32(15);
2477 req->pattern = cpu_to_le64(pattern);
2478 req->src_port = cpu_to_le32(port_num);
2479 req->dest_port = cpu_to_le32(port_num);
2480 req->pkt_size = cpu_to_le32(pkt_size);
2481 req->num_pkts = cpu_to_le32(num_pkts);
2482 req->loopback_type = cpu_to_le32(loopback_type);
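	/* The loopback test result is delivered via an async completion
	 * that signals et_cmd_compl (see be_async_cmd_process()).
	 */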
2484 be_mcc_notify(adapter);
2486 spin_unlock_bh(&adapter->mcc_lock);
2488 wait_for_completion(&adapter->et_cmd_compl);
2489 resp = embedded_payload(wrb);
2490 status = le32_to_cpu(resp->status);
2494 spin_unlock_bh(&adapter->mcc_lock);
2498 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2499 u32 byte_cnt, struct be_dma_mem *cmd)
2501 struct be_mcc_wrb *wrb;
2502 struct be_cmd_req_ddrdma_test *req;
2506 spin_lock_bh(&adapter->mcc_lock);
2508 wrb = wrb_from_mccq(adapter);
2514 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2515 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2518 req->pattern = cpu_to_le64(pattern);
2519 req->byte_count = cpu_to_le32(byte_cnt);
2520 for (i = 0; i < byte_cnt; i++) {
2521 req->snd_buff[i] = (u8)(pattern >> (j*8));
2527 status = be_mcc_notify_wait(adapter);
2530 struct be_cmd_resp_ddrdma_test *resp;
2532 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2539 spin_unlock_bh(&adapter->mcc_lock);
2543 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2544 struct be_dma_mem *nonemb_cmd)
2546 struct be_mcc_wrb *wrb;
2547 struct be_cmd_req_seeprom_read *req;
2550 spin_lock_bh(&adapter->mcc_lock);
2552 wrb = wrb_from_mccq(adapter);
2557 req = nonemb_cmd->va;
2559 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2560 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2563 status = be_mcc_notify_wait(adapter);
2566 spin_unlock_bh(&adapter->mcc_lock);
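/* Query PHY details from the firmware (via a DMA-mapped request) and cache
 * them in adapter->phy.  On BE2 chips the driver overrides
 * fixed_speeds_supported with the 10Gbps/1Gbps values.
 */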
2570 int be_cmd_get_phy_info(struct be_adapter *adapter)
2572 struct be_mcc_wrb *wrb;
2573 struct be_cmd_req_get_phy_info *req;
2574 struct be_dma_mem cmd;
2577 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2578 CMD_SUBSYSTEM_COMMON))
2581 spin_lock_bh(&adapter->mcc_lock);
2583 wrb = wrb_from_mccq(adapter);
2588 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2589 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2591 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2598 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2599 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2602 status = be_mcc_notify_wait(adapter);
2604 struct be_phy_info *resp_phy_info =
2605 cmd.va + sizeof(struct be_cmd_req_hdr);
2606 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2607 adapter->phy.interface_type =
2608 le16_to_cpu(resp_phy_info->interface_type);
2609 adapter->phy.auto_speeds_supported =
2610 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2611 adapter->phy.fixed_speeds_supported =
2612 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2613 adapter->phy.misc_params =
2614 le32_to_cpu(resp_phy_info->misc_params);
2616 if (BE2_chip(adapter)) {
2617 adapter->phy.fixed_speeds_supported =
2618 BE_SUPPORTED_SPEED_10GBPS |
2619 BE_SUPPORTED_SPEED_1GBPS;
2622 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2624 spin_unlock_bh(&adapter->mcc_lock);
2628 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2630 struct be_mcc_wrb *wrb;
2631 struct be_cmd_req_set_qos *req;
2634 spin_lock_bh(&adapter->mcc_lock);
2636 wrb = wrb_from_mccq(adapter);
2642 req = embedded_payload(wrb);
2644 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2645 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2647 req->hdr.domain = domain;
2648 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2649 req->max_bps_nic = cpu_to_le32(bps);
2651 status = be_mcc_notify_wait(adapter);
2654 spin_unlock_bh(&adapter->mcc_lock);
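/* Read the controller attributes over the mailbox and cache the physical
 * port number in adapter->hba_port_num.
 */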
2658 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2660 struct be_mcc_wrb *wrb;
2661 struct be_cmd_req_cntl_attribs *req;
2662 struct be_cmd_resp_cntl_attribs *resp;
2664 int payload_len = max(sizeof(*req), sizeof(*resp));
2665 struct mgmt_controller_attrib *attribs;
2666 struct be_dma_mem attribs_cmd;
2668 if (mutex_lock_interruptible(&adapter->mbox_lock))
2671 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2672 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2673 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2675 if (!attribs_cmd.va) {
2676 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2681 wrb = wrb_from_mbox(adapter);
2686 req = attribs_cmd.va;
2688 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2689 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2692 status = be_mbox_notify_wait(adapter);
2694 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2695 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2699 mutex_unlock(&adapter->mbox_lock);
2701 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2702 attribs_cmd.va, attribs_cmd.dma);
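/* Advertise the driver's capabilities to the firmware and request the BE3
 * native ERX API; adapter->be3_native records whether the firmware granted
 * it.
 */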
2707 int be_cmd_req_native_mode(struct be_adapter *adapter)
2709 struct be_mcc_wrb *wrb;
2710 struct be_cmd_req_set_func_cap *req;
2713 if (mutex_lock_interruptible(&adapter->mbox_lock))
2716 wrb = wrb_from_mbox(adapter);
2722 req = embedded_payload(wrb);
2724 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2725 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2726 sizeof(*req), wrb, NULL);
2728 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2729 CAPABILITY_BE3_NATIVE_ERX_API);
2730 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2732 status = be_mbox_notify_wait(adapter);
2734 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2735 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2736 CAPABILITY_BE3_NATIVE_ERX_API;
2737 if (!adapter->be3_native)
2738 dev_warn(&adapter->pdev->dev,
2739 "adapter not in advanced mode\n");
2742 mutex_unlock(&adapter->mbox_lock);
2746 /* Get privilege(s) for a function */
2747 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2750 struct be_mcc_wrb *wrb;
2751 struct be_cmd_req_get_fn_privileges *req;
2754 spin_lock_bh(&adapter->mcc_lock);
2756 wrb = wrb_from_mccq(adapter);
2762 req = embedded_payload(wrb);
2764 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2765 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2768 req->hdr.domain = domain;
2770 status = be_mcc_notify_wait(adapter);
2772 struct be_cmd_resp_get_fn_privileges *resp =
2773 embedded_payload(wrb);
2774 *privilege = le32_to_cpu(resp->privilege_mask);
2776 /* In UMC mode the FW does not return the right privileges.
2777 * Override with privileges equivalent to those of a PF. */
2779 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2781 *privilege = MAX_PRIVILEGES;
2785 spin_unlock_bh(&adapter->mcc_lock);
2789 /* Set privilege(s) for a function */
2790 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2793 struct be_mcc_wrb *wrb;
2794 struct be_cmd_req_set_fn_privileges *req;
2797 spin_lock_bh(&adapter->mcc_lock);
2799 wrb = wrb_from_mccq(adapter);
2805 req = embedded_payload(wrb);
2806 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2807 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2809 req->hdr.domain = domain;
2810 if (lancer_chip(adapter))
2811 req->privileges_lancer = cpu_to_le32(privileges);
2813 req->privileges = cpu_to_le32(privileges);
2815 status = be_mcc_notify_wait(adapter);
2817 spin_unlock_bh(&adapter->mcc_lock);
2821 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
2822 * pmac_id_valid: false => pmac_id or MAC address is requested.
2823 * If pmac_id is returned, pmac_id_valid is returned as true
2825 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2826 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2829 struct be_mcc_wrb *wrb;
2830 struct be_cmd_req_get_mac_list *req;
2833 struct be_dma_mem get_mac_list_cmd;
2836 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2837 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2838 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2839 get_mac_list_cmd.size,
2840 &get_mac_list_cmd.dma);
2842 if (!get_mac_list_cmd.va) {
2843 dev_err(&adapter->pdev->dev,
2844 "Memory allocation failure during GET_MAC_LIST\n");
2848 spin_lock_bh(&adapter->mcc_lock);
2850 wrb = wrb_from_mccq(adapter);
2856 req = get_mac_list_cmd.va;
2858 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2859 OPCODE_COMMON_GET_MAC_LIST,
2860 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2861 req->hdr.domain = domain;
2862 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2863 if (*pmac_id_valid) {
2864 req->mac_id = cpu_to_le32(*pmac_id);
2865 req->iface_id = cpu_to_le16(if_handle);
2866 req->perm_override = 0;
2868 req->perm_override = 1;
2871 status = be_mcc_notify_wait(adapter);
2873 struct be_cmd_resp_get_mac_list *resp =
2874 get_mac_list_cmd.va;
2876 if (*pmac_id_valid) {
2877 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2882 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2883 /* MAC list returned could contain one or more active mac_ids
2884 * or one or more true or pseudo permanent MAC addresses.
2885 * If an active mac_id is present, return the first active mac_id found. */
2888 for (i = 0; i < mac_count; i++) {
2889 struct get_list_macaddr *mac_entry;
2893 mac_entry = &resp->macaddr_list[i];
2894 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2895 /* mac_id is a 32 bit value while a MAC address is 6 bytes, so the entry size tells the two apart */
2898 if (mac_addr_size == sizeof(u32)) {
2899 *pmac_id_valid = true;
2900 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2901 *pmac_id = le32_to_cpu(mac_id);
2905 /* If no active mac_id found, return first mac addr */
2906 *pmac_id_valid = false;
2907 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2912 spin_unlock_bh(&adapter->mcc_lock);
2913 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2914 get_mac_list_cmd.va, get_mac_list_cmd.dma);
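/* Return the MAC address currently in use on an interface: look up the
 * active pmac_id if needed, then fetch the corresponding address either
 * with a MAC-address query (BEx chips) or from the MAC list (others).
 */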
2918 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
2919 u8 *mac, u32 if_handle, bool active, u32 domain)
2923 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
2925 if (BEx_chip(adapter))
2926 return be_cmd_mac_addr_query(adapter, mac, false,
2927 if_handle, curr_pmac_id);
2929 /* Fetch the MAC address using pmac_id */
2930 return be_cmd_get_mac_from_list(adapter, mac, &active,
2935 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2938 bool pmac_valid = false;
2940 memset(mac, 0, ETH_ALEN);
2942 if (BEx_chip(adapter)) {
2943 if (be_physfn(adapter))
2944 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2947 status = be_cmd_mac_addr_query(adapter, mac, false,
2948 adapter->if_handle, 0);
2950 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
2951 NULL, adapter->if_handle, 0);
2957 /* Uses synchronous MCCQ */
2958 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2959 u8 mac_count, u32 domain)
2961 struct be_mcc_wrb *wrb;
2962 struct be_cmd_req_set_mac_list *req;
2964 struct be_dma_mem cmd;
2966 memset(&cmd, 0, sizeof(struct be_dma_mem));
2967 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2968 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2969 &cmd.dma, GFP_KERNEL);
2973 spin_lock_bh(&adapter->mcc_lock);
2975 wrb = wrb_from_mccq(adapter);
2982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2983 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2986 req->hdr.domain = domain;
2987 req->mac_count = mac_count;
2989 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2991 status = be_mcc_notify_wait(adapter);
2994 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2995 spin_unlock_bh(&adapter->mcc_lock);
2999 /* Wrapper to delete any active MACs and provision the new MAC.
3000 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3001 * current list are active.
3003 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3005 bool active_mac = false;
3006 u8 old_mac[ETH_ALEN];
3010 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3011 &pmac_id, if_id, dom);
3013 if (!status && active_mac)
3014 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3016 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
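/* Program the hyper-switch context for an interface: set the PVID when one
 * is supplied and, on non-BEx chips, the physical-port forwarding mode.
 */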
3019 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3020 u32 domain, u16 intf_id, u16 hsw_mode)
3022 struct be_mcc_wrb *wrb;
3023 struct be_cmd_req_set_hsw_config *req;
3027 spin_lock_bh(&adapter->mcc_lock);
3029 wrb = wrb_from_mccq(adapter);
3035 req = embedded_payload(wrb);
3036 ctxt = &req->context;
3038 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3039 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3042 req->hdr.domain = domain;
3043 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3045 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3046 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3048 if (!BEx_chip(adapter) && hsw_mode) {
3049 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3050 ctxt, adapter->hba_port_num);
3051 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3052 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3056 be_dws_cpu_to_le(req->context, sizeof(req->context));
3057 status = be_mcc_notify_wait(adapter);
3060 spin_unlock_bh(&adapter->mcc_lock);
3064 /* Get Hyper switch config */
3065 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3066 u32 domain, u16 intf_id, u8 *mode)
3068 struct be_mcc_wrb *wrb;
3069 struct be_cmd_req_get_hsw_config *req;
3074 spin_lock_bh(&adapter->mcc_lock);
3076 wrb = wrb_from_mccq(adapter);
3082 req = embedded_payload(wrb);
3083 ctxt = &req->context;
3085 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3086 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3089 req->hdr.domain = domain;
3090 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3092 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3094 if (!BEx_chip(adapter) && mode) {
3095 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3096 ctxt, adapter->hba_port_num);
3097 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3099 be_dws_cpu_to_le(req->context, sizeof(req->context));
3101 status = be_mcc_notify_wait(adapter);
3103 struct be_cmd_resp_get_hsw_config *resp =
3104 embedded_payload(wrb);
3105 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3106 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3107 pvid, &resp->context);
3109 *pvid = le16_to_cpu(vid);
3111 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3112 port_fwd_type, &resp->context);
3116 spin_unlock_bh(&adapter->mcc_lock);
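/* Query the adapter's ACPI Wake-on-LAN capability over the mailbox and
 * cache it in adapter->wol_cap / adapter->wol_en.  Skipped when the command
 * is not permitted for this function or WoL is excluded on this adapter.
 */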
3120 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3122 struct be_mcc_wrb *wrb;
3123 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3125 struct be_dma_mem cmd;
3127 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3131 if (be_is_wol_excluded(adapter))
3134 if (mutex_lock_interruptible(&adapter->mbox_lock))
3137 memset(&cmd, 0, sizeof(struct be_dma_mem));
3138 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3139 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3141 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3146 wrb = wrb_from_mbox(adapter);
3154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3155 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3156 sizeof(*req), wrb, &cmd);
3158 req->hdr.version = 1;
3159 req->query_options = BE_GET_WOL_CAP;
3161 status = be_mbox_notify_wait(adapter);
3163 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3164 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
3166 adapter->wol_cap = resp->wol_settings;
3167 if (adapter->wol_cap & BE_WOL_CAP)
3168 adapter->wol_en = true;
3171 mutex_unlock(&adapter->mbox_lock);
3173 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
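/* Set the firmware's UART trace level: read the extended FAT capabilities,
 * rewrite the debug level of every module's MODE_UART trace entry, and
 * write the table back.
 */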
3178 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3180 struct be_dma_mem extfat_cmd;
3181 struct be_fat_conf_params *cfgs;
3185 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3186 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3187 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3192 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3196 cfgs = (struct be_fat_conf_params *)
3197 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3198 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3199 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3200 for (j = 0; j < num_modes; j++) {
3201 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3202 cfgs->module[i].trace_lvl[j].dbg_lvl =
3207 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3209 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3214 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3216 struct be_dma_mem extfat_cmd;
3217 struct be_fat_conf_params *cfgs;
3221 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3222 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3223 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3226 if (!extfat_cmd.va) {
3227 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3232 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3234 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3235 sizeof(struct be_cmd_resp_hdr));
3236 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3237 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3238 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3241 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3247 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3248 struct be_dma_mem *cmd)
3250 struct be_mcc_wrb *wrb;
3251 struct be_cmd_req_get_ext_fat_caps *req;
3254 if (mutex_lock_interruptible(&adapter->mbox_lock))
3257 wrb = wrb_from_mbox(adapter);
3264 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3265 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3266 cmd->size, wrb, cmd);
3267 req->parameter_type = cpu_to_le32(1);
3269 status = be_mbox_notify_wait(adapter);
3271 mutex_unlock(&adapter->mbox_lock);
3275 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3276 struct be_dma_mem *cmd,
3277 struct be_fat_conf_params *configs)
3279 struct be_mcc_wrb *wrb;
3280 struct be_cmd_req_set_ext_fat_caps *req;
3283 spin_lock_bh(&adapter->mcc_lock);
3285 wrb = wrb_from_mccq(adapter);
3292 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3293 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3294 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3295 cmd->size, wrb, cmd);
3297 status = be_mcc_notify_wait(adapter);
3299 spin_unlock_bh(&adapter->mcc_lock);
3303 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3305 struct be_mcc_wrb *wrb;
3306 struct be_cmd_req_get_port_name *req;
3309 if (!lancer_chip(adapter)) {
3310 *port_name = adapter->hba_port_num + '0';
3314 spin_lock_bh(&adapter->mcc_lock);
3316 wrb = wrb_from_mccq(adapter);
3322 req = embedded_payload(wrb);
3324 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3325 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3327 req->hdr.version = 1;
3329 status = be_mcc_notify_wait(adapter);
3331 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3332 *port_name = resp->port_name[adapter->hba_port_num];
3334 *port_name = adapter->hba_port_num + '0';
3337 spin_unlock_bh(&adapter->mcc_lock);
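/* GET_FUNC_CONFIG / GET_PROFILE_CONFIG responses carry a list of
 * variable-length resource descriptors.  The helpers below walk that list
 * using each entry's desc_len (falling back to RESOURCE_DESC_SIZE_V0 when
 * the length field is zero) and pick out the NIC, PCIe and port
 * descriptors.
 */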
3341 /* Descriptor type */
3347 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3350 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3351 struct be_nic_res_desc *nic;
3354 for (i = 0; i < desc_count; i++) {
3355 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3356 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3357 nic = (struct be_nic_res_desc *)hdr;
3358 if (desc_type == FUNC_DESC ||
3359 (desc_type == VFT_DESC &&
3360 nic->flags & (1 << VFT_SHIFT)))
3364 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3365 hdr = (void *)hdr + hdr->desc_len;
3370 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3372 return be_get_nic_desc(buf, desc_count, VFT_DESC);
3375 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3377 return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3380 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3383 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3384 struct be_pcie_res_desc *pcie;
3387 for (i = 0; i < desc_count; i++) {
3388 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3389 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3390 pcie = (struct be_pcie_res_desc *)hdr;
3391 if (pcie->pf_num == devfn)
3395 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3396 hdr = (void *)hdr + hdr->desc_len;
3401 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3403 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3406 for (i = 0; i < desc_count; i++) {
3407 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3408 return (struct be_port_res_desc *)hdr;
3410 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3411 hdr = (void *)hdr + hdr->desc_len;
3416 static void be_copy_nic_desc(struct be_resources *res,
3417 struct be_nic_res_desc *desc)
3419 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3420 res->max_vlans = le16_to_cpu(desc->vlan_count);
3421 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3422 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3423 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3424 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3425 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3426 /* Clear flags that the driver is not interested in */
3427 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3428 BE_IF_CAP_FLAGS_WANT;
3429 /* Need 1 RXQ as the default RXQ */
3430 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3431 res->max_rss_qs -= 1;
3435 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3437 struct be_mcc_wrb *wrb;
3438 struct be_cmd_req_get_func_config *req;
3440 struct be_dma_mem cmd;
3442 if (mutex_lock_interruptible(&adapter->mbox_lock))
3445 memset(&cmd, 0, sizeof(struct be_dma_mem));
3446 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3447 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3449 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3454 wrb = wrb_from_mbox(adapter);
3462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3463 OPCODE_COMMON_GET_FUNC_CONFIG,
3464 cmd.size, wrb, &cmd);
3466 if (skyhawk_chip(adapter))
3467 req->hdr.version = 1;
3469 status = be_mbox_notify_wait(adapter);
3471 struct be_cmd_resp_get_func_config *resp = cmd.va;
3472 u32 desc_count = le32_to_cpu(resp->desc_count);
3473 struct be_nic_res_desc *desc;
3475 desc = be_get_func_nic_desc(resp->func_param, desc_count);
3481 adapter->pf_number = desc->pf_num;
3482 be_copy_nic_desc(res, desc);
3485 mutex_unlock(&adapter->mbox_lock);
3487 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3491 /* Will use MBOX only if MCCQ has not been created */
3492 int be_cmd_get_profile_config(struct be_adapter *adapter,
3493 struct be_resources *res, u8 domain)
3495 struct be_cmd_resp_get_profile_config *resp;
3496 struct be_cmd_req_get_profile_config *req;
3497 struct be_nic_res_desc *vf_res;
3498 struct be_pcie_res_desc *pcie;
3499 struct be_port_res_desc *port;
3500 struct be_nic_res_desc *nic;
3501 struct be_mcc_wrb wrb = {0};
3502 struct be_dma_mem cmd;
3506 memset(&cmd, 0, sizeof(struct be_dma_mem));
3507 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3508 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3513 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3514 OPCODE_COMMON_GET_PROFILE_CONFIG,
3515 cmd.size, &wrb, &cmd);
3517 req->hdr.domain = domain;
3518 if (!lancer_chip(adapter))
3519 req->hdr.version = 1;
3520 req->type = ACTIVE_PROFILE_TYPE;
3522 status = be_cmd_notify_wait(adapter, &wrb);
3527 desc_count = le32_to_cpu(resp->desc_count);
3529 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3532 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3534 port = be_get_port_desc(resp->func_param, desc_count);
3536 adapter->mc_type = port->mc_type;
3538 nic = be_get_func_nic_desc(resp->func_param, desc_count);
3540 be_copy_nic_desc(res, nic);
3542 vf_res = be_get_vft_desc(resp->func_param, desc_count);
3544 res->vf_if_cap_flags = vf_res->cap_flags;
3547 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3551 /* Will use MBOX only if MCCQ has not been created */
3552 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3553 int size, int count, u8 version, u8 domain)
3555 struct be_cmd_req_set_profile_config *req;
3556 struct be_mcc_wrb wrb = {0};
3557 struct be_dma_mem cmd;
3560 memset(&cmd, 0, sizeof(struct be_dma_mem));
3561 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3562 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3567 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3568 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3570 req->hdr.version = version;
3571 req->hdr.domain = domain;
3572 req->desc_count = cpu_to_le32(count);
3573 memcpy(req->desc, desc, size);
3575 status = be_cmd_notify_wait(adapter, &wrb);
3578 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3582 /* Mark all fields invalid */
3583 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3585 memset(nic, 0, sizeof(*nic));
3586 nic->unicast_mac_count = 0xFFFF;
3587 nic->mcc_count = 0xFFFF;
3588 nic->vlan_count = 0xFFFF;
3589 nic->mcast_mac_count = 0xFFFF;
3590 nic->txq_count = 0xFFFF;
3591 nic->rq_count = 0xFFFF;
3592 nic->rssq_count = 0xFFFF;
3593 nic->lro_count = 0xFFFF;
3594 nic->cq_count = 0xFFFF;
3595 nic->toe_conn_count = 0xFFFF;
3596 nic->eq_count = 0xFFFF;
3597 nic->iface_count = 0xFFFF;
3598 nic->link_param = 0xFF;
3599 nic->channel_id_param = cpu_to_le16(0xF000);
3600 nic->acpi_params = 0xFF;
3601 nic->wol_param = 0x0F;
3602 nic->tunnel_iface_count = 0xFFFF;
3603 nic->direct_tenant_iface_count = 0xFFFF;
3604 nic->bw_min = 0xFFFFFFFF;
3605 nic->bw_max = 0xFFFFFFFF;
3608 /* Mark all fields invalid */
3609 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3611 memset(pcie, 0, sizeof(*pcie));
3612 pcie->sriov_state = 0xFF;
3613 pcie->pf_state = 0xFF;
3614 pcie->pf_type = 0xFF;
3615 pcie->num_vfs = 0xFFFF;
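/* Apply a TX rate limit to a function.  BE3 uses the legacy SET_QOS
 * command; Lancer programs bw_max = max_rate / 10 through a v0 NIC
 * descriptor; other chips program bw_max as a percentage of link_speed
 * through a v1 descriptor.  For example (illustrative numbers only),
 * max_rate = 5000 with link_speed = 10000 gives
 * bw_percent = (5000 * 100) / 10000 = 50.
 */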
3618 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3621 struct be_nic_res_desc nic_desc;
3625 if (BE3_chip(adapter))
3626 return be_cmd_set_qos(adapter, max_rate / 10, domain);
3628 be_reset_nic_desc(&nic_desc);
3629 nic_desc.pf_num = adapter->pf_number;
3630 nic_desc.vf_num = domain;
3631 if (lancer_chip(adapter)) {
3632 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3633 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3634 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3636 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3639 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3640 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3641 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3642 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3643 nic_desc.bw_max = cpu_to_le32(bw_percent);
3646 return be_cmd_set_profile_config(adapter, &nic_desc,
3647 nic_desc.hdr.desc_len,
3648 1, version, domain);
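/* Program the SR-IOV resource profile (BEx and Lancer are rejected up
 * front): a PCIe descriptor for the PF carrying the SR-IOV state and VF
 * count, plus a NIC template descriptor applied to every VF.  When the VFs
 * may use RSS, the PF's RSS queues are split among them; e.g. (illustrative
 * numbers) with res.max_rss_qs = 32, num_vfs = 4 and enough headroom that
 * 8 queue pairs stay with the PF, each VF gets (32 - 8) / 4 = 6 queue
 * pairs.  Otherwise each VF is given a single TX/RX queue pair.
 */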
3651 int be_cmd_set_sriov_config(struct be_adapter *adapter,
3652 struct be_resources res, u16 num_vfs)
3655 struct be_pcie_res_desc pcie;
3656 struct be_nic_res_desc nic_vft;
3660 if (BEx_chip(adapter) || lancer_chip(adapter))
3663 /* PF PCIE descriptor */
3664 be_reset_pcie_desc(&desc.pcie);
3665 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3666 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3667 desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3668 desc.pcie.pf_num = adapter->pdev->devfn;
3669 desc.pcie.sriov_state = num_vfs ? 1 : 0;
3670 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3672 /* VF NIC Template descriptor */
3673 be_reset_nic_desc(&desc.nic_vft);
3674 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3675 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3676 desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
3678 desc.nic_vft.pf_num = adapter->pdev->devfn;
3679 desc.nic_vft.vf_num = 0;
3681 if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3682 /* If number of VFs requested is 8 less than max supported,
3683 * assign 8 queue pairs to the PF and divide the remaining
3684 * resources evenly among the VFs
3686 if (num_vfs < (be_max_vfs(adapter) - 8))
3687 vf_q_count = (res.max_rss_qs - 8) / num_vfs;
3689 vf_q_count = res.max_rss_qs / num_vfs;
3691 desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
3692 desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
3693 desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
3694 desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
3696 desc.nic_vft.txq_count = cpu_to_le16(1);
3697 desc.nic_vft.rq_count = cpu_to_le16(1);
3698 desc.nic_vft.rssq_count = cpu_to_le16(0);
3699 /* One CQ for each TX, RX and MCCQ */
3700 desc.nic_vft.cq_count = cpu_to_le16(3);
3703 return be_cmd_set_profile_config(adapter, &desc,
3704 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
3707 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3709 struct be_mcc_wrb *wrb;
3710 struct be_cmd_req_manage_iface_filters *req;
3713 if (iface == 0xFFFFFFFF)
3716 spin_lock_bh(&adapter->mcc_lock);
3718 wrb = wrb_from_mccq(adapter);
3723 req = embedded_payload(wrb);
3725 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3726 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3729 req->target_iface_id = cpu_to_le32(iface);
3731 status = be_mcc_notify_wait(adapter);
3733 spin_unlock_bh(&adapter->mcc_lock);
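/* Tell the firmware which UDP destination port carries VxLAN traffic by
 * writing a port resource descriptor: a non-zero port enables VxLAN
 * parsing on that port number, a zero port disables it.
 */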
3737 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3739 struct be_port_res_desc port_desc;
3741 memset(&port_desc, 0, sizeof(port_desc));
3742 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3743 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3744 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3745 port_desc.link_num = adapter->hba_port_num;
3747 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3749 port_desc.nv_port = swab16(port);
3751 port_desc.nv_flags = NV_TYPE_DISABLED;
3752 port_desc.nv_port = 0;
3755 return be_cmd_set_profile_config(adapter, &port_desc,
3756 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
3759 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3762 struct be_mcc_wrb *wrb;
3763 struct be_cmd_req_get_iface_list *req;
3764 struct be_cmd_resp_get_iface_list *resp;
3767 spin_lock_bh(&adapter->mcc_lock);
3769 wrb = wrb_from_mccq(adapter);
3774 req = embedded_payload(wrb);
3776 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3777 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3779 req->hdr.domain = vf_num + 1;
3781 status = be_mcc_notify_wait(adapter);
3783 resp = (struct be_cmd_resp_get_iface_list *)req;
3784 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3788 spin_unlock_bh(&adapter->mcc_lock);
3792 static int lancer_wait_idle(struct be_adapter *adapter)
3794 #define SLIPORT_IDLE_TIMEOUT 30
3798 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3799 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3800 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3806 if (i == SLIPORT_IDLE_TIMEOUT)
3812 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3816 status = lancer_wait_idle(adapter);
3820 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3826 /* Check whether a firmware dump image is present */
3826 bool dump_present(struct be_adapter *adapter)
3828 u32 sliport_status = 0;
3830 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3831 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3834 int lancer_initiate_dump(struct be_adapter *adapter)
3836 struct device *dev = &adapter->pdev->dev;
3839 if (dump_present(adapter)) {
3840 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
3844 /* Trigger a firmware reset and request a diagnostic dump */
3845 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3846 PHYSDEV_CONTROL_DD_MASK);
3848 dev_err(dev, "FW reset failed\n");
3852 status = lancer_wait_idle(adapter);
3856 if (!dump_present(adapter)) {
3857 dev_err(dev, "FW dump not generated\n");
3864 int lancer_delete_dump(struct be_adapter *adapter)
3868 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
3869 return be_cmd_status(status);
3873 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3875 struct be_mcc_wrb *wrb;
3876 struct be_cmd_enable_disable_vf *req;
3879 if (BEx_chip(adapter))
3882 spin_lock_bh(&adapter->mcc_lock);
3884 wrb = wrb_from_mccq(adapter);
3890 req = embedded_payload(wrb);
3892 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3893 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3896 req->hdr.domain = domain;
3898 status = be_mcc_notify_wait(adapter);
3900 spin_unlock_bh(&adapter->mcc_lock);
3904 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3906 struct be_mcc_wrb *wrb;
3907 struct be_cmd_req_intr_set *req;
3910 if (mutex_lock_interruptible(&adapter->mbox_lock))
3913 wrb = wrb_from_mbox(adapter);
3915 req = embedded_payload(wrb);
3917 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3918 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3921 req->intr_enabled = intr_enable;
3923 status = be_mbox_notify_wait(adapter);
3925 mutex_unlock(&adapter->mbox_lock);
3930 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
3932 struct be_cmd_req_get_active_profile *req;
3933 struct be_mcc_wrb *wrb;
3936 if (mutex_lock_interruptible(&adapter->mbox_lock))
3939 wrb = wrb_from_mbox(adapter);
3945 req = embedded_payload(wrb);
3947 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3948 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
3951 status = be_mbox_notify_wait(adapter);
3953 struct be_cmd_resp_get_active_profile *resp =
3954 embedded_payload(wrb);
3955 *profile_id = le16_to_cpu(resp->active_profile_id);
3959 mutex_unlock(&adapter->mbox_lock);
3963 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
3964 int link_state, u8 domain)
3966 struct be_mcc_wrb *wrb;
3967 struct be_cmd_req_set_ll_link *req;
3970 if (BEx_chip(adapter) || lancer_chip(adapter))
3973 spin_lock_bh(&adapter->mcc_lock);
3975 wrb = wrb_from_mccq(adapter);
3981 req = embedded_payload(wrb);
3983 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3984 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
3985 sizeof(*req), wrb, NULL);
3987 req->hdr.version = 1;
3988 req->hdr.domain = domain;
3990 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
3991 req->link_config |= 1;
3993 if (link_state == IFLA_VF_LINK_STATE_AUTO)
3994 req->link_config |= 1 << PLINK_TRACK_SHIFT;
3996 status = be_mcc_notify_wait(adapter);
3998 spin_unlock_bh(&adapter->mcc_lock);
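/* Pass-through used by the RoCE driver: copy the caller's fully formed
 * request into an embedded WRB, issue it on the MCC queue and copy the
 * response (header plus response_length bytes) back to the caller.
 */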
4002 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4003 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4005 struct be_adapter *adapter = netdev_priv(netdev_handle);
4006 struct be_mcc_wrb *wrb;
4007 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
4008 struct be_cmd_req_hdr *req;
4009 struct be_cmd_resp_hdr *resp;
4012 spin_lock_bh(&adapter->mcc_lock);
4014 wrb = wrb_from_mccq(adapter);
4019 req = embedded_payload(wrb);
4020 resp = embedded_payload(wrb);
4022 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4023 hdr->opcode, wrb_payload_size, wrb, NULL);
4024 memcpy(req, wrb_payload, wrb_payload_size);
4025 be_dws_cpu_to_le(req, wrb_payload_size);
4027 status = be_mcc_notify_wait(adapter);
4029 *cmd_status = (status & 0xffff);
4032 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4033 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4035 spin_unlock_bh(&adapter->mcc_lock);
4038 EXPORT_SYMBOL(be_roce_mcc_cmd);