2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
24 return wrb->payload.embedded_payload;
27 static void be_mcc_notify(struct be_adapter *adapter)
29 struct be_queue_info *mccq = &adapter->mcc_obj.q;
32 if (be_error(adapter))
35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
39 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
42 /* To check if the valid bit is set, check the entire word as we don't know
43 * the endianness of the data (an old entry is host endian while a new entry is little endian) */
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
47 if (compl->flags != 0) {
48 compl->flags = le32_to_cpu(compl->flags);
49 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
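/* A consumed entry has had its flags word zeroed (see be_mcc_compl_use()
 * below), whereas a new entry is DMA'd by the controller with at least the
 * valid bit set, so a non-zero flags word is a reliable "new entry" test
 * before any byte swapping; the swap and the valid-bit BUG_ON only run once
 * the entry is known to be new.
 */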
56 /* Need to reset the entire word that houses the valid bit */
57 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
62 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
67 addr = ((addr << 16) << 16) | tag0;
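/* tag0/tag1 hold the lower and upper 32 bits of the request header's kernel
 * virtual address (stashed there by be_wrb_cmd_hdr_prepare() below), so the
 * completion path can rebuild a be_cmd_resp_hdr pointer from the tags alone.
 * The double 16-bit shift keeps the expression well-defined even if the
 * intermediate type is only 32 bits wide, in which case the upper half
 * simply drops out.
 */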
71 static int be_mcc_compl_process(struct be_adapter *adapter,
72 struct be_mcc_compl *compl)
74 u16 compl_status, extd_status;
75 struct be_cmd_resp_hdr *resp_hdr;
76 u8 opcode = 0, subsystem = 0;
78 /* Just swap the status to host endian; the mcc tag is opaquely copied from the mcc wrb */
80 be_dws_le_to_cpu(compl, 4);
82 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
83 CQE_STATUS_COMPL_MASK;
85 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
88 opcode = resp_hdr->opcode;
89 subsystem = resp_hdr->subsystem;
92 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
93 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
94 (subsystem == CMD_SUBSYSTEM_COMMON)) {
95 adapter->flash_status = compl_status;
96 complete(&adapter->flash_compl);
99 if (compl_status == MCC_STATUS_SUCCESS) {
100 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
101 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
102 (subsystem == CMD_SUBSYSTEM_ETH)) {
103 be_parse_stats(adapter);
104 adapter->stats_cmd_sent = false;
106 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
107 subsystem == CMD_SUBSYSTEM_COMMON) {
108 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
110 adapter->drv_stats.be_on_die_temperature =
111 resp->on_die_temperature;
114 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
115 adapter->be_get_temp_freq = 0;
117 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
118 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
122 dev_warn(&adapter->pdev->dev,
123 "opcode %d-%d is not permitted\n",
126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
127 CQE_STATUS_EXTD_MASK;
128 dev_err(&adapter->pdev->dev,
129 "opcode %d-%d failed:status %d-%d\n",
130 opcode, subsystem, compl_status, extd_status);
137 /* Link state evt is a string of bytes; no need for endian swapping */
138 static void be_async_link_state_process(struct be_adapter *adapter,
139 struct be_async_event_link_state *evt)
141 /* When link status changes, link speed must be re-queried from FW */
142 adapter->phy.link_speed = -1;
144 /* For the initial link status, do not rely on the ASYNC event as
145 * it may not be received in some cases.
147 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
148 be_link_status_update(adapter, evt->port_link_status);
151 /* Grp5 CoS Priority evt */
152 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
153 struct be_async_event_grp5_cos_priority *evt)
156 adapter->vlan_prio_bmap = evt->available_priority_bmap;
157 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
158 adapter->recommended_prio =
159 evt->reco_default_priority << VLAN_PRIO_SHIFT;
163 /* Grp5 QOS Speed evt */
164 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
165 struct be_async_event_grp5_qos_link_speed *evt)
167 if (evt->physical_port == adapter->port_num) {
168 /* qos_link_speed is in units of 10 Mbps */
169 adapter->phy.link_speed = evt->qos_link_speed * 10;
174 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
175 struct be_async_event_grp5_pvid_state *evt)
178 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
183 static void be_async_grp5_evt_process(struct be_adapter *adapter,
184 u32 trailer, struct be_mcc_compl *evt)
188 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
189 ASYNC_TRAILER_EVENT_TYPE_MASK;
191 switch (event_type) {
192 case ASYNC_EVENT_COS_PRIORITY:
193 be_async_grp5_cos_priority_process(adapter,
194 (struct be_async_event_grp5_cos_priority *)evt);
196 case ASYNC_EVENT_QOS_SPEED:
197 be_async_grp5_qos_speed_process(adapter,
198 (struct be_async_event_grp5_qos_link_speed *)evt);
200 case ASYNC_EVENT_PVID_STATE:
201 be_async_grp5_pvid_state_process(adapter,
202 (struct be_async_event_grp5_pvid_state *)evt);
205 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
210 static inline bool is_link_state_evt(u32 trailer)
212 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
213 ASYNC_TRAILER_EVENT_CODE_MASK) ==
214 ASYNC_EVENT_CODE_LINK_STATE;
217 static inline bool is_grp5_evt(u32 trailer)
219 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
220 ASYNC_TRAILER_EVENT_CODE_MASK) ==
221 ASYNC_EVENT_CODE_GRP_5);
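/* For async completions the 'flags' word doubles as the event trailer: these
 * helpers extract the event code from it (1 for link state, 5 for group 5,
 * going by the 0x22 subscription bitmap programmed in
 * be_cmd_mccq_ext_create() below), and be_async_grp5_evt_process() above
 * pulls the finer-grained event type out of the same word.
 */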
224 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
226 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
227 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
229 if (be_mcc_compl_is_new(compl)) {
230 queue_tail_inc(mcc_cq);
236 void be_async_mcc_enable(struct be_adapter *adapter)
238 spin_lock_bh(&adapter->mcc_cq_lock);
240 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
241 adapter->mcc_obj.rearm_cq = true;
243 spin_unlock_bh(&adapter->mcc_cq_lock);
246 void be_async_mcc_disable(struct be_adapter *adapter)
248 adapter->mcc_obj.rearm_cq = false;
251 int be_process_mcc(struct be_adapter *adapter)
253 struct be_mcc_compl *compl;
254 int num = 0, status = 0;
255 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
257 spin_lock_bh(&adapter->mcc_cq_lock);
258 while ((compl = be_mcc_compl_get(adapter))) {
259 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
260 /* Interpret flags as an async trailer */
261 if (is_link_state_evt(compl->flags))
262 be_async_link_state_process(adapter,
263 (struct be_async_event_link_state *) compl);
264 else if (is_grp5_evt(compl->flags))
265 be_async_grp5_evt_process(adapter,
266 compl->flags, compl);
267 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
268 status = be_mcc_compl_process(adapter, compl);
269 atomic_dec(&mcc_obj->q.used);
271 be_mcc_compl_use(compl);
276 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
278 spin_unlock_bh(&adapter->mcc_cq_lock);
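/* This is the single consumer of the MCC CQ: async entries (flagged with
 * CQE_FLAGS_ASYNC_MASK) are dispatched to the event handlers above, ordinary
 * command completions are processed and accounted against the MCCQ, and the
 * CQ is then notified with the number of entries consumed together with the
 * current rearm setting, so completions stop generating events once
 * be_async_mcc_disable() has cleared rearm_cq.
 */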
282 /* Wait till no more pending mcc requests are present */
283 static int be_mcc_wait_compl(struct be_adapter *adapter)
285 #define mcc_timeout 120000 /* 12s timeout */
287 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
289 for (i = 0; i < mcc_timeout; i++) {
290 if (be_error(adapter))
293 status = be_process_mcc(adapter);
295 if (atomic_read(&mcc_obj->q.used) == 0)
299 if (i == mcc_timeout) {
300 dev_err(&adapter->pdev->dev, "FW not responding\n");
301 adapter->fw_timeout = true;
307 /* Notify the MCC of posted requests and wait for their completion */
308 static int be_mcc_notify_wait(struct be_adapter *adapter)
311 struct be_mcc_wrb *wrb;
312 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
313 u16 index = mcc_obj->q.head;
314 struct be_cmd_resp_hdr *resp;
316 index_dec(&index, mcc_obj->q.len);
317 wrb = queue_index_node(&mcc_obj->q, index);
319 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
321 be_mcc_notify(adapter);
323 status = be_mcc_wait_compl(adapter);
327 status = resp->status;
332 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
338 if (be_error(adapter))
341 ready = ioread32(db);
342 if (ready == 0xffffffff)
345 ready &= MPU_MAILBOX_DB_RDY_MASK;
350 dev_err(&adapter->pdev->dev, "FW not responding\n");
351 adapter->fw_timeout = true;
352 be_detect_error(adapter);
364 * Insert the mailbox address into the doorbell in two steps
365 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
367 static int be_mbox_notify_wait(struct be_adapter *adapter)
371 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
372 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
373 struct be_mcc_mailbox *mbox = mbox_mem->va;
374 struct be_mcc_compl *compl = &mbox->compl;
376 /* wait for ready to be set */
377 status = be_mbox_db_ready_wait(adapter, db);
381 val |= MPU_MAILBOX_DB_HI_MASK;
382 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
383 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
386 /* wait for ready to be set */
387 status = be_mbox_db_ready_wait(adapter, db);
392 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
393 val |= (u32)(mbox_mem->dma >> 4) << 2;
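/* The 64-bit mailbox address is handed to the firmware in two doorbell
 * writes: the first (with MPU_MAILBOX_DB_HI_MASK set) carries address bits
 * 34..63 in doorbell bits 2..31, the second carries bits 4..33 in the same
 * doorbell bits.  Bits 0..3 are not transferred, which presumes the mailbox
 * memory is 16-byte aligned, and each write is preceded by a ready-bit poll
 * so the firmware has consumed the previous value.
 */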
396 status = be_mbox_db_ready_wait(adapter, db);
400 /* A cq entry has been made now */
401 if (be_mcc_compl_is_new(compl)) {
402 status = be_mcc_compl_process(adapter, &mbox->compl);
403 be_mcc_compl_use(compl);
407 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
413 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
417 if (lancer_chip(adapter))
418 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
420 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
422 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
423 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
429 int lancer_wait_ready(struct be_adapter *adapter)
431 #define SLIPORT_READY_TIMEOUT 30
435 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
436 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
437 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
443 if (i == SLIPORT_READY_TIMEOUT)
449 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
452 u32 sliport_status, err, reset_needed;
453 status = lancer_wait_ready(adapter);
455 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
456 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
457 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
458 if (err && reset_needed) {
459 iowrite32(SLI_PORT_CONTROL_IP_MASK,
460 adapter->db + SLIPORT_CONTROL_OFFSET);
462 /* check if the adapter has corrected the error */
463 status = lancer_wait_ready(adapter);
464 sliport_status = ioread32(adapter->db +
465 SLIPORT_STATUS_OFFSET);
466 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
467 SLIPORT_STATUS_RN_MASK);
468 if (status || sliport_status)
470 } else if (err || reset_needed) {
477 int be_fw_wait_ready(struct be_adapter *adapter)
480 int status, timeout = 0;
481 struct device *dev = &adapter->pdev->dev;
483 if (lancer_chip(adapter)) {
484 status = lancer_wait_ready(adapter);
489 status = be_POST_stage_get(adapter, &stage);
491 dev_err(dev, "POST error; stage=0x%x\n", stage);
493 } else if (stage != POST_STAGE_ARMFW_RDY) {
494 if (msleep_interruptible(2000)) {
495 dev_err(dev, "Waiting for POST aborted\n");
502 } while (timeout < 60);
504 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
509 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
511 return &wrb->payload.sgl[0];
515 /* Don't touch the hdr after it's prepared */
516 /* mem will be NULL for embedded commands */
517 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
518 u8 subsystem, u8 opcode, int cmd_len,
519 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
522 unsigned long addr = (unsigned long)req_hdr;
525 req_hdr->opcode = opcode;
526 req_hdr->subsystem = subsystem;
527 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
528 req_hdr->version = 0;
530 wrb->tag0 = req_addr & 0xFFFFFFFF;
531 wrb->tag1 = upper_32_bits(req_addr);
533 wrb->payload_length = cmd_len;
535 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
536 MCC_WRB_SGE_CNT_SHIFT;
537 sge = nonembedded_sgl(wrb);
538 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
539 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
540 sge->len = cpu_to_le32(mem->size);
542 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
543 be_dws_cpu_to_le(wrb, 8);
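/* A WRB takes one of two shapes: embedded commands place the request in
 * wrb->payload and set MCC_WRB_EMBEDDED_MASK, while non-embedded commands
 * (mem != NULL) keep the request in a separate DMA buffer and describe it
 * with a single scatter-gather entry.  Either way the request's kernel
 * address is stashed in tag0/tag1 so the completion handler can locate the
 * response via be_decode_resp_hdr().
 */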
546 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
547 struct be_dma_mem *mem)
549 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
550 u64 dma = (u64)mem->dma;
552 for (i = 0; i < buf_pages; i++) {
553 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
554 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
559 /* Converts interrupt delay in microseconds to multiplier value */
560 static u32 eq_delay_to_mult(u32 usec_delay)
562 #define MAX_INTR_RATE 651042
563 const u32 round = 10;
569 u32 interrupt_rate = 1000000 / usec_delay;
570 /* Max delay, corresponding to the lowest interrupt rate */
571 if (interrupt_rate == 0)
574 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
575 multiplier /= interrupt_rate;
576 /* Round the multiplier to the closest value. */
577 multiplier = (multiplier + round/2) / round;
578 multiplier = min(multiplier, (u32)1023);
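/* Worked example: for a requested delay of 96us,
 *   interrupt_rate = 1000000 / 96                  = 10416
 *   multiplier     = (651042 - 10416) * 10 / 10416 = 615
 *   rounded        = (615 + 10/2) / 10             = 62  (clamped to <= 1023)
 * so a 96us EQ delay is programmed as a delay multiplier of 62.
 */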
584 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
586 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
587 struct be_mcc_wrb *wrb
588 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
589 memset(wrb, 0, sizeof(*wrb));
593 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
595 struct be_queue_info *mccq = &adapter->mcc_obj.q;
596 struct be_mcc_wrb *wrb;
598 if (atomic_read(&mccq->used) >= mccq->len) {
599 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
603 wrb = queue_head_node(mccq);
604 queue_head_inc(mccq);
605 atomic_inc(&mccq->used);
606 memset(wrb, 0, sizeof(*wrb));
610 /* Tell fw we're about to start firing cmds by writing a
611 * special pattern across the wrb hdr; uses mbox
613 int be_cmd_fw_init(struct be_adapter *adapter)
618 if (lancer_chip(adapter))
621 if (mutex_lock_interruptible(&adapter->mbox_lock))
624 wrb = (u8 *)wrb_from_mbox(adapter);
634 status = be_mbox_notify_wait(adapter);
636 mutex_unlock(&adapter->mbox_lock);
640 /* Tell fw we're done with firing cmds by writing a
641 * special pattern across the wrb hdr; uses mbox
643 int be_cmd_fw_clean(struct be_adapter *adapter)
648 if (lancer_chip(adapter))
651 if (mutex_lock_interruptible(&adapter->mbox_lock))
654 wrb = (u8 *)wrb_from_mbox(adapter);
664 status = be_mbox_notify_wait(adapter);
666 mutex_unlock(&adapter->mbox_lock);
670 int be_cmd_eq_create(struct be_adapter *adapter,
671 struct be_queue_info *eq, int eq_delay)
673 struct be_mcc_wrb *wrb;
674 struct be_cmd_req_eq_create *req;
675 struct be_dma_mem *q_mem = &eq->dma_mem;
678 if (mutex_lock_interruptible(&adapter->mbox_lock))
681 wrb = wrb_from_mbox(adapter);
682 req = embedded_payload(wrb);
684 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
685 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
687 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
689 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
691 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
692 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
693 __ilog2_u32(eq->len/256));
694 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
695 eq_delay_to_mult(eq_delay));
696 be_dws_cpu_to_le(req->context, sizeof(req->context));
698 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
700 status = be_mbox_notify_wait(adapter);
702 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
703 eq->id = le16_to_cpu(resp->eq_id);
707 mutex_unlock(&adapter->mbox_lock);
712 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
713 u8 type, bool permanent, u32 if_handle, u32 pmac_id)
715 struct be_mcc_wrb *wrb;
716 struct be_cmd_req_mac_query *req;
719 spin_lock_bh(&adapter->mcc_lock);
721 wrb = wrb_from_mccq(adapter);
726 req = embedded_payload(wrb);
728 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
729 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
734 req->if_id = cpu_to_le16((u16) if_handle);
735 req->pmac_id = cpu_to_le32(pmac_id);
739 status = be_mcc_notify_wait(adapter);
741 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
742 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
746 spin_unlock_bh(&adapter->mcc_lock);
750 /* Uses synchronous MCCQ */
751 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
752 u32 if_id, u32 *pmac_id, u32 domain)
754 struct be_mcc_wrb *wrb;
755 struct be_cmd_req_pmac_add *req;
758 spin_lock_bh(&adapter->mcc_lock);
760 wrb = wrb_from_mccq(adapter);
765 req = embedded_payload(wrb);
767 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
768 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
770 req->hdr.domain = domain;
771 req->if_id = cpu_to_le32(if_id);
772 memcpy(req->mac_address, mac_addr, ETH_ALEN);
774 status = be_mcc_notify_wait(adapter);
776 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
777 *pmac_id = le32_to_cpu(resp->pmac_id);
781 spin_unlock_bh(&adapter->mcc_lock);
783 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
789 /* Uses synchronous MCCQ */
790 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
792 struct be_mcc_wrb *wrb;
793 struct be_cmd_req_pmac_del *req;
799 spin_lock_bh(&adapter->mcc_lock);
801 wrb = wrb_from_mccq(adapter);
806 req = embedded_payload(wrb);
808 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
809 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
811 req->hdr.domain = dom;
812 req->if_id = cpu_to_le32(if_id);
813 req->pmac_id = cpu_to_le32(pmac_id);
815 status = be_mcc_notify_wait(adapter);
818 spin_unlock_bh(&adapter->mcc_lock);
823 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
824 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
826 struct be_mcc_wrb *wrb;
827 struct be_cmd_req_cq_create *req;
828 struct be_dma_mem *q_mem = &cq->dma_mem;
832 if (mutex_lock_interruptible(&adapter->mbox_lock))
835 wrb = wrb_from_mbox(adapter);
836 req = embedded_payload(wrb);
837 ctxt = &req->context;
839 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
840 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
842 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
843 if (lancer_chip(adapter)) {
844 req->hdr.version = 2;
845 req->page_size = 1; /* 1 for 4K */
846 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
848 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
849 __ilog2_u32(cq->len/256));
850 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
851 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
853 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
856 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
858 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
860 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
861 __ilog2_u32(cq->len/256));
862 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
863 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
864 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
867 be_dws_cpu_to_le(ctxt, sizeof(req->context));
869 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
871 status = be_mbox_notify_wait(adapter);
873 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
874 cq->id = le16_to_cpu(resp->cq_id);
878 mutex_unlock(&adapter->mbox_lock);
883 static u32 be_encoded_q_len(int q_len)
885 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
886 if (len_encoded == 16)
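/* fls() yields log2(len) + 1 for the power-of-two queue lengths used here,
 * so a 256-entry queue encodes as 9 and a 1024-entry queue as 11.  The
 * check above presumably wraps the maximum encoding (16, i.e. a 32768-entry
 * queue) back to 0 in the elided branch, the value the ring_size context
 * field uses for the largest ring.
 */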
891 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
892 struct be_queue_info *mccq,
893 struct be_queue_info *cq)
895 struct be_mcc_wrb *wrb;
896 struct be_cmd_req_mcc_ext_create *req;
897 struct be_dma_mem *q_mem = &mccq->dma_mem;
901 if (mutex_lock_interruptible(&adapter->mbox_lock))
904 wrb = wrb_from_mbox(adapter);
905 req = embedded_payload(wrb);
906 ctxt = &req->context;
908 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
909 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
911 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
912 if (lancer_chip(adapter)) {
913 req->hdr.version = 1;
914 req->cq_id = cpu_to_le16(cq->id);
916 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
917 be_encoded_q_len(mccq->len));
918 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
919 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
921 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
925 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
926 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
927 be_encoded_q_len(mccq->len));
928 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
931 /* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
932 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
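/* 0x00000022 == (1 << 1) | (1 << 5): bit 1 subscribes to link-state events
 * and bit 5 to group-5 events, matching the event codes decoded by
 * is_link_state_evt()/is_grp5_evt() earlier in this file (assuming those
 * codes are 1 and 5, as the comment above indicates).
 */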
933 be_dws_cpu_to_le(ctxt, sizeof(req->context));
935 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
937 status = be_mbox_notify_wait(adapter);
939 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
940 mccq->id = le16_to_cpu(resp->id);
941 mccq->created = true;
943 mutex_unlock(&adapter->mbox_lock);
948 int be_cmd_mccq_org_create(struct be_adapter *adapter,
949 struct be_queue_info *mccq,
950 struct be_queue_info *cq)
952 struct be_mcc_wrb *wrb;
953 struct be_cmd_req_mcc_create *req;
954 struct be_dma_mem *q_mem = &mccq->dma_mem;
958 if (mutex_lock_interruptible(&adapter->mbox_lock))
961 wrb = wrb_from_mbox(adapter);
962 req = embedded_payload(wrb);
963 ctxt = &req->context;
965 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
966 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
968 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
970 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
971 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
972 be_encoded_q_len(mccq->len));
973 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
975 be_dws_cpu_to_le(ctxt, sizeof(req->context));
977 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
979 status = be_mbox_notify_wait(adapter);
981 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
982 mccq->id = le16_to_cpu(resp->id);
983 mccq->created = true;
986 mutex_unlock(&adapter->mbox_lock);
990 int be_cmd_mccq_create(struct be_adapter *adapter,
991 struct be_queue_info *mccq,
992 struct be_queue_info *cq)
996 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
997 if (status && !lancer_chip(adapter)) {
998 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
999 "or newer to avoid conflicting priorities between NIC "
1000 "and FCoE traffic");
1001 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1006 int be_cmd_txq_create(struct be_adapter *adapter,
1007 struct be_queue_info *txq,
1008 struct be_queue_info *cq)
1010 struct be_mcc_wrb *wrb;
1011 struct be_cmd_req_eth_tx_create *req;
1012 struct be_dma_mem *q_mem = &txq->dma_mem;
1016 spin_lock_bh(&adapter->mcc_lock);
1018 wrb = wrb_from_mccq(adapter);
1024 req = embedded_payload(wrb);
1025 ctxt = &req->context;
1027 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1028 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1030 if (lancer_chip(adapter)) {
1031 req->hdr.version = 1;
1032 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
1033 adapter->if_handle);
1036 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1037 req->ulp_num = BE_ULP1_NUM;
1038 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1040 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
1041 be_encoded_q_len(txq->len));
1042 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
1043 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
1045 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1047 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1049 status = be_mcc_notify_wait(adapter);
1051 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1052 txq->id = le16_to_cpu(resp->cid);
1053 txq->created = true;
1057 spin_unlock_bh(&adapter->mcc_lock);
1063 int be_cmd_rxq_create(struct be_adapter *adapter,
1064 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1065 u32 if_id, u32 rss, u8 *rss_id)
1067 struct be_mcc_wrb *wrb;
1068 struct be_cmd_req_eth_rx_create *req;
1069 struct be_dma_mem *q_mem = &rxq->dma_mem;
1072 spin_lock_bh(&adapter->mcc_lock);
1074 wrb = wrb_from_mccq(adapter);
1079 req = embedded_payload(wrb);
1081 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1082 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1084 req->cq_id = cpu_to_le16(cq_id);
1085 req->frag_size = fls(frag_size) - 1;
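/* frag_size arrives as a byte count and is programmed as its log2:
 * e.g. 2048-byte RX fragments give fls(2048) - 1 = 11.
 */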
1087 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1088 req->interface_id = cpu_to_le32(if_id);
1089 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1090 req->rss_queue = cpu_to_le32(rss);
1092 status = be_mcc_notify_wait(adapter);
1094 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1095 rxq->id = le16_to_cpu(resp->id);
1096 rxq->created = true;
1097 *rss_id = resp->rss_id;
1101 spin_unlock_bh(&adapter->mcc_lock);
1105 /* Generic destroyer function for all types of queues; uses the mbox */
1108 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1111 struct be_mcc_wrb *wrb;
1112 struct be_cmd_req_q_destroy *req;
1113 u8 subsys = 0, opcode = 0;
1116 if (mutex_lock_interruptible(&adapter->mbox_lock))
1119 wrb = wrb_from_mbox(adapter);
1120 req = embedded_payload(wrb);
1122 switch (queue_type) {
1124 subsys = CMD_SUBSYSTEM_COMMON;
1125 opcode = OPCODE_COMMON_EQ_DESTROY;
1128 subsys = CMD_SUBSYSTEM_COMMON;
1129 opcode = OPCODE_COMMON_CQ_DESTROY;
1132 subsys = CMD_SUBSYSTEM_ETH;
1133 opcode = OPCODE_ETH_TX_DESTROY;
1136 subsys = CMD_SUBSYSTEM_ETH;
1137 opcode = OPCODE_ETH_RX_DESTROY;
1140 subsys = CMD_SUBSYSTEM_COMMON;
1141 opcode = OPCODE_COMMON_MCC_DESTROY;
1147 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1149 req->id = cpu_to_le16(q->id);
1151 status = be_mbox_notify_wait(adapter);
1155 mutex_unlock(&adapter->mbox_lock);
1160 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1162 struct be_mcc_wrb *wrb;
1163 struct be_cmd_req_q_destroy *req;
1166 spin_lock_bh(&adapter->mcc_lock);
1168 wrb = wrb_from_mccq(adapter);
1173 req = embedded_payload(wrb);
1175 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1176 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1177 req->id = cpu_to_le16(q->id);
1179 status = be_mcc_notify_wait(adapter);
1184 spin_unlock_bh(&adapter->mcc_lock);
1188 /* Create an RX filtering policy configuration on an interface; uses the MCCQ */
1191 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1192 u32 *if_handle, u32 domain)
1194 struct be_mcc_wrb *wrb;
1195 struct be_cmd_req_if_create *req;
1198 spin_lock_bh(&adapter->mcc_lock);
1200 wrb = wrb_from_mccq(adapter);
1205 req = embedded_payload(wrb);
1207 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1208 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1209 req->hdr.domain = domain;
1210 req->capability_flags = cpu_to_le32(cap_flags);
1211 req->enable_flags = cpu_to_le32(en_flags);
1213 req->pmac_invalid = true;
1215 status = be_mcc_notify_wait(adapter);
1217 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1218 *if_handle = le32_to_cpu(resp->interface_id);
1222 spin_unlock_bh(&adapter->mcc_lock);
1227 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1229 struct be_mcc_wrb *wrb;
1230 struct be_cmd_req_if_destroy *req;
1233 if (interface_id == -1)
1236 spin_lock_bh(&adapter->mcc_lock);
1238 wrb = wrb_from_mccq(adapter);
1243 req = embedded_payload(wrb);
1245 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1246 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1247 req->hdr.domain = domain;
1248 req->interface_id = cpu_to_le32(interface_id);
1250 status = be_mcc_notify_wait(adapter);
1252 spin_unlock_bh(&adapter->mcc_lock);
1256 /* Get stats is a non-embedded command: the request is not embedded inside the
1257 * WRB but is a separate DMA memory block.
1258 * Uses asynchronous MCC. */
1260 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1262 struct be_mcc_wrb *wrb;
1263 struct be_cmd_req_hdr *hdr;
1266 spin_lock_bh(&adapter->mcc_lock);
1268 wrb = wrb_from_mccq(adapter);
1273 hdr = nonemb_cmd->va;
1275 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1276 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1278 if (adapter->generation == BE_GEN3)
1281 be_mcc_notify(adapter);
1282 adapter->stats_cmd_sent = true;
1285 spin_unlock_bh(&adapter->mcc_lock);
1290 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1291 struct be_dma_mem *nonemb_cmd)
1294 struct be_mcc_wrb *wrb;
1295 struct lancer_cmd_req_pport_stats *req;
1298 spin_lock_bh(&adapter->mcc_lock);
1300 wrb = wrb_from_mccq(adapter);
1305 req = nonemb_cmd->va;
1307 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1308 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1311 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1312 req->cmd_params.params.reset_stats = 0;
1314 be_mcc_notify(adapter);
1315 adapter->stats_cmd_sent = true;
1318 spin_unlock_bh(&adapter->mcc_lock);
1322 /* Uses synchronous mcc */
1323 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1324 u16 *link_speed, u8 *link_status, u32 dom)
1326 struct be_mcc_wrb *wrb;
1327 struct be_cmd_req_link_status *req;
1330 spin_lock_bh(&adapter->mcc_lock);
1333 *link_status = LINK_DOWN;
1335 wrb = wrb_from_mccq(adapter);
1340 req = embedded_payload(wrb);
1342 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1343 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1345 if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
1346 req->hdr.version = 1;
1348 req->hdr.domain = dom;
1350 status = be_mcc_notify_wait(adapter);
1352 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1353 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1355 *link_speed = le16_to_cpu(resp->link_speed);
1357 *mac_speed = resp->mac_speed;
1360 *link_status = resp->logical_link_status;
1364 spin_unlock_bh(&adapter->mcc_lock);
1368 /* Uses synchronous mcc */
1369 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1371 struct be_mcc_wrb *wrb;
1372 struct be_cmd_req_get_cntl_addnl_attribs *req;
1375 spin_lock_bh(&adapter->mcc_lock);
1377 wrb = wrb_from_mccq(adapter);
1382 req = embedded_payload(wrb);
1384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1385 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1388 be_mcc_notify(adapter);
1391 spin_unlock_bh(&adapter->mcc_lock);
1395 /* Uses synchronous mcc */
1396 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1398 struct be_mcc_wrb *wrb;
1399 struct be_cmd_req_get_fat *req;
1402 spin_lock_bh(&adapter->mcc_lock);
1404 wrb = wrb_from_mccq(adapter);
1409 req = embedded_payload(wrb);
1411 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1412 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1413 req->fat_operation = cpu_to_le32(QUERY_FAT);
1414 status = be_mcc_notify_wait(adapter);
1416 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1417 if (log_size && resp->log_size)
1418 *log_size = le32_to_cpu(resp->log_size) -
1422 spin_unlock_bh(&adapter->mcc_lock);
1426 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1428 struct be_dma_mem get_fat_cmd;
1429 struct be_mcc_wrb *wrb;
1430 struct be_cmd_req_get_fat *req;
1431 u32 offset = 0, total_size, buf_size,
1432 log_offset = sizeof(u32), payload_len;
1438 total_size = buf_len;
1440 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1441 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1444 if (!get_fat_cmd.va) {
1446 dev_err(&adapter->pdev->dev,
1447 "Memory allocation failure while retrieving FAT data\n");
1451 spin_lock_bh(&adapter->mcc_lock);
1453 while (total_size) {
1454 buf_size = min(total_size, (u32)60*1024);
1455 total_size -= buf_size;
1457 wrb = wrb_from_mccq(adapter);
1462 req = get_fat_cmd.va;
1464 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1465 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1466 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1469 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1470 req->read_log_offset = cpu_to_le32(log_offset);
1471 req->read_log_length = cpu_to_le32(buf_size);
1472 req->data_buffer_size = cpu_to_le32(buf_size);
1474 status = be_mcc_notify_wait(adapter);
1476 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1477 memcpy(buf + offset,
1479 le32_to_cpu(resp->read_log_length));
1481 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1485 log_offset += buf_size;
1488 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1491 spin_unlock_bh(&adapter->mcc_lock);
1494 /* Uses synchronous mcc */
1495 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1498 struct be_mcc_wrb *wrb;
1499 struct be_cmd_req_get_fw_version *req;
1502 spin_lock_bh(&adapter->mcc_lock);
1504 wrb = wrb_from_mccq(adapter);
1510 req = embedded_payload(wrb);
1512 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1513 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1514 status = be_mcc_notify_wait(adapter);
1516 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1517 strcpy(fw_ver, resp->firmware_version_string);
1519 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1522 spin_unlock_bh(&adapter->mcc_lock);
1526 /* Set the delay interval of an EQ to the specified value; uses async MCC */
1529 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1531 struct be_mcc_wrb *wrb;
1532 struct be_cmd_req_modify_eq_delay *req;
1535 spin_lock_bh(&adapter->mcc_lock);
1537 wrb = wrb_from_mccq(adapter);
1542 req = embedded_payload(wrb);
1544 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1545 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1547 req->num_eq = cpu_to_le32(1);
1548 req->delay[0].eq_id = cpu_to_le32(eq_id);
1549 req->delay[0].phase = 0;
1550 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1552 be_mcc_notify(adapter);
1555 spin_unlock_bh(&adapter->mcc_lock);
1559 /* Uses synchronous mcc */
1560 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1561 u32 num, bool untagged, bool promiscuous)
1563 struct be_mcc_wrb *wrb;
1564 struct be_cmd_req_vlan_config *req;
1567 spin_lock_bh(&adapter->mcc_lock);
1569 wrb = wrb_from_mccq(adapter);
1574 req = embedded_payload(wrb);
1576 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1577 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1579 req->interface_id = if_id;
1580 req->promiscuous = promiscuous;
1581 req->untagged = untagged;
1582 req->num_vlan = num;
1584 memcpy(req->normal_vlan, vtag_array,
1585 req->num_vlan * sizeof(vtag_array[0]));
1588 status = be_mcc_notify_wait(adapter);
1591 spin_unlock_bh(&adapter->mcc_lock);
1595 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1597 struct be_mcc_wrb *wrb;
1598 struct be_dma_mem *mem = &adapter->rx_filter;
1599 struct be_cmd_req_rx_filter *req = mem->va;
1602 spin_lock_bh(&adapter->mcc_lock);
1604 wrb = wrb_from_mccq(adapter);
1609 memset(req, 0, sizeof(*req));
1610 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1611 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1614 req->if_id = cpu_to_le32(adapter->if_handle);
1615 if (flags & IFF_PROMISC) {
1616 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1617 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1619 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1620 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1621 } else if (flags & IFF_ALLMULTI) {
1622 req->if_flags_mask = req->if_flags =
1623 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1625 struct netdev_hw_addr *ha;
1628 req->if_flags_mask = req->if_flags =
1629 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1631 /* Reset mcast promisc mode if it was already set, by setting the mask
1632 * bit but leaving the flags field clear
1634 if (!lancer_chip(adapter) || be_physfn(adapter))
1635 req->if_flags_mask |=
1636 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1638 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1639 netdev_for_each_mc_addr(ha, adapter->netdev)
1640 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1643 status = be_mcc_notify_wait(adapter);
1645 spin_unlock_bh(&adapter->mcc_lock);
1649 /* Uses synchronous mcc */
1650 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1652 struct be_mcc_wrb *wrb;
1653 struct be_cmd_req_set_flow_control *req;
1656 spin_lock_bh(&adapter->mcc_lock);
1658 wrb = wrb_from_mccq(adapter);
1663 req = embedded_payload(wrb);
1665 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1666 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1668 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1669 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1671 status = be_mcc_notify_wait(adapter);
1674 spin_unlock_bh(&adapter->mcc_lock);
1679 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1681 struct be_mcc_wrb *wrb;
1682 struct be_cmd_req_get_flow_control *req;
1685 spin_lock_bh(&adapter->mcc_lock);
1687 wrb = wrb_from_mccq(adapter);
1692 req = embedded_payload(wrb);
1694 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1695 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1697 status = be_mcc_notify_wait(adapter);
1699 struct be_cmd_resp_get_flow_control *resp =
1700 embedded_payload(wrb);
1701 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1702 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1706 spin_unlock_bh(&adapter->mcc_lock);
1711 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1712 u32 *mode, u32 *caps)
1714 struct be_mcc_wrb *wrb;
1715 struct be_cmd_req_query_fw_cfg *req;
1718 if (mutex_lock_interruptible(&adapter->mbox_lock))
1721 wrb = wrb_from_mbox(adapter);
1722 req = embedded_payload(wrb);
1724 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1725 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1727 status = be_mbox_notify_wait(adapter);
1729 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1730 *port_num = le32_to_cpu(resp->phys_port);
1731 *mode = le32_to_cpu(resp->function_mode);
1732 *caps = le32_to_cpu(resp->function_caps);
1735 mutex_unlock(&adapter->mbox_lock);
1740 int be_cmd_reset_function(struct be_adapter *adapter)
1742 struct be_mcc_wrb *wrb;
1743 struct be_cmd_req_hdr *req;
1746 if (lancer_chip(adapter)) {
1747 status = lancer_wait_ready(adapter);
1749 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1750 adapter->db + SLIPORT_CONTROL_OFFSET);
1751 status = lancer_test_and_set_rdy_state(adapter);
1754 dev_err(&adapter->pdev->dev,
1755 "Adapter in non recoverable error\n");
1760 if (mutex_lock_interruptible(&adapter->mbox_lock))
1763 wrb = wrb_from_mbox(adapter);
1764 req = embedded_payload(wrb);
1766 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1767 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1769 status = be_mbox_notify_wait(adapter);
1771 mutex_unlock(&adapter->mbox_lock);
1775 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1777 struct be_mcc_wrb *wrb;
1778 struct be_cmd_req_rss_config *req;
1779 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1780 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1781 0x3ea83c02, 0x4a110304};
1784 if (mutex_lock_interruptible(&adapter->mbox_lock))
1787 wrb = wrb_from_mbox(adapter);
1788 req = embedded_payload(wrb);
1790 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1791 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1793 req->if_id = cpu_to_le32(adapter->if_handle);
1794 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1795 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1797 if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1798 req->hdr.version = 1;
1799 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1800 RSS_ENABLE_UDP_IPV6);
1803 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1804 memcpy(req->cpu_table, rsstable, table_size);
1805 memcpy(req->hash, myhash, sizeof(myhash));
1806 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1808 status = be_mbox_notify_wait(adapter);
1810 mutex_unlock(&adapter->mbox_lock);
1815 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1816 u8 bcn, u8 sts, u8 state)
1818 struct be_mcc_wrb *wrb;
1819 struct be_cmd_req_enable_disable_beacon *req;
1822 spin_lock_bh(&adapter->mcc_lock);
1824 wrb = wrb_from_mccq(adapter);
1829 req = embedded_payload(wrb);
1831 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1832 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1834 req->port_num = port_num;
1835 req->beacon_state = state;
1836 req->beacon_duration = bcn;
1837 req->status_duration = sts;
1839 status = be_mcc_notify_wait(adapter);
1842 spin_unlock_bh(&adapter->mcc_lock);
1847 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1849 struct be_mcc_wrb *wrb;
1850 struct be_cmd_req_get_beacon_state *req;
1853 spin_lock_bh(&adapter->mcc_lock);
1855 wrb = wrb_from_mccq(adapter);
1860 req = embedded_payload(wrb);
1862 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1863 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1865 req->port_num = port_num;
1867 status = be_mcc_notify_wait(adapter);
1869 struct be_cmd_resp_get_beacon_state *resp =
1870 embedded_payload(wrb);
1871 *state = resp->beacon_state;
1875 spin_unlock_bh(&adapter->mcc_lock);
1879 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1880 u32 data_size, u32 data_offset,
1881 const char *obj_name, u32 *data_written,
1882 u8 *change_status, u8 *addn_status)
1884 struct be_mcc_wrb *wrb;
1885 struct lancer_cmd_req_write_object *req;
1886 struct lancer_cmd_resp_write_object *resp;
1890 spin_lock_bh(&adapter->mcc_lock);
1891 adapter->flash_status = 0;
1893 wrb = wrb_from_mccq(adapter);
1899 req = embedded_payload(wrb);
1901 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1902 OPCODE_COMMON_WRITE_OBJECT,
1903 sizeof(struct lancer_cmd_req_write_object), wrb,
1906 ctxt = &req->context;
1907 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1908 write_length, ctxt, data_size);
1911 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1914 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1917 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1918 req->write_offset = cpu_to_le32(data_offset);
1919 strcpy(req->object_name, obj_name);
1920 req->descriptor_count = cpu_to_le32(1);
1921 req->buf_len = cpu_to_le32(data_size);
1922 req->addr_low = cpu_to_le32((cmd->dma +
1923 sizeof(struct lancer_cmd_req_write_object))
1925 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1926 sizeof(struct lancer_cmd_req_write_object)));
1928 be_mcc_notify(adapter);
1929 spin_unlock_bh(&adapter->mcc_lock);
1931 if (!wait_for_completion_timeout(&adapter->flash_compl,
1932 msecs_to_jiffies(30000)))
1935 status = adapter->flash_status;
1937 resp = embedded_payload(wrb);
1939 *data_written = le32_to_cpu(resp->actual_write_len);
1940 *change_status = resp->change_status;
1942 *addn_status = resp->additional_status;
1948 spin_unlock_bh(&adapter->mcc_lock);
1952 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1953 u32 data_size, u32 data_offset, const char *obj_name,
1954 u32 *data_read, u32 *eof, u8 *addn_status)
1956 struct be_mcc_wrb *wrb;
1957 struct lancer_cmd_req_read_object *req;
1958 struct lancer_cmd_resp_read_object *resp;
1961 spin_lock_bh(&adapter->mcc_lock);
1963 wrb = wrb_from_mccq(adapter);
1969 req = embedded_payload(wrb);
1971 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1972 OPCODE_COMMON_READ_OBJECT,
1973 sizeof(struct lancer_cmd_req_read_object), wrb,
1976 req->desired_read_len = cpu_to_le32(data_size);
1977 req->read_offset = cpu_to_le32(data_offset);
1978 strcpy(req->object_name, obj_name);
1979 req->descriptor_count = cpu_to_le32(1);
1980 req->buf_len = cpu_to_le32(data_size);
1981 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
1982 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
1984 status = be_mcc_notify_wait(adapter);
1986 resp = embedded_payload(wrb);
1988 *data_read = le32_to_cpu(resp->actual_read_len);
1989 *eof = le32_to_cpu(resp->eof);
1991 *addn_status = resp->additional_status;
1995 spin_unlock_bh(&adapter->mcc_lock);
1999 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2000 u32 flash_type, u32 flash_opcode, u32 buf_size)
2002 struct be_mcc_wrb *wrb;
2003 struct be_cmd_write_flashrom *req;
2006 spin_lock_bh(&adapter->mcc_lock);
2007 adapter->flash_status = 0;
2009 wrb = wrb_from_mccq(adapter);
2016 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2017 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2019 req->params.op_type = cpu_to_le32(flash_type);
2020 req->params.op_code = cpu_to_le32(flash_opcode);
2021 req->params.data_buf_size = cpu_to_le32(buf_size);
2023 be_mcc_notify(adapter);
2024 spin_unlock_bh(&adapter->mcc_lock);
2026 if (!wait_for_completion_timeout(&adapter->flash_compl,
2027 msecs_to_jiffies(40000)))
2030 status = adapter->flash_status;
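/* Unlike most commands in this file, WRITE_FLASHROM is not completed via
 * be_mcc_notify_wait(): be_mcc_compl_process() stores the status in
 * adapter->flash_status and signals flash_compl (see the handling near the
 * top of this file), which is what the 40-second wait above picks up.
 */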
2035 spin_unlock_bh(&adapter->mcc_lock);
2039 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2042 struct be_mcc_wrb *wrb;
2043 struct be_cmd_write_flashrom *req;
2046 spin_lock_bh(&adapter->mcc_lock);
2048 wrb = wrb_from_mccq(adapter);
2053 req = embedded_payload(wrb);
2055 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2056 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
2058 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2059 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2060 req->params.offset = cpu_to_le32(offset);
2061 req->params.data_buf_size = cpu_to_le32(0x4);
2063 status = be_mcc_notify_wait(adapter);
2065 memcpy(flashed_crc, req->params.data_buf, 4);
2068 spin_unlock_bh(&adapter->mcc_lock);
2072 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2073 struct be_dma_mem *nonemb_cmd)
2075 struct be_mcc_wrb *wrb;
2076 struct be_cmd_req_acpi_wol_magic_config *req;
2079 spin_lock_bh(&adapter->mcc_lock);
2081 wrb = wrb_from_mccq(adapter);
2086 req = nonemb_cmd->va;
2088 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2089 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2091 memcpy(req->magic_mac, mac, ETH_ALEN);
2093 status = be_mcc_notify_wait(adapter);
2096 spin_unlock_bh(&adapter->mcc_lock);
2100 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2101 u8 loopback_type, u8 enable)
2103 struct be_mcc_wrb *wrb;
2104 struct be_cmd_req_set_lmode *req;
2107 spin_lock_bh(&adapter->mcc_lock);
2109 wrb = wrb_from_mccq(adapter);
2115 req = embedded_payload(wrb);
2117 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2118 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2121 req->src_port = port_num;
2122 req->dest_port = port_num;
2123 req->loopback_type = loopback_type;
2124 req->loopback_state = enable;
2126 status = be_mcc_notify_wait(adapter);
2128 spin_unlock_bh(&adapter->mcc_lock);
2132 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2133 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2135 struct be_mcc_wrb *wrb;
2136 struct be_cmd_req_loopback_test *req;
2139 spin_lock_bh(&adapter->mcc_lock);
2141 wrb = wrb_from_mccq(adapter);
2147 req = embedded_payload(wrb);
2149 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2150 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2151 req->hdr.timeout = cpu_to_le32(4);
2153 req->pattern = cpu_to_le64(pattern);
2154 req->src_port = cpu_to_le32(port_num);
2155 req->dest_port = cpu_to_le32(port_num);
2156 req->pkt_size = cpu_to_le32(pkt_size);
2157 req->num_pkts = cpu_to_le32(num_pkts);
2158 req->loopback_type = cpu_to_le32(loopback_type);
2160 status = be_mcc_notify_wait(adapter);
2162 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2163 status = le32_to_cpu(resp->status);
2167 spin_unlock_bh(&adapter->mcc_lock);
2171 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2172 u32 byte_cnt, struct be_dma_mem *cmd)
2174 struct be_mcc_wrb *wrb;
2175 struct be_cmd_req_ddrdma_test *req;
2179 spin_lock_bh(&adapter->mcc_lock);
2181 wrb = wrb_from_mccq(adapter);
2187 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2188 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2190 req->pattern = cpu_to_le64(pattern);
2191 req->byte_count = cpu_to_le32(byte_cnt);
2192 for (i = 0; i < byte_cnt; i++) {
2193 req->snd_buff[i] = (u8)(pattern >> (j*8));
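/* The send buffer is filled by replaying the 64-bit pattern byte by byte;
 * j (declared in code elided here) presumably cycles 0..7 so that snd_buff[]
 * holds the pattern repeated across byte_cnt bytes, to be compared against
 * rcv_buff[] returned by the firmware below.
 */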
2199 status = be_mcc_notify_wait(adapter);
2202 struct be_cmd_resp_ddrdma_test *resp;
2204 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2211 spin_unlock_bh(&adapter->mcc_lock);
2215 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2216 struct be_dma_mem *nonemb_cmd)
2218 struct be_mcc_wrb *wrb;
2219 struct be_cmd_req_seeprom_read *req;
2223 spin_lock_bh(&adapter->mcc_lock);
2225 wrb = wrb_from_mccq(adapter);
2230 req = nonemb_cmd->va;
2231 sge = nonembedded_sgl(wrb);
2233 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2234 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2237 status = be_mcc_notify_wait(adapter);
2240 spin_unlock_bh(&adapter->mcc_lock);
2244 int be_cmd_get_phy_info(struct be_adapter *adapter)
2246 struct be_mcc_wrb *wrb;
2247 struct be_cmd_req_get_phy_info *req;
2248 struct be_dma_mem cmd;
2251 spin_lock_bh(&adapter->mcc_lock);
2253 wrb = wrb_from_mccq(adapter);
2258 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2259 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2262 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2269 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2270 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2273 status = be_mcc_notify_wait(adapter);
2275 struct be_phy_info *resp_phy_info =
2276 cmd.va + sizeof(struct be_cmd_req_hdr);
2277 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2278 adapter->phy.interface_type =
2279 le16_to_cpu(resp_phy_info->interface_type);
2280 adapter->phy.auto_speeds_supported =
2281 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2282 adapter->phy.fixed_speeds_supported =
2283 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2284 adapter->phy.misc_params =
2285 le32_to_cpu(resp_phy_info->misc_params);
2287 pci_free_consistent(adapter->pdev, cmd.size,
2290 spin_unlock_bh(&adapter->mcc_lock);
2294 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2296 struct be_mcc_wrb *wrb;
2297 struct be_cmd_req_set_qos *req;
2300 spin_lock_bh(&adapter->mcc_lock);
2302 wrb = wrb_from_mccq(adapter);
2308 req = embedded_payload(wrb);
2310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2311 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2313 req->hdr.domain = domain;
2314 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2315 req->max_bps_nic = cpu_to_le32(bps);
2317 status = be_mcc_notify_wait(adapter);
2320 spin_unlock_bh(&adapter->mcc_lock);
2324 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2326 struct be_mcc_wrb *wrb;
2327 struct be_cmd_req_cntl_attribs *req;
2328 struct be_cmd_resp_cntl_attribs *resp;
2330 int payload_len = max(sizeof(*req), sizeof(*resp));
2331 struct mgmt_controller_attrib *attribs;
2332 struct be_dma_mem attribs_cmd;
2334 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2335 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2336 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2338 if (!attribs_cmd.va) {
2339 dev_err(&adapter->pdev->dev,
2340 "Memory allocation failure\n");
2344 if (mutex_lock_interruptible(&adapter->mbox_lock))
2347 wrb = wrb_from_mbox(adapter);
2352 req = attribs_cmd.va;
2354 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2355 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2358 status = be_mbox_notify_wait(adapter);
2360 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2361 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2365 mutex_unlock(&adapter->mbox_lock);
2366 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2372 int be_cmd_req_native_mode(struct be_adapter *adapter)
2374 struct be_mcc_wrb *wrb;
2375 struct be_cmd_req_set_func_cap *req;
2378 if (mutex_lock_interruptible(&adapter->mbox_lock))
2381 wrb = wrb_from_mbox(adapter);
2387 req = embedded_payload(wrb);
2389 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2390 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2392 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2393 CAPABILITY_BE3_NATIVE_ERX_API);
2394 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2396 status = be_mbox_notify_wait(adapter);
2398 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2399 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2400 CAPABILITY_BE3_NATIVE_ERX_API;
2403 mutex_unlock(&adapter->mbox_lock);
2407 /* Uses synchronous MCCQ */
2408 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2409 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2411 struct be_mcc_wrb *wrb;
2412 struct be_cmd_req_get_mac_list *req;
2415 struct be_dma_mem get_mac_list_cmd;
2418 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2419 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2420 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2421 get_mac_list_cmd.size,
2422 &get_mac_list_cmd.dma);
2424 if (!get_mac_list_cmd.va) {
2425 dev_err(&adapter->pdev->dev,
2426 "Memory allocation failure during GET_MAC_LIST\n");
2430 spin_lock_bh(&adapter->mcc_lock);
2432 wrb = wrb_from_mccq(adapter);
2438 req = get_mac_list_cmd.va;
2440 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2441 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2442 wrb, &get_mac_list_cmd);
2444 req->hdr.domain = domain;
2445 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2446 req->perm_override = 1;
2448 status = be_mcc_notify_wait(adapter);
2450 struct be_cmd_resp_get_mac_list *resp =
2451 get_mac_list_cmd.va;
2452 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2453 /* The MAC list returned could contain one or more active mac_ids
2454 * or one or more true or pseudo permanent MAC addresses.
2455 * If an active mac_id is present, return the first active mac_id
2458 for (i = 0; i < mac_count; i++) {
2459 struct get_list_macaddr *mac_entry;
2463 mac_entry = &resp->macaddr_list[i];
2464 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2465 /* mac_id is a 32-bit value and the mac_addr size is 6 bytes */
2468 if (mac_addr_size == sizeof(u32)) {
2469 *pmac_id_active = true;
2470 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2471 *pmac_id = le32_to_cpu(mac_id);
2475 /* If no active mac_id is found, return the first MAC address */
2476 *pmac_id_active = false;
2477 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2482 spin_unlock_bh(&adapter->mcc_lock);
2483 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2484 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2488 /* Uses synchronous MCCQ */
2489 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2490 u8 mac_count, u32 domain)
2492 struct be_mcc_wrb *wrb;
2493 struct be_cmd_req_set_mac_list *req;
2495 struct be_dma_mem cmd;
2497 memset(&cmd, 0, sizeof(struct be_dma_mem));
2498 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2499 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2500 &cmd.dma, GFP_KERNEL);
2502 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2506 spin_lock_bh(&adapter->mcc_lock);
2508 wrb = wrb_from_mccq(adapter);
2515 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2516 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2519 req->hdr.domain = domain;
2520 req->mac_count = mac_count;
2522 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2524 status = be_mcc_notify_wait(adapter);
2527 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2529 spin_unlock_bh(&adapter->mcc_lock);
2533 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2534 u32 domain, u16 intf_id)
2536 struct be_mcc_wrb *wrb;
2537 struct be_cmd_req_set_hsw_config *req;
2541 spin_lock_bh(&adapter->mcc_lock);
2543 wrb = wrb_from_mccq(adapter);
2549 req = embedded_payload(wrb);
2550 ctxt = &req->context;
2552 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2553 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2555 req->hdr.domain = domain;
2556 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2558 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2559 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2562 be_dws_cpu_to_le(req->context, sizeof(req->context));
2563 status = be_mcc_notify_wait(adapter);
2566 spin_unlock_bh(&adapter->mcc_lock);
2570 /* Get Hyper switch config */
2571 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2572 u32 domain, u16 intf_id)
2574 struct be_mcc_wrb *wrb;
2575 struct be_cmd_req_get_hsw_config *req;
2580 spin_lock_bh(&adapter->mcc_lock);
2582 wrb = wrb_from_mccq(adapter);
2588 req = embedded_payload(wrb);
2589 ctxt = &req->context;
2591 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2592 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2594 req->hdr.domain = domain;
2595 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2597 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2598 be_dws_cpu_to_le(req->context, sizeof(req->context));
2600 status = be_mcc_notify_wait(adapter);
2602 struct be_cmd_resp_get_hsw_config *resp =
2603 embedded_payload(wrb);
2604 be_dws_le_to_cpu(&resp->context,
2605 sizeof(resp->context));
2606 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2607 pvid, &resp->context);
2608 *pvid = le16_to_cpu(vid);
2612 spin_unlock_bh(&adapter->mcc_lock);
2616 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2618 struct be_mcc_wrb *wrb;
2619 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2621 int payload_len = sizeof(*req);
2622 struct be_dma_mem cmd;
2624 memset(&cmd, 0, sizeof(struct be_dma_mem));
2625 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2626 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2629 dev_err(&adapter->pdev->dev,
2630 "Memory allocation failure\n");
2634 if (mutex_lock_interruptible(&adapter->mbox_lock))
2637 wrb = wrb_from_mbox(adapter);
2645 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2646 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2647 payload_len, wrb, &cmd);
2649 req->hdr.version = 1;
2650 req->query_options = BE_GET_WOL_CAP;
2652 status = be_mbox_notify_wait(adapter);
2654 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2655 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2657 /* The command could succeed misleadingly on old f/w
2658 * that is not aware of the V1 version; fake an error. */
2659 if (resp->hdr.response_length < payload_len) {
2663 adapter->wol_cap = resp->wol_settings;
2666 mutex_unlock(&adapter->mbox_lock);
2667 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2671 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2672 struct be_dma_mem *cmd)
2674 struct be_mcc_wrb *wrb;
2675 struct be_cmd_req_get_ext_fat_caps *req;
2678 if (mutex_lock_interruptible(&adapter->mbox_lock))
2681 wrb = wrb_from_mbox(adapter);
2688 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2689 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2690 cmd->size, wrb, cmd);
2691 req->parameter_type = cpu_to_le32(1);
2693 status = be_mbox_notify_wait(adapter);
2695 mutex_unlock(&adapter->mbox_lock);
2699 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2700 struct be_dma_mem *cmd,
2701 struct be_fat_conf_params *configs)
2703 struct be_mcc_wrb *wrb;
2704 struct be_cmd_req_set_ext_fat_caps *req;
2707 spin_lock_bh(&adapter->mcc_lock);
2709 wrb = wrb_from_mccq(adapter);
2716 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2717 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2718 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2719 cmd->size, wrb, cmd);
2721 status = be_mcc_notify_wait(adapter);
2723 spin_unlock_bh(&adapter->mcc_lock);
2727 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2729 struct be_mcc_wrb *wrb;
2730 struct be_cmd_req_get_port_name *req;
2733 if (!lancer_chip(adapter)) {
2734 *port_name = adapter->hba_port_num + '0';
2738 spin_lock_bh(&adapter->mcc_lock);
2740 wrb = wrb_from_mccq(adapter);
2746 req = embedded_payload(wrb);
2748 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2749 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2751 req->hdr.version = 1;
2753 status = be_mcc_notify_wait(adapter);
2755 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2756 *port_name = resp->port_name[adapter->hba_port_num];
2758 *port_name = adapter->hba_port_num + '0';
2761 spin_unlock_bh(&adapter->mcc_lock);
2765 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
2766 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
2768 struct be_adapter *adapter = netdev_priv(netdev_handle);
2769 struct be_mcc_wrb *wrb;
2770 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
2771 struct be_cmd_req_hdr *req;
2772 struct be_cmd_resp_hdr *resp;
2775 spin_lock_bh(&adapter->mcc_lock);
2777 wrb = wrb_from_mccq(adapter);
2782 req = embedded_payload(wrb);
2783 resp = embedded_payload(wrb);
2785 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
2786 hdr->opcode, wrb_payload_size, wrb, NULL);
2787 memcpy(req, wrb_payload, wrb_payload_size);
2788 be_dws_cpu_to_le(req, wrb_payload_size);
2790 status = be_mcc_notify_wait(adapter);
2792 *cmd_status = (status & 0xffff);
2795 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
2796 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
2798 spin_unlock_bh(&adapter->mcc_lock);
2801 EXPORT_SYMBOL(be_roce_mcc_cmd);