/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking Implementation (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done                 = 0x1;
        comp_done->fw_return_code       = fw_return_code;

        /* make update visible to waiting thread */
        smp_wmb();
}

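/* Wait for a completion written by qed_spq_blocking_cb(). Poll for up to
 * SPQ_BLOCK_SLEEP_LENGTH iterations of 5-10ms each (roughly 5-10 seconds);
 * if the ramrod still hasn't completed, request an MCP drain and poll once
 * more before giving up.
 */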
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* make sure we see the latest completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* make sure we see the latest completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

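        /* One last check, in case the completion arrived during the final
         * sleep of the retry loop above.
         */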
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                   struct qed_spq_entry *p_ent)
{
        p_ent->elem.hdr.echo = 0;
        p_hwfn->p_spq->echo_idx++;
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
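/* Program the connection context so firmware knows the SPQ and ConsQ ring
 * base addresses and which QM physical queue serves the core protocol.
 */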
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16                             pq;
        struct qed_cxt_info             cxt_info;
        struct core_conn_context        *p_cxt;
        union qed_qm_pq_params          pq_params;
        int                             rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        p_cxt->xstorm_st_context.consolid_base_addr.lo =
                DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.consolid_base_addr.hi =
                DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}

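/* Copy the entry's slow-path element into the chain and ring the XCM
 * doorbell so firmware picks up the new producer value.
 */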
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq,
                           struct qed_spq_entry *p_ent)
{
        struct qed_chain                *p_chain = &p_hwfn->p_spq->chain;
        struct slow_path_element        *elem;
        struct core_db_data             db;

        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* validate producer is up to date */
        rmb();

        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* do not reorder */
        barrier();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure doorbell is rung */
        mmiowb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
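/* No per-protocol async handlers are registered in this version, so any
 * asynchronous EQE is unexpected and reported as an error.
 */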
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        DP_NOTICE(p_hwfn,
                  "Unknown Async completion for protocol: %d\n",
                  p_eqe->protocol_id);
        return -EINVAL;
}

/***************************************************************************
* EQ API
***************************************************************************/
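/* Update the firmware's view of how far the driver has processed the EQ;
 * at the call site the chain's producer index reflects recycled entries.
 */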
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would be correct
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn,
                            qed_eq_completion,
                            p_eq,
                            &p_eq->eq_sb_index,
                            &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
        struct qed_hwfn *p_hwfn,
        struct eth_slow_path_rx_cqe *cqe,
        enum protocol_type protocol)
{
        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq          *p_spq  = p_hwfn->p_spq;
        struct qed_spq_entry    *p_virt = NULL;
        dma_addr_t              p_phys  = 0;
        unsigned int            i       = 0;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt  = p_spq->p_virt;

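        /* Point each entry's data_ptr at the ramrod buffer embedded in
         * that same entry; the entries sit in one DMA-coherent block, so
         * p_phys advances by sizeof(struct qed_spq_entry) each iteration.
         */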
        for (i = 0; i < p_spq->chain.capacity; i++) {
                p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
                p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count             = 0;
        p_spq->comp_count               = 0;
        p_spq->comp_sent_count          = 0;
        p_spq->unlimited_pending_count  = 0;
        p_spq->echo_idx                 = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq          *p_spq  = NULL;
        dma_addr_t              p_phys  = 0;
        struct qed_spq_entry    *p_virt = NULL;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    p_spq->chain.capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys,
                                    GFP_KERNEL);

        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (!p_spq)
                return;

        if (p_spq->p_virt)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_spq->chain.capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt,
                                  p_spq->p_phys);

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

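/* Hand out an entry from the free pool, or a kzalloc'd one destined for
 * the unlimited_pending list when the pool is exhausted.
 */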
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry *p_ent,
                  enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                struct qed_spq_entry *p_en2;

                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                }

                p_en2 = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_en2->list);

                /* Struct assignment: migrate the kzalloc'd entry into a
                 * pool-backed entry whose ramrod data is DMA-coherent.
                 */
                *p_en2 = *p_ent;

                kfree(p_ent);

                p_ent = p_en2;
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
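/* Post entries from 'head' to hardware, stopping once no more than
 * 'keep_reserve' ring elements remain free; the reserve keeps room for
 * high-priority ramrods.
 */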
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head,
                             u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

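/* Move entries from unlimited_pending back into the pending list while
 * free-pool entries are available, then post the pending list, keeping
 * the default high-priority reserve.
 */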
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry,
                                         list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; no need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

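/* Match an EQ completion (by echo) against the completion_pending list,
 * invoke the entry's callback, and then try to post any pending ramrods
 * that were waiting for ring space.
 */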
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq          *p_spq;
        struct qed_spq_entry    *p_ent = NULL;
        struct qed_spq_entry    *tmp;
        struct qed_spq_entry    *found = NULL;
        int                     rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
                                 list) {
                if (p_ent->elem.hdr.echo == echo) {
                        list_del(&p_ent->list);

                        qed_chain_return_produced(&p_spq->chain);
                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
                   found->comp_cb.function, found->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
                /* EBLOCK is responsible for freeing its own entry */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain; each element is 0x80 bytes */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80,
                            &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}