1 /******************************************************************************
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
7 * Portions of this file are derived from the ipw3945 project, as well
8 * as portions of the ieee80211 subsystem header files.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE.
26 * Contact Information:
27 * Intel Linux Wireless <linuxwifi@intel.com>
28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *****************************************************************************/
31 #include <linux/sched.h>
32 #include <linux/wait.h>
33 #include <linux/gfp.h>
38 #include "iwl-op-mode.h"
40 /******************************************************************************
44 ******************************************************************************/
47 * Rx theory of operation
49 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
50 * each of which point to Receive Buffers to be filled by the NIC. These get
51 * used not only for Rx frames, but for any command response or notification
52 * from the NIC. The driver and NIC manage the Rx buffers by means
53 * of indexes into the circular buffer.
56 * The host/firmware share two index registers for managing the Rx buffers.
58 * The READ index maps to the first position that the firmware may be writing
59 * to -- the driver can read up to (but not including) this position and get
61 * The READ index is managed by the firmware once the card is enabled.
63 * The WRITE index maps to the last position the driver has read from -- the
64 * position preceding WRITE is the last slot in which the firmware can place a packet.
66 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
69 * During initialization, the host sets up the READ queue position to the first
70 * INDEX position, and WRITE to the last (READ - 1 wrapped)
72 * When the firmware places a packet in a buffer, it will advance the READ index
73 * and fire the RX interrupt. The driver can then query the READ index and
74 * process as many packets as possible, moving the WRITE index forward as it
75 * resets the Rx queue buffers with new memory.
77 * The management in the driver is as follows:
78 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
79 * When the interrupt handler is called, the request is processed.
80 * The page is either stolen - transferred to the upper layer
81 * or reused - added immediately to the iwl->rxq->rx_free list.
82 * + When the page is stolen - the driver updates the matching queue's used
83 * count, detaches the RBD and transfers it to the queue used list.
84 * When there are two used RBDs - they are transferred to the allocator empty
85 * list. Work is then scheduled for the allocator to start allocating
87 * When there are another 6 used RBDs - they are transferred to the allocator
88 * empty list and the driver tries to claim the pre-allocated buffers and
89 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
91 * When there are 8+ buffers in the free list - either from allocation or from
92 * 8 reused unstolen pages - restock is called to update the FW and indexes.
93 * + In order to make sure the allocator always has RBDs to use for allocation,
94 * the allocator has an initial pool of size num_queues * (8 - 2) - the
95 * maximum number of missing RBDs per allocation request (a request is posted
96 * with 2 empty RBDs; there is no guarantee when the other 6 RBDs are supplied).
97 * The queues supply the recycling of the rest of the RBDs (worked example below).
98 * + A received packet is processed and handed to the kernel network stack,
99 * detached from the iwl->rxq. The driver 'processed' index is updated.
100 * + If there are no allocated buffers in iwl->rxq->rx_free,
101 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
102 * If there were enough free buffers and RX_STALLED is set it is cleared.
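 *
 * Worked example (illustrative only): with 2 RX queues the initial allocator
 * pool is 2 * (8 - 2) = 12 RBDs, which covers the worst case of both queues
 * having posted a request with only 2 empty RBDs each while the remaining
 * 6 per request have not yet been recycled back by the queues.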
107 * iwl_rxq_alloc() Allocates rx_free
108 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
109 * iwl_pcie_rxq_restock.
110 * Used only during initialization.
111 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
112 * queue, updates firmware pointers, and updates
114 * iwl_pcie_rx_allocator() Background work for allocating pages.
116 * -- enable interrupts --
117 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
118 * READ INDEX, detaching the SKB from the pool.
119 * Moves the packet buffer from queue to rx_used.
120 * Posts and claims requests to the allocator.
121 * Calls iwl_pcie_rxq_restock to refill any empty
127 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
129 * Regular Receive interrupt:
131 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
132 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
134 * rxq.queue -> rxq.rx_free -> rxq.queue
140 * iwl_rxq_space - Return number of free slots available in queue.
142 static int iwl_rxq_space(const struct iwl_rxq *rxq)
144 /* Make sure rx queue size is a power of 2 */
145 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
148 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
149 * between empty and completely full queues.
150 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
151 * defined for negative dividends.
153 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
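
/*
 * Worked example (illustrative only): with queue_size = 256, read = 10 and
 * write = 200, (10 - 200 - 1) & 255 = 65 free slots. Keeping one slot
 * permanently unused is what makes a full queue distinguishable from an
 * empty one.
 */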
157 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
159 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
161 return cpu_to_le32((u32)(dma_addr >> 8));
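
/*
 * Illustrative example: a page-aligned buffer at DMA address 0x12345000 is
 * handed to the uCode as 0x00123450, i.e. the address expressed in 256-byte
 * units.
 */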
165 * iwl_pcie_rx_stop - stops the Rx DMA
167 int iwl_pcie_rx_stop(struct iwl_trans *trans)
169 if (trans->cfg->mq_rx_supported) {
170 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
171 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
172 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
174 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
175 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
176 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
182 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
184 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
189 lockdep_assert_held(&rxq->lock);
192 * explicitly wake up the NIC if:
193 * 1. shadow registers aren't enabled
194 * 2. there is a chance that the NIC is asleep
196 if (!trans->cfg->base_params->shadow_reg_enable &&
197 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
198 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
200 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
201 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
203 iwl_set_bit(trans, CSR_GP_CNTRL,
204 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
205 rxq->need_update = true;
210 rxq->write_actual = round_down(rxq->write, 8);
211 if (trans->cfg->mq_rx_supported)
212 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
215 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
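
/*
 * Illustrative example: with rxq->write = 13 the device is told
 * write_actual = 8; the advertised pointer only moves again once write
 * reaches 16, so the hardware always sees it in multiples of 8.
 */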
218 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
220 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
223 for (i = 0; i < trans->num_rx_queues; i++) {
224 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
226 if (!rxq->need_update)
228 spin_lock(&rxq->lock);
229 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
230 rxq->need_update = false;
231 spin_unlock(&rxq->lock);
236 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
238 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
241 struct iwl_rx_mem_buffer *rxb;
244 * If the device isn't enabled - no need to try to add buffers...
245 * This can happen when we stop the device and still have an interrupt
246 * pending. We stop the APM before we sync the interrupts because we
247 * have to (see comment there). On the other hand, since the APM is
248 * stopped, we cannot access the HW (in particular not prph).
249 * So don't try to restock if the APM has been already stopped.
251 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
254 spin_lock(&rxq->lock);
255 while (rxq->free_count) {
256 __le64 *bd = (__le64 *)rxq->bd;
258 /* Get next free Rx buffer, remove from free list */
259 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
261 list_del(&rxb->list);
262 rxb->invalid = false;
263 /* The first 12 bits are expected to be empty */
264 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
265 /* Point to Rx buffer via next RBD in circular buffer */
266 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
267 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
270 spin_unlock(&rxq->lock);
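
/*
 * Illustrative example of the 64-bit descriptor built above: a page mapped at
 * DMA address 0x123456000 (4 KiB aligned, so the low 12 bits are zero)
 * combined with vid 0x005 gives bd[write] = 0x123456005.
 */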
273 * If we've added more space for the firmware to place data, tell it.
274 * Increment device's write pointer in multiples of 8.
276 if (rxq->write_actual != (rxq->write & ~0x7)) {
277 spin_lock(&rxq->lock);
278 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
279 spin_unlock(&rxq->lock);
284 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
286 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
289 struct iwl_rx_mem_buffer *rxb;
292 * If the device isn't enabled - no need to try to add buffers...
293 * This can happen when we stop the device and still have an interrupt
294 * pending. We stop the APM before we sync the interrupts because we
295 * have to (see comment there). On the other hand, since the APM is
296 * stopped, we cannot access the HW (in particular not prph).
297 * So don't try to restock if the APM has been already stopped.
299 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
302 spin_lock(&rxq->lock);
303 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
304 __le32 *bd = (__le32 *)rxq->bd;
305 /* The overwritten rxb must be a used one */
306 rxb = rxq->queue[rxq->write];
307 BUG_ON(rxb && rxb->page);
309 /* Get next free Rx buffer, remove from free list */
310 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
312 list_del(&rxb->list);
313 rxb->invalid = false;
315 /* Point to Rx buffer via next RBD in circular buffer */
316 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
317 rxq->queue[rxq->write] = rxb;
318 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
321 spin_unlock(&rxq->lock);
323 /* If we've added more space for the firmware to place data, tell it.
324 * Increment device's write pointer in multiples of 8. */
325 if (rxq->write_actual != (rxq->write & ~0x7)) {
326 spin_lock(&rxq->lock);
327 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
328 spin_unlock(&rxq->lock);
333 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
335 * If there are slots in the RX queue that need to be restocked,
336 * and we have free pre-allocated buffers, fill the ranks as much
337 * as we can, pulling from rx_free.
339 * This moves the 'write' index forward to catch up with 'processed', and
340 * also updates the memory address in the firmware to reference the new
344 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
346 if (trans->cfg->mq_rx_supported)
347 iwl_pcie_rxmq_restock(trans, rxq);
349 iwl_pcie_rxsq_restock(trans, rxq);
353 * iwl_pcie_rx_alloc_page - allocates and returns a page.
356 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
359 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
361 gfp_t gfp_mask = priority;
363 if (trans_pcie->rx_page_order > 0)
364 gfp_mask |= __GFP_COMP;
366 /* Alloc a new receive buffer */
367 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
370 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
371 trans_pcie->rx_page_order);
373 * Issue an error if we don't have enough pre-allocated
376 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
378 "Failed to alloc_pages\n");
385 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
387 * A used RBD is an Rx buffer that has been given to the stack. To use it again
388 * a page must be allocated and the RBD must point to the page. This function
389 * doesn't change the HW pointer but handles the list of pages that is used by
390 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
393 static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
396 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
397 struct iwl_rx_mem_buffer *rxb;
401 spin_lock(&rxq->lock);
402 if (list_empty(&rxq->rx_used)) {
403 spin_unlock(&rxq->lock);
406 spin_unlock(&rxq->lock);
408 /* Alloc a new receive buffer */
409 page = iwl_pcie_rx_alloc_page(trans, priority);
413 spin_lock(&rxq->lock);
415 if (list_empty(&rxq->rx_used)) {
416 spin_unlock(&rxq->lock);
417 __free_pages(page, trans_pcie->rx_page_order);
420 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
422 list_del(&rxb->list);
423 spin_unlock(&rxq->lock);
427 /* Get physical address of the RB */
429 dma_map_page(trans->dev, page, 0,
430 PAGE_SIZE << trans_pcie->rx_page_order,
432 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
434 spin_lock(&rxq->lock);
435 list_add(&rxb->list, &rxq->rx_used);
436 spin_unlock(&rxq->lock);
437 __free_pages(page, trans_pcie->rx_page_order);
441 spin_lock(&rxq->lock);
443 list_add_tail(&rxb->list, &rxq->rx_free);
446 spin_unlock(&rxq->lock);
450 static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
452 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
455 for (i = 0; i < RX_POOL_SIZE; i++) {
456 if (!trans_pcie->rx_pool[i].page)
458 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
459 PAGE_SIZE << trans_pcie->rx_page_order,
461 __free_pages(trans_pcie->rx_pool[i].page,
462 trans_pcie->rx_page_order);
463 trans_pcie->rx_pool[i].page = NULL;
468 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
470 * Allocates 8 pages for each received request
471 * Called as a scheduled work item.
473 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
475 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
476 struct iwl_rb_allocator *rba = &trans_pcie->rba;
477 struct list_head local_empty;
478 int pending = atomic_xchg(&rba->req_pending, 0);
480 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
482 /* If we were scheduled - there is at least one request */
483 spin_lock(&rba->lock);
484 /* swap out the rba->rbd_empty to a local list */
485 list_replace_init(&rba->rbd_empty, &local_empty);
486 spin_unlock(&rba->lock);
490 LIST_HEAD(local_allocated);
491 gfp_t gfp_mask = GFP_KERNEL;
493 /* Do not post a warning if there are only a few requests */
494 if (pending < RX_PENDING_WATERMARK)
495 gfp_mask |= __GFP_NOWARN;
497 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
498 struct iwl_rx_mem_buffer *rxb;
501 /* List should never be empty - each reused RBD is
502 * returned to the list, and initial pool covers any
503 * possible gap between the time the page is allocated
504 * and the time the RBD is added.
506 BUG_ON(list_empty(&local_empty));
507 /* Get the first rxb from the rbd list */
508 rxb = list_first_entry(&local_empty,
509 struct iwl_rx_mem_buffer, list);
512 /* Alloc a new receive buffer */
513 page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
518 /* Get physical address of the RB */
519 rxb->page_dma = dma_map_page(trans->dev, page, 0,
520 PAGE_SIZE << trans_pcie->rx_page_order,
522 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
524 __free_pages(page, trans_pcie->rx_page_order);
528 /* move the allocated entry to the out list */
529 list_move(&rxb->list, &local_allocated);
535 pending = atomic_xchg(&rba->req_pending, 0);
537 "Pending allocation requests = %d\n",
541 spin_lock(&rba->lock);
542 /* add the allocated rbds to the allocator allocated list */
543 list_splice_tail(&local_allocated, &rba->rbd_allocated);
544 /* get more empty RBDs for current pending requests */
545 list_splice_tail_init(&rba->rbd_empty, &local_empty);
546 spin_unlock(&rba->lock);
548 atomic_inc(&rba->req_ready);
551 spin_lock(&rba->lock);
552 /* return unused rbds to the allocator empty list */
553 list_splice_tail(&local_empty, &rba->rbd_empty);
554 spin_unlock(&rba->lock);
558 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
560 * Called by the queue when it has posted an allocation request and
561 * has freed 8 RBDs in order to restock itself.
562 * This function directly moves the allocated RBs to the queue's ownership
563 * and updates the relevant counters.
565 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
568 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
569 struct iwl_rb_allocator *rba = &trans_pcie->rba;
572 lockdep_assert_held(&rxq->lock);
575 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
576 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
577 * function will return early, as there are no ready requests.
578 * atomic_dec_if_positive will perform the *actual* decrement only if
579 * req_ready > 0, i.e. - there are ready requests and the function
580 * hands one request to the caller.
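 *
 * Illustrative example: req_ready == 0 -> the call returns -1 and leaves the
 * counter untouched; req_ready == 2 -> the call returns 1 and the counter is
 * decremented to 1, i.e. one ready request is consumed.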
582 if (atomic_dec_if_positive(&rba->req_ready) < 0)
585 spin_lock(&rba->lock);
586 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
587 /* Get next free Rx buffer, remove it from free list */
588 struct iwl_rx_mem_buffer *rxb =
589 list_first_entry(&rba->rbd_allocated,
590 struct iwl_rx_mem_buffer, list);
592 list_move(&rxb->list, &rxq->rx_free);
594 spin_unlock(&rba->lock);
596 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
597 rxq->free_count += RX_CLAIM_REQ_ALLOC;
600 static void iwl_pcie_rx_allocator_work(struct work_struct *data)
602 struct iwl_rb_allocator *rba_p =
603 container_of(data, struct iwl_rb_allocator, rx_alloc);
604 struct iwl_trans_pcie *trans_pcie =
605 container_of(rba_p, struct iwl_trans_pcie, rba);
607 iwl_pcie_rx_allocator(trans_pcie->trans);
610 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
612 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
613 struct iwl_rb_allocator *rba = &trans_pcie->rba;
614 struct device *dev = trans->dev;
616 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
619 if (WARN_ON(trans_pcie->rxq))
622 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
624 if (!trans_pcie->rxq)
627 spin_lock_init(&rba->lock);
629 for (i = 0; i < trans->num_rx_queues; i++) {
630 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
632 spin_lock_init(&rxq->lock);
633 if (trans->cfg->mq_rx_supported)
634 rxq->queue_size = MQ_RX_TABLE_SIZE;
636 rxq->queue_size = RX_QUEUE_SIZE;
639 * Allocate the circular buffer of Read Buffer Descriptors
642 rxq->bd = dma_zalloc_coherent(dev,
643 free_size * rxq->queue_size,
644 &rxq->bd_dma, GFP_KERNEL);
648 if (trans->cfg->mq_rx_supported) {
649 rxq->used_bd = dma_zalloc_coherent(dev,
658 /* Allocate the driver's pointer to receive buffer status */
659 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
668 for (i = 0; i < trans->num_rx_queues; i++) {
669 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
672 dma_free_coherent(dev, free_size * rxq->queue_size,
673 rxq->bd, rxq->bd_dma);
678 dma_free_coherent(trans->dev,
679 sizeof(struct iwl_rb_status),
680 rxq->rb_stts, rxq->rb_stts_dma);
683 dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
684 rxq->used_bd, rxq->used_bd_dma);
685 rxq->used_bd_dma = 0;
688 kfree(trans_pcie->rxq);
693 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
695 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
698 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
700 switch (trans_pcie->rx_buf_size) {
702 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
705 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
708 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
712 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
715 if (!iwl_trans_grab_nic_access(trans, &flags))
719 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
720 /* reset and flush pointers */
721 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
722 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
723 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
725 /* Reset driver's Rx queue write index */
726 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
728 /* Tell device where to find RBD circular buffer in DRAM */
729 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
730 (u32)(rxq->bd_dma >> 8));
732 /* Tell device where in DRAM to update its Rx status */
733 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
734 rxq->rb_stts_dma >> 4);
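
/*
 * Illustrative example: the RBD base is programmed in 256-byte units and the
 * status area in 16-byte units, so e.g. bd_dma = 0x7f3000 is written as
 * 0x7f30 and rb_stts_dma = 0x7f4000 as 0x7f400.
 */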
737 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
738 * the credit mechanism in 5000 HW RX FIFO
739 * Direct rx interrupts to host
740 * Rx buffer size 4k, 8k or 12k
744 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
745 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
746 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
747 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
749 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
750 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
752 iwl_trans_release_nic_access(trans, &flags);
754 /* Set interrupt coalescing timer to default (2048 usecs) */
755 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
757 /* W/A for interrupt coalescing bug in 7260 and 3160 */
758 if (trans->cfg->host_interrupt_operation_mode)
759 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
762 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
764 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
767 if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
770 if (!trans->cfg->integrated)
774 * Turn on the chicken-bits that cause MAC wakeup for RX-related
776 * This costs some power, but is needed as a W/A for a 9000 integrated A-step
777 * bug where shadow registers are not in the retention list and their
778 * value is lost when NIC powers down
780 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
781 CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
782 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
783 CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
786 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
788 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
789 u32 rb_size, enabled = 0;
793 switch (trans_pcie->rx_buf_size) {
795 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
798 rb_size = RFH_RXF_DMA_RB_SIZE_8K;
801 rb_size = RFH_RXF_DMA_RB_SIZE_12K;
805 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
808 if (!iwl_trans_grab_nic_access(trans, &flags))
812 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
813 /* disable free and used rx queue operation */
814 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
816 for (i = 0; i < trans->num_rx_queues; i++) {
817 /* Tell device where to find RBD free table in DRAM */
818 iwl_write_prph64_no_grab(trans,
819 RFH_Q_FRBDCB_BA_LSB(i),
820 trans_pcie->rxq[i].bd_dma);
821 /* Tell device where to find RBD used table in DRAM */
822 iwl_write_prph64_no_grab(trans,
823 RFH_Q_URBDCB_BA_LSB(i),
824 trans_pcie->rxq[i].used_bd_dma);
825 /* Tell device where in DRAM to update its Rx status */
826 iwl_write_prph64_no_grab(trans,
827 RFH_Q_URBD_STTS_WPTR_LSB(i),
828 trans_pcie->rxq[i].rb_stts_dma);
829 /* Reset device index tables */
830 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
831 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
832 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
834 enabled |= BIT(i) | BIT(i + 16);
839 * Rx buffer size 4k, 8k or 12k
841 * Drop frames that exceed RB size
844 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
845 RFH_DMA_EN_ENABLE_VAL | rb_size |
846 RFH_RXF_DMA_MIN_RB_4_8 |
847 RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
848 RFH_RXF_DMA_RBDCB_SIZE_512);
851 * Activate DMA snooping.
852 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
855 iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
856 RFH_GEN_CFG_RFH_DMA_SNOOP |
857 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
858 RFH_GEN_CFG_SERVICE_DMA_SNOOP |
859 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
860 trans->cfg->integrated ?
861 RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
862 RFH_GEN_CFG_RB_CHUNK_SIZE_128));
863 /* Enable the relevant rx queues */
864 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
866 iwl_trans_release_nic_access(trans, &flags);
868 /* Set interrupt coalescing timer to default (2048 usecs) */
869 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
871 iwl_pcie_enable_rx_wake(trans, true);
874 static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
876 lockdep_assert_held(&rxq->lock);
878 INIT_LIST_HEAD(&rxq->rx_free);
879 INIT_LIST_HEAD(&rxq->rx_used);
884 static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
890 static int _iwl_pcie_rx_init(struct iwl_trans *trans)
892 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
893 struct iwl_rxq *def_rxq;
894 struct iwl_rb_allocator *rba = &trans_pcie->rba;
895 int i, err, queue_size, allocator_pool_size, num_alloc;
897 if (!trans_pcie->rxq) {
898 err = iwl_pcie_rx_alloc(trans);
902 def_rxq = trans_pcie->rxq;
904 rba->alloc_wq = alloc_workqueue("rb_allocator",
905 WQ_HIGHPRI | WQ_UNBOUND, 1);
906 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
908 spin_lock(&rba->lock);
909 atomic_set(&rba->req_pending, 0);
910 atomic_set(&rba->req_ready, 0);
911 INIT_LIST_HEAD(&rba->rbd_allocated);
912 INIT_LIST_HEAD(&rba->rbd_empty);
913 spin_unlock(&rba->lock);
915 /* free all first - we might be reconfigured for a different size */
916 iwl_pcie_free_rbs_pool(trans);
918 for (i = 0; i < RX_QUEUE_SIZE; i++)
919 def_rxq->queue[i] = NULL;
921 for (i = 0; i < trans->num_rx_queues; i++) {
922 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
926 spin_lock(&rxq->lock);
928 * Set read write pointer to reflect that we have processed
929 * and used all buffers, but have not restocked the Rx queue
934 rxq->write_actual = 0;
935 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
937 iwl_pcie_rx_init_rxb_lists(rxq);
940 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
941 iwl_pcie_dummy_napi_poll, 64);
943 spin_unlock(&rxq->lock);
946 /* move the pool to the default queue and allocator ownerships */
947 queue_size = trans->cfg->mq_rx_supported ?
948 MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
949 allocator_pool_size = trans->num_rx_queues *
950 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
951 num_alloc = queue_size + allocator_pool_size;
952 BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
953 ARRAY_SIZE(trans_pcie->rx_pool));
954 for (i = 0; i < num_alloc; i++) {
955 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
957 if (i < allocator_pool_size)
958 list_add(&rxb->list, &rba->rbd_empty);
960 list_add(&rxb->list, &def_rxq->rx_used);
961 trans_pcie->global_table[i] = rxb;
962 rxb->vid = (u16)(i + 1);
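
/*
 * Note (assumption based on the lookup in the Rx handler below): vid starts
 * at 1 rather than 0 so that a zero value read back from a used descriptor
 * can be flagged as invalid; the handler dereferences global_table[vid - 1].
 */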
966 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
971 int iwl_pcie_rx_init(struct iwl_trans *trans)
973 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
974 int ret = _iwl_pcie_rx_init(trans);
979 if (trans->cfg->mq_rx_supported)
980 iwl_pcie_rx_mq_hw_init(trans);
982 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
984 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
986 spin_lock(&trans_pcie->rxq->lock);
987 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
988 spin_unlock(&trans_pcie->rxq->lock);
993 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
996 * We don't configure the RFH.
997 * Restock will be done at alive, after the firmware has configured the RFH.
999 return _iwl_pcie_rx_init(trans);
1002 void iwl_pcie_rx_free(struct iwl_trans *trans)
1004 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1005 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1006 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
1011 * if rxq is NULL, it means that nothing has been allocated,
1014 if (!trans_pcie->rxq) {
1015 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1019 cancel_work_sync(&rba->rx_alloc);
1020 if (rba->alloc_wq) {
1021 destroy_workqueue(rba->alloc_wq);
1022 rba->alloc_wq = NULL;
1025 iwl_pcie_free_rbs_pool(trans);
1027 for (i = 0; i < trans->num_rx_queues; i++) {
1028 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1031 dma_free_coherent(trans->dev,
1032 free_size * rxq->queue_size,
1033 rxq->bd, rxq->bd_dma);
1038 dma_free_coherent(trans->dev,
1039 sizeof(struct iwl_rb_status),
1040 rxq->rb_stts, rxq->rb_stts_dma);
1042 IWL_DEBUG_INFO(trans,
1043 "Free rxq->rb_stts which is NULL\n");
1046 dma_free_coherent(trans->dev,
1047 sizeof(__le32) * rxq->queue_size,
1048 rxq->used_bd, rxq->used_bd_dma);
1049 rxq->used_bd_dma = 0;
1050 rxq->used_bd = NULL;
1053 netif_napi_del(&rxq->napi);
1055 kfree(trans_pcie->rxq);
1059 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1061 * Called when an RBD can be reused. The RBD is transferred to the allocator.
1062 * When there are 2 empty RBDs - a request for allocation is posted
1064 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1065 struct iwl_rx_mem_buffer *rxb,
1066 struct iwl_rxq *rxq, bool emergency)
1068 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1069 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1071 /* Move the RBD to the used list; it will be moved to the allocator in batches
1072 * before claiming or posting a request */
1073 list_add_tail(&rxb->list, &rxq->rx_used);
1075 if (unlikely(emergency))
1078 /* Count the allocator owned RBDs */
1081 /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
1082 * issue a request to the allocator. Modulo RX_CLAIM_REQ_ALLOC is
1083 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
1084 * but still need to post another request.
1086 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1087 /* Move the 2 RBDs to the allocator's ownership.
1088 * The allocator has another 6 from the pool for the request completion */
1089 spin_lock(&rba->lock);
1090 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1091 spin_unlock(&rba->lock);
1093 atomic_inc(&rba->req_pending);
1094 queue_work(rba->alloc_wq, &rba->rx_alloc);
1098 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1099 struct iwl_rxq *rxq,
1100 struct iwl_rx_mem_buffer *rxb,
1103 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1104 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1105 bool page_stolen = false;
1106 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1112 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1114 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1115 struct iwl_rx_packet *pkt;
1118 int index, cmd_index, len;
1119 struct iwl_rx_cmd_buffer rxcb = {
1121 ._rx_page_order = trans_pcie->rx_page_order,
1123 ._page_stolen = false,
1124 .truesize = max_len,
1127 pkt = rxb_addr(&rxcb);
1129 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1131 "Q %d: RB end marker at offset %d\n",
1136 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1137 FH_RSCSR_RXQ_POS != rxq->id,
1138 "frame on invalid queue - is on %d and indicates %d\n",
1140 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1144 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1146 iwl_get_cmd_string(trans,
1147 iwl_cmd_id(pkt->hdr.cmd,
1150 pkt->hdr.group_id, pkt->hdr.cmd,
1151 le16_to_cpu(pkt->hdr.sequence));
1153 len = iwl_rx_packet_len(pkt);
1154 len += sizeof(u32); /* account for status word */
1155 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1156 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1158 /* Reclaim a command buffer only if this packet is a response
1159 * to a (driver-originated) command.
1160 * If the packet (e.g. Rx frame) originated from uCode,
1161 * there is no command buffer to reclaim.
1162 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1163 * but apparently a few don't get set; catch them here. */
1164 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1165 if (reclaim && !pkt->hdr.group_id) {
1168 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1169 if (trans_pcie->no_reclaim_cmds[i] ==
1177 sequence = le16_to_cpu(pkt->hdr.sequence);
1178 index = SEQ_TO_INDEX(sequence);
1179 cmd_index = get_cmd_index(txq, index);
1182 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1185 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1189 kzfree(txq->entries[cmd_index].free_buf);
1190 txq->entries[cmd_index].free_buf = NULL;
1194 * After here, we should always check rxcb._page_stolen,
1195 * if it is true then one of the handlers took the page.
1199 /* Invoke any callbacks, transfer the buffer to caller,
1200 * and fire off the (possibly) blocking
1201 * iwl_trans_send_cmd()
1202 * as we reclaim the driver command queue */
1203 if (!rxcb._page_stolen)
1204 iwl_pcie_hcmd_complete(trans, &rxcb);
1206 IWL_WARN(trans, "Claim null rxb?\n");
1209 page_stolen |= rxcb._page_stolen;
1210 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
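
/*
 * Illustrative example, assuming FH_RSCSR_FRAME_ALIGN is 0x40: a 100-byte
 * packet advances offset by ALIGN(100, 0x40) = 128, so every packet within
 * the receive buffer starts on a 64-byte boundary.
 */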
1213 /* page was stolen from us -- free our reference */
1215 __free_pages(rxb->page, trans_pcie->rx_page_order);
1219 /* Reuse the page if possible. For notification packets and
1220 * SKBs that fail to Rx correctly, add them back into the
1221 * rx_free list for reuse later. */
1222 if (rxb->page != NULL) {
1224 dma_map_page(trans->dev, rxb->page, 0,
1225 PAGE_SIZE << trans_pcie->rx_page_order,
1227 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1229 * free the page(s) as well to not break
1230 * the invariant that the items on the used
1231 * list have no page(s)
1233 __free_pages(rxb->page, trans_pcie->rx_page_order);
1235 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1237 list_add_tail(&rxb->list, &rxq->rx_free);
1241 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1245 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1247 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1249 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1250 struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
1251 u32 r, i, count = 0;
1252 bool emergency = false;
1255 spin_lock(&rxq->lock);
1256 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1257 * buffer that the driver may process (last buffer filled by ucode). */
1258 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1261 /* W/A 9000 device step A0 wrap-around bug */
1262 r &= (rxq->queue_size - 1);
1264 /* Rx interrupt, but nothing sent from uCode */
1266 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1269 struct iwl_rx_mem_buffer *rxb;
1271 if (unlikely(rxq->used_count == rxq->queue_size / 2))
1274 if (trans->cfg->mq_rx_supported) {
1276 * used_bd is a 32-bit value but only 12 bits are used to retrieve
1279 u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
1282 vid > ARRAY_SIZE(trans_pcie->global_table),
1283 "Invalid rxb index from HW %u\n", (u32)vid)) {
1284 iwl_force_nmi(trans);
1287 rxb = trans_pcie->global_table[vid - 1];
1288 if (WARN(rxb->invalid,
1289 "Invalid rxb from HW %u\n", (u32)vid)) {
1290 iwl_force_nmi(trans);
1293 rxb->invalid = true;
1295 rxb = rxq->queue[i];
1296 rxq->queue[i] = NULL;
1299 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1300 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1302 i = (i + 1) & (rxq->queue_size - 1);
1305 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1306 * try to claim the pre-allocated buffers from the allocator.
1307 * If not ready - will try to reclaim next time.
1308 * There is no need to reschedule work - allocator exits only
1311 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1312 iwl_pcie_rx_allocator_get(trans, rxq);
1314 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1315 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1317 /* Add the remaining empty RBDs for allocator use */
1318 spin_lock(&rba->lock);
1319 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1320 spin_unlock(&rba->lock);
1321 } else if (emergency) {
1325 if (rxq->used_count < rxq->queue_size / 3)
1329 spin_unlock(&rxq->lock);
1330 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1331 iwl_pcie_rxq_restock(trans, rxq);
1337 /* Backtrack one entry */
1339 spin_unlock(&rxq->lock);
1342 * Handle a case where in emergency there are some unallocated RBDs.
1343 * Those RBDs are in the used list, but are not tracked by the queue's
1344 * used_count, which counts allocator-owned RBDs.
1345 * Unallocated emergency RBDs must be allocated on exit, otherwise
1346 * when called again the function may not be in emergency mode and
1347 * they will be handed to the allocator with no tracking in the RBD
1348 * allocator counters, which will lead to them never being claimed back
1350 * By allocating them here, they are now in the queue free list, and
1351 * will be restocked by the next call of iwl_pcie_rxq_restock.
1353 if (unlikely(emergency && count))
1354 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1357 napi_gro_flush(&rxq->napi, false);
1359 iwl_pcie_rxq_restock(trans, rxq);
1362 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1364 u8 queue = entry->entry;
1365 struct msix_entry *entries = entry - queue;
1367 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1370 static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
1371 struct msix_entry *entry)
1374 * Before sending the interrupt the HW disables it to prevent
1375 * a nested interrupt. This is done by writing 1 to the corresponding
1376 * bit in the mask register. After handling the interrupt, it should be
1377 * re-enabled by clearing this bit. This register is defined as a
1378 * write 1 clear (W1C) register, meaning that it is cleared
1379 * by writing 1 to the bit.
1381 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
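
/*
 * Illustrative example: for MSI-X entry 3 this writes BIT(3) = 0x8, which
 * clears only that entry's automask bit and leaves the other bits untouched,
 * since the register is write-1-to-clear.
 */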
1385 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1386 * This interrupt handler should be used with RSS queue only.
1388 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1390 struct msix_entry *entry = dev_id;
1391 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1392 struct iwl_trans *trans = trans_pcie->trans;
1394 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1396 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1399 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1402 iwl_pcie_rx_handle(trans, entry->entry);
1405 iwl_pcie_clear_irq(trans, entry);
1407 lock_map_release(&trans->sync_cmd_lockdep_map);
1413 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1415 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1417 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1420 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1421 if (trans->cfg->internal_wimax_coex &&
1422 !trans->cfg->apmg_not_supported &&
1423 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1424 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1425 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1426 APMG_PS_CTRL_VAL_RESET_REQ))) {
1427 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1428 iwl_op_mode_wimax_active(trans->op_mode);
1429 wake_up(&trans_pcie->wait_command_queue);
1433 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1434 if (!trans_pcie->txq[i])
1436 del_timer(&trans_pcie->txq[i]->stuck_timer);
1439 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1440 * before we wake up the command caller, to ensure a proper cleanup. */
1441 iwl_trans_fw_error(trans);
1443 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1444 wake_up(&trans_pcie->wait_command_queue);
1447 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1451 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1453 trace_iwlwifi_dev_irq(trans->dev);
1455 /* Discover which interrupts are active/pending */
1456 inta = iwl_read32(trans, CSR_INT);
1458 /* the thread will service interrupts and re-enable them */
1462 /* a device (PCI-E) page is 4096 bytes long */
1463 #define ICT_SHIFT 12
1464 #define ICT_SIZE (1 << ICT_SHIFT)
1465 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
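
/*
 * With ICT_SHIFT = 12 this gives ICT_SIZE = 4096 bytes (one device page) and
 * ICT_COUNT = 4096 / sizeof(u32) = 1024 table entries.
 */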
1467 /* Interrupt handler using the ICT table. With this, the driver will
1468 * stop using the INTA register to get the device's interrupts, since reading
1469 * that register is expensive. The device writes interrupts into the ICT DRAM
1470 * table and increments its index, then fires an interrupt to the driver. The
1471 * driver ORs all ICT table entries from the current index up to the first
1472 * entry with value 0; the result is the interrupt to service. The driver will then set the entries back to 0 and
1475 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1477 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1482 trace_iwlwifi_dev_irq(trans->dev);
1484 /* Ignore interrupt if there's nothing in NIC to service.
1485 * This may be due to IRQ shared with another device,
1486 * or due to sporadic interrupts thrown from our NIC. */
1487 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1488 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1493 * Collect all entries up to the first 0, starting from ict_index;
1494 * note we already read at ict_index.
1498 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1499 trans_pcie->ict_index, read);
1500 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1501 trans_pcie->ict_index =
1502 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1504 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1505 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1509 /* We should not get this value, just ignore it. */
1510 if (val == 0xffffffff)
1514 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1515 * (bit 15 before shifting it to 31) to clear when using interrupt
1516 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1517 * so we use them to decide on the real state of the Rx bit.
1518 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1523 inta = (0xff & val) | ((0xff00 & val) << 16);
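
/*
 * Illustrative example: a raw ICT value of 0x8342 expands to
 * (0x42) | (0x8300 << 16) = 0x83000042, i.e. the upper byte of the 16-bit
 * ICT value is moved back up to bits 24-31 of the CSR_INT layout.
 */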
1527 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1529 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1530 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1531 bool hw_rfkill, prev, report;
1533 mutex_lock(&trans_pcie->mutex);
1534 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1535 hw_rfkill = iwl_is_rfkill_set(trans);
1537 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1538 set_bit(STATUS_RFKILL_HW, &trans->status);
1540 if (trans_pcie->opmode_down)
1543 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1545 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1546 hw_rfkill ? "disable radio" : "enable radio");
1548 isr_stats->rfkill++;
1551 iwl_trans_pcie_rf_kill(trans, report);
1552 mutex_unlock(&trans_pcie->mutex);
1555 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1557 IWL_DEBUG_RF_KILL(trans,
1558 "Rfkill while SYNC HCMD in flight\n");
1559 wake_up(&trans_pcie->wait_command_queue);
1561 clear_bit(STATUS_RFKILL_HW, &trans->status);
1562 if (trans_pcie->opmode_down)
1563 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1567 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1569 struct iwl_trans *trans = dev_id;
1570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1571 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1575 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1577 spin_lock(&trans_pcie->irq_lock);
1579 /* dram interrupt table not set yet,
1580 * use legacy interrupt.
1582 if (likely(trans_pcie->use_ict))
1583 inta = iwl_pcie_int_cause_ict(trans);
1585 inta = iwl_pcie_int_cause_non_ict(trans);
1587 if (iwl_have_debug_level(IWL_DL_ISR)) {
1588 IWL_DEBUG_ISR(trans,
1589 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1590 inta, trans_pcie->inta_mask,
1591 iwl_read32(trans, CSR_INT_MASK),
1592 iwl_read32(trans, CSR_FH_INT_STATUS));
1593 if (inta & (~trans_pcie->inta_mask))
1594 IWL_DEBUG_ISR(trans,
1595 "We got a masked interrupt (0x%08x)\n",
1596 inta & (~trans_pcie->inta_mask));
1599 inta &= trans_pcie->inta_mask;
1602 * Ignore interrupt if there's nothing in NIC to service.
1603 * This may be due to IRQ shared with another device,
1604 * or due to sporadic interrupts thrown from our NIC.
1606 if (unlikely(!inta)) {
1607 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1609 * Re-enable interrupts here since we don't
1610 * have anything to service
1612 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1613 _iwl_enable_interrupts(trans);
1614 spin_unlock(&trans_pcie->irq_lock);
1615 lock_map_release(&trans->sync_cmd_lockdep_map);
1619 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1621 * Hardware disappeared. It might have
1622 * already raised an interrupt.
1624 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1625 spin_unlock(&trans_pcie->irq_lock);
1629 /* Ack/clear/reset pending uCode interrupts.
1630 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1632 /* There is a hardware bug in the interrupt mask function such that some
1633 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1634 * they are disabled in the CSR_INT_MASK register. Furthermore the
1635 * ICT interrupt handling mechanism has another bug that might cause
1636 * these unmasked interrupts to fail to be detected. We work around the
1637 * hardware bugs here by ACKing all the possible interrupts so that
1638 * interrupt coalescing can still be achieved.
1640 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1642 if (iwl_have_debug_level(IWL_DL_ISR))
1643 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1644 inta, iwl_read32(trans, CSR_INT_MASK));
1646 spin_unlock(&trans_pcie->irq_lock);
1648 /* Now service all interrupt bits discovered above. */
1649 if (inta & CSR_INT_BIT_HW_ERR) {
1650 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1652 /* Tell the device to stop sending interrupts */
1653 iwl_disable_interrupts(trans);
1656 iwl_pcie_irq_handle_error(trans);
1658 handled |= CSR_INT_BIT_HW_ERR;
1663 if (iwl_have_debug_level(IWL_DL_ISR)) {
1664 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1665 if (inta & CSR_INT_BIT_SCD) {
1666 IWL_DEBUG_ISR(trans,
1667 "Scheduler finished to transmit the frame/frames.\n");
1671 /* Alive notification via Rx interrupt will do the real work */
1672 if (inta & CSR_INT_BIT_ALIVE) {
1673 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1675 if (trans->cfg->gen2) {
1677 * We can restock, since firmware configured
1680 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1685 /* Safely ignore these bits for debug checks below */
1686 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1688 /* HW RF KILL switch toggled */
1689 if (inta & CSR_INT_BIT_RF_KILL) {
1690 iwl_pcie_handle_rfkill_irq(trans);
1691 handled |= CSR_INT_BIT_RF_KILL;
1694 /* Chip got too hot and stopped itself */
1695 if (inta & CSR_INT_BIT_CT_KILL) {
1696 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1697 isr_stats->ctkill++;
1698 handled |= CSR_INT_BIT_CT_KILL;
1701 /* Error detected by uCode */
1702 if (inta & CSR_INT_BIT_SW_ERR) {
1703 IWL_ERR(trans, "Microcode SW error detected. "
1704 "Restarting 0x%X.\n", inta);
1706 iwl_pcie_irq_handle_error(trans);
1707 handled |= CSR_INT_BIT_SW_ERR;
1710 /* uCode wakes up after power-down sleep */
1711 if (inta & CSR_INT_BIT_WAKEUP) {
1712 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1713 iwl_pcie_rxq_check_wrptr(trans);
1714 iwl_pcie_txq_check_wrptrs(trans);
1716 isr_stats->wakeup++;
1718 handled |= CSR_INT_BIT_WAKEUP;
1721 /* All uCode command responses, including Tx command responses,
1722 * Rx "responses" (frame-received notification), and other
1723 * notifications from uCode come through here */
1724 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1725 CSR_INT_BIT_RX_PERIODIC)) {
1726 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1727 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1728 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1729 iwl_write32(trans, CSR_FH_INT_STATUS,
1730 CSR_FH_INT_RX_MASK);
1732 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1733 handled |= CSR_INT_BIT_RX_PERIODIC;
1735 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1737 /* Sending an RX interrupt requires many steps to be done in the
1739 * 1- write interrupt to current index in ICT table.
1741 * 3- update RX shared data to indicate last write index.
1742 * 4- send interrupt.
1743 * This could lead to an RX race: the driver could receive an RX interrupt
1744 * but the shared data changes do not reflect this;
1745 * periodic interrupt will detect any dangling Rx activity.
1748 /* Disable periodic interrupt; we use it as just a one-shot. */
1749 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1750 CSR_INT_PERIODIC_DIS);
1753 * Enable periodic interrupt in 8 msec only if we received
1754 * real RX interrupt (instead of just periodic int), to catch
1755 * any dangling Rx interrupt. If it was just the periodic
1756 * interrupt, there was no dangling Rx activity, and no need
1757 * to extend the periodic interrupt; one-shot is enough.
1759 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1760 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1761 CSR_INT_PERIODIC_ENA);
1766 iwl_pcie_rx_handle(trans, 0);
1770 /* This "Tx" DMA channel is used only for loading uCode */
1771 if (inta & CSR_INT_BIT_FH_TX) {
1772 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1773 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1775 handled |= CSR_INT_BIT_FH_TX;
1776 /* Wake up uCode load routine, now that load is complete */
1777 trans_pcie->ucode_write_complete = true;
1778 wake_up(&trans_pcie->ucode_write_waitq);
1781 if (inta & ~handled) {
1782 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1783 isr_stats->unhandled++;
1786 if (inta & ~(trans_pcie->inta_mask)) {
1787 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1788 inta & ~trans_pcie->inta_mask);
1791 spin_lock(&trans_pcie->irq_lock);
1792 /* only re-enable all interrupts if disabled by irq */
1793 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1794 _iwl_enable_interrupts(trans);
1795 /* we are loading the firmware, enable FH_TX interrupt only */
1796 else if (handled & CSR_INT_BIT_FH_TX)
1797 iwl_enable_fw_load_int(trans);
1798 /* Re-enable RF_KILL if it occurred */
1799 else if (handled & CSR_INT_BIT_RF_KILL)
1800 iwl_enable_rfkill_int(trans);
1801 spin_unlock(&trans_pcie->irq_lock);
1804 lock_map_release(&trans->sync_cmd_lockdep_map);
1808 /******************************************************************************
1812 ******************************************************************************/
1814 /* Free dram table */
1815 void iwl_pcie_free_ict(struct iwl_trans *trans)
1817 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1819 if (trans_pcie->ict_tbl) {
1820 dma_free_coherent(trans->dev, ICT_SIZE,
1821 trans_pcie->ict_tbl,
1822 trans_pcie->ict_tbl_dma);
1823 trans_pcie->ict_tbl = NULL;
1824 trans_pcie->ict_tbl_dma = 0;
1829 * Allocate the DRAM shared table; it is an aligned memory
1830 * block of ICT_SIZE.
1831 * Also reset all data related to the ICT table interrupt.
1833 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1835 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1837 trans_pcie->ict_tbl =
1838 dma_zalloc_coherent(trans->dev, ICT_SIZE,
1839 &trans_pcie->ict_tbl_dma,
1841 if (!trans_pcie->ict_tbl)
1844 /* just an API sanity check ... it is guaranteed to be aligned */
1845 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1846 iwl_pcie_free_ict(trans);
1853 /* Device is going up - inform it about using the ICT interrupt table,
1854 * also we need to tell the driver to start using the ICT interrupt.
1856 void iwl_pcie_reset_ict(struct iwl_trans *trans)
1858 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1861 if (!trans_pcie->ict_tbl)
1864 spin_lock(&trans_pcie->irq_lock);
1865 _iwl_disable_interrupts(trans);
1867 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1869 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1871 val |= CSR_DRAM_INT_TBL_ENABLE |
1872 CSR_DRAM_INIT_TBL_WRAP_CHECK |
1873 CSR_DRAM_INIT_TBL_WRITE_POINTER;
1875 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1877 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1878 trans_pcie->use_ict = true;
1879 trans_pcie->ict_index = 0;
1880 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1881 _iwl_enable_interrupts(trans);
1882 spin_unlock(&trans_pcie->irq_lock);
1885 /* Device is going down - disable ICT interrupt usage */
1886 void iwl_pcie_disable_ict(struct iwl_trans *trans)
1888 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1890 spin_lock(&trans_pcie->irq_lock);
1891 trans_pcie->use_ict = false;
1892 spin_unlock(&trans_pcie->irq_lock);
1895 irqreturn_t iwl_pcie_isr(int irq, void *data)
1897 struct iwl_trans *trans = data;
1902 /* Disable (but don't clear!) interrupts here to avoid
1903 * back-to-back ISRs and sporadic interrupts from our NIC.
1904 * If we have something to service, the tasklet will re-enable ints.
1905 * If we *don't* have something, we'll re-enable before leaving here.
1907 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1909 return IRQ_WAKE_THREAD;
1912 irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
1914 return IRQ_WAKE_THREAD;
1917 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
1919 struct msix_entry *entry = dev_id;
1920 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1921 struct iwl_trans *trans = trans_pcie->trans;
1922 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1923 u32 inta_fh, inta_hw;
1925 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1927 spin_lock(&trans_pcie->irq_lock);
1928 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
1929 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
1931 * Clear the cause registers to avoid handling the same cause twice.
1933 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
1934 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
1935 spin_unlock(&trans_pcie->irq_lock);
1937 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
1939 if (unlikely(!(inta_fh | inta_hw))) {
1940 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1941 lock_map_release(&trans->sync_cmd_lockdep_map);
1945 if (iwl_have_debug_level(IWL_DL_ISR))
1946 IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
1948 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
1950 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
1951 inta_fh & MSIX_FH_INT_CAUSES_Q0) {
1953 iwl_pcie_rx_handle(trans, 0);
1957 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
1958 inta_fh & MSIX_FH_INT_CAUSES_Q1) {
1960 iwl_pcie_rx_handle(trans, 1);
1964 /* This "Tx" DMA channel is used only for loading uCode */
1965 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
1966 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1969 * Wake up uCode load routine,
1970 * now that load is complete
1972 trans_pcie->ucode_write_complete = true;
1973 wake_up(&trans_pcie->ucode_write_waitq);
1976 /* Error detected by uCode */
1977 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
1978 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
1980 "Microcode SW error detected. Restarting 0x%X.\n",
1983 iwl_pcie_irq_handle_error(trans);
1986 /* After checking FH register check HW register */
1987 if (iwl_have_debug_level(IWL_DL_ISR))
1988 IWL_DEBUG_ISR(trans,
1989 "ISR inta_hw 0x%08x, enabled 0x%08x\n",
1991 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
1993 /* Alive notification via Rx interrupt will do the real work */
1994 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
1995 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1997 if (trans->cfg->gen2) {
1998 /* We can restock, since firmware configured the RFH */
1999 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2003 /* uCode wakes up after power-down sleep */
2004 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
2005 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2006 iwl_pcie_rxq_check_wrptr(trans);
2007 iwl_pcie_txq_check_wrptrs(trans);
2009 isr_stats->wakeup++;
2012 /* Chip got too hot and stopped itself */
2013 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2014 IWL_ERR(trans, "Microcode CT kill error detected.\n");
2015 isr_stats->ctkill++;
2018 /* HW RF KILL switch toggled */
2019 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2020 iwl_pcie_handle_rfkill_irq(trans);
2022 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2024 "Hardware error detected. Restarting.\n");
2027 iwl_pcie_irq_handle_error(trans);
2030 iwl_pcie_clear_irq(trans, entry);
2032 lock_map_release(&trans->sync_cmd_lockdep_map);