/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static void ioat_eh(struct ioatdma_chan *ioat_chan);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);

	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}

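/* Writing the updated descriptor count to the CHAN_DMACOUNT register is what
 * tells the hardware that the descriptors between 'issued' and 'head' are
 * ready to be fetched.  Callers hold prep_lock.
 */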
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

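/* Re-program the channel to resume from the current ring tail: any
 * descriptors still pending are re-issued, otherwise a NULL descriptor is
 * started so the channel has something valid to execute.
 */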
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

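/* ioat_tx_submit_unlock() drops the prep_lock that ioat_check_space_lock()
 * took when it reserved ring slots, so a prep routine plus the eventual
 * dmaengine_submit() call bracket one producer critical section.  A rough
 * sketch of that pattern (illustrative only, not the driver's actual prep
 * code; 'num_descs' and 'last_desc' are placeholders):
 *
 *	if (ioat_check_space_lock(ioat_chan, num_descs) == 0) {
 *		... fill num_descs entries starting at ioat_chan->head ...
 *		cookie = dmaengine_submit(&last_desc->txd);
 *	}
 */
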
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	dma_addr_t phys;

	ioat_dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc) {
		pci_pool_free(ioat_dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;

	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *ioat_dma;

	ioat_dma = to_ioatdma_device(chan->device);
	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat_cache, desc);
}

struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

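/* The software ring always holds a power-of-2 number of entries, and the
 * hardware descriptors are chained into a circle: the last descriptor's
 * 'next' pointer links back to the first, so the channel can keep running
 * the ring without further chain fixups until it is reshaped.
 */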
static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct dma_chan *c = &ioat_chan->dma_chan;
	const u32 curr_size = ioat_ring_size(ioat_chan);
	const u16 active = ioat_ring_active(ioat_chan);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u32 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);

			ring[new_idx] = ioat_chan->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);

			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat_chan->tail+i) &
						       (new_size-1);

					ioat_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
			struct ioat_ring_ent *next =
				ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);

			ring[new_idx] = ioat_chan->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
			ioat_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat_chan->ring);
	ioat_chan->ring = ring;
	ioat_chan->alloc_order = order;

	return true;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
{
	bool retry;

 retry:
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}

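/* XOR descriptors with more than 5 sources and PQ descriptors with more than
 * 3 sources spill their extra source addresses into a second hardware
 * descriptor, so such operations consume two ring slots.
 */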
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

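/* For PQ validate operations the hardware can write the validation outcome
 * back into the completed descriptor (descriptor write back error status,
 * DWBES); propagate those P/Q check bits into the caller's result word.
 */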
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}

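/* ioat_cleanup() is the bottom-half (tasklet) reclaim path: it advances the
 * ring tail based on the completion write back address and, if the channel
 * halted on a handled error class, defers to ioat_eh().
 */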
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

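/* Channel error handling: reclaim descriptors up to the fault, record
 * recoverable P/Q validation results, acknowledge the error registers,
 * mark the faulting descriptor complete and restart the channel.
 */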
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	} else { /* cleanup the faulty descriptor */
		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat_chan->alloc_order > ioat_get_alloc_order())
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

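/* Watchdog: runs from the channel timer to detect a stalled ring.  If no
 * completion progress has been made since a pending completion was last
 * acknowledged, the channel is forcibly restarted; an idle, oversized ring
 * is shrunk instead via check_active().
 */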
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&ioat_chan->cleanup_lock);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		ioat_restart_channel(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat_ring_active(ioat_chan))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

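/* ioat_tx_status() backs the dmaengine tx-status query: the first cookie
 * check avoids the reclaim cost when the transaction is already complete,
 * otherwise the ring is cleaned and the cookie re-checked.  A rough sketch
 * of how a dmaengine client would poll it (illustrative only, generic
 * dmaengine API, not part of this driver):
 *
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			!= DMA_COMPLETE)
 *		cpu_relax();
 */
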
static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int irq = pdev->irq, i;

	if (!is_bwd_ioat(pdev))
		return 0;

	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
			struct msix_entry *msix = &ioat_dma->msix_entries[i];
			struct ioatdma_chan *ioat_chan;

			ioat_chan = ioat_chan_by_index(ioat_dma, i);
			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
		}

		pci_disable_msix(pdev);
		break;
	case IOAT_MSI:
		pci_disable_msi(pdev);
		/* fall through */
	case IOAT_INTX:
		devm_free_irq(&pdev->dev, irq, ioat_dma);
		break;
	default:
		return 0;
	}
	ioat_dma->irq_mode = IOAT_NOIRQ;

	return ioat_dma_setup_interrupts(ioat_dma);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err)
		err = ioat_irq_reinit(ioat_dma);

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}