/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memcpy.
 */
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/workqueue.h>
36 #include <linux/i7300_idle.h>
38 #include "registers.h"
41 static int ioat_pending_level = 4;
42 module_param(ioat_pending_level, int, 0644);
43 MODULE_PARM_DESC(ioat_pending_level,
44 "high-water mark for pushing ioat descriptors (default: 4)");
46 static void ioat_dma_chan_reset_part2(struct work_struct *work);
47 static void ioat_dma_chan_watchdog(struct work_struct *work);
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
	return device->idx[index];
}
65 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
67 * @data: interrupt data
69 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
71 struct ioatdma_device *instance = data;
72 struct ioat_dma_chan *ioat_chan;
73 unsigned long attnstatus;
77 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
79 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
82 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
83 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
87 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
88 for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
89 ioat_chan = ioat_chan_by_index(instance, bit);
90 tasklet_schedule(&ioat_chan->cleanup_task);
93 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
98 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
100 * @data: interrupt data
102 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
104 struct ioat_dma_chan *ioat_chan = data;
106 tasklet_schedule(&ioat_chan->cleanup_task);
111 static void ioat_dma_cleanup_tasklet(unsigned long data);
114 * ioat_dma_enumerate_channels - find and initialize the device's channels
115 * @device: the device to be enumerated
117 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
122 struct ioat_dma_chan *ioat_chan;
123 struct device *dev = &device->pdev->dev;
124 struct dma_device *dma = &device->common;
126 INIT_LIST_HEAD(&dma->channels);
127 dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
128 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
129 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
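	/*
	 * XFERCAP holds a power-of-two scale for the largest transfer a
	 * single descriptor can carry; a scale of 0 means "unlimited" and is
	 * represented here as all-ones.  For example, a scale of 16 limits
	 * each hardware descriptor to 64KB, so larger requests are split
	 * into descriptor chains in the prep_memcpy paths.
	 */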
131 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
136 ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
142 ioat_chan->device = device;
143 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
144 ioat_chan->xfercap = xfercap;
145 ioat_chan->desccount = 0;
146 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
147 spin_lock_init(&ioat_chan->cleanup_lock);
148 spin_lock_init(&ioat_chan->desc_lock);
149 INIT_LIST_HEAD(&ioat_chan->free_desc);
150 INIT_LIST_HEAD(&ioat_chan->used_desc);
151 /* This should be made common somewhere in dmaengine.c */
152 ioat_chan->common.device = &device->common;
153 list_add_tail(&ioat_chan->common.device_node, &dma->channels);
154 device->idx[i] = ioat_chan;
155 tasklet_init(&ioat_chan->cleanup_task,
156 ioat_dma_cleanup_tasklet,
157 (unsigned long) ioat_chan);
158 tasklet_disable(&ioat_chan->cleanup_task);
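		/*
		 * The cleanup tasklet starts out disabled; it is only
		 * enabled once the channel has descriptors and a completion
		 * writeback area, in ioat_dma_alloc_chan_resources().
		 */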
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}
static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
static inline void
__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}
static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
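/*
 * Note the difference between the two doorbells above: CB1 (IOAT_VER_1_2)
 * is kicked with an APPEND channel command, while CB2 and later hardware is
 * told how much work is queued by writing the running descriptor count to
 * the DMACOUNT register.
 */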
207 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
209 static void ioat_dma_chan_reset_part2(struct work_struct *work)
211 struct ioat_dma_chan *ioat_chan =
212 container_of(work, struct ioat_dma_chan, work.work);
213 struct ioat_desc_sw *desc;
215 spin_lock_bh(&ioat_chan->cleanup_lock);
216 spin_lock_bh(&ioat_chan->desc_lock);
218 ioat_chan->completion_virt->low = 0;
219 ioat_chan->completion_virt->high = 0;
220 ioat_chan->pending = 0;
	/*
	 * count the descriptors waiting, and be sure to do it
	 * right for both the CB1 line and the CB2 ring
	 */
	ioat_chan->dmacount = 0;
	if (ioat_chan->used_desc.prev) {
		desc = to_ioat_desc(ioat_chan->used_desc.prev);
		do {
			ioat_chan->dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat_chan->used_desc.next);
	}
	/*
	 * write the new starting descriptor address
	 * this puts channel engine into ARMED state
	 */
	desc = to_ioat_desc(ioat_chan->used_desc.prev);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		/* tell the engine to go with what's left to be done */
		writew(ioat_chan->dmacount,
		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
		break;
	}
262 dev_err(to_dev(ioat_chan),
263 "chan%d reset - %d descs waiting, %d total desc\n",
264 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
266 spin_unlock_bh(&ioat_chan->desc_lock);
267 spin_unlock_bh(&ioat_chan->cleanup_lock);
271 * ioat_dma_reset_channel - restart a channel
272 * @ioat_chan: IOAT DMA channel handle
274 static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
276 u32 chansts, chanerr;
	if (!ioat_chan->used_desc.prev)
		return;
281 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
282 chansts = (ioat_chan->completion_virt->low
283 & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
285 dev_err(to_dev(ioat_chan),
286 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
287 chan_num(ioat_chan), chansts, chanerr);
288 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */
299 spin_lock_bh(&ioat_chan->desc_lock);
300 ioat_chan->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
304 spin_unlock_bh(&ioat_chan->desc_lock);
306 /* schedule the 2nd half instead of sleeping a long time */
307 schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
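	/*
	 * The reset is deliberately done in two stages: the RESET command is
	 * issued above with 'pending' forced to INT_MIN so nobody can append
	 * or ring the doorbell in the meantime, and ioat_dma_chan_reset_part2()
	 * (scheduled RESET_DELAY from now) re-arms the chain address and
	 * descriptor counts once the hardware has settled.
	 */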
311 * ioat_dma_chan_watchdog - watch for stuck channels
313 static void ioat_dma_chan_watchdog(struct work_struct *work)
315 struct ioatdma_device *device =
316 container_of(work, struct ioatdma_device, work.work);
317 struct ioat_dma_chan *ioat_chan;
327 unsigned long compl_desc_addr_hw;
329 for (i = 0; i < device->common.chancnt; i++) {
330 ioat_chan = ioat_chan_by_index(device, i);
332 if (ioat_chan->device->version == IOAT_VER_1_2
333 /* have we started processing anything yet */
334 && ioat_chan->last_completion
335 /* have we completed any since last watchdog cycle? */
336 && (ioat_chan->last_completion ==
337 ioat_chan->watchdog_completion)
338 /* has TCP stuck on one cookie since last watchdog? */
339 && (ioat_chan->watchdog_tcp_cookie ==
340 ioat_chan->watchdog_last_tcp_cookie)
341 && (ioat_chan->watchdog_tcp_cookie !=
342 ioat_chan->completed_cookie)
343 /* is there something in the chain to be processed? */
344 /* CB1 chain always has at least the last one processed */
345 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
346 && ioat_chan->pending == 0) {
			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * it is not zero,
			 * and it has changed since the last watchdog
			 *     we can assume that channel
			 *     is still working correctly
			 *     and the problem is in completion writeback.
			 *     update completion writeback
			 *     with actual CHANSTS value
			 * else
			 *     try resetting the channel
			 */
363 completion_hw.low = readl(ioat_chan->reg_base +
364 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
365 completion_hw.high = readl(ioat_chan->reg_base +
366 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif
376 if ((compl_desc_addr_hw != 0)
377 && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
378 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
379 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
380 ioat_chan->completion_virt->low = completion_hw.low;
				ioat_chan->completion_virt->low = completion_hw.low;
				ioat_chan->completion_virt->high = completion_hw.high;
			} else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
				ioat_chan->last_compl_desc_addr_hw = 0;
			}
		/*
		 * for version 2.0 if there are descriptors yet to be processed
		 * and the last completed hasn't changed since the last watchdog
		 * if they haven't hit the pending level
		 *    issue the pending to push them through
		 * else
		 *    try resetting the channel
		 */
396 } else if (ioat_chan->device->version == IOAT_VER_2_0
397 && ioat_chan->used_desc.prev
398 && ioat_chan->last_completion
399 && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
			if (ioat_chan->pending < ioat_pending_level)
				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
			else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
			}
		} else {
			ioat_chan->last_compl_desc_addr_hw = 0;
			ioat_chan->watchdog_completion
					= ioat_chan->last_completion;
		}
413 ioat_chan->watchdog_last_tcp_cookie =
414 ioat_chan->watchdog_tcp_cookie;
417 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
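/*
 * The watchdog re-arms itself every WATCHDOG_DELAY.  It is also kicked
 * directly from ioat_dma_memcpy_cleanup() when completions appear to have
 * stalled for longer than the watchdog period (except on version 3.0
 * hardware, which does not use the watchdog).
 */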
420 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
422 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
423 struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
424 struct ioat_desc_sw *first;
425 struct ioat_desc_sw *chain_tail;
428 spin_lock_bh(&ioat_chan->desc_lock);
429 /* cookie incr and addition to used_list must be atomic */
430 cookie = ioat_chan->common.cookie;
434 ioat_chan->common.cookie = tx->cookie = cookie;
436 /* write address into NextDescriptor field of last desc in chain */
437 first = to_ioat_desc(tx->tx_list.next);
438 chain_tail = to_ioat_desc(ioat_chan->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
442 list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc);
444 ioat_chan->dmacount += desc->tx_cnt;
445 ioat_chan->pending += desc->tx_cnt;
446 if (ioat_chan->pending >= ioat_pending_level)
447 __ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
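/*
 * Client-side flow for reference (a minimal sketch of the async_tx usage
 * this submit path expects; ioat_dma_self_test() below follows the same
 * sequence against real hardware):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *	status = chan->device->device_is_tx_complete(chan, cookie, NULL, NULL);
 */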
453 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
455 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
456 struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
457 struct ioat_desc_sw *new;
458 struct ioat_dma_descriptor *hw;
463 unsigned long orig_flags;
464 unsigned int desc_count = 0;
466 /* src and dest and len are stored in the initial descriptor */
470 orig_flags = first->txd.flags;
474 * ioat_chan->desc_lock is still in force in version 2 path
475 * it gets unlocked at end of this function
478 copy = min_t(size_t, len, ioat_chan->xfercap);
480 async_tx_ack(&new->txd);
492 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
495 dev_err(to_dev(ioat_chan), "tx submit failed\n");
496 spin_unlock_bh(&ioat_chan->desc_lock);
500 hw->ctl_f.compl_write = 1;
501 if (first->txd.callback) {
502 hw->ctl_f.int_en = 1;
		/* move callback into the last desc */
505 new->txd.callback = first->txd.callback;
506 new->txd.callback_param
507 = first->txd.callback_param;
508 first->txd.callback = NULL;
509 first->txd.callback_param = NULL;
513 new->tx_cnt = desc_count;
514 new->txd.flags = orig_flags; /* client is in control of this ack */
516 /* store the original values for use in later cleanup */
518 new->src = first->src;
519 new->dst = first->dst;
520 new->len = first->len;
523 /* cookie incr and addition to used_list must be atomic */
524 cookie = ioat_chan->common.cookie;
528 ioat_chan->common.cookie = new->txd.cookie = cookie;
530 ioat_chan->dmacount += desc_count;
531 ioat_chan->pending += desc_count;
532 if (ioat_chan->pending >= ioat_pending_level)
533 __ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
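/*
 * Note: in the version 2 path the channel's desc_lock is taken in
 * ioat2_dma_prep_memcpy() and only released here at the end of tx_submit,
 * so prep and submit form one critical section; the version 1 path instead
 * takes the lock locally in each step.
 */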
540 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
541 * @ioat_chan: the channel supplying the memory pool for the descriptors
542 * @flags: allocation flags
544 static struct ioat_desc_sw *
545 ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
547 struct ioat_dma_descriptor *desc;
548 struct ioat_desc_sw *desc_sw;
549 struct ioatdma_device *ioatdma_device;
552 ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
553 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}
563 memset(desc, 0, sizeof(*desc));
564 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->txd.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		desc_sw->txd.tx_submit = ioat2_tx_submit;
		break;
	}

	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;

	return desc_sw;
}
581 static int ioat_initial_desc_count = 256;
582 module_param(ioat_initial_desc_count, int, 0644);
583 MODULE_PARM_DESC(ioat_initial_desc_count,
584 "initial descriptors per channel (default: 256)");
587 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
588 * @ioat_chan: the channel to be massaged
590 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
592 struct ioat_desc_sw *desc, *_desc;
594 /* setup used_desc */
595 ioat_chan->used_desc.next = ioat_chan->free_desc.next;
596 ioat_chan->used_desc.prev = NULL;
598 /* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
601 ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
602 ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
604 /* circle link the hw descriptors */
605 desc = to_ioat_desc(ioat_chan->free_desc.next);
606 desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
607 list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
608 desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
613 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
614 * @chan: the channel to be filled out
616 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
618 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
619 struct ioat_desc_sw *desc;
625 /* have we already been set up? */
626 if (!list_empty(&ioat_chan->free_desc))
627 return ioat_chan->desccount;
629 /* Setup register to interrupt and write completion status on error */
630 chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
631 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
632 IOAT_CHANCTRL_ERR_COMPLETION_EN;
633 writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}
641 /* Allocate descriptors */
642 for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(ioat_chan),
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
651 spin_lock_bh(&ioat_chan->desc_lock);
652 ioat_chan->desccount = i;
653 list_splice(&tmp_list, &ioat_chan->free_desc);
654 if (ioat_chan->device->version != IOAT_VER_1_2)
655 ioat2_dma_massage_chan_desc(ioat_chan);
656 spin_unlock_bh(&ioat_chan->desc_lock);
658 /* allocate a completion writeback area */
659 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
660 ioat_chan->completion_virt =
661 pci_pool_alloc(ioat_chan->device->completion_pool,
663 &ioat_chan->completion_addr);
664 memset(ioat_chan->completion_virt, 0,
665 sizeof(*ioat_chan->completion_virt));
666 writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
667 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
668 writel(((u64) ioat_chan->completion_addr) >> 32,
669 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
671 tasklet_enable(&ioat_chan->cleanup_task);
672 ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
673 return ioat_chan->desccount;
677 * ioat_dma_free_chan_resources - release all the descriptors
678 * @chan: the channel to be cleaned
680 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
682 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
683 struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
684 struct ioat_desc_sw *desc, *_desc;
685 int in_use_descs = 0;
	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat_chan->desccount == 0)
		return;
693 tasklet_disable(&ioat_chan->cleanup_task);
694 ioat_dma_memcpy_cleanup(ioat_chan);
	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);
704 spin_lock_bh(&ioat_chan->desc_lock);
705 switch (ioat_chan->device->version) {
707 list_for_each_entry_safe(desc, _desc,
708 &ioat_chan->used_desc, node) {
710 list_del(&desc->node);
711 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
715 list_for_each_entry_safe(desc, _desc,
716 &ioat_chan->free_desc, node) {
717 list_del(&desc->node);
718 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
725 list_for_each_entry_safe(desc, _desc,
726 ioat_chan->free_desc.next, node) {
727 list_del(&desc->node);
728 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
732 desc = to_ioat_desc(ioat_chan->free_desc.next);
733 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
736 INIT_LIST_HEAD(&ioat_chan->free_desc);
737 INIT_LIST_HEAD(&ioat_chan->used_desc);
740 spin_unlock_bh(&ioat_chan->desc_lock);
742 pci_pool_free(ioatdma_device->completion_pool,
743 ioat_chan->completion_virt,
744 ioat_chan->completion_addr);
746 /* one is ok since we left it on there on purpose */
747 if (in_use_descs > 1)
748 dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
751 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
752 ioat_chan->pending = 0;
753 ioat_chan->dmacount = 0;
754 ioat_chan->desccount = 0;
755 ioat_chan->watchdog_completion = 0;
756 ioat_chan->last_compl_desc_addr_hw = 0;
757 ioat_chan->watchdog_tcp_cookie =
758 ioat_chan->watchdog_last_tcp_cookie = 0;
762 * ioat_dma_get_next_descriptor - return the next available descriptor
763 * @ioat_chan: IOAT DMA channel handle
765 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
769 static struct ioat_desc_sw *
770 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
772 struct ioat_desc_sw *new;
774 if (!list_empty(&ioat_chan->free_desc)) {
775 new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(ioat_chan), "alloc failed\n");
			return NULL;
		}
	}

	return new;
}
790 static struct ioat_desc_sw *
791 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
793 struct ioat_desc_sw *new;
	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 *      and we need to use it as a noop descriptor before
	 *      linking in a new set of descriptors, since the device
	 *      has probably already read the pointer to it
	 */
804 if (ioat_chan->used_desc.prev &&
805 ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
807 struct ioat_desc_sw *desc;
808 struct ioat_desc_sw *noop_desc;
811 /* set up the noop descriptor */
812 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
813 /* set size to non-zero value (channel returns error when size is 0) */
814 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
815 noop_desc->hw->ctl = 0;
816 noop_desc->hw->ctl_f.null = 1;
817 noop_desc->hw->src_addr = 0;
818 noop_desc->hw->dst_addr = 0;
820 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
821 ioat_chan->pending++;
822 ioat_chan->dmacount++;
824 /* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(to_dev(ioat_chan), "alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->txd.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->txd.phys;

			ioat_chan->desccount++;
		}
840 ioat_chan->used_desc.next = noop_desc->node.next;
842 new = to_ioat_desc(ioat_chan->used_desc.next);
844 ioat_chan->used_desc.next = new->node.next;
846 if (ioat_chan->used_desc.prev == NULL)
847 ioat_chan->used_desc.prev = &new->node;
static struct ioat_desc_sw *
ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}
869 static struct dma_async_tx_descriptor *
870 ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
871 dma_addr_t dma_src, size_t len, unsigned long flags)
873 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
874 struct ioat_desc_sw *desc;
877 dma_addr_t src = dma_src;
878 dma_addr_t dest = dma_dest;
879 size_t total_len = len;
880 struct ioat_dma_descriptor *hw = NULL;
883 spin_lock_bh(&ioat_chan->desc_lock);
884 desc = ioat_dma_get_next_descriptor(ioat_chan);
890 copy = min_t(size_t, len, ioat_chan->xfercap);
898 list_add_tail(&desc->node, &chain);
904 struct ioat_desc_sw *next;
906 async_tx_ack(&desc->txd);
907 next = ioat_dma_get_next_descriptor(ioat_chan);
908 hw->next = next ? next->txd.phys : 0;
915 dev_err(to_dev(ioat_chan),
916 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
917 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
918 list_splice(&chain, &ioat_chan->free_desc);
919 spin_unlock_bh(&ioat_chan->desc_lock);
922 spin_unlock_bh(&ioat_chan->desc_lock);
924 desc->txd.flags = flags;
925 desc->tx_cnt = tx_cnt;
	desc->src = dma_src;
	desc->dst = dma_dest;
928 desc->len = total_len;
929 list_splice(&chain, &desc->txd.tx_list);
930 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;

	return &desc->txd;
}
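/*
 * Only the last descriptor of a chain built by ioat1_dma_prep_memcpy()
 * requests an interrupt (when DMA_PREP_INTERRUPT is set) and a completion
 * writeback, so a copy split across many descriptors still costs a single
 * completion.
 */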
936 static struct dma_async_tx_descriptor *
937 ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
938 dma_addr_t dma_src, size_t len, unsigned long flags)
940 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
941 struct ioat_desc_sw *new;
943 spin_lock_bh(&ioat_chan->desc_lock);
944 new = ioat2_dma_get_next_descriptor(ioat_chan);
	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it will get unlocked at end of tx_submit
	 */
	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->txd.flags = flags;
		return &new->txd;
	} else {
		spin_unlock_bh(&ioat_chan->desc_lock);
		dev_err(to_dev(ioat_chan),
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			pci_unmap_single(ioat_chan->device->pdev,
					 pci_unmap_addr(desc, dst),
					 pci_unmap_len(desc, len),
					 PCI_DMA_FROMDEVICE);
		else
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
	}

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			pci_unmap_single(ioat_chan->device->pdev,
					 pci_unmap_addr(desc, src),
					 pci_unmap_len(desc, len),
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
	}
}
1005 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
1006 * @chan: ioat channel to be cleaned up
1008 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1010 unsigned long phys_complete;
1011 struct ioat_desc_sw *desc, *_desc;
1012 dma_cookie_t cookie = 0;
1013 unsigned long desc_phys;
1014 struct ioat_desc_sw *latest_desc;
1015 struct dma_async_tx_descriptor *tx;
1017 prefetch(ioat_chan->completion_virt);
	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;
1022 /* The completion writeback can happen at any time,
1023 so reads by the driver need to be atomic operations
1024 The descriptor physical addresses are limited to 32-bits
1025 when the CPU can only do a 32-bit mov */
#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif
1036 if ((ioat_chan->completion_virt->full
1037 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
1038 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
1039 dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
1040 readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
1042 /* TODO do something to salvage the situation */
1045 if (phys_complete == ioat_chan->last_completion) {
1046 spin_unlock_bh(&ioat_chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (ioat_chan->device->version != IOAT_VER_3_0) {
			if (time_after(jiffies,
				       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
				ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
				ioat_chan->last_completion_time = jiffies;
			}
		}
		return;
	}
	ioat_chan->last_completion_time = jiffies;
	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}
1068 switch (ioat_chan->device->version) {
1070 list_for_each_entry_safe(desc, _desc,
1071 &ioat_chan->used_desc, node) {
1074 * Incoming DMA requests may use multiple descriptors,
1075 * due to exceeding xfercap, perhaps. If so, only the
1076 * last one will have a cookie, and require unmapping.
1079 cookie = tx->cookie;
1080 ioat_dma_unmap(ioat_chan, desc);
1082 tx->callback(tx->callback_param);
1083 tx->callback = NULL;
1087 if (tx->phys != phys_complete) {
1089 * a completed entry, but not the last, so clean
1090 * up if the client is done with the descriptor
1092 if (async_tx_test_ack(tx)) {
1093 list_move_tail(&desc->node,
1094 &ioat_chan->free_desc);
1099 * last used desc. Do not remove, so we can
1100 * append from it, but don't look at it next
1105 /* TODO check status bits? */
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;
1116 /* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
1122 desc_phys = (unsigned long)tx->phys
1123 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);
1130 if (latest_desc != NULL) {
1131 /* work forwards to clear finished descriptors */
1132 for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
1133 &desc->node != latest_desc->node.next &&
1134 &desc->node != ioat_chan->used_desc.next;
1135 desc = to_ioat_desc(desc->node.next)) {
1137 cookie = tx->cookie;
1139 ioat_dma_unmap(ioat_chan, desc);
1141 tx->callback(tx->callback_param);
1142 tx->callback = NULL;
1147 /* move used.prev up beyond those that are finished */
1148 if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
1156 spin_unlock_bh(&ioat_chan->desc_lock);
1158 ioat_chan->last_completion = phys_complete;
1160 ioat_chan->completed_cookie = cookie;
1162 spin_unlock_bh(&ioat_chan->cleanup_lock);
1166 * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
1167 * @chan: IOAT DMA channel handle
1168 * @cookie: DMA transaction identifier
1169 * @done: if not %NULL, updated with last completed transaction
1170 * @used: if not %NULL, updated with last used transaction
1172 static enum dma_status
1173 ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
1174 dma_cookie_t *done, dma_cookie_t *used)
1176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
1177 dma_cookie_t last_used;
1178 dma_cookie_t last_complete;
1179 enum dma_status ret;
1181 last_used = chan->cookie;
1182 last_complete = ioat_chan->completed_cookie;
1183 ioat_chan->watchdog_tcp_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;
	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;
1194 ioat_dma_memcpy_cleanup(ioat_chan);
1196 last_used = chan->cookie;
1197 last_complete = ioat_chan->completed_cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;
1204 return dma_async_is_complete(cookie, last_complete, last_used);
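/*
 * The status poll above is two-tiered: the cached cookies are checked
 * first, and only if the transaction is still outstanding is the more
 * expensive cleanup pass run before re-checking.
 */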
1207 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1209 struct ioat_desc_sw *desc;
1210 struct ioat_dma_descriptor *hw;
1212 spin_lock_bh(&ioat_chan->desc_lock);
	desc = ioat_dma_get_next_descriptor(ioat_chan);
	if (!desc) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return;
	}
	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
1228 /* set size to non-zero value (channel returns error when size is 0) */
1229 hw->size = NULL_DESC_BUFFER_SIZE;
1232 async_tx_ack(&desc->txd);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
1257 spin_unlock_bh(&ioat_chan->desc_lock);
/*
 * Perform an IOAT transaction to verify the HW works.
 */
1263 #define IOAT_TEST_SIZE 2000
static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
1273 * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
1274 * @device: device to be tested
1276 static int ioat_dma_self_test(struct ioatdma_device *device)
1281 struct dma_device *dma = &device->common;
1282 struct device *dev = &device->pdev->dev;
1283 struct dma_chan *dma_chan;
1284 struct dma_async_tx_descriptor *tx;
1285 dma_addr_t dma_dest, dma_src;
1286 dma_cookie_t cookie;
1288 struct completion cmp;
1290 unsigned long flags;
1292 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1295 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1301 /* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;
1305 /* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}
1314 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
1315 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
1316 flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
1317 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
1318 IOAT_TEST_SIZE, flags);
1320 dev_err(dev, "Self-test prep failed, disabling\n");
1322 goto free_resources;
1326 init_completion(&cmp);
1327 tx->callback = ioat_dma_test_callback;
1328 tx->callback_param = &cmp;
1329 cookie = tx->tx_submit(tx);
1331 dev_err(dev, "Self-test setup failed, disabling\n");
1333 goto free_resources;
1335 dma->device_issue_pending(dma_chan);
1337 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
1346 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1347 dev_err(dev, "Self-test copy failed compare, disabling\n");
1349 goto free_resources;
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
1360 static char ioat_interrupt_style[32] = "msix";
1361 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1362 sizeof(ioat_interrupt_style), 0644);
1363 MODULE_PARM_DESC(ioat_interrupt_style,
1364 "set ioat interrupt style: msix (default), "
1365 "msix-single-vector, msi, intx)");
1368 * ioat_dma_setup_interrupts - setup interrupt handler
1369 * @device: ioat device
1371 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1373 struct ioat_dma_chan *ioat_chan;
1374 struct pci_dev *pdev = device->pdev;
1375 struct device *dev = &pdev->dev;
1376 struct msix_entry *msix;
1381 if (!strcmp(ioat_interrupt_style, "msix"))
1383 if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1384 goto msix_single_vector;
1385 if (!strcmp(ioat_interrupt_style, "msi"))
1387 if (!strcmp(ioat_interrupt_style, "intx"))
1389 dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
1393 /* The number of MSI-X vectors should equal the number of channels */
1394 msixcnt = device->common.chancnt;
1395 for (i = 0; i < msixcnt; i++)
1396 device->msix_entries[i].entry = i;
	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;
1404 for (i = 0; i < msixcnt; i++) {
1405 msix = &device->msix_entries[i];
1406 ioat_chan = ioat_chan_by_index(device, i);
1407 err = devm_request_irq(dev, msix->vector,
1408 ioat_dma_do_interrupt_msix, 0,
1409 "ioat-msix", ioat_chan);
1411 for (j = 0; j < i; j++) {
1412 msix = &device->msix_entries[j];
1413 ioat_chan = ioat_chan_by_index(device, j);
1414 devm_free_irq(dev, msix->vector, ioat_chan);
1416 goto msix_single_vector;
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
1425 err = pci_enable_msix(pdev, device->msix_entries, 1);
1429 err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
1430 "ioat-msix", device);
1432 pci_disable_msix(pdev);
msi:
	err = pci_enable_msi(pdev);
1442 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
1443 "ioat-msi", device);
1445 pci_disable_msi(pdev);
intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
1452 IRQF_SHARED, "ioat-intx", device);
done:
	if (device->intr_quirk)
1458 device->intr_quirk(device);
1459 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);

	return 0;
err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return -EFAULT;
}
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
1476 static int ioat_probe(struct ioatdma_device *device)
1479 struct dma_device *dma = &device->common;
1480 struct pci_dev *pdev = device->pdev;
1481 struct device *dev = &pdev->dev;
1483 /* DMA coherent memory pool for DMA descriptor allocations */
1484 device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1485 sizeof(struct ioat_dma_descriptor),
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}
1492 device->completion_pool = pci_pool_create("completion_pool", pdev,
1493 sizeof(u64), SMP_CACHE_BYTES,
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}
1500 ioat_dma_enumerate_channels(device);
1502 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
1503 dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
1504 dma->device_free_chan_resources = ioat_dma_free_chan_resources;
1505 dma->device_is_tx_complete = ioat_dma_is_complete;
1506 dma->dev = &pdev->dev;
1508 dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
1509 " %d channels, device version 0x%02x, driver version %s\n",
1510 dma->chancnt, device->version, IOAT_DMA_VERSION);
1512 if (!dma->chancnt) {
1513 dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
1514 "zero channels detected\n");
1515 goto err_setup_interrupts;
1518 err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;
	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return err;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
1531 pci_pool_destroy(device->completion_pool);
1532 err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
static int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
1565 int ioat1_dma_probe(struct ioatdma_device *device, int dca)
1567 struct pci_dev *pdev = device->pdev;
1568 struct dma_device *dma;
1571 device->intr_quirk = ioat1_intr_quirk;
1572 dma = &device->common;
1573 dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1574 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
1592 int ioat2_dma_probe(struct ioatdma_device *device, int dca)
1594 struct pci_dev *pdev = device->pdev;
1595 struct dma_device *dma;
1596 struct dma_chan *chan;
1597 struct ioat_dma_chan *ioat_chan;
1600 dma = &device->common;
1601 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1602 dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
1604 err = ioat_probe(device);
1607 ioat_set_tcp_copy_break(2048);
1609 list_for_each_entry(chan, &dma->channels, device_node) {
1610 ioat_chan = to_ioat_chan(chan);
1611 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
1612 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);
1621 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1622 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
1627 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1629 struct pci_dev *pdev = device->pdev;
1630 struct dma_device *dma;
1631 struct dma_chan *chan;
1632 struct ioat_dma_chan *ioat_chan;
1636 dma = &device->common;
1637 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1638 dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
1640 /* -= IOAT ver.3 workarounds =- */
1641 /* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
1644 pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1646 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
1649 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1650 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1651 pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1653 err = ioat_probe(device);
1656 ioat_set_tcp_copy_break(262144);
1658 list_for_each_entry(chan, &dma->channels, device_node) {
1659 ioat_chan = to_ioat_chan(chan);
1660 writel(IOAT_DMA_DCA_ANY_CPU,
1661 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);
1673 void ioat_dma_remove(struct ioatdma_device *device)
1675 struct dma_chan *chan, *_chan;
1676 struct ioat_dma_chan *ioat_chan;
1677 struct dma_device *dma = &device->common;
1679 if (device->version != IOAT_VER_3_0)
1680 cancel_delayed_work(&device->work);
1682 ioat_disable_interrupts(device);
1684 dma_async_device_unregister(dma);
1686 pci_pool_destroy(device->dma_pool);
1687 pci_pool_destroy(device->completion_pool);
1689 list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) {
1690 ioat_chan = to_ioat_chan(chan);
1691 list_del(&chan->device_node);