/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memcpy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
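/*
 * Batching note: ioat1_tx_submit() only queues descriptors; the hardware
 * is not told about them until the channel's pending count reaches
 * ioat_pending_level or the client calls issue_pending.  Because the
 * parameter is created with mode 0644 it can also be tuned at runtime,
 * e.g. (assuming the driver is loaded as the ioatdma module):
 *
 *	echo 8 > /sys/module/ioatdma/parameters/ioat_pending_level
 */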
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
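/*
 * The two handlers above differ only in how the channel is found: in
 * single-vector mode the handler must read IOAT_ATTNSTATUS to discover
 * which channels asserted the interrupt, while in MSI-X mode each
 * vector is registered with its channel as the dev_id cookie, so no
 * status read is needed.  See ioat_dma_setup_interrupts() below for
 * where each handler is paired with request_irq().
 */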
static void ioat1_cleanup_tasklet(unsigned long data);
/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       work_func_t work_fn, void (*tasklet)(unsigned long),
		       unsigned long tasklet_data)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	INIT_DELAYED_WORK(&chan->work, work_fn);
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	tasklet_init(&chan->cleanup_task, tasklet, tasklet_data);
	tasklet_disable(&chan->cleanup_task);
}
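/*
 * Channel register layout, as encoded in the reg_base computation above:
 * the device-global registers occupy the first 0x80 bytes of the MMIO
 * region and each channel gets its own 0x80-byte window after that, so
 * for example idx 0 maps to reg_base + 0x80 and idx 2 maps to
 * reg_base + 0x180.
 */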
static void ioat1_reset_part2(struct work_struct *work);
/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_reset_part2,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
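/*
 * XFERCAP is a power-of-two encoding of the largest transfer a single
 * descriptor can carry: a scale of 12, for example, yields a 4096-byte
 * cap, while the special value 0 leaves xfercap at -1 (all ones,
 * effectively unlimited).  ioat1_dma_prep_memcpy() below splits larger
 * requests into a descriptor chain based on this value.
 */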
/**
 * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                    descriptors to hw
 * @ioat: IOAT DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}
static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
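/*
 * A minimal client-side sketch of the generic dmaengine calls that end
 * up here (error handling and DMA mapping of dst/src omitted):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * issue_pending is cheap when nothing is queued: the pending check above
 * avoids taking desc_lock and ringing the APPEND doorbell needlessly.
 */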
/**
 * ioat1_reset_part2 - reinit the channel after a reset
 */
static void ioat1_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat_dma_chan *ioat;
	struct ioat_desc_sw *desc;
	int dmacount;
	bool start_null = false;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat_dma_chan, base);
	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->desc_lock);

	chan->completion_virt->low = 0;
	chan->completion_virt->high = 0;
	ioat->pending = 0;

	/* count the descriptors waiting */
	dmacount = 0;
	if (ioat->used_desc.prev) {
		desc = to_ioat_desc(ioat->used_desc.prev);
		do {
			dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat->used_desc.next);
	}

	if (dmacount) {
		/*
		 * write the new starting descriptor address
		 * this puts channel engine into ARMED state
		 */
		desc = to_ioat_desc(ioat->used_desc.prev);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, chan->reg_base
			+ IOAT_CHANCMD_OFFSET(chan->device->version));
	} else
		start_null = true;
	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_err(to_dev(chan),
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(chan), dmacount, ioat->desccount);

	if (start_null)
		ioat1_dma_start_null_desc(ioat);
}
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	if (!ioat->used_desc.prev)
		return;

	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = (chan->completion_virt->low
		   & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat->desc_lock);
	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&chan->work, RESET_DELAY);
}
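/*
 * The reset is deliberately split in two: this half issues
 * IOAT_CHANCMD_RESET and parks ioat->pending at INT_MIN so that
 * ioat1_dma_memcpy_issue_pending(), which only acts when pending > 0,
 * cannot ring the doorbell mid-reset; ioat1_reset_part2() then runs
 * RESET_DELAY jiffies later to re-arm the chain instead of busy-waiting
 * for the engine to settle.
 */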
/**
 * ioat1_chan_watchdog - watch for stuck channels
 */
static void ioat1_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat;
	struct ioat_chan_common *chan;
	int i;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat_dma_chan, base);

		if (/* have we started processing anything yet */
		    chan->last_completion
		    /* have we completed any since last watchdog cycle? */
		    && (chan->last_completion == chan->watchdog_completion)
		    /* has TCP stuck on one cookie since last watchdog? */
		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
		    /* is there something in the chain to be processed? */
		    /* CB1 chain always has at least the last one processed */
		    && (ioat->used_desc.prev != ioat->used_desc.next)
		    && ioat->pending == 0) {

			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * it isn't stuck in a lock up
			 * and it has changed since the last watchdog
			 *         we can assume that channel
			 *         is still working correctly
			 *         and the problem is in completion writeback.
			 *         update completion writeback
			 *         with actual CHANSTS value
			 * else
			 *         try resetting the channel
			 */

			completion_hw.low = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
			completion_hw.high = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != chan->watchdog_completion)
			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				chan->completion_virt->low = completion_hw.low;
				chan->completion_virt->high = completion_hw.high;
			} else {
				ioat1_reset_channel(ioat);
				chan->watchdog_completion = 0;
				chan->last_compl_desc_addr_hw = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}

		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
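/*
 * Summary of the heuristic above: a channel is only touched when its
 * completion writeback has not moved since the previous watchdog pass
 * yet work is still queued.  If CHANSTS shows the hardware actually
 * made progress, only the writeback copy is repaired; otherwise the
 * channel is presumed wedged and reset.
 */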
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);

	ioat->pending += desc->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
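/*
 * Two subtleties in tx_submit: cookies increase monotonically and wrap
 * back to 1 on overflow (negative cookies are reserved for errors), and
 * the wmb() guarantees the new descriptors' contents are globally
 * visible before the old chain tail's next pointer is updated, since
 * the engine may already be walking the chain when the splice happens.
 */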
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @c: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		   IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		   IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion_virt = pci_pool_alloc(chan->device->completion_pool,
					       GFP_KERNEL,
					       &chan->completion_addr);
	memset(chan->completion_virt, 0,
	       sizeof(*chan->completion_virt));
	writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_addr) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);	/* give chain to dma device */
	return ioat->desccount;
}
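/*
 * The completion area allocated above is a single u64 that the engine
 * updates with the physical address of the last completed descriptor
 * (status flags live in the low-order bits; see
 * ioat_get_current_completion()).  Its address is programmed with two
 * 32-bit MMIO writes because, per the comment above, a single 64-bit
 * write does not work on this hardware.
 */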
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion_virt,
		      chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = chan->completion_addr = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}

	return new;
}
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			/* chain the descriptors */
			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->tx_cnt = tx_cnt;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;

	return &desc->txd;
}
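/*
 * Worked example of the loop above, assuming xfercap is 4096: a
 * 5000-byte memcpy becomes a two-descriptor chain (4096 + 904 bytes).
 * Only the final descriptor keeps the caller's cookie and has
 * int_en/compl_write set, so unmapping and callbacks fire once per
 * request in ioat1_cleanup(), not once per hardware descriptor.
 */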
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
		chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((chan->completion_virt->full
	     & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
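/*
 * The masks used above reflect the completion writeback layout: the
 * 64-bit value holds the physical address of the last completed
 * descriptor with DMA status flags packed into the low-order bits,
 * which is why the address must be masked
 * (IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR, or IOAT_LOW_COMPLETION_MASK
 * on 32-bit builds) before it is compared against descriptor addresses.
 */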
/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @ioat: ioat channel to be cleaned up
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion_virt);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (time_after(jiffies,
			       chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
			ioat1_chan_watchdog(&(chan->device->work.work));
			chan->last_completion_time = jiffies;
		}
		return;
	}
	chan->last_completion_time = jiffies;

	cookie = 0;
	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		if (tx->cookie) {
			cookie = tx->cookie;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
			else
				tx->cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it, but don't look at it next
			 * time, either
			 */
			tx->cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);

	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, chan->reg_base
		+ IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000
static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
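/*
 * The style can be chosen at load time, e.g. (assuming the driver is
 * built as the ioatdma module):
 *
 *	modprobe ioatdma ioat_interrupt_style=msi
 *
 * or on the kernel command line for a built-in driver:
 *
 *	ioatdma.ioat_interrupt_style=msix-single-vector
 */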
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}
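/*
 * Fallback order implemented above: msix (one vector per channel) ->
 * msix-single-vector -> msi -> intx, with each label falling through to
 * the next cheaper mode on any vector-allocation or request_irq
 * failure.  Only full MSI-X sets IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
 * every successful path ends at "done", which applies the per-version
 * intr_quirk and sets the master interrupt enable.
 */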
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}