/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/workqueue.h>
36 #include <linux/i7300_idle.h>
38 #include "registers.h"
41 static int ioat_pending_level = 4;
42 module_param(ioat_pending_level, int, 0644);
43 MODULE_PARM_DESC(ioat_pending_level,
44 "high-water mark for pushing ioat descriptors (default: 4)");
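/*
 * How the high-water mark is used (illustrative sketch, not additional
 * driver logic): tx_submit only bumps the per-channel ->pending count,
 * and the hardware doorbell is written either when that count crosses
 * ioat_pending_level or when the client explicitly calls issue_pending:
 *
 *	ioat_chan->pending += desc_count;
 *	if (ioat_chan->pending >= ioat_pending_level)
 *		__ioat1_dma_memcpy_issue_pending(ioat_chan);
 *
 * Raising the parameter batches more descriptors per MMIO write at the
 * cost of extra latency on lightly loaded channels.
 */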
46 static void ioat_dma_chan_reset_part2(struct work_struct *work);
47 static void ioat_dma_chan_watchdog(struct work_struct *work);
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}
66 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
68 * @data: interrupt data
70 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
72 struct ioatdma_device *instance = data;
73 struct ioat_dma_chan *ioat_chan;
74 unsigned long attnstatus;
78 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
80 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
83 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
84 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
88 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
89 for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
90 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
91 tasklet_schedule(&ioat_chan->cleanup_task);
94 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
99 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
101 * @data: interrupt data
103 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
105 struct ioat_dma_chan *ioat_chan = data;
107 tasklet_schedule(&ioat_chan->cleanup_task);
112 static void ioat_dma_cleanup_tasklet(unsigned long data);
115 * ioat_dma_enumerate_channels - find and initialize the device's channels
116 * @device: the device to be enumerated
118 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
123 struct ioat_dma_chan *ioat_chan;
124 struct device *dev = &device->pdev->dev;
127 * IOAT ver.3 workarounds
129 if (device->version == IOAT_VER_3_0) {
135 * Write CHANERRMSK_INT with 3E07h to mask out the errors
136 * that can cause stability issues for IOAT ver.3
138 chan_err_mask = 0x3E07;
139 pci_write_config_dword(device->pdev,
140 IOAT_PCI_CHANERRMASK_INT_OFFSET,
144 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
145 * (workaround for spurious config parity error after restart)
147 pci_read_config_word(device->pdev,
148 IOAT_PCI_DEVICE_ID_OFFSET,
150 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
152 pci_write_config_dword(device->pdev,
153 IOAT_PCI_DMAUNCERRSTS_OFFSET,
158 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
159 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
160 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
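/*
 * Worked example (illustrative): XFERCAP holds log2 of the largest
 * transfer a single descriptor may carry, so a scale of 20 yields a 1 MB
 * xfercap, while a scale of 0 is treated as "no limit" (xfercap becomes
 * all ones).  Copies larger than xfercap are split across several
 * descriptors at prep/submit time.
 */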
162 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
163 if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
164 device->common.chancnt--;
167 for (i = 0; i < device->common.chancnt; i++) {
168 ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
170 device->common.chancnt = i;
174 ioat_chan->device = device;
175 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
176 ioat_chan->xfercap = xfercap;
177 ioat_chan->desccount = 0;
178 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
179 if (ioat_chan->device->version == IOAT_VER_2_0)
180 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
181 IOAT_DMA_DCA_ANY_CPU,
182 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
183 else if (ioat_chan->device->version == IOAT_VER_3_0)
184 writel(IOAT_DMA_DCA_ANY_CPU,
185 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
186 spin_lock_init(&ioat_chan->cleanup_lock);
187 spin_lock_init(&ioat_chan->desc_lock);
188 INIT_LIST_HEAD(&ioat_chan->free_desc);
189 INIT_LIST_HEAD(&ioat_chan->used_desc);
190 /* This should be made common somewhere in dmaengine.c */
191 ioat_chan->common.device = &device->common;
192 list_add_tail(&ioat_chan->common.device_node,
193 &device->common.channels);
194 device->idx[i] = ioat_chan;
195 tasklet_init(&ioat_chan->cleanup_task,
196 ioat_dma_cleanup_tasklet,
197 (unsigned long) ioat_chan);
198 tasklet_disable(&ioat_chan->cleanup_task);
200 return device->common.chancnt;
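/*
 * Per-channel MMIO layout implied by the code above (sketch): the first
 * 0x80 bytes of the mapped BAR hold the device-global registers (CHANCNT,
 * XFERCAP, INTRCTRL, ...), and each channel i then gets its own 0x80-byte
 * register window:
 *
 *	ioat_chan->reg_base = device->reg_base + 0x80 * (i + 1);
 *
 * so channel 0 starts at offset 0x80, channel 1 at 0x100, and so on.
 */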
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
208 static inline void __ioat1_dma_memcpy_issue_pending(
209 struct ioat_dma_chan *ioat_chan)
211 ioat_chan->pending = 0;
212 writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
215 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
217 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
219 if (ioat_chan->pending > 0) {
220 spin_lock_bh(&ioat_chan->desc_lock);
221 __ioat1_dma_memcpy_issue_pending(ioat_chan);
222 spin_unlock_bh(&ioat_chan->desc_lock);
226 static inline void __ioat2_dma_memcpy_issue_pending(
227 struct ioat_dma_chan *ioat_chan)
229 ioat_chan->pending = 0;
230 writew(ioat_chan->dmacount,
231 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
234 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
236 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
238 if (ioat_chan->pending > 0) {
239 spin_lock_bh(&ioat_chan->desc_lock);
240 __ioat2_dma_memcpy_issue_pending(ioat_chan);
241 spin_unlock_bh(&ioat_chan->desc_lock);
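/*
 * Note on the two doorbell styles above: a CB1 (version 1.2) channel is
 * kicked by writing the APPEND command to its CHANCMD register, while a
 * CB2 (version 2.0) channel is kicked by writing the running descriptor
 * count to CHAN_DMACOUNT.  Both helpers clear ->pending, and the callers
 * hold desc_lock around the hardware write.
 */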
247 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
249 static void ioat_dma_chan_reset_part2(struct work_struct *work)
251 struct ioat_dma_chan *ioat_chan =
252 container_of(work, struct ioat_dma_chan, work.work);
253 struct ioat_desc_sw *desc;
255 spin_lock_bh(&ioat_chan->cleanup_lock);
256 spin_lock_bh(&ioat_chan->desc_lock);
258 ioat_chan->completion_virt->low = 0;
259 ioat_chan->completion_virt->high = 0;
260 ioat_chan->pending = 0;
263 * count the descriptors waiting, and be sure to do it
264 * right for both the CB1 line and the CB2 ring
266 ioat_chan->dmacount = 0;
267 if (ioat_chan->used_desc.prev) {
268 desc = to_ioat_desc(ioat_chan->used_desc.prev);
270 ioat_chan->dmacount++;
271 desc = to_ioat_desc(desc->node.next);
272 } while (&desc->node != ioat_chan->used_desc.next);
276 * write the new starting descriptor address
277 * this puts channel engine into ARMED state
279 desc = to_ioat_desc(ioat_chan->used_desc.prev);
280 switch (ioat_chan->device->version) {
282 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
283 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
284 writel(((u64) desc->async_tx.phys) >> 32,
285 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
287 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
288 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
291 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
292 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
293 writel(((u64) desc->async_tx.phys) >> 32,
294 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
296 /* tell the engine to go with what's left to be done */
297 writew(ioat_chan->dmacount,
298 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
302 dev_err(&ioat_chan->device->pdev->dev,
303 "chan%d reset - %d descs waiting, %d total desc\n",
304 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
306 spin_unlock_bh(&ioat_chan->desc_lock);
307 spin_unlock_bh(&ioat_chan->cleanup_lock);
311 * ioat_dma_reset_channel - restart a channel
312 * @ioat_chan: IOAT DMA channel handle
314 static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
316 u32 chansts, chanerr;
318 if (!ioat_chan->used_desc.prev)
321 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
322 chansts = (ioat_chan->completion_virt->low
323 & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
325 dev_err(&ioat_chan->device->pdev->dev,
326 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
327 chan_num(ioat_chan), chansts, chanerr);
328 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
332 * whack it upside the head with a reset
333 * and wait for things to settle out.
334 * force the pending count to a really big negative
335 * to make sure no one forces an issue_pending
336 * while we're waiting.
339 spin_lock_bh(&ioat_chan->desc_lock);
340 ioat_chan->pending = INT_MIN;
341 writeb(IOAT_CHANCMD_RESET,
343 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
344 spin_unlock_bh(&ioat_chan->desc_lock);
346 /* schedule the 2nd half instead of sleeping a long time */
347 schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
351 * ioat_dma_chan_watchdog - watch for stuck channels
353 static void ioat_dma_chan_watchdog(struct work_struct *work)
355 struct ioatdma_device *device =
356 container_of(work, struct ioatdma_device, work.work);
357 struct ioat_dma_chan *ioat_chan;
367 unsigned long compl_desc_addr_hw;
369 for (i = 0; i < device->common.chancnt; i++) {
370 ioat_chan = ioat_lookup_chan_by_index(device, i);
372 if (ioat_chan->device->version == IOAT_VER_1_2
373 /* have we started processing anything yet */
374 && ioat_chan->last_completion
375 /* have we completed any since last watchdog cycle? */
376 && (ioat_chan->last_completion ==
377 ioat_chan->watchdog_completion)
378 /* has TCP stuck on one cookie since last watchdog? */
379 && (ioat_chan->watchdog_tcp_cookie ==
380 ioat_chan->watchdog_last_tcp_cookie)
381 && (ioat_chan->watchdog_tcp_cookie !=
382 ioat_chan->completed_cookie)
383 /* is there something in the chain to be processed? */
384 /* CB1 chain always has at least the last one processed */
385 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
386 && ioat_chan->pending == 0) {
/*
 * Check the CHANSTS register for the last completed descriptor
 * address.  If it differs from the completion writeback and has
 * changed since the last watchdog pass, the channel is still
 * working and only the completion writeback is stale, so update
 * the writeback with the actual CHANSTS value.  Otherwise, try
 * resetting the channel.
 */
403 completion_hw.low = readl(ioat_chan->reg_base +
404 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
405 completion_hw.high = readl(ioat_chan->reg_base +
406 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif
416 if ((compl_desc_addr_hw != 0)
417 && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
418 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
419 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
420 ioat_chan->completion_virt->low = completion_hw.low;
421 ioat_chan->completion_virt->high = completion_hw.high;
423 ioat_dma_reset_channel(ioat_chan);
424 ioat_chan->watchdog_completion = 0;
425 ioat_chan->last_compl_desc_addr_hw = 0;
/*
 * For version 2.0: if there are descriptors yet to be processed and
 * the last completed address hasn't changed since the last watchdog
 * pass, then either the channel hasn't reached the pending level yet
 * (so issue the pending descriptors to push them through) or it is
 * stuck, in which case try resetting the channel.
 */
436 } else if (ioat_chan->device->version == IOAT_VER_2_0
437 && ioat_chan->used_desc.prev
438 && ioat_chan->last_completion
439 && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
441 if (ioat_chan->pending < ioat_pending_level)
442 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
444 ioat_dma_reset_channel(ioat_chan);
445 ioat_chan->watchdog_completion = 0;
448 ioat_chan->last_compl_desc_addr_hw = 0;
449 ioat_chan->watchdog_completion
450 = ioat_chan->last_completion;
453 ioat_chan->watchdog_last_tcp_cookie =
454 ioat_chan->watchdog_tcp_cookie;
457 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
460 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
462 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
463 struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
464 struct ioat_desc_sw *prev, *new;
465 struct ioat_dma_descriptor *hw;
467 LIST_HEAD(new_chain);
471 unsigned long orig_flags;
472 unsigned int desc_count = 0;
474 /* src and dest and len are stored in the initial descriptor */
478 orig_flags = first->async_tx.flags;
481 spin_lock_bh(&ioat_chan->desc_lock);
482 prev = to_ioat_desc(ioat_chan->used_desc.prev);
485 copy = min_t(size_t, len, ioat_chan->xfercap);
487 async_tx_ack(&new->async_tx);
496 /* chain together the physical address list for the HW */
498 prev->hw->next = (u64) new->async_tx.phys;
504 list_add_tail(&new->node, &new_chain);
507 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
510 dev_err(&ioat_chan->device->pdev->dev,
511 "tx submit failed\n");
512 spin_unlock_bh(&ioat_chan->desc_lock);
516 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
517 if (first->async_tx.callback) {
518 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
/* move callback into the last desc */
521 new->async_tx.callback = first->async_tx.callback;
522 new->async_tx.callback_param
523 = first->async_tx.callback_param;
524 first->async_tx.callback = NULL;
525 first->async_tx.callback_param = NULL;
529 new->tx_cnt = desc_count;
530 new->async_tx.flags = orig_flags; /* client is in control of this ack */
532 /* store the original values for use in later cleanup */
534 new->src = first->src;
535 new->dst = first->dst;
536 new->len = first->len;
539 /* cookie incr and addition to used_list must be atomic */
540 cookie = ioat_chan->common.cookie;
544 ioat_chan->common.cookie = new->async_tx.cookie = cookie;
546 /* write address into NextDescriptor field of last desc in chain */
547 to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
548 first->async_tx.phys;
549 list_splice_tail(&new_chain, &ioat_chan->used_desc);
551 ioat_chan->dmacount += desc_count;
552 ioat_chan->pending += desc_count;
553 if (ioat_chan->pending >= ioat_pending_level)
554 __ioat1_dma_memcpy_issue_pending(ioat_chan);
555 spin_unlock_bh(&ioat_chan->desc_lock);
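/*
 * Sizing sketch (illustrative): a single len-byte prep/submit is chopped
 * by the loop above into DIV_ROUND_UP(len, ioat_chan->xfercap) hardware
 * descriptors.  Only the last descriptor of the chain keeps the cookie,
 * the completion callback and the unmap information; the intermediate
 * descriptors are acked as soon as they are built.
 */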
560 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
562 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
563 struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
564 struct ioat_desc_sw *new;
565 struct ioat_dma_descriptor *hw;
570 unsigned long orig_flags;
571 unsigned int desc_count = 0;
573 /* src and dest and len are stored in the initial descriptor */
577 orig_flags = first->async_tx.flags;
/*
 * ioat_chan->desc_lock is still held in the version 2 path;
 * it gets unlocked at the end of this function
 */
585 copy = min_t(size_t, len, ioat_chan->xfercap);
587 async_tx_ack(&new->async_tx);
599 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
602 dev_err(&ioat_chan->device->pdev->dev,
603 "tx submit failed\n");
604 spin_unlock_bh(&ioat_chan->desc_lock);
608 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
609 if (first->async_tx.callback) {
610 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
/* move callback into the last desc */
613 new->async_tx.callback = first->async_tx.callback;
614 new->async_tx.callback_param
615 = first->async_tx.callback_param;
616 first->async_tx.callback = NULL;
617 first->async_tx.callback_param = NULL;
621 new->tx_cnt = desc_count;
622 new->async_tx.flags = orig_flags; /* client is in control of this ack */
624 /* store the original values for use in later cleanup */
626 new->src = first->src;
627 new->dst = first->dst;
628 new->len = first->len;
631 /* cookie incr and addition to used_list must be atomic */
632 cookie = ioat_chan->common.cookie;
636 ioat_chan->common.cookie = new->async_tx.cookie = cookie;
638 ioat_chan->dmacount += desc_count;
639 ioat_chan->pending += desc_count;
640 if (ioat_chan->pending >= ioat_pending_level)
641 __ioat2_dma_memcpy_issue_pending(ioat_chan);
642 spin_unlock_bh(&ioat_chan->desc_lock);
648 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
649 * @ioat_chan: the channel supplying the memory pool for the descriptors
650 * @flags: allocation flags
652 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
653 struct ioat_dma_chan *ioat_chan,
656 struct ioat_dma_descriptor *desc;
657 struct ioat_desc_sw *desc_sw;
658 struct ioatdma_device *ioatdma_device;
661 ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
662 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
666 desc_sw = kzalloc(sizeof(*desc_sw), flags);
667 if (unlikely(!desc_sw)) {
668 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
672 memset(desc, 0, sizeof(*desc));
673 dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
674 switch (ioat_chan->device->version) {
676 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
680 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
685 desc_sw->async_tx.phys = phys;
690 static int ioat_initial_desc_count = 256;
691 module_param(ioat_initial_desc_count, int, 0644);
692 MODULE_PARM_DESC(ioat_initial_desc_count,
693 "initial descriptors per channel (default: 256)");
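/*
 * Both tunables (ioat_pending_level above and ioat_initial_desc_count
 * here) are registered with 0644 permissions, so besides being set at
 * load time they can be read and changed at runtime, e.g. (illustrative,
 * assuming the driver is built as the ioatdma module):
 *
 *	echo 512 > /sys/module/ioatdma/parameters/ioat_initial_desc_count
 *
 * A larger initial count trades memory for fewer GFP_ATOMIC descriptor
 * allocations in ioat_dma_get_next_descriptor().
 */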
696 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
697 * @ioat_chan: the channel to be massaged
699 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
701 struct ioat_desc_sw *desc, *_desc;
703 /* setup used_desc */
704 ioat_chan->used_desc.next = ioat_chan->free_desc.next;
705 ioat_chan->used_desc.prev = NULL;
707 /* pull free_desc out of the circle so that every node is a hw
708 * descriptor, but leave it pointing to the list
710 ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
711 ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
713 /* circle link the hw descriptors */
714 desc = to_ioat_desc(ioat_chan->free_desc.next);
715 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
716 list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
717 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
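/*
 * Resulting layout (sketch): after the massage, free_desc is detached so
 * that every remaining node is a hardware descriptor, and each hw
 * descriptor's ->next holds the bus address of its successor, with the
 * last one wrapping back to the first:
 *
 *	desc[0].hw->next = desc[1].async_tx.phys;
 *	...
 *	desc[N-1].hw->next = desc[0].async_tx.phys;
 *
 * ioat2_dma_get_next_descriptor() then simply walks used_desc.next around
 * this ring.
 */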
722 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
723 * @chan: the channel to be filled out
725 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
727 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
728 struct ioat_desc_sw *desc;
734 /* have we already been set up? */
735 if (!list_empty(&ioat_chan->free_desc))
736 return ioat_chan->desccount;
738 /* Setup register to interrupt and write completion status on error */
739 chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
740 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
741 IOAT_CHANCTRL_ERR_COMPLETION_EN;
742 writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
744 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
746 dev_err(&ioat_chan->device->pdev->dev,
747 "CHANERR = %x, clearing\n", chanerr);
748 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
751 /* Allocate descriptors */
752 for (i = 0; i < ioat_initial_desc_count; i++) {
753 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
755 dev_err(&ioat_chan->device->pdev->dev,
756 "Only %d initial descriptors\n", i);
759 list_add_tail(&desc->node, &tmp_list);
761 spin_lock_bh(&ioat_chan->desc_lock);
762 ioat_chan->desccount = i;
763 list_splice(&tmp_list, &ioat_chan->free_desc);
764 if (ioat_chan->device->version != IOAT_VER_1_2)
765 ioat2_dma_massage_chan_desc(ioat_chan);
766 spin_unlock_bh(&ioat_chan->desc_lock);
768 /* allocate a completion writeback area */
769 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
770 ioat_chan->completion_virt =
771 pci_pool_alloc(ioat_chan->device->completion_pool,
773 &ioat_chan->completion_addr);
774 memset(ioat_chan->completion_virt, 0,
775 sizeof(*ioat_chan->completion_virt));
776 writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
777 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
778 writel(((u64) ioat_chan->completion_addr) >> 32,
779 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
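/*
 * What the hardware writes here (sketch): a 64-bit completion word whose
 * upper bits hold the bus address of the last completed descriptor and
 * whose low bits hold transfer status; ioat_dma_memcpy_cleanup() masks
 * the word with IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR to recover that
 * address.
 */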
781 tasklet_enable(&ioat_chan->cleanup_task);
782 ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
783 return ioat_chan->desccount;
787 * ioat_dma_free_chan_resources - release all the descriptors
788 * @chan: the channel to be cleaned
790 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
792 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
793 struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
794 struct ioat_desc_sw *desc, *_desc;
795 int in_use_descs = 0;
797 /* Before freeing channel resources first check
798 * if they have been previously allocated for this channel.
800 if (ioat_chan->desccount == 0)
803 tasklet_disable(&ioat_chan->cleanup_task);
804 ioat_dma_memcpy_cleanup(ioat_chan);
806 /* Delay 100ms after reset to allow internal DMA logic to quiesce
807 * before removing DMA descriptor resources.
809 writeb(IOAT_CHANCMD_RESET,
811 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
814 spin_lock_bh(&ioat_chan->desc_lock);
815 switch (ioat_chan->device->version) {
817 list_for_each_entry_safe(desc, _desc,
818 &ioat_chan->used_desc, node) {
820 list_del(&desc->node);
821 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
822 desc->async_tx.phys);
825 list_for_each_entry_safe(desc, _desc,
826 &ioat_chan->free_desc, node) {
827 list_del(&desc->node);
828 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
829 desc->async_tx.phys);
835 list_for_each_entry_safe(desc, _desc,
836 ioat_chan->free_desc.next, node) {
837 list_del(&desc->node);
838 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
839 desc->async_tx.phys);
842 desc = to_ioat_desc(ioat_chan->free_desc.next);
843 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
844 desc->async_tx.phys);
846 INIT_LIST_HEAD(&ioat_chan->free_desc);
847 INIT_LIST_HEAD(&ioat_chan->used_desc);
850 spin_unlock_bh(&ioat_chan->desc_lock);
852 pci_pool_free(ioatdma_device->completion_pool,
853 ioat_chan->completion_virt,
854 ioat_chan->completion_addr);
856 /* one is ok since we left it on there on purpose */
857 if (in_use_descs > 1)
858 dev_err(&ioat_chan->device->pdev->dev,
859 "Freeing %d in use descriptors!\n",
862 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
863 ioat_chan->pending = 0;
864 ioat_chan->dmacount = 0;
865 ioat_chan->desccount = 0;
866 ioat_chan->watchdog_completion = 0;
867 ioat_chan->last_compl_desc_addr_hw = 0;
868 ioat_chan->watchdog_tcp_cookie =
869 ioat_chan->watchdog_last_tcp_cookie = 0;
873 * ioat_dma_get_next_descriptor - return the next available descriptor
874 * @ioat_chan: IOAT DMA channel handle
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
880 static struct ioat_desc_sw *
881 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
883 struct ioat_desc_sw *new;
885 if (!list_empty(&ioat_chan->free_desc)) {
886 new = to_ioat_desc(ioat_chan->free_desc.next);
887 list_del(&new->node);
889 /* try to get another desc */
890 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
892 dev_err(&ioat_chan->device->pdev->dev,
902 static struct ioat_desc_sw *
903 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
905 struct ioat_desc_sw *new;
908 * used.prev points to where to start processing
909 * used.next points to next free descriptor
910 * if used.prev == NULL, there are none waiting to be processed
911 * if used.next == used.prev.prev, there is only one free descriptor,
 * and we need to use it as a noop descriptor before
913 * linking in a new set of descriptors, since the device
914 * has probably already read the pointer to it
916 if (ioat_chan->used_desc.prev &&
917 ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
919 struct ioat_desc_sw *desc;
920 struct ioat_desc_sw *noop_desc;
923 /* set up the noop descriptor */
924 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
925 /* set size to non-zero value (channel returns error when size is 0) */
926 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
927 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
928 noop_desc->hw->src_addr = 0;
929 noop_desc->hw->dst_addr = 0;
931 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
932 ioat_chan->pending++;
933 ioat_chan->dmacount++;
935 /* try to get a few more descriptors */
936 for (i = 16; i; i--) {
937 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
939 dev_err(&ioat_chan->device->pdev->dev,
943 list_add_tail(&desc->node, ioat_chan->used_desc.next);
946 = to_ioat_desc(desc->node.next)->async_tx.phys;
947 to_ioat_desc(desc->node.prev)->hw->next
948 = desc->async_tx.phys;
949 ioat_chan->desccount++;
952 ioat_chan->used_desc.next = noop_desc->node.next;
954 new = to_ioat_desc(ioat_chan->used_desc.next);
956 ioat_chan->used_desc.next = new->node.next;
958 if (ioat_chan->used_desc.prev == NULL)
959 ioat_chan->used_desc.prev = &new->node;
965 static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
966 struct ioat_dma_chan *ioat_chan)
971 switch (ioat_chan->device->version) {
973 return ioat1_dma_get_next_descriptor(ioat_chan);
976 return ioat2_dma_get_next_descriptor(ioat_chan);
981 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
982 struct dma_chan *chan,
988 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
989 struct ioat_desc_sw *new;
991 spin_lock_bh(&ioat_chan->desc_lock);
992 new = ioat_dma_get_next_descriptor(ioat_chan);
993 spin_unlock_bh(&ioat_chan->desc_lock);
999 new->async_tx.flags = flags;
1000 return &new->async_tx;
1002 dev_err(&ioat_chan->device->pdev->dev,
1003 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1004 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
1009 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
1010 struct dma_chan *chan,
1011 dma_addr_t dma_dest,
1014 unsigned long flags)
1016 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
1017 struct ioat_desc_sw *new;
1019 spin_lock_bh(&ioat_chan->desc_lock);
1020 new = ioat2_dma_get_next_descriptor(ioat_chan);
1023 * leave ioat_chan->desc_lock set in ioat 2 path
1024 * it will get unlocked at end of tx_submit
1029 new->dst = dma_dest;
1031 new->async_tx.flags = flags;
1032 return &new->async_tx;
1034 spin_unlock_bh(&ioat_chan->desc_lock);
1035 dev_err(&ioat_chan->device->pdev->dev,
1036 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1037 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
1042 static void ioat_dma_cleanup_tasklet(unsigned long data)
1044 struct ioat_dma_chan *chan = (void *)data;
1045 ioat_dma_memcpy_cleanup(chan);
1046 writew(IOAT_CHANCTRL_INT_DISABLE,
1047 chan->reg_base + IOAT_CHANCTRL_OFFSET);
1051 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
1053 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1054 if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1055 pci_unmap_single(ioat_chan->device->pdev,
1056 pci_unmap_addr(desc, dst),
1057 pci_unmap_len(desc, len),
1058 PCI_DMA_FROMDEVICE);
1060 pci_unmap_page(ioat_chan->device->pdev,
1061 pci_unmap_addr(desc, dst),
1062 pci_unmap_len(desc, len),
1063 PCI_DMA_FROMDEVICE);
1066 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1067 if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1068 pci_unmap_single(ioat_chan->device->pdev,
1069 pci_unmap_addr(desc, src),
1070 pci_unmap_len(desc, len),
1073 pci_unmap_page(ioat_chan->device->pdev,
1074 pci_unmap_addr(desc, src),
1075 pci_unmap_len(desc, len),
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
1082 * @chan: ioat channel to be cleaned up
1084 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1086 unsigned long phys_complete;
1087 struct ioat_desc_sw *desc, *_desc;
1088 dma_cookie_t cookie = 0;
1089 unsigned long desc_phys;
1090 struct ioat_desc_sw *latest_desc;
1092 prefetch(ioat_chan->completion_virt);
1094 if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */
#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif
1111 if ((ioat_chan->completion_virt->full
1112 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
1113 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
1114 dev_err(&ioat_chan->device->pdev->dev,
1115 "Channel halted, chanerr = %x\n",
1116 readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
1118 /* TODO do something to salvage the situation */
1121 if (phys_complete == ioat_chan->last_completion) {
1122 spin_unlock_bh(&ioat_chan->cleanup_lock);
1124 * perhaps we're stuck so hard that the watchdog can't go off?
1125 * try to catch it after 2 seconds
1127 if (ioat_chan->device->version != IOAT_VER_3_0) {
1128 if (time_after(jiffies,
1129 ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
1130 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1131 ioat_chan->last_completion_time = jiffies;
1136 ioat_chan->last_completion_time = jiffies;
1139 if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1140 spin_unlock_bh(&ioat_chan->cleanup_lock);
1144 switch (ioat_chan->device->version) {
1146 list_for_each_entry_safe(desc, _desc,
1147 &ioat_chan->used_desc, node) {
1150 * Incoming DMA requests may use multiple descriptors,
1151 * due to exceeding xfercap, perhaps. If so, only the
1152 * last one will have a cookie, and require unmapping.
1154 if (desc->async_tx.cookie) {
1155 cookie = desc->async_tx.cookie;
1156 ioat_dma_unmap(ioat_chan, desc);
1157 if (desc->async_tx.callback) {
1158 desc->async_tx.callback(desc->async_tx.callback_param);
1159 desc->async_tx.callback = NULL;
1163 if (desc->async_tx.phys != phys_complete) {
1165 * a completed entry, but not the last, so clean
1166 * up if the client is done with the descriptor
1168 if (async_tx_test_ack(&desc->async_tx)) {
1169 list_move_tail(&desc->node,
1170 &ioat_chan->free_desc);
1172 desc->async_tx.cookie = 0;
1175 * last used desc. Do not remove, so we can
1176 * append from it, but don't look at it next
1179 desc->async_tx.cookie = 0;
1181 /* TODO check status bits? */
/* has some other thread already cleaned up? */
1189 if (ioat_chan->used_desc.prev == NULL)
1192 /* work backwards to find latest finished desc */
1193 desc = to_ioat_desc(ioat_chan->used_desc.next);
1196 desc = to_ioat_desc(desc->node.prev);
1197 desc_phys = (unsigned long)desc->async_tx.phys
1198 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
1199 if (desc_phys == phys_complete) {
1203 } while (&desc->node != ioat_chan->used_desc.prev);
1205 if (latest_desc != NULL) {
1207 /* work forwards to clear finished descriptors */
1208 for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
1209 &desc->node != latest_desc->node.next &&
1210 &desc->node != ioat_chan->used_desc.next;
1211 desc = to_ioat_desc(desc->node.next)) {
1212 if (desc->async_tx.cookie) {
1213 cookie = desc->async_tx.cookie;
1214 desc->async_tx.cookie = 0;
1215 ioat_dma_unmap(ioat_chan, desc);
1216 if (desc->async_tx.callback) {
1217 desc->async_tx.callback(desc->async_tx.callback_param);
1218 desc->async_tx.callback = NULL;
1223 /* move used.prev up beyond those that are finished */
1224 if (&desc->node == ioat_chan->used_desc.next)
1225 ioat_chan->used_desc.prev = NULL;
1227 ioat_chan->used_desc.prev = &desc->node;
1232 spin_unlock_bh(&ioat_chan->desc_lock);
1234 ioat_chan->last_completion = phys_complete;
1236 ioat_chan->completed_cookie = cookie;
1238 spin_unlock_bh(&ioat_chan->cleanup_lock);
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
1243 * @chan: IOAT DMA channel handle
1244 * @cookie: DMA transaction identifier
1245 * @done: if not %NULL, updated with last completed transaction
1246 * @used: if not %NULL, updated with last used transaction
1248 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
1249 dma_cookie_t cookie,
1253 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
1254 dma_cookie_t last_used;
1255 dma_cookie_t last_complete;
1256 enum dma_status ret;
1258 last_used = chan->cookie;
1259 last_complete = ioat_chan->completed_cookie;
1260 ioat_chan->watchdog_tcp_cookie = cookie;
1263 *done = last_complete;
1267 ret = dma_async_is_complete(cookie, last_complete, last_used);
1268 if (ret == DMA_SUCCESS)
1271 ioat_dma_memcpy_cleanup(ioat_chan);
1273 last_used = chan->cookie;
1274 last_complete = ioat_chan->completed_cookie;
1277 *done = last_complete;
1281 return dma_async_is_complete(cookie, last_complete, last_used);
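/*
 * Client-side polling sketch (illustrative; assumes the dmaengine
 * wrappers of this era - dma_async_memcpy_buf_to_buf(),
 * dma_async_memcpy_issue_pending() and dma_async_is_tx_complete() - and
 * omits error handling).  The ->device_is_tx_complete hook above is what
 * backs the final status check:
 *
 *	dma_cookie_t cookie, last, used;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	dma_async_memcpy_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, &last, &used) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */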
1284 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1286 struct ioat_desc_sw *desc;
1288 spin_lock_bh(&ioat_chan->desc_lock);
1290 desc = ioat_dma_get_next_descriptor(ioat_chan);
1293 dev_err(&ioat_chan->device->pdev->dev,
1294 "Unable to start null desc - get next desc failed\n");
1295 spin_unlock_bh(&ioat_chan->desc_lock);
1299 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
1300 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
1301 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
1302 /* set size to non-zero value (channel returns error when size is 0) */
1303 desc->hw->size = NULL_DESC_BUFFER_SIZE;
1304 desc->hw->src_addr = 0;
1305 desc->hw->dst_addr = 0;
1306 async_tx_ack(&desc->async_tx);
1307 switch (ioat_chan->device->version) {
1310 list_add_tail(&desc->node, &ioat_chan->used_desc);
1312 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1313 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
1314 writel(((u64) desc->async_tx.phys) >> 32,
1315 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
1317 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
1318 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
1322 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1323 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1324 writel(((u64) desc->async_tx.phys) >> 32,
1325 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1327 ioat_chan->dmacount++;
1328 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1331 spin_unlock_bh(&ioat_chan->desc_lock);
 * Perform an IOAT transaction to verify the HW works.
1337 #define IOAT_TEST_SIZE 2000
1339 static void ioat_dma_test_callback(void *dma_async_param)
1341 struct completion *cmp = dma_async_param;
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
1348 * @device: device to be tested
1350 static int ioat_dma_self_test(struct ioatdma_device *device)
1355 struct dma_chan *dma_chan;
1356 struct dma_async_tx_descriptor *tx;
1357 dma_addr_t dma_dest, dma_src;
1358 dma_cookie_t cookie;
1360 struct completion cmp;
1362 unsigned long flags;
1364 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1367 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1373 /* Fill in src buffer */
1374 for (i = 0; i < IOAT_TEST_SIZE; i++)
1377 /* Start copy, using first DMA channel */
1378 dma_chan = container_of(device->common.channels.next,
1381 if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
1382 dev_err(&device->pdev->dev,
1383 "selftest cannot allocate chan resource\n");
1388 dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
1390 dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
1392 flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
1393 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
1394 IOAT_TEST_SIZE, flags);
1396 dev_err(&device->pdev->dev,
1397 "Self-test prep failed, disabling\n");
1399 goto free_resources;
1403 init_completion(&cmp);
1404 tx->callback = ioat_dma_test_callback;
1405 tx->callback_param = &cmp;
1406 cookie = tx->tx_submit(tx);
1408 dev_err(&device->pdev->dev,
1409 "Self-test setup failed, disabling\n");
1411 goto free_resources;
1413 device->common.device_issue_pending(dma_chan);
1415 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1418 device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1420 dev_err(&device->pdev->dev,
1421 "Self-test copy timed out, disabling\n");
1423 goto free_resources;
1425 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1426 dev_err(&device->pdev->dev,
1427 "Self-test copy failed compare, disabling\n");
1429 goto free_resources;
1433 device->common.device_free_chan_resources(dma_chan);
1440 static char ioat_interrupt_style[32] = "msix";
1441 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1442 sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
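/*
 * Example (illustrative): forcing plain MSI instead of MSI-X at load
 * time, assuming the driver is built as the ioatdma module:
 *
 *	modprobe ioatdma ioat_interrupt_style=msi
 *
 * Values other than the four listed are rejected by the dev_err() check
 * below.
 */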
1448 * ioat_dma_setup_interrupts - setup interrupt handler
1449 * @device: ioat device
1451 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1453 struct ioat_dma_chan *ioat_chan;
1454 struct pci_dev *pdev = device->pdev;
1455 struct device *dev = &pdev->dev;
1456 struct msix_entry *msix;
1461 if (!strcmp(ioat_interrupt_style, "msix"))
1463 if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1464 goto msix_single_vector;
1465 if (!strcmp(ioat_interrupt_style, "msi"))
1467 if (!strcmp(ioat_interrupt_style, "intx"))
1469 dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
1473 /* The number of MSI-X vectors should equal the number of channels */
1474 msixcnt = device->common.chancnt;
1475 for (i = 0; i < msixcnt; i++)
1476 device->msix_entries[i].entry = i;
1478 err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
1482 goto msix_single_vector;
1484 for (i = 0; i < msixcnt; i++) {
1485 msix = &device->msix_entries[i];
1486 ioat_chan = ioat_lookup_chan_by_index(device, i);
1487 err = devm_request_irq(dev, msix->vector,
1488 ioat_dma_do_interrupt_msix, 0,
1489 "ioat-msix", ioat_chan);
1491 for (j = 0; j < i; j++) {
1492 msix = &device->msix_entries[j];
1494 ioat_lookup_chan_by_index(device, j);
1495 devm_free_irq(dev, msix->vector, ioat_chan);
1497 goto msix_single_vector;
1500 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1504 msix = &device->msix_entries[0];
1506 err = pci_enable_msix(pdev, device->msix_entries, 1);
1510 err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
1511 "ioat-msix", device);
1513 pci_disable_msix(pdev);
1519 err = pci_enable_msi(pdev);
1523 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
1524 "ioat-msi", device);
1526 pci_disable_msi(pdev);
1530 * CB 1.2 devices need a bit set in configuration space to enable MSI
1532 if (device->version == IOAT_VER_1_2) {
1534 pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1535 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1536 pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1541 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
1542 IRQF_SHARED, "ioat-intx", device);
1547 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1548 writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1552 /* Disable all interrupt generation */
1553 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1554 dev_err(dev, "no usable interrupts\n");
1558 static void ioat_disable_interrupts(struct ioatdma_device *device)
1560 /* Disable all interrupt generation */
1561 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1564 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1565 void __iomem *iobase)
1568 struct device *dev = &pdev->dev;
1569 struct ioatdma_device *device;
1571 device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
1574 device->pdev = pdev;
1575 device->reg_base = iobase;
1576 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1578 /* DMA coherent memory pool for DMA descriptor allocations */
1579 device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1580 sizeof(struct ioat_dma_descriptor),
1582 if (!device->dma_pool) {
1587 device->completion_pool = pci_pool_create("completion_pool", pdev,
1588 sizeof(u64), SMP_CACHE_BYTES,
1590 if (!device->completion_pool) {
1592 goto err_completion_pool;
1595 INIT_LIST_HEAD(&device->common.channels);
1596 ioat_dma_enumerate_channels(device);
1598 device->common.device_alloc_chan_resources =
1599 ioat_dma_alloc_chan_resources;
1600 device->common.device_free_chan_resources =
1601 ioat_dma_free_chan_resources;
1602 device->common.dev = &pdev->dev;
1604 dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
1605 device->common.device_is_tx_complete = ioat_dma_is_complete;
1606 switch (device->version) {
1608 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1609 device->common.device_issue_pending =
1610 ioat1_dma_memcpy_issue_pending;
1614 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1615 device->common.device_issue_pending =
1616 ioat2_dma_memcpy_issue_pending;
1620 dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
1621 " %d channels, device version 0x%02x, driver version %s\n",
1622 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1624 if (!device->common.chancnt) {
1625 dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
1626 "zero channels detected\n");
1627 goto err_setup_interrupts;
1630 err = ioat_dma_setup_interrupts(device);
1632 goto err_setup_interrupts;
1634 err = ioat_dma_self_test(device);
1638 err = dma_async_device_register(&device->common);
1642 ioat_set_tcp_copy_break(device);
1644 if (device->version != IOAT_VER_3_0) {
1645 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1646 schedule_delayed_work(&device->work,
1653 ioat_disable_interrupts(device);
1654 err_setup_interrupts:
1655 pci_pool_destroy(device->completion_pool);
1656 err_completion_pool:
1657 pci_pool_destroy(device->dma_pool);
1662 void ioat_dma_remove(struct ioatdma_device *device)
1664 struct dma_chan *chan, *_chan;
1665 struct ioat_dma_chan *ioat_chan;
1667 if (device->version != IOAT_VER_3_0)
1668 cancel_delayed_work(&device->work);
1670 ioat_disable_interrupts(device);
1672 dma_async_device_unregister(&device->common);
1674 pci_pool_destroy(device->dma_pool);
1675 pci_pool_destroy(device->completion_pool);
1677 list_for_each_entry_safe(chan, _chan,
1678 &device->common.channels, device_node) {
1679 ioat_chan = to_ioat_chan(chan);
1680 list_del(&chan->device_node);