/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION "4.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
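
/*
 * each channel's registers occupy a 0x80-byte window above the device's
 * MMIO base, so a channel's index is its register offset divided by 0x80
 */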
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
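
/*
 * e.g. a 5-source xor is encoded in the descriptor as src_cnt_to_hw(5) == 3,
 * and decoding the hardware field reverses it: src_cnt_to_sw(3) == 5
 */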

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSI-X)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioatdma_chan *idx[4];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_COMPLETION_PENDING 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: point to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
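
/*
 * typical call site (illustrative): dump_desc_dbg(ioat_chan, desc) right
 * after a descriptor is filled in; with DEBUG unset, desc_id() is 0 and
 * dev_dbg() normally compiles out, so the macro is essentially free
 */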

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}

#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;

	/* With IOAT v3.3 the status register is 64bit. */
	if (ver >= IOAT_VER_3_3)
		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
	else
		status = ioat_chansts_32(ioat_chan);

	return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}
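
/*
 * illustrative pairing of the helpers above (a sketch, not from the
 * original header): read the channel status once, then test it:
 *
 *	u64 status = ioat_chansts(ioat_chan);
 *
 *	if (is_ioat_halted(status))
 *		dev_err(to_dev(ioat_chan), "CHANERR: %#x\n",
 *			ioat_chanerr(ioat_chan));
 */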

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
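
/*
 * worked example (illustrative): with alloc_order = 4 the ring holds 16
 * descriptors.  If tail = 3, issued = 7 and head = 10, then
 * ioat_ring_active() == 7 (slots 3..9 are outstanding),
 * ioat_ring_pending() == 3 (slots 7..9 are prepped but not yet visible
 * to hardware) and ioat_ring_space() == 9 slots remain for new work
 */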

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
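
/*
 * example (illustrative): with xfercap_log = 20 (1 MiB max per descriptor),
 * a 2 MiB + 1 byte transfer needs (len >> 20) = 2 full descriptors plus one
 * more for the 1-byte remainder, so ioat_xferlen_to_descs() returns 3
 */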

/* ring size is a power of two, so masking with (size - 1) wraps the index */
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

/* the 64-bit chain address is programmed as two 32-bit MMIO writes */
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
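
/*
 * The prep routines above are not called directly by clients; they are
 * installed as struct dma_device callbacks at init time.  A minimal
 * client-side sketch (illustrative only), assuming "chan" was obtained
 * from dma_request_channel() and dst/src/len describe an already-mapped
 * copy:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);	(reaches ioat_issue_pending())
 */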

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */