/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
                 "ioat2+: allocate 2^n descriptors per channel"
                 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
                 "ioat2+: upper limit for ring size (default: 16)");

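/**
 * __ioat2_issue_pending - write the pending descriptor count to hardware
 * @ioat: ioat2+ channel
 *
 * Folds any not-yet-issued descriptors into dmacount and writes it to
 * the DMACOUNT register, which tells the engine to start fetching them.
 * Called with prep_lock held.
 */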
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        ioat->dmacount += ioat2_ring_pending(ioat);
        ioat->issued = ioat->head;
        writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
        dev_dbg(to_dev(chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        if (ioat2_ring_pending(ioat)) {
                spin_lock_bh(&ioat->prep_lock);
                __ioat2_issue_pending(ioat);
                spin_unlock_bh(&ioat->prep_lock);
        }
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
        if (ioat2_ring_pending(ioat) > ioat_pending_level)
                __ioat2_issue_pending(ioat);
}

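/*
 * Queue a NULL descriptor to (re)start the channel: point the chain
 * address register at it and bump the doorbell.  Called with prep_lock
 * held.
 */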
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat2_ring_space(ioat) < 1) {
                dev_err(to_dev(&ioat->base),
                        "Unable to start null desc - ring full\n");
                return;
        }

        dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);
        desc = ioat2_get_ring_ent(ioat, ioat->head);

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
        ioat2_set_chainaddr(ioat, desc->txd.phys);
        dump_desc_dbg(ioat, desc);
        wmb();
        ioat->head += 1;
        __ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
        spin_lock_bh(&ioat->prep_lock);
        __ioat2_start_null_desc(ioat);
        spin_unlock_bh(&ioat->prep_lock);
}

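/*
 * Walk the ring from tail towards head, unmapping and completing
 * descriptors until the one whose physical address matches the
 * completion writeback (@phys_complete).  Called with cleanup_lock
 * held.
 */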
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct dma_async_tx_descriptor *tx;
        struct ioat_ring_ent *desc;
        bool seen_current = false;
        u16 active;
        int idx = ioat->tail, i;

        dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);

        active = ioat2_ring_active(ioat);
        for (i = 0; i < active && !seen_current; i++) {
                smp_read_barrier_depends();
                prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
                desc = ioat2_get_ring_ent(ioat, idx + i);
                tx = &desc->txd;
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        dma_cookie_complete(tx);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }

                if (tx->phys == phys_complete)
                        seen_current = true;
        }
        smp_mb(); /* finish all descriptor reads before incrementing tail */
        ioat->tail = idx + i;
        BUG_ON(active && !seen_current); /* no active descs have written a completion? */

        chan->last_completion = phys_complete;
        if (active - i == 0) {
                dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
                        __func__);
                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;

        spin_lock_bh(&chan->cleanup_lock);
        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);
        spin_unlock_bh(&chan->cleanup_lock);
}

void ioat2_cleanup_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

        ioat2_cleanup(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

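/*
 * Rewind the issue state to the tail and re-program the chain address
 * so the engine restarts from the first uncompleted descriptor; if the
 * ring is empty, a NULL descriptor is used to restart instead.  Called
 * with prep_lock held.
 */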
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        /* set the tail to be re-issued */
        ioat->issued = ioat->tail;
        ioat->dmacount = 0;
        set_bit(IOAT_COMPLETION_PENDING, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        dev_dbg(to_dev(chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

        if (ioat2_ring_pending(ioat)) {
                struct ioat_ring_ent *desc;

                desc = ioat2_get_ring_ent(ioat, ioat->tail);
                ioat2_set_chainaddr(ioat, desc->txd.phys);
                __ioat2_issue_pending(ioat);
        } else
                __ioat2_start_null_desc(ioat);
}

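/**
 * ioat2_quiesce - suspend the channel and wait for it to go inactive
 * @chan: channel to quiesce
 * @tmo: timeout in jiffies, 0 to wait indefinitely
 *
 * Returns 0 on success or -ETIMEDOUT if the channel was still active
 * when the timeout expired.
 */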
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;
        int err = 0;
        u32 status;

        status = ioat_chansts(chan);
        if (is_ioat_active(status) || is_ioat_idle(status))
                ioat_suspend(chan);
        while (is_ioat_active(status) || is_ioat_idle(status)) {
                if (tmo && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
                status = ioat_chansts(chan);
                cpu_relax();
        }

        return err;
}

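/**
 * ioat2_reset_sync - reset the channel and wait for the reset to clear
 * @chan: channel to reset
 * @tmo: timeout in jiffies
 *
 * Returns 0 on success or -ETIMEDOUT if the reset bit never cleared.
 */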
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;
        int err = 0;

        ioat_reset(chan);
        while (ioat_reset_pending(chan)) {
                if (end && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
                cpu_relax();
        }

        return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;

        ioat2_quiesce(chan, 0);
        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        __ioat2_restart_chan(ioat);
}

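/*
 * Watchdog: if a completion is pending, check for a halted or stalled
 * channel and clean up or restart it; otherwise opportunistically step
 * an idle, oversized ring back down towards the default order.
 */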
void ioat2_timer_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;

        if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
                dma_addr_t phys_complete;
                u64 status;

                status = ioat_chansts(chan);

                /* when halted due to errors check for channel
                 * programming errors before advancing the completion state
                 */
                if (is_ioat_halted(status)) {
                        u32 chanerr;

                        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                        dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
                                __func__, chanerr);
                        if (test_bit(IOAT_RUN, &chan->state))
                                BUG_ON(is_ioat_bug(chanerr));
                        else /* we never got off the ground */
                                return;
                }

                /* if we haven't made progress and we have already
                 * acknowledged a pending completion once, then be more
                 * forceful with a restart
                 */
                spin_lock_bh(&chan->cleanup_lock);
                if (ioat_cleanup_preamble(chan, &phys_complete)) {
                        __cleanup(ioat, phys_complete);
                } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
                        spin_lock_bh(&ioat->prep_lock);
                        ioat2_restart_channel(ioat);
                        spin_unlock_bh(&ioat->prep_lock);
                } else {
                        set_bit(IOAT_COMPLETION_ACK, &chan->state);
                        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                }
                spin_unlock_bh(&chan->cleanup_lock);
        } else {
                u16 active;

                /* if the ring is idle, empty, and oversized try to step
                 * down the size
                 */
                spin_lock_bh(&chan->cleanup_lock);
                spin_lock_bh(&ioat->prep_lock);
                active = ioat2_ring_active(ioat);
                if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
                        reshape_ring(ioat, ioat->alloc_order-1);
                spin_unlock_bh(&ioat->prep_lock);
                spin_unlock_bh(&chan->cleanup_lock);

                /* keep shrinking until we get back to our minimum
                 * default size
                 */
                if (ioat->alloc_order > ioat_get_alloc_order())
                        mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
}

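/*
 * Quiesce the channel, clear any latched error status, and issue a
 * synchronous reset so the channel comes back in a known-clean state.
 */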
static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
        /* throw away whatever the channel was doing and get it initialized */
        u32 chanerr;

        ioat2_quiesce(chan, msecs_to_jiffies(100));

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

        return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
        struct ioat2_dma_chan *ioat;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;
        u8 xfercap_log;
        int i;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(device->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(device->idx));
                dma->chancnt = ARRAY_SIZE(device->idx);
        }
        xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_log &= 0x1f; /* bits [4:0] valid */
        if (xfercap_log == 0)
                return 0;
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

        /* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
#endif
        for (i = 0; i < dma->chancnt; i++) {
                ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
                if (!ioat)
                        break;

                ioat_init_channel(device, &ioat->base, i);
                ioat->xfercap_log = xfercap_log;
                spin_lock_init(&ioat->prep_lock);
                if (device->reset_hw(&ioat->base)) {
                        i = 0;
                        break;
                }
        }
        dma->chancnt = i;
        return i;
}

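/*
 * The ->tx_submit() hook; pairs with ioat2_check_space_lock(), which
 * returned with prep_lock held.  Descriptor writes are published with
 * wmb() (not smp_wmb()) before head is advanced because the consumer
 * is the DMA device, not just another cpu.
 */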
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        dma_cookie_t cookie;

        cookie = dma_cookie_assign(tx);
        dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

        if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        /* make descriptor updates visible before advancing ioat->head,
         * this is purposefully not smp_wmb() since we are also
         * publishing the descriptor updates to a dma device
         */
        wmb();

        ioat->head += ioat->produce;

        ioat2_update_pending(ioat);
        spin_unlock_bh(&ioat->prep_lock);

        return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        struct ioatdma_device *dma;
        dma_addr_t phys;

        dma = to_ioatdma_device(chan->device);
        hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
        if (!hw)
                return NULL;
        memset(hw, 0, sizeof(*hw));

        desc = kmem_cache_zalloc(ioat2_cache, flags);
        if (!desc) {
                pci_pool_free(dma->dma_pool, hw, phys);
                return NULL;
        }

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.tx_submit = ioat2_tx_submit_unlock;
        desc->hw = hw;
        desc->txd.phys = phys;
        return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
        struct ioatdma_device *dma;

        dma = to_ioatdma_device(chan->device);
        pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
        kmem_cache_free(ioat2_cache, desc);
}

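/*
 * Allocate the software ring: an array of 2^order descriptor pointers.
 * Each hardware descriptor is linked to the next, and the last back to
 * the first, so the engine sees one circular chain.
 */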
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
        struct ioat_ring_ent **ring;
        int descs = 1 << order;
        int i;

        if (order > ioat_get_max_alloc_order())
                return NULL;

        /* allocate the array to hold the software ring */
        ring = kcalloc(descs, sizeof(*ring), flags);
        if (!ring)
                return NULL;
        for (i = 0; i < descs; i++) {
                ring[i] = ioat2_alloc_ring_ent(c, flags);
                if (!ring[i]) {
                        while (i--)
                                ioat2_free_ring_ent(ring[i], c);
                        kfree(ring);
                        return NULL;
                }
                set_desc_id(ring[i], i);
        }

        /* link descs */
        for (i = 0; i < descs-1; i++) {
                struct ioat_ring_ent *next = ring[i+1];
                struct ioat_dma_descriptor *hw = ring[i]->hw;

                hw->next = next->txd.phys;
        }
        ring[i]->hw->next = ring[0]->txd.phys;

        return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/**
 * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_ring_ent **ring;
        u64 status;
        int order;
        int i = 0;

        /* have we already been set up? */
        if (ioat->ring)
                return 1 << ioat->alloc_order;

        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        chan->completion = pci_pool_alloc(chan->device->completion_pool,
                                          GFP_KERNEL, &chan->completion_dma);
        if (!chan->completion)
                return -ENOMEM;

        memset(chan->completion, 0, sizeof(*chan->completion));
        writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
               chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) chan->completion_dma) >> 32,
               chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        order = ioat_get_alloc_order();
        ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        spin_lock_bh(&chan->cleanup_lock);
        spin_lock_bh(&ioat->prep_lock);
        ioat->ring = ring;
        ioat->head = 0;
        ioat->issued = 0;
        ioat->tail = 0;
        ioat->alloc_order = order;
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);

        tasklet_enable(&chan->cleanup_task);
        ioat2_start_null_desc(ioat);

        /* check that we got off the ground */
        do {
                udelay(1);
                status = ioat_chansts(chan);
        } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

        if (is_ioat_active(status) || is_ioat_idle(status)) {
                set_bit(IOAT_RUN, &chan->state);
                return 1 << ioat->alloc_order;
        } else {
                u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

                dev_WARN(to_dev(chan),
                        "failed to start channel chanerr: %#x\n", chanerr);
                ioat2_free_chan_resources(c);
                return -EFAULT;
        }
}

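/**
 * reshape_ring - grow or shrink the descriptor ring in place
 * @ioat: channel whose ring is to be resized
 * @order: log2 of the new ring size
 *
 * Called with cleanup_lock and prep_lock held.  Returns true if the
 * ring was resized (e.g. from order 8, 256 entries, to order 9, 512
 * entries).  Ring sizes are powers of two, so positions are mapped
 * with idx & (size-1) rather than a modulo.
 */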
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
        /* reshape differs from normal ring allocation in that we want
         * to allocate a new software ring while only
         * extending/truncating the hardware ring
         */
        struct ioat_chan_common *chan = &ioat->base;
        struct dma_chan *c = &chan->common;
        const u32 curr_size = ioat2_ring_size(ioat);
        const u16 active = ioat2_ring_active(ioat);
        const u32 new_size = 1 << order;
        struct ioat_ring_ent **ring;
        u16 i;

        if (order > ioat_get_max_alloc_order())
                return false;

        /* double check that we have at least 1 free descriptor */
        if (active == curr_size)
                return false;

        /* when shrinking, verify that we can hold the current active
         * set in the new ring
         */
        if (active >= new_size)
                return false;

        /* allocate the array to hold the software ring */
        ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
        if (!ring)
                return false;

        /* allocate/trim descriptors as needed */
        if (new_size > curr_size) {
                /* copy current descriptors to the new ring */
                for (i = 0; i < curr_size; i++) {
                        u16 curr_idx = (ioat->tail+i) & (curr_size-1);
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat->ring[curr_idx];
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* add new descriptors to the ring */
                for (i = curr_size; i < new_size; i++) {
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
                        if (!ring[new_idx]) {
                                while (i--) {
                                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                                        ioat2_free_ring_ent(ring[new_idx], c);
                                }
                                kfree(ring);
                                return false;
                        }
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* hw link new descriptors */
                for (i = curr_size-1; i < new_size; i++) {
                        u16 new_idx = (ioat->tail+i) & (new_size-1);
                        struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
                        struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

                        hw->next = next->txd.phys;
                }
        } else {
                struct ioat_dma_descriptor *hw;
                struct ioat_ring_ent *next;

                /* copy current descriptors to the new ring, dropping the
                 * removed descriptors
                 */
                for (i = 0; i < new_size; i++) {
                        u16 curr_idx = (ioat->tail+i) & (curr_size-1);
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat->ring[curr_idx];
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* free deleted descriptors */
                for (i = new_size; i < curr_size; i++) {
                        struct ioat_ring_ent *ent;

                        ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
                        ioat2_free_ring_ent(ent, c);
                }

                /* fix up hardware ring */
                hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
                next = ring[(ioat->tail+new_size) & (new_size-1)];
                hw->next = next->txd.phys;
        }

        dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
                __func__, new_size);

        kfree(ioat->ring);
        ioat->ring = ring;
        ioat->alloc_order = order;

        return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
        struct ioat_chan_common *chan = &ioat->base;
        bool retry;

 retry:
        spin_lock_bh(&ioat->prep_lock);
        /* never allow the last descriptor to be consumed, we need at
         * least one free at all times to allow for on-the-fly ring
         * resizing.
         */
        if (likely(ioat2_ring_space(ioat) > num_descs)) {
                dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
                        __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
                ioat->produce = num_descs;
                return 0;  /* with ioat->prep_lock held */
        }
        retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
        spin_unlock_bh(&ioat->prep_lock);

        /* is another cpu already trying to expand the ring? */
        if (retry)
                goto retry;

        spin_lock_bh(&chan->cleanup_lock);
        spin_lock_bh(&ioat->prep_lock);
        retry = reshape_ring(ioat, ioat->alloc_order + 1);
        clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);

        /* if we were able to expand the ring retry the allocation */
        if (retry)
                goto retry;

        if (printk_ratelimit())
                dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
                        __func__, num_descs, ioat->head, ioat->tail, ioat->issued);

        /* progress reclaim in the allocation failure case; we may be
         * called under bh_disabled, so we need to trigger the timer
         * event directly
         */
        if (time_after(jiffies, chan->timer.expires) && timer_pending(&chan->timer)) {
                struct ioatdma_device *device = chan->device;

                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                device->timer_fn((unsigned long) &chan->common);
        }

        return -ENOMEM;
}

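/**
 * ioat2_dma_prep_memcpy_lock - prep a memcpy and return with the ring locked
 * @c: channel
 * @dma_dest: destination dma address
 * @dma_src: source dma address
 * @len: total length, split into xfercap-sized descriptors as needed
 * @flags: DMA_PREP_* flags; only the last descriptor gets int_en,
 *	   fence, and compl_write set
 *
 * On success prep_lock is left held; it is released by the subsequent
 * ->tx_submit() (ioat2_tx_submit_unlock()) to keep submission in order.
 *
 * A dmaengine client would typically drive this path roughly as below
 * (a sketch of generic dmaengine usage, not code from this driver):
 *
 *	tx = c->device->device_prep_dma_memcpy(c, dst, src, len,
 *						DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(c);
 *	}
 */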
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
                           dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        dma_addr_t dst = dma_dest;
        dma_addr_t src = dma_src;
        size_t total_len = len;
        int num_descs, idx, i;

        num_descs = ioat2_xferlen_to_descs(ioat, len);
        if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
                idx = ioat->head;
        else
                return NULL;
        i = 0;
        do {
                size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);
                hw = desc->hw;

                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;

                len -= copy;
                dst += copy;
                src += copy;
                dump_desc_dbg(ioat, desc);
        } while (++i < num_descs);

        desc->txd.flags = flags;
        desc->len = total_len;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        dump_desc_dbg(ioat, desc);
        /* we leave the channel locked to ensure in order submission */

        return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        const u16 total_descs = 1 << ioat->alloc_order;
        int descs;
        int i;

        /* Before freeing channel resources, first check whether they
         * were ever allocated for this channel.
         */
        if (!ioat->ring)
                return;

        tasklet_disable(&chan->cleanup_task);
        del_timer_sync(&chan->timer);
        device->cleanup_fn((unsigned long) c);
        device->reset_hw(chan);
        clear_bit(IOAT_RUN, &chan->state);

        spin_lock_bh(&chan->cleanup_lock);
        spin_lock_bh(&ioat->prep_lock);
        descs = ioat2_ring_space(ioat);
        dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
        for (i = 0; i < descs; i++) {
                desc = ioat2_get_ring_ent(ioat, ioat->head + i);
                ioat2_free_ring_ent(desc, c);
        }

        if (descs < total_descs)
                dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
                        total_descs - descs);

        for (i = 0; i < total_descs - descs; i++) {
                desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
                dump_desc_dbg(ioat, desc);
                ioat2_free_ring_ent(desc, c);
        }

        kfree(ioat->ring);
        ioat->ring = NULL;
        ioat->alloc_order = 0;
        pci_pool_free(device->completion_pool, chan->completion,
                      chan->completion_dma);
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);

        chan->last_completion = 0;
        chan->completion_dma = 0;
        ioat->dmacount = 0;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        /* ...taken outside the lock, no need to be precise */
        return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
        &ring_size_attr.attr,
        &ring_active_attr.attr,
        &ioat_cap_attr.attr,
        &ioat_version_attr.attr,
        NULL,
};

struct kobj_type ioat2_ktype = {
        .sysfs_ops = &ioat_sysfs_ops,
        .default_attrs = ioat2_attrs,
};

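/**
 * ioat2_dma_probe - fill in device/channel ops and register with dmaengine
 * @device: ioat device found by pci probe
 * @dca: whether to attempt DCA (direct cache access) initialization
 */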
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
        struct pci_dev *pdev = device->pdev;
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
        int err;

        device->enumerate_channels = ioat2_enumerate_channels;
        device->reset_hw = ioat2_reset_hw;
        device->cleanup_fn = ioat2_cleanup_event;
        device->timer_fn = ioat2_timer_event;
        device->self_test = ioat_dma_self_test;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;
        dma->device_tx_status = ioat_dma_tx_status;

        err = ioat_probe(device);
        if (err)
                return err;
        ioat_set_tcp_copy_break(2048);

        list_for_each_entry(c, &dma->channels, device_node) {
                chan = to_chan_common(c);
                writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
                       chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(device);
        if (err)
                return err;

        ioat_kobject_add(device, &ioat2_ktype);

        if (dca)
                device->dca = ioat2_dca_init(pdev, device->reg_base);

        return err;
}