drivers/ide/ide-pm.c
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

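/*
 * generic_ide_suspend() and generic_ide_resume() below are the bus-level
 * power management entry points for IDE devices (in the kernel they are
 * registered as the suspend/resume callbacks of ide_bus_type, see
 * drivers/ide/ide.c).  Suspend issues a private ATA_PRIV_PM_SUSPEND
 * request that walks the suspend half of the PM step machine (cache
 * flush, then standby).  The ACPI _GTM call is made once per port before
 * suspending, and _PS3 once after both devices on the port have been
 * suspended.
 */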
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	scsi_req_init(rq);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}

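/*
 * end_io callback used by ide_pm_execute_rq() below: wake up the task
 * sleeping on the on-stack completion stored in rq->end_io_data.
 */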
static void ide_end_sync_rq(struct request *rq, blk_status_t error)
{
	complete(rq->end_io_data);
}

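/*
 * Issue a PM request and wait for it to finish.  A plain blk_execute_rq()
 * would not be dispatched here because at resume time the queue is still
 * stopped (ide_complete_pm_rq() stopped it at the end of suspend), so the
 * request is inserted at the head of the queue and the queue is run
 * unconditionally with __blk_run_queue_uncond().
 */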
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}

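/*
 * Bus-level resume handler: restore the ACPI state and timings, then issue
 * an ATA_PRIV_PM_RESUME request (marked RQF_PREEMPT so it can be processed
 * while the drive is still flagged IDE_DFLAG_BLOCKED) that walks the resume
 * half of the PM step machine, and finally call the media driver's
 * ->resume() method.
 */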
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	scsi_req_init(rq);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	rq->rq_flags |= RQF_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}

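/*
 * Advance the PM state machine to the next step.  The overall ordering of
 * the steps is:
 *
 *   suspend: IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *            (the standby step is skipped for PM_EVENT_FREEZE)
 *   resume:  IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA
 *            -> IDE_PM_COMPLETED
 *
 * Non-disk (ATAPI) devices return early here; their step transitions are
 * handled in ide_start_power_step().
 */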
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

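/*
 * Issue the ATA command for the current PM step (cache flush, standby or
 * idle-immediate), or perform the software-only steps (PIO/DMA restore)
 * directly and advance the state machine.
 */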
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

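/*
 * Steps that map to a real ATA command end up here: the taskfile built
 * above is issued as a no-data (ATA_PROT_NODATA) command.
 */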
out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, BLK_STS_OK, 0))
		BUG();
}

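/*
 * Called when a PM request is about to be started: mark the drive as
 * blocked at the beginning of a suspend sequence; at the beginning of a
 * resume sequence, wait for the hardware to become ready again and restart
 * the (previously stopped) request queue.
 */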
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}