/* ------------------------------------------------------------
 * rpa_vscsi.c
 * (C) Copyright IBM Corporation 1994, 2003
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
 *
 * This driver allows the Linux SCSI peripheral drivers to directly
 * access devices in the hosting partition, either on an iSeries
 * hypervisor system or a converged hypervisor system.
 */

#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/hvcall.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"

static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;

/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * rpavscsi_handle_event: - Interrupt handler for crq events
 * @irq:        number of irq to handle, not used
 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */
static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
{
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)dev_instance;
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return IRQ_HANDLED;
}

/**
 * rpavscsi_release_crq_queue: - Deallocates data and unregisters CRQ
 * @queue:      crq_queue to release
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests: maximum number of requests (not used here)
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 */
static void rpavscsi_release_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata,
                                       int max_requests)
{
        long rc = 0;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        free_irq(vdev->irq, (void *)hostdata);
        tasklet_kill(&hostdata->srp_task);
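        /*
         * H_FREE_CRQ may return H_BUSY or a long-busy status while the
         * hypervisor is still tearing the queue down; back off for 100ms
         * and retry until the close completes.
         */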
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
        free_page((unsigned long)queue->msgs);
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:      crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
        struct viosrp_crq *crq;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        crq = &queue->msgs[queue->cur];
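        /* Entries posted to us have the top bit of 'valid' set; consume
         * the entry and advance the cursor, wrapping at the end of the
         * queue. */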
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);

        return crq;
}

/**
 * rpavscsi_send_crq: - Send a CRQ
 * @hostdata:   the adapter
 * @word1:      the first 64 bits of the data
 * @word2:      the second 64 bits of the data
 */
static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
                             u64 word1, u64 word2)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

/**
 * rpavscsi_task: - Process srps asynchronously
 * @data:       ibmvscsi_host_data of host
 */
static void rpavscsi_task(void *data)
{
        struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        struct viosrp_crq *crq;
        int done = 0;

        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                }

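                /* Re-enable interrupts, then check the queue once more:
                 * an entry that arrived after the drain loop above but
                 * before interrupts were enabled would otherwise sit
                 * unprocessed until the next interrupt. */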
                vio_enable_interrupts(vdev);
                if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        vio_disable_interrupts(vdev);
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                } else {
                        done = 1;
                }
        }
}

static void gather_partition_info(void)
{
        struct device_node *rootdn;

        const char *ppartition_name;
        const unsigned int *p_number_ptr;

        /* Retrieve information about this partition */
        rootdn = of_find_node_by_path("/");
        if (!rootdn) {
                return;
        }

        ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
        if (ppartition_name)
                strncpy(partition_name, ppartition_name,
                                sizeof(partition_name));
        p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
        if (p_number_ptr)
                partition_number = *p_number_ptr;
        of_node_put(rootdn);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        memset(&hostdata->madapter_info, 0x00,
                        sizeof(hostdata->madapter_info));

        dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
        strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

        strncpy(hostdata->madapter_info.partition_name, partition_name,
                        sizeof(hostdata->madapter_info.partition_name));

        hostdata->madapter_info.partition_number = partition_number;

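        /* mad_version 1 and os_type 2 identify this client as Linux to
         * the virtual I/O server. */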
        hostdata->madapter_info.mad_version = 1;
        hostdata->madapter_info.os_type = 2;
}

/**
 * rpavscsi_reset_crq_queue: - resets a crq after a failure
 * @queue:      crq_queue to reset
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
{
        int rc = 0;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Close the CRQ */
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        /* Clean out the queue */
        memset(queue->msgs, 0x00, PAGE_SIZE);
        queue->cur = 0;

        set_adapter_info(hostdata);

        /* And re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
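        /* A return value of 2 (H_CLOSED) means our side registered, but
         * the partner partition has not opened its end of the queue yet. */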
        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
        }
        return rc;
}

/**
 * rpavscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:      crq_queue to initialize and register
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests: maximum number of requests (not used here)
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                                   struct ibmvscsi_host_data *hostdata,
                                   int max_requests)
{
        int rc;
        int retrc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

        if (!queue->msgs)
                goto malloc_failed;
        queue->size = PAGE_SIZE / sizeof(*queue->msgs);

        queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);

        if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;

        gather_partition_info();
        set_adapter_info(hostdata);

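        /* Keep the original H_REG_CRQ result in retrc; rc may be
         * overwritten below if the registration has to fall back to a
         * queue reset. */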
        retrc = rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy. try a reset */
                rc = rpavscsi_reset_crq_queue(queue,
                                              hostdata);

        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
                retrc = 0;
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        queue->cur = 0;
        spin_lock_init(&queue->lock);

        tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
                     (unsigned long)hostdata);

        if (request_irq(vdev->irq,
                        rpavscsi_handle_event,
                        0, "ibmvscsi", (void *)hostdata) != 0) {
                dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
                        vdev->irq);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc != 0) {
                dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
                goto req_irq_failed;
        }

        return retrc;

      req_irq_failed:
        tasklet_kill(&hostdata->srp_task);
        rc = 0;
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
        free_page((unsigned long)queue->msgs);
      malloc_failed:
        return -1;
}

/**
 * rpavscsi_reenable_crq_queue: - reenables a crq after a failure
 * @queue:      crq_queue to re-enable
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
{
        int rc = 0;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Re-enable the CRQ */
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        if (rc)
                dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
        return rc;
}

/**
 * rpavscsi_resume: - resume after suspend
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
{
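        /* Mirror the interrupt handler: mask the VIO interrupt and let
         * srp_task drain any CRQ entries that arrived across the suspend;
         * the tasklet re-enables interrupts when it is done. */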
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return 0;
}

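/* Hooks handed to the generic ibmvscsi driver core for RPA (pSeries)
 * systems. */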
struct ibmvscsi_ops rpavscsi_ops = {
        .init_crq_queue = rpavscsi_init_crq_queue,
        .release_crq_queue = rpavscsi_release_crq_queue,
        .reset_crq_queue = rpavscsi_reset_crq_queue,
        .reenable_crq_queue = rpavscsi_reenable_crq_queue,
        .send_crq = rpavscsi_send_crq,
        .resume = rpavscsi_resume,
};