/* ------------------------------------------------------------
 * rpa_vscsi.c
 * (C) Copyright IBM Corporation 1994, 2003
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
 *
 * This driver allows the Linux SCSI peripheral drivers to directly
 * access devices in the hosting partition, either on an iSeries
 * hypervisor system or a converged hypervisor system.
 */

#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/hvcall.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"

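/*
 * Identity of this client partition, read once from the device tree by
 * gather_partition_info() and copied into the adapter info block by
 * set_adapter_info().
 */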
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;

/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * rpavscsi_handle_event: - Interrupt handler for crq events
 * @irq:        number of irq to handle, not used
 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */
static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
{
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)dev_instance;
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return IRQ_HANDLED;
}

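/*
 * The hard interrupt handler above is deliberately minimal: it only masks
 * further VIO interrupts and defers all CRQ processing to the srp_task
 * tasklet (rpavscsi_task below), which re-enables interrupts once the
 * queue has been drained.
 */
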
/**
 * rpavscsi_release_crq_queue: - Deallocates data and unregisters CRQ
 * @queue:      crq_queue to deallocate and unregister
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests:       unused
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 */
static void rpavscsi_release_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata,
                                       int max_requests)
{
        long rc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        free_irq(vdev->irq, (void *)hostdata);
        tasklet_kill(&hostdata->srp_task);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
        free_page((unsigned long)queue->msgs);
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:      crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
        struct viosrp_crq *crq;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        crq = &queue->msgs[queue->cur];
        /* The top bit of the valid byte is set while the entry holds a
         * live message; only advance the cursor past valid entries. */
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);

        return crq;
}

/**
 * rpavscsi_send_crq: - Send a CRQ
 * @hostdata:   the adapter
 * @word1:      the first 64 bits of the data
 * @word2:      the second 64 bits of the data
 */
static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
                             u64 word1, u64 word2)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

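/*
 * A single CRQ element is 16 bytes; H_SEND_CRQ takes it as the two 64-bit
 * words above and hands it to firmware for delivery to the partner
 * partition. The raw hcall return code is passed back to the caller
 * unchanged.
 */
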
/**
 * rpavscsi_task: - Process srps asynchronously
 * @data:       ibmvscsi_host_data of host
 */
static void rpavscsi_task(void *data)
{
        struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        struct viosrp_crq *crq;
        int done = 0;

        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                }

                /* Re-enable interrupts, then check once more for messages
                 * that arrived while they were disabled; only stop when
                 * the queue is seen empty with interrupts enabled. */
                vio_enable_interrupts(vdev);
                if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        vio_disable_interrupts(vdev);
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                } else {
                        done = 1;
                }
        }
}

static void gather_partition_info(void)
{
        struct device_node *rootdn;

        const char *ppartition_name;
        const unsigned int *p_number_ptr;

        /* Retrieve information about this partition */
        rootdn = of_find_node_by_path("/");
        if (!rootdn)
                return;

        ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
        if (ppartition_name)
                strncpy(partition_name, ppartition_name,
                                sizeof(partition_name));
        p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
        if (p_number_ptr)
                partition_number = *p_number_ptr;
        of_node_put(rootdn);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        memset(&hostdata->madapter_info, 0x00,
                        sizeof(hostdata->madapter_info));

        dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
        strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

        strncpy(hostdata->madapter_info.partition_name, partition_name,
                        sizeof(hostdata->madapter_info.partition_name));

        hostdata->madapter_info.partition_number = partition_number;

        hostdata->madapter_info.mad_version = 1;
        hostdata->madapter_info.os_type = 2;    /* 2 identifies the client OS as Linux */
}

/**
 * rpavscsi_reset_crq_queue: - resets a crq after a failure
 * @queue:      crq_queue to reset and re-register
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
{
        int rc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Close the CRQ */
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        /* Clean out the queue */
        memset(queue->msgs, 0x00, PAGE_SIZE);
        queue->cur = 0;

        set_adapter_info(hostdata);

        /* And re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == 2) {
                /* rc 2 is H_CLOSED: our end registered, but the partner
                 * adapter is not ready yet */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
        }
        return rc;
}

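/*
 * rpavscsi_reset_crq_queue() is also used from rpavscsi_init_crq_queue()
 * below: if the initial H_REG_CRQ fails with H_RESOURCE (for example after
 * a kexec, where the previous kernel's registration may still be live),
 * the queue is freed and re-registered through this path.
 */
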
/**
 * rpavscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:      crq_queue to initialize and register
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests:       unused
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                                   struct ibmvscsi_host_data *hostdata,
                                   int max_requests)
{
        int rc;
        int retrc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

        if (!queue->msgs)
                goto malloc_failed;
        queue->size = PAGE_SIZE / sizeof(*queue->msgs);

        queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);

        if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;

        gather_partition_info();
        set_adapter_info(hostdata);

        retrc = rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy. try a reset */
                rc = rpavscsi_reset_crq_queue(queue,
                                              hostdata);

        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
                retrc = 0;
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        if (request_irq(vdev->irq,
                        rpavscsi_handle_event,
                        0, "ibmvscsi", (void *)hostdata) != 0) {
                dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
                        vdev->irq);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc != 0) {
                dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
                goto req_irq_failed;
        }

        queue->cur = 0;
        spin_lock_init(&queue->lock);

        tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
                     (unsigned long)hostdata);

        return retrc;

      req_irq_failed:
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
        free_page((unsigned long)queue->msgs);
      malloc_failed:
        return -1;
}

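/*
 * The error labels above unwind in reverse order of setup: a failed IRQ
 * request frees the CRQ registration, a failed registration unmaps the DMA
 * buffer, and a failed mapping frees the message page.
 */
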
/**
 * rpavscsi_reenable_crq_queue: - reenables a crq after it has been disabled
 * @queue:      crq_queue to re-enable
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
{
        int rc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Re-enable the CRQ */
        do {
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        if (rc)
                dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
        return rc;
}

/**
 * rpavscsi_resume: - resume after suspend
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
{
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return 0;
}

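/*
 * Transport operations exported to the generic ibmvscsi core. The core
 * driver calls through this table so the same SRP/SCSI code can sit on
 * top of different hypervisor interfaces; this file provides the RPA
 * (PAPR hcall) implementation.
 */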
struct ibmvscsi_ops rpavscsi_ops = {
        .init_crq_queue = rpavscsi_init_crq_queue,
        .release_crq_queue = rpavscsi_release_crq_queue,
        .reset_crq_queue = rpavscsi_reset_crq_queue,
        .reenable_crq_queue = rpavscsi_reenable_crq_queue,
        .send_crq = rpavscsi_send_crq,
        .resume = rpavscsi_resume,
};