/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER             "sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);
/* Mask of events that we can send to the sclp interface, i.e. the
 * interface's receive mask. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface, i.e. the
 * interface's send mask. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
        complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
        sclp_init_state_uninitialized,
        sclp_init_state_initializing,
        sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
        sclp_suspend_state_running,
        sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY         3
#define SCLP_MASK_RETRY         3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL      10
#define SCLP_RETRY_INTERVAL     30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

        asm volatile(
                "       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
                : "cc", "memory");
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}
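
/*
 * Illustrative sketch (not part of the driver): how a caller could act on
 * the three outcomes of sclp_service_call(). The function name, retry
 * limit and delay are made up for the example and it would additionally
 * need <linux/delay.h>; real callers go through sclp_add_request() so that
 * queueing, retries and timeouts are handled centrally.
 */
#if 0
static int example_service_call(sclp_cmdw_t command, void *sccb)
{
        int retries = 3;        /* hypothetical limit */
        int rc;

        do {
                rc = sclp_service_call(command, sccb);
                if (rc != -EBUSY)
                        break;  /* 0 on success, -EIO on cc 3 */
                udelay(100);    /* cc 2: SCLP busy, back off briefly */
        } while (--retries > 0);
        return rc;
}
#endif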

static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (data) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                if (!req->sccb)
                        goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout, 0);
                        break;
                }
do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_suspend_req || req == &sclp_init_req)
                return 1;
        if (sclp_suspend_state != sclp_suspend_state_running)
                return 0;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                if (!req->sccb) {
                        list_del(&req->list);
                        rc = -ENODATA;
                        goto out;
                }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);
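
/*
 * Illustrative sketch (not part of the driver): queueing a request and
 * waiting for its completion callback. The names and the completion-based
 * wait are hypothetical; callers prepare their own page-aligned SCCB and
 * pick a command word such as the SCLP_CMDW_* constants in sclp.h.
 */
#if 0
static DECLARE_COMPLETION(example_done);

static void example_req_cb(struct sclp_req *req, void *data)
{
        complete(&example_done);
}

static int example_submit(sclp_cmdw_t command, void *sccb)
{
        static struct sclp_req req;
        int rc;

        memset(&req, 0, sizeof(req));
        req.command = command;
        req.sccb = sccb;                /* page-aligned SCCB from the caller */
        req.status = SCLP_REQ_FILLED;
        req.callback = example_req_cb;
        rc = sclp_add_request(&req);
        if (rc)
                return rc;              /* e.g. -EIO while deactivated */
        wait_for_completion(&example_done);
        return req.status == SCLP_REQ_DONE ? 0 : -EIO;
}
#endif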

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -ENOSYS;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
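
/*
 * Note on the mask test above: event type N corresponds to bit N of the
 * 32-bit event mask counted from the most significant end, hence the
 * 1 << (32 - evbuf->type) computation. For example, the state-change
 * event type 0x08 maps to 1 << 24 == 0x01000000, which matches
 * EVTYP_STATECHANGE_MASK in sclp.h.
 */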

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(unsigned int ext_int_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}
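
/*
 * Background on the shift above: bit 51 of the TOD clock is incremented
 * once per microsecond, so one second equals 1000000 << 12 = 0xf4240000
 * TOD ticks, slightly less than 1 << 32. Shifting whole seconds left by
 * 32 therefore overestimates the interval by roughly 5%, which errs on
 * the safe side for a timeout.
 */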

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_clock() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync &= 0xffff00a0;
        cr0_sync |= 0x00000200;
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_clock() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
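
/*
 * Illustrative sketch (not part of the driver): polling for completion of
 * a request from a context that must not sleep, the same pattern used by
 * sclp_init_mask() below. The function name is hypothetical; the request
 * is the caller's own.
 */
#if 0
static void example_poll_request(struct sclp_req *req)
{
        while (req->status != SCLP_REQ_DONE && req->status != SCLP_REQ_FAILED)
                sclp_sync_wait();       /* serve SCLP interrupts meanwhile */
}
#endif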

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg);
}

struct sclp_statechangebuf {
        struct evbuf_header     header;
        u8              validity_sclp_active_facility_mask : 1;
        u8              validity_sclp_receive_mask : 1;
        u8              validity_sclp_send_mask : 1;
        u8              validity_read_data_function_mask : 1;
        u16             _zeros : 12;
        u16             mask_length;
        u64             sclp_active_facility_mask;
        sccb_mask_t     sclp_receive_mask;
        sccb_mask_t     sclp_send_mask;
        u32             read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp_facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);
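
/*
 * Illustrative sketch (not part of the driver): a minimal event listener.
 * The names are hypothetical; the event mask must be one of the
 * EVTYP_*_MASK constants from sclp.h and may not collide with a mask that
 * is already registered (sclp_register() returns -EBUSY in that case).
 */
#if 0
static void example_receiver(struct evbuf_header *evbuf)
{
        /* Called by sclp_dispatch_evbufs() without sclp_lock held. */
}

static struct sclp_register example_event = {
        /* hypothetical choice; normally claimed by the console driver */
        .receive_mask = EVTYP_MSG_MASK,
        .receiver_fn = example_receiver,
};

static int example_setup(void)
{
        return sclp_register(&example_event);
}
#endif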

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
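
/*
 * Illustrative sketch (not part of the driver): a write-event callback
 * that drops event buffers the service element has flagged as processed
 * (evbuf->flags bit 0x80) and resubmits the request while any remain.
 * The function name and the resubmit policy are hypothetical.
 */
#if 0
static void example_write_cb(struct sclp_req *req, void *data)
{
        struct sccb_header *sccb = req->sccb;

        if (req->status == SCLP_REQ_DONE && sclp_remove_processed(sccb) > 0) {
                req->status = SCLP_REQ_FILLED;
                sclp_add_request(req);  /* send remaining buffers again */
        }
}
#endif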

struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_receive_mask;
        sccb_mask_t sclp_send_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(unsigned int ext_int_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_external_interrupt(0x2401, sclp_check_handler);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                service_subclass_irq_register();
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                service_subclass_irq_unregister();
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_external_interrupt(0x2401, sclp_check_handler);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */
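
/*
 * Note on sclp_pm_event() below: the listeners' pm_event_fn callbacks may
 * not be invoked with sclp_lock held, so progress is recorded in each
 * reg->pm_event_posted flag and the list walk is restarted from the top
 * after every callback; this stays correct even if the list changes while
 * the lock is dropped.
 */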

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
        struct sclp_register *reg;
        unsigned long flags;

        if (!rollback) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list)
                        reg->pm_event_posted = 0;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list) {
                        if (rollback && reg->pm_event_posted)
                                goto found;
                        if (!rollback && !reg->pm_event_posted)
                                goto found;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
found:
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg->pm_event_fn)
                        reg->pm_event_fn(reg, sclp_pm_event);
                reg->pm_event_posted = rollback ? 0 : 1;
        } while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
        unsigned long flags;
        int rc;

        sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_suspended;
        spin_unlock_irqrestore(&sclp_lock, flags);

        /* Init suspend data */
        memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
        sclp_suspend_req.callback = sclp_suspend_req_cb;
        sclp_suspend_req.status = SCLP_REQ_FILLED;
        init_completion(&sclp_request_queue_flushed);

        rc = sclp_add_request(&sclp_suspend_req);
        if (rc == 0)
                wait_for_completion(&sclp_request_queue_flushed);
        else if (rc != -ENODATA)
                goto fail_thaw;

        rc = sclp_deactivate();
        if (rc)
                goto fail_thaw;
        return 0;

fail_thaw:
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
        return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
        unsigned long flags;
        int rc;

        rc = sclp_reactivate();
        if (rc)
                return rc;

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);

        sclp_pm_event(event, 0);
        return 0;
}

static int sclp_thaw(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
        .freeze         = sclp_freeze,
        .thaw           = sclp_thaw,
        .restore        = sclp_restore,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name   = "sclp",
                .owner  = THIS_MODULE,
                .pm     = &sclp_pm_ops,
        },
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        service_subclass_irq_register();
        sclp_init_mask(1);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be
 * able to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
                             unsigned long event, void *data)
{
        if (sclp_suspend_state == sclp_suspend_state_suspended)
                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
        return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
        .notifier_call = sclp_panic_notify,
        .priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;
        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
        rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
        if (rc)
                goto fail_platform_driver_unregister;
        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &sclp_on_panic_nb);
        if (rc)
                goto fail_platform_device_unregister;

        return sclp_init();

fail_platform_device_unregister:
        platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
        platform_driver_unregister(&sclp_pdrv);
        return rc;
}

arch_initcall(sclp_initcall);