/*
 *  drivers/s390/cio/css.c
 *  driver for channel subsystem
 *   $Revision: 1.96 $
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

int need_rescan = 0;
int css_init_done = 0;
static int max_ssid = 0;

struct channel_subsystem *css[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

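/*
 * Call @fn for every possible subchannel id, i.e. for all subchannel
 * numbers in every known subchannel set (0..max_ssid).  Iteration stops
 * as soon as @fn returns a non-zero value; that value (or the result of
 * the last call) is returned to the caller.
 */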
inline int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}

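/*
 * Allocate and validate a new subchannel structure for @schid.
 * Non-I/O subchannels are ignored for now.  Returns the subchannel
 * or an ERR_PTR() value on failure.
 */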
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel (sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }

        if (sch->st != SUBCHANNEL_TYPE_IO) {
                /* For now we ignore all non-io subchannels. */
                kfree(sch);
                return ERR_PTR(-EINVAL);
        }

        /*
         * Set intparm to subchannel address.
         * This is fine even on 64bit since the subchannel is always located
         * under 2G.
         */
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        ret = cio_modify(sch);
        if (ret) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
        if (sch) {
                /* Reset intparm to zeroes. */
                sch->schib.pmcw.intparm = 0;
                cio_modify(sch);
                kfree(sch);
        }
}

static void
css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid))
                kfree(sch);
}

extern int css_get_ssd_info(struct subchannel *sch);

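/*
 * Make a newly allocated subchannel known to the driver core and, on
 * success, fetch its subchannel description (ssd) data.
 */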
static int
css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &css[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;

        /* make it known to the system */
        ret = device_register(&sch->dev);
        if (ret)
                printk (KERN_WARNING "%s: could not register %s\n",
                        __func__, sch->dev.bus_id);
        else
                css_get_ssd_info(sch);
        return ret;
}

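/*
 * Allocate and register the subchannel for @schid; the subchannel is
 * freed again if registration fails.
 */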
int
css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                css_free_subchannel(sch);
        return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

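/*
 * Look up a registered subchannel by its id.  Returns the subchannel
 * with its device reference count incremented (the caller must drop it
 * with put_device()), or NULL if no such subchannel is registered.
 */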
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              (void *)&schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

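/*
 * Determine the current state of the subchannel @schid via stsch():
 * CIO_GONE if it has vanished, CIO_REVALIDATE if a different device is
 * now attached to it, CIO_NO_PATH if no path to the device is left,
 * CIO_OPER otherwise.
 */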
static inline int
css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
{
        struct schib schib;
        int cc;

        cc = stsch(schid, &schib);
        if (cc)
                return CIO_GONE;
        if (!schib.pmcw.dnv)
                return CIO_GONE;
        if (sch && sch->schib.pmcw.dnv &&
            (schib.pmcw.dev != sch->schib.pmcw.dev))
                return CIO_REVALIDATE;
        if (sch && !sch->lpm)
                return CIO_NO_PATH;
        return CIO_OPER;
}

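/*
 * React to a machine check for the subchannel @schid: depending on its
 * new status, register, reprobe, disconnect or unregister the device
 * behind it.  @slow distinguishes the slow path (full evaluation) from
 * the fast path done directly from the machine check handler; returns
 * -EAGAIN if the work has to be deferred to the slow path.
 */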
static int
css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        int event, ret, disc;
        struct subchannel *sch;
        unsigned long flags;

        sch = get_subchannel_by_schid(schid);
        disc = sch ? device_is_disconnected(sch) : 0;
        if (disc && slow) {
                if (sch)
                        put_device(&sch->dev);
                return 0; /* Already processed. */
        }
        /*
         * We've got a machine check, so running I/O won't get an interrupt.
         * Kill any pending timers.
         */
        if (sch)
                device_kill_pending_timer(sch);
        if (!disc && !slow) {
                if (sch)
                        put_device(&sch->dev);
                return -EAGAIN; /* Will be done on the slow path. */
        }
        event = css_get_subchannel_status(sch, schid);
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
                      schid.ssid, schid.sch_no, event,
                      sch?(disc?"disconnected":"normal"):"unknown",
                      slow?"slow":"fast");
        switch (event) {
        case CIO_NO_PATH:
        case CIO_GONE:
                if (!sch) {
                        /* Never used this subchannel. Ignore. */
                        ret = 0;
                        break;
                }
                if (disc && (event == CIO_NO_PATH)) {
                        /*
                         * Uargh, hack again. Because we don't get a machine
                         * check on configure on, our path bookkeeping can
                         * be out of date here (it's fine while we only do
                         * logical varying or get chsc machine checks). We
                         * need to force reprobing or we might miss devices
                         * coming operational again. It won't do harm in real
                         * no path situations.
                         */
                        spin_lock_irqsave(&sch->lock, flags);
                        device_trigger_reprobe(sch);
                        spin_unlock_irqrestore(&sch->lock, flags);
                        ret = 0;
                        break;
                }
                if (sch->driver && sch->driver->notify &&
                    sch->driver->notify(&sch->dev, event)) {
                        cio_disable_subchannel(sch);
                        device_set_disconnected(sch);
                        ret = 0;
                        break;
                }
                /*
                 * Unregister subchannel.
                 * The device will be killed automatically.
                 */
                cio_disable_subchannel(sch);
                device_unregister(&sch->dev);
                /* Reset intparm to zeroes. */
                sch->schib.pmcw.intparm = 0;
                cio_modify(sch);
                put_device(&sch->dev);
                ret = 0;
                break;
        case CIO_REVALIDATE:
                /*
                 * Revalidation machine check. Sick.
                 * We don't notify the driver since we have to throw the device
                 * away in any case.
                 */
                if (!disc) {
                        device_unregister(&sch->dev);
                        /* Reset intparm to zeroes. */
                        sch->schib.pmcw.intparm = 0;
                        cio_modify(sch);
                        put_device(&sch->dev);
                        ret = css_probe_device(schid);
                } else {
                        /*
                         * We can't immediately deregister the disconnected
                         * device since it might block.
                         */
                        spin_lock_irqsave(&sch->lock, flags);
                        device_trigger_reprobe(sch);
                        spin_unlock_irqrestore(&sch->lock, flags);
                        ret = 0;
                }
                break;
        case CIO_OPER:
                if (disc) {
                        spin_lock_irqsave(&sch->lock, flags);
                        /* Get device operational again. */
                        device_trigger_reprobe(sch);
                        spin_unlock_irqrestore(&sch->lock, flags);
                }
                ret = sch ? 0 : css_probe_device(schid);
                break;
        default:
                BUG();
                ret = 0;
        }
        return ret;
}

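/* for_each_subchannel() callback used for the full rescan on the slow path. */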
static int
css_rescan_devices(struct subchannel_id schid, void *data)
{
        return css_evaluate_subchannel(schid, 1);
}

struct slow_subchannel {
        struct list_head slow_list;
        struct subchannel_id schid;
};

static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);

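/*
 * Work the slow path: either rescan all subchannels (if a full rescan
 * was requested via need_rescan) or evaluate every subchannel that has
 * been queued on the slow path list.
 */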
static void
css_trigger_slow_path(void)
{
        CIO_TRACE_EVENT(4, "slowpath");

        if (need_rescan) {
                need_rescan = 0;
                for_each_subchannel(css_rescan_devices, NULL);
                return;
        }

        spin_lock_irq(&slow_subchannel_lock);
        while (!list_empty(&slow_subchannels_head)) {
                struct slow_subchannel *slow_sch =
                        list_entry(slow_subchannels_head.next,
                                   struct slow_subchannel, slow_list);

                list_del_init(slow_subchannels_head.next);
                spin_unlock_irq(&slow_subchannel_lock);
                css_evaluate_subchannel(slow_sch->schid, 1);
                spin_lock_irq(&slow_subchannel_lock);
                kfree(slow_sch);
        }
        spin_unlock_irq(&slow_subchannel_lock);
}

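/*
 * css_trigger_slow_path() takes no arguments, so cast it to the work
 * function type expected by DECLARE_WORK; the data pointer passed by
 * the workqueue is simply ignored.
 */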
typedef void (*workfunc)(void *);
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
struct workqueue_struct *slow_path_wq;

/*
 * Rescan for new devices. FIXME: This is slow.
 * This function is called when we have lost CRWs due to overflows and we have
 * to do subchannel housekeeping.
 */
void
css_reiterate_subchannels(void)
{
        css_clear_subchannel_slow_list();
        need_rescan = 1;
}

/*
 * Called from the machine check handler for subchannel report words.
 */
int
css_process_crw(int rsid1, int rsid2)
{
        int ret;
        struct subchannel_id mchk_schid;

        CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
                      rsid1, rsid2);

        if (need_rescan)
                /* We need to iterate all subchannels anyway. */
                return -EAGAIN;

        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = rsid1;
        if (rsid2 != 0)
                mchk_schid.ssid = (rsid2 >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        ret = css_evaluate_subchannel(mchk_schid, 0);
        if (ret == -EAGAIN) {
                if (css_enqueue_subchannel_slow(mchk_schid)) {
                        css_clear_subchannel_slow_list();
                        need_rescan = 1;
                }
        }
        return ret;
}

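/*
 * for_each_subchannel() callback used at boot to register all valid
 * subchannels, including the console subchannel that was set up before
 * init_channel_subsystem() ran.
 */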
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels in ioinfo, even those
         * that have been present before init_channel_subsystem.
         * These subchannels could not have been registered earlier
         * (kmalloc was not working yet), so we do it now. This is
         * true e.g. for the console subchannel.
         */
        css_register_subchannel(sch);
        return 0;
}

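/*
 * Build the global path group id for a channel subsystem.  If multiple
 * channel subsystems are supported, the extended cssid format is used;
 * otherwise the CPU address identifies the system image.
 */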
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        if (css_characteristics_avail && css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
        css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
        css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        kfree(css);
}

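/*
 * Initialize the channel_subsystem structure with index @nr and
 * generate its global path group id.
 */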
static inline void __init
setup_css(int nr)
{
        u32 tod_high;

        memset(css[nr], 0, sizeof(struct channel_subsystem));
        css[nr]->valid = 1;
        css[nr]->cssid = nr;
        sprintf(css[nr]->device.bus_id, "css%x", nr);
        css[nr]->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css[nr], tod_high);
}

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel instances are created during probing (except for
 * the static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
        int ret, i;

        if (chsc_determine_css_characteristics() == 0)
                css_characteristics_avail = 1;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css[i]) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                setup_css(i);
                ret = device_register(&css[i]->device);
                if (ret)
                        goto out_free;
        }
        css_init_done = 1;

        ctl_set_bit(6, 28);

        for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
out_free:
        kfree(css[i]);
out_unregister:
        while (i > 0) {
                i--;
                device_unregister(&css[i]->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        return ret;
}

/*
 * Find a driver for a subchannel.  Drivers are matched by subchannel
 * type, with the exception that the console subchannel driver has its
 * own subchannel type although the device is an I/O subchannel.
 */
static int
css_bus_match (struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = container_of (dev, struct subchannel, dev);
        struct css_driver *driver = container_of (drv, struct css_driver, drv);

        if (sch->st == driver->subchannel_type)
                return 1;

        return 0;
}

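/*
 * Driver core probe/remove/shutdown callbacks: forward the request to
 * the matched subchannel driver, if it implements the operation.
 */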
static int
css_probe (struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        sch->driver = container_of (dev->driver, struct css_driver, drv);
        return (sch->driver->probe ? sch->driver->probe(sch) : 0);
}

static int
css_remove (struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        return (sch->driver->remove ? sch->driver->remove(sch) : 0);
}

static void
css_shutdown (struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
};

subsys_initcall(init_channel_subsystem);

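/*
 * Queue a subchannel for evaluation on the slow path.  May be called
 * from contexts that cannot sleep, hence the GFP_ATOMIC allocation.
 */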
int
css_enqueue_subchannel_slow(struct subchannel_id schid)
{
        struct slow_subchannel *new_slow_sch;
        unsigned long flags;

        new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
        if (!new_slow_sch)
                return -ENOMEM;
        memset(new_slow_sch, 0, sizeof(struct slow_subchannel));
        new_slow_sch->schid = schid;
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        return 0;
}

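/*
 * Drop all entries from the slow path queue, e.g. before a full rescan
 * of all subchannels is scheduled.
 */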
void
css_clear_subchannel_slow_list(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        while (!list_empty(&slow_subchannels_head)) {
                struct slow_subchannel *slow_sch =
                        list_entry(slow_subchannels_head.next,
                                   struct slow_subchannel, slow_list);

                list_del_init(slow_subchannels_head.next);
                kfree(slow_sch);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

int
css_slow_subchannels_exist(void)
{
        return (!list_empty(&slow_subchannels_head));
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);