/*
 *  drivers/s390/cio/css.c
 *  driver for channel subsystem
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>

#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
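
/*
 * Illustration (sketch only, hypothetical callback): callers pass a
 * function taking a subchannel_id plus an opaque cookie; a non-zero
 * return value from the callback aborts the scan of the current
 * subchannel set.
 *
 *      static int count_schids(struct subchannel_id schid, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int n = 0;
 *      for_each_subchannel(count_schids, &n);
 */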

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.set = idset_sch_new();
        if (!cb.set)
                return -ENOMEM;
        idset_fill(cb.set);
        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;
        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
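
/*
 * Typical usage (sketch, hypothetical callback names): the slow-path code
 * below passes one callback for subchannels that are registered on the css
 * bus and one for subchannel ids that are not; either callback may be NULL.
 *
 *      rc = for_each_subchannel_staged(eval_known_sch, eval_unknown_schid,
 *                                      NULL);
 *
 * Registered subchannels are removed from the idset as they are visited,
 * so the second pass only sees ids without a struct subchannel.
 */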

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel (sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
        if (sch) {
                /* Reset intparm to zeroes. */
                sch->schib.pmcw.intparm = 0;
                cio_modify(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static void
css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                kfree(sch->lock);
                kfree(sch);
        }
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them, since they will be
         * unregistered before they can be used anyway.  We therefore delay
         * the add uevent until device recognition has succeeded.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        sch->dev.uevent_suppress = 1;
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                sch->dev.uevent_suppress = 0;
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                css_free_subchannel(sch);
        return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
                         "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
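
/*
 * Usage note: css_schedule_eval() marks a single subchannel id in
 * slow_subchannel_set and kicks the slow-path work, while
 * css_schedule_eval_all() marks every possible id (e.g. on a CRW overflow,
 * see css_process_crw() below).  The work function then walks all
 * subchannels via for_each_subchannel_staged() and evaluates only the
 * marked ones.
 */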

void css_wait_for_slow_path(void)
{
        flush_workqueue(ccw_device_notify_work);
        flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
        int ret;

        CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
                      schid.ssid, schid.sch_no);
        if (need_reprobe)
                return -EAGAIN;

        ret = css_probe_device(schid);
        switch (ret) {
        case 0:
                break;
        case -ENXIO:
        case -ENOMEM:
        case -EIO:
                /* These should abort looping */
                break;
        default:
                ret = 0;
        }

        return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
        int ret;

        CIO_MSG_EVENT(4, "reprobe start\n");

        need_reprobe = 0;
        /* Make sure initial subchannel scan is done. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

        CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
                      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        need_reprobe = 1;
        queue_work(slow_path_wq, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                /* -EIO: this subchannel set not supported. */
                case -EIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels in ioinfo, even those
         * that were already present before init_channel_subsystem.
         * These subchannels cannot have been registered yet (kmalloc
         * was not available that early), so we do it now.  This is
         * the case e.g. for the console subchannel.
         */
        css_register_subchannel(sch);
        return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        if (css_characteristics_avail && css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
        css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
        css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        sprintf(css->device.bus_id, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};
/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannels are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
        int ret, i;

        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
                goto out; /* No need to continue. */
        if (ret == 0)
                css_characteristics_avail = 1;

        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret)
                        goto out_free;
                ret = device_register(&css->device);
                if (ret)
                        goto out_free_all;
                if (css_characteristics_avail &&
                    css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret)
                        goto out_file;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_pseudo;
        css_init_done = 1;

        ctl_set_bit(6, 28);

        for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
out_pseudo:
        device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
out_file:
        device_remove_file(&channel_subsystems[i]->device,
                           &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_free_all:
        kfree(channel_subsystems[i]->pseudo_subchannel->lock);
        kfree(channel_subsystems[i]->pseudo_subchannel);
out_free:
        kfree(channel_subsystems[i]);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                if (css_characteristics_avail && css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        s390_unregister_crw_handler(CRW_RSC_CSS);
        chsc_free_sei_area();
        kfree(slow_subchannel_set);
        printk(KERN_WARNING "cio: failed to initialize css driver (%d)!\n",
               ret);
        return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

/*
 * Find a driver for a subchannel. Drivers are matched by subchannel
 * type, with the exception that the console subchannel driver has its own
 * subchannel type although the device is an I/O subchannel.
 */
static int
css_bus_match (struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);

        if (sch->st == driver->subchannel_type)
                return 1;

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.name = cdrv->name;
        cdrv->drv.bus = &css_bus_type;
        cdrv->drv.owner = cdrv->owner;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
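
/*
 * Example (sketch only, hypothetical names and field values): a subchannel
 * driver fills in the subchannel type it handles plus its callbacks and
 * registers itself, e.g.
 *
 *      static struct css_driver my_css_driver = {
 *              .owner           = THIS_MODULE,
 *              .name            = "my_subchannel_driver",
 *              .subchannel_type = SUBCHANNEL_TYPE_IO,
 *              .probe           = my_probe,
 *              .remove          = my_remove,
 *              .sch_event       = my_sch_event,
 *      };
 *
 *      css_driver_register(&my_css_driver);
 *
 * css_bus_match() above then binds the driver to subchannels whose st
 * field equals .subchannel_type.
 */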

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);