]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/s390/cio/device_fsm.c
[S390] cio: Introduce subchannel->private.
[linux-2.6-omap-h63xx.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/jiffies.h>
14 #include <linux/string.h>
15
16 #include <asm/ccwdev.h>
17 #include <asm/cio.h>
18 #include <asm/chpid.h>
19
20 #include "cio.h"
21 #include "cio_debug.h"
22 #include "css.h"
23 #include "device.h"
24 #include "chsc.h"
25 #include "ioasm.h"
26 #include "chp.h"
27
28 static int timeout_log_enabled;
29
30 int
31 device_is_online(struct subchannel *sch)
32 {
33         struct ccw_device *cdev;
34
35         if (!sch->dev.driver_data)
36                 return 0;
37         cdev = sch->dev.driver_data;
38         return (cdev->private->state == DEV_STATE_ONLINE);
39 }
40
41 int
42 device_is_disconnected(struct subchannel *sch)
43 {
44         struct ccw_device *cdev;
45
46         if (!sch->dev.driver_data)
47                 return 0;
48         cdev = sch->dev.driver_data;
49         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51 }
52
53 void
54 device_set_disconnected(struct subchannel *sch)
55 {
56         struct ccw_device *cdev;
57
58         if (!sch->dev.driver_data)
59                 return;
60         cdev = sch->dev.driver_data;
61         ccw_device_set_timeout(cdev, 0);
62         cdev->private->flags.fake_irb = 0;
63         cdev->private->state = DEV_STATE_DISCONNECTED;
64 }
65
66 void device_set_intretry(struct subchannel *sch)
67 {
68         struct ccw_device *cdev;
69
70         cdev = sch->dev.driver_data;
71         if (!cdev)
72                 return;
73         cdev->private->flags.intretry = 1;
74 }
75
76 int device_trigger_verify(struct subchannel *sch)
77 {
78         struct ccw_device *cdev;
79
80         cdev = sch->dev.driver_data;
81         if (!cdev || !cdev->online)
82                 return -EINVAL;
83         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
84         return 0;
85 }
86
/*
 * Parse the "ccw_timeout_log" kernel parameter: enable dumping of device
 * state when a ccw device timeout occurs (see ccw_timeout_log()).
 * Always returns 1 to mark the option as handled.
 */
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}
92
93 __setup("ccw_timeout_log", ccw_timeout_log_setup);
94
/*
 * Dump diagnostic state for @cdev to the kernel log after an i/o timeout:
 * the orb, the last channel program, the schib and the internal device
 * flags. Only called when "ccw_timeout_log" was given on the command line;
 * invoked under the ccw device lock from ccw_device_timeout().
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	/* Fetch a current copy of the subchannel information block. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &private->orb, sizeof(private->orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	/* Did the timed-out channel program originate in common i/o code? */
	if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
	    (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
		printk(KERN_WARNING "cio: last channel program (intern):\n");
	else
		printk(KERN_WARNING "cio: last channel program:\n");

	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       (void *)(addr_t)private->orb.cpa,
		       sizeof(struct ccw1), 0);
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
135
136 /*
137  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
138  */
139 static void
140 ccw_device_timeout(unsigned long data)
141 {
142         struct ccw_device *cdev;
143
144         cdev = (struct ccw_device *) data;
145         spin_lock_irq(cdev->ccwlock);
146         if (timeout_log_enabled)
147                 ccw_timeout_log(cdev);
148         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
149         spin_unlock_irq(cdev->ccwlock);
150 }
151
/*
 * Set timeout
 *
 * Arm (or, for @expires == 0, cancel) the per-device timer that delivers
 * DEV_EVENT_TIMEOUT through ccw_device_timeout(). @expires is relative
 * to now, in jiffies.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		/* Zero means: stop any pending timer. */
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns non-zero if the timer was active. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	/* Timer was not pending - set it up from scratch. */
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
171
172 /* Kill any pending timers after machine check. */
173 void
174 device_kill_pending_timer(struct subchannel *sch)
175 {
176         struct ccw_device *cdev;
177
178         if (!sch->dev.driver_data)
179                 return;
180         cdev = sch->dev.driver_data;
181         ccw_device_set_timeout(cdev, 0);
182 }
183
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the schib; bail out if the device has vanished. */
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* -EBUSY falls through to the halt retry below. */
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted - the subchannel cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
233
234 static int
235 ccw_device_handle_oper(struct ccw_device *cdev)
236 {
237         struct subchannel *sch;
238
239         sch = to_subchannel(cdev->dev.parent);
240         cdev->private->flags.recog_done = 1;
241         /*
242          * Check if cu type and device type still match. If
243          * not, it is certainly another device and we have to
244          * de- and re-register.
245          */
246         if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
247             cdev->id.cu_model != cdev->private->senseid.cu_model ||
248             cdev->id.dev_type != cdev->private->senseid.dev_type ||
249             cdev->id.dev_model != cdev->private->senseid.dev_model) {
250                 PREPARE_WORK(&cdev->private->kick_work,
251                              ccw_device_do_unreg_rereg);
252                 queue_work(ccw_device_work, &cdev->private->kick_work);
253                 return 0;
254         }
255         cdev->private->flags.donotify = 1;
256         return 1;
257 }
258
259 /*
260  * The machine won't give us any notification by machine check if a chpid has
261  * been varied online on the SE so we have to find out by magic (i. e. driving
262  * the channel subsystem to device selection and updating our path masks).
263  */
264 static void
265 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
266 {
267         int mask, i;
268         struct chp_id chpid;
269
270         chp_id_init(&chpid);
271         for (i = 0; i<8; i++) {
272                 mask = 0x80 >> i;
273                 if (!(sch->lpm & mask))
274                         continue;
275                 if (old_lpm & mask)
276                         continue;
277                 chpid.id = sch->schib.pmcw.chpid[i];
278                 if (!chp_is_registered(chpid))
279                         css_schedule_eval_all();
280         }
281 }
282
/*
 * Stop device recognition.
 *
 * Common exit path for sense id processing: disable the subchannel,
 * refresh the path masks and move the device to @state (which may be
 * downgraded to DEV_STATE_NOT_OPER if the device vanished meanwhile).
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Device stays disconnected; recognition is over. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Device reappeared - is it still the same one? */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type   = cdev->private->senseid.cu_type;
		cdev->id.cu_model  = cdev->private->senseid.cu_model;
		cdev->id.dev_type  = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2,
			  "SenseID : device 0.%x.%04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->dev_id.ssid,
			  cdev->private->dev_id.devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
370
371 /*
372  * Function called from device_id.c after sense id has completed.
373  */
374 void
375 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
376 {
377         switch (err) {
378         case 0:
379                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
380                 break;
381         case -ETIME:            /* Sense id stopped by timeout. */
382                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
383                 break;
384         default:
385                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
386                 break;
387         }
388 }
389
/*
 * Tell the subchannel driver that a device became operational again and
 * find out whether it wants the device back; if not, de-/re-register it.
 * Runs from the kick_work work item (queued by ccw_device_done()).
 */
static void
ccw_device_oper_notify(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver && sch->driver->notify) {
		/* Drop the device lock while calling out to the driver. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		ret = sch->driver->notify(sch, CIO_OPER);
		spin_lock_irqsave(cdev->ccwlock, flags);
	} else
		ret = 0;
	if (ret) {
		/* Reenable channel measurements, if needed. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		cmf_reenable(cdev);
		spin_lock_irqsave(cdev->ccwlock, flags);
		wake_up(&cdev->private->wait_q);
	}
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg(work);
}
421
/*
 * Finished with online/offline processing.
 *
 * Move the device to @state, reset its accumulated status, wake up
 * anyone waiting on the state change and - if requested - notify the
 * subchannel driver asynchronously.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	/* Keep the subchannel enabled only for an online device. */
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->dev_id.devno, sch->schid.sch_no);

	/* Deferred oper notification requested by ccw_device_handle_oper(). */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
458
459 static int cmp_pgid(struct pgid *p1, struct pgid *p2)
460 {
461         char *c1;
462         char *c2;
463
464         c1 = (char *)p1;
465         c2 = (char *)p2;
466
467         return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
468 }
469
/*
 * Derive a single path group id from the per-path SNID results in
 * cdev->private->pgid[]. Paths still in the reset state are skipped.
 * On a pgid conflict, path grouping is disabled for the device;
 * otherwise pgid[0] receives the common pgid, or the css global pgid
 * when no path reported one.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;	/* index of first path with a usable pgid */

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}
510
/*
 * Function called from device_pgid.c after sense path group id has
 * completed. On success (or partial success) path verification is
 * started; fatal errors box the device or mark it not operational.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
542
543 /*
544  * Start device recognition.
545  */
546 int
547 ccw_device_recognition(struct ccw_device *cdev)
548 {
549         struct subchannel *sch;
550         int ret;
551
552         if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
553             (cdev->private->state != DEV_STATE_BOXED))
554                 return -EINVAL;
555         sch = to_subchannel(cdev->dev.parent);
556         ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
557         if (ret != 0)
558                 /* Couldn't enable the subchannel for i/o. Sick device. */
559                 return ret;
560
561         /* After 60s the device recognition is considered to have failed. */
562         ccw_device_set_timeout(cdev, 60*HZ);
563
564         /*
565          * We used to start here with a sense pgid to find out whether a device
566          * is locked by someone else. Unfortunately, the sense pgid command
567          * code has other meanings on devices predating the path grouping
568          * algorithm, so we start with sense id and box the device after an
569          * timeout (or if sense pgid during path verification detects the device
570          * is locked, as may happen on newer devices).
571          */
572         cdev->private->flags.recog_done = 0;
573         cdev->private->state = DEV_STATE_SENSE_ID;
574         ccw_device_sense_id_start(cdev);
575         return 0;
576 }
577
578 /*
579  * Handle timeout in device recognition.
580  */
581 static void
582 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
583 {
584         int ret;
585
586         ret = ccw_device_cancel_halt_clear(cdev);
587         switch (ret) {
588         case 0:
589                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
590                 break;
591         case -ENODEV:
592                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
593                 break;
594         default:
595                 ccw_device_set_timeout(cdev, 3*HZ);
596         }
597 }
598
599
/*
 * Callback from device_pgid.c when path verification has completed.
 * On success the device is set online (delivering a deferred fake irb
 * if one was requested); on error the device is boxed or set not
 * operational.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
650
/*
 * Get device online.
 *
 * Enable the subchannel and start sense pgid (or, without path
 * grouping, go straight to path verification). Returns 0 when the
 * state machine has been kicked off, negative errno otherwise.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Only offline or boxed devices can be brought online. */
	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference while online; dropped in ccw_device_done(). */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
686
687 void
688 ccw_device_disband_done(struct ccw_device *cdev, int err)
689 {
690         switch (err) {
691         case 0:
692                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
693                 break;
694         case -ETIME:
695                 ccw_device_done(cdev, DEV_STATE_BOXED);
696                 break;
697         default:
698                 cdev->private->flags.donotify = 0;
699                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
700                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
701                 break;
702         }
703 }
704
705 /*
706  * Shutdown device.
707  */
708 int
709 ccw_device_offline(struct ccw_device *cdev)
710 {
711         struct subchannel *sch;
712
713         if (ccw_device_is_orphan(cdev)) {
714                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
715                 return 0;
716         }
717         sch = to_subchannel(cdev->dev.parent);
718         if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
719                 return -ENODEV;
720         if (cdev->private->state != DEV_STATE_ONLINE) {
721                 if (sch->schib.scsw.actl != 0)
722                         return -EBUSY;
723                 return -EINVAL;
724         }
725         if (sch->schib.scsw.actl != 0)
726                 return -EBUSY;
727         /* Are we doing path grouping? */
728         if (!cdev->private->options.pgroup) {
729                 /* No, set state offline immediately. */
730                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
731                 return 0;
732         }
733         /* Start Set Path Group commands. */
734         cdev->private->state = DEV_STATE_DISBAND_PGID;
735         ccw_device_disband_start(cdev);
736         return 0;
737 }
738
739 /*
740  * Handle timeout in device online/offline process.
741  */
742 static void
743 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
744 {
745         int ret;
746
747         ret = ccw_device_cancel_halt_clear(cdev);
748         switch (ret) {
749         case 0:
750                 ccw_device_done(cdev, DEV_STATE_BOXED);
751                 break;
752         case -ENODEV:
753                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
754                 break;
755         default:
756                 ccw_device_set_timeout(cdev, 3*HZ);
757         }
758 }
759
/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Finish recognition; the device is gone. */
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
768
769 /*
770  * Handle not operational event in non-special state.
771  */
772 static void ccw_device_generic_notoper(struct ccw_device *cdev,
773                                        enum dev_event dev_event)
774 {
775         struct subchannel *sch;
776
777         cdev->private->state = DEV_STATE_NOT_OPER;
778         sch = to_subchannel(cdev->dev.parent);
779         css_schedule_eval(sch->schid);
780 }
781
/*
 * Handle path verification event.
 *
 * Start path verification if the device is idle; otherwise remember the
 * request in flags.doverify so it is retried once the current i/o or
 * basic sense has finished.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Waiting for basic sense - verify afterwards. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
817
/*
 * Got an interrupt for a normal io (state online).
 *
 * Unsolicited interrupts are handed to the driver directly (after
 * starting a basic sense for a unit check without sense data);
 * solicited status is accumulated and the driver handler is called
 * once it is complete.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb for this interrupt is passed via the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
861
862 /*
863  * Got a timeout in online state.
864  */
865 static void
866 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
867 {
868         int ret;
869
870         ccw_device_set_timeout(cdev, 0);
871         ret = ccw_device_cancel_halt_clear(cdev);
872         if (ret == -EBUSY) {
873                 ccw_device_set_timeout(cdev, 3*HZ);
874                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
875                 return;
876         }
877         if (ret == -ENODEV)
878                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
879         else if (cdev->handler)
880                 cdev->handler(cdev, cdev->private->intparm,
881                               ERR_PTR(-ETIMEDOUT));
882 }
883
884 /*
885  * Got an interrupt for a basic sense.
886  */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* Interrupt response block as delivered in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		/* Retry Basic Sense if requested. */
		if (cdev->private->flags.intretry) {
			cdev->private->flags.intretry = 0;
			ccw_device_do_sense(cdev, irb);
			return;
		}
		/* Drop the pending sense; report the halt/clear status. */
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	/* Sense (or halt/clear) handling complete: back to online. */
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
940
941 static void
942 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
943 {
944         struct irb *irb;
945
946         irb = (struct irb *) __LC_IRB;
947         /* Accumulate status. We don't do basic sense. */
948         ccw_device_accumulate_irb(cdev, irb);
949         /* Remember to clear irb to avoid residuals. */
950         memset(&cdev->private->irb, 0, sizeof(struct irb));
951         /* Try to start delayed device verification. */
952         ccw_device_online_verify(cdev, 0);
953         /* Note: Don't call handler for cio initiated clear! */
954 }
955
956 static void
957 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
958 {
959         struct subchannel *sch;
960
961         sch = to_subchannel(cdev->dev.parent);
962         ccw_device_set_timeout(cdev, 0);
963         /* Start delayed path verification. */
964         ccw_device_online_verify(cdev, 0);
965         /* OK, i/o is dead now. Call interrupt handler. */
966         if (cdev->handler)
967                 cdev->handler(cdev, cdev->private->intparm,
968                               ERR_PTR(-EIO));
969 }
970
971 static void
972 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
973 {
974         int ret;
975
976         ret = ccw_device_cancel_halt_clear(cdev);
977         if (ret == -EBUSY) {
978                 ccw_device_set_timeout(cdev, 3*HZ);
979                 return;
980         }
981         /* Start delayed path verification. */
982         ccw_device_online_verify(cdev, 0);
983         if (cdev->handler)
984                 cdev->handler(cdev, cdev->private->intparm,
985                               ERR_PTR(-EIO));
986 }
987
988 void device_kill_io(struct subchannel *sch)
989 {
990         int ret;
991         struct ccw_device *cdev;
992
993         cdev = sch->dev.driver_data;
994         ret = ccw_device_cancel_halt_clear(cdev);
995         if (ret == -EBUSY) {
996                 ccw_device_set_timeout(cdev, 3*HZ);
997                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
998                 return;
999         }
1000         /* Start delayed path verification. */
1001         ccw_device_online_verify(cdev, 0);
1002         if (cdev->handler)
1003                 cdev->handler(cdev, cdev->private->intparm,
1004                               ERR_PTR(-EIO));
1005 }
1006
1007 static void
1008 ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
1009 {
1010         /* Start verification after current task finished. */
1011         cdev->private->flags.doverify = 1;
1012 }
1013
1014 static void
1015 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1016 {
1017         struct irb *irb;
1018
1019         switch (dev_event) {
1020         case DEV_EVENT_INTERRUPT:
1021                 irb = (struct irb *) __LC_IRB;
1022                 /* Check for unsolicited interrupt. */
1023                 if ((irb->scsw.stctl ==
1024                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1025                     (!irb->scsw.cc))
1026                         /* FIXME: we should restart stlck here, but this
1027                          * is extremely unlikely ... */
1028                         goto out_wakeup;
1029
1030                 ccw_device_accumulate_irb(cdev, irb);
1031                 /* We don't care about basic sense etc. */
1032                 break;
1033         default: /* timeout */
1034                 break;
1035         }
1036 out_wakeup:
1037         wake_up(&cdev->private->wait_q);
1038 }
1039
1040 static void
1041 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1042 {
1043         struct subchannel *sch;
1044
1045         sch = to_subchannel(cdev->dev.parent);
1046         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1047                 /* Couldn't enable the subchannel for i/o. Sick device. */
1048                 return;
1049
1050         /* After 60s the device recognition is considered to have failed. */
1051         ccw_device_set_timeout(cdev, 60*HZ);
1052
1053         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1054         ccw_device_sense_id_start(cdev);
1055 }
1056
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only reprobe devices that are currently disconnected. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one path bit set -> turn on the mp bit. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* Device number changed: move the old device out of the
		 * way and let the new one be probed. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}
1094
1095 static void
1096 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1097 {
1098         struct subchannel *sch;
1099
1100         sch = to_subchannel(cdev->dev.parent);
1101         /*
1102          * An interrupt in state offline means a previous disable was not
1103          * successful. Try again.
1104          */
1105         cio_disable_subchannel(sch);
1106 }
1107
1108 static void
1109 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1110 {
1111         retry_set_schib(cdev);
1112         cdev->private->state = DEV_STATE_ONLINE;
1113         dev_fsm_event(cdev, dev_event);
1114 }
1115
1116 static void ccw_device_update_cmfblock(struct ccw_device *cdev,
1117                                        enum dev_event dev_event)
1118 {
1119         cmf_retry_copy_block(cdev);
1120         cdev->private->state = DEV_STATE_ONLINE;
1121         dev_fsm_event(cdev, dev_event);
1122 }
1123
1124 static void
1125 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1126 {
1127         ccw_device_set_timeout(cdev, 0);
1128         if (dev_event == DEV_EVENT_NOTOPER)
1129                 cdev->private->state = DEV_STATE_NOT_OPER;
1130         else
1131                 cdev->private->state = DEV_STATE_OFFLINE;
1132         wake_up(&cdev->private->wait_q);
1133 }
1134
1135 static void
1136 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1137 {
1138         int ret;
1139
1140         ret = ccw_device_cancel_halt_clear(cdev);
1141         switch (ret) {
1142         case 0:
1143                 cdev->private->state = DEV_STATE_OFFLINE;
1144                 wake_up(&cdev->private->wait_q);
1145                 break;
1146         case -ENODEV:
1147                 cdev->private->state = DEV_STATE_NOT_OPER;
1148                 wake_up(&cdev->private->wait_q);
1149                 break;
1150         default:
1151                 ccw_device_set_timeout(cdev, HZ/10);
1152         }
1153 }
1154
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Intentionally empty: the event is ignored in this state. */
}
1163
/*
 * Bug operation action. Wired into the jumptable for state/event
 * combinations that must never occur; hitting it is a driver bug.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
		      cdev->private->state, dev_event);
	BUG();
}
1174
/*
 * device statemachine
 *
 * Maps each (device state, device event) pair to the action routine
 * that handles the event in that state.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
};
1278
1279 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);