/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
        "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

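/*
 * SIGA (signal adapter) wrappers. The function code in GR0 selects the
 * operation: the helpers below use 1 for SIGA-r (input) and 2 for SIGA-s
 * (sync); do_siga_output takes its function code from the caller. Under
 * QEBSM the 0x80 flag is or'ed in and the subchannel token replaces the
 * schid (see qdio_siga_output). This summarizes how the wrappers are used
 * here, not the full instruction architecture.
 */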
static inline int do_siga_sync(struct subchannel_id schid,
                               unsigned int out_mask, unsigned int in_mask)
{
        register unsigned long __fc asm ("0") = 2;
        register struct subchannel_id __schid asm ("1") = schid;
        register unsigned long out asm ("2") = out_mask;
        register unsigned long in asm ("3") = in_mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
        return cc;
}

static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
        register unsigned long __fc asm ("0") = 1;
        register struct subchannel_id __schid asm ("1") = schid;
        register unsigned long __mask asm ("2") = mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
        return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
                                 unsigned int *bb, unsigned int fc)
{
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
        int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

        asm volatile(
                "       siga    0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
                : : "cc", "memory");
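        /* the busy indication comes back in GR0; take the MSB of its
         * lower 32 bits */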
        *bb = ((unsigned int) __fc) >> 31;
        return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
        /* all done or next buffer state different */
        if (ccq == 0 || ccq == 32)
                return 0;
        /* not all buffers processed */
        if (ccq == 96 || ccq == 97)
                return 1;
        /* notify devices immediately */
        DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
        return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
                        int start, int count, int auto_ack)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        BUG_ON(!q->irq_ptr->sch_token);
        qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
                      auto_ack);
        rc = qdio_check_ccq(q, ccq);

        /* At least one buffer was processed, return and extract the remaining
         * buffers later.
         */
        if ((ccq == 96) && (count != tmp_count)) {
                qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
                return (count - tmp_count);
        }

        if (rc == 1) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
                goto again;
        }

        if (rc < 0) {
                DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev,
                           QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                           0, -1, -1, q->irq_ptr->int_parm);
                return 0;
        }
        return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
                        int count)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        if (!count)
                return 0;

        BUG_ON(!q->irq_ptr->sch_token);
        qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
        rc = qdio_check_ccq(q, ccq);
        if (rc == 1) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
                qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
                goto again;
        }
        if (rc < 0) {
                DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev,
                           QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                           0, -1, -1, q->irq_ptr->int_parm);
                return 0;
        }
        WARN_ON(tmp_count);
        return count - tmp_count;
}

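/*
 * Buffer numbers are ring indices; the next_buf/add_buf/sub_buf helpers
 * (defined in qdio.h) wrap at QDIO_MAX_BUFFERS_PER_Q (128) via
 * QDIO_MAX_BUFFERS_MASK.
 */
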
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
                                 int auto_ack)
{
        unsigned char __state = 0;
        int i;

        BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
        BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

        if (is_qebsm(q))
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

        for (i = 0; i < count; i++) {
                if (!__state)
                        __state = q->slsb.val[bufnr];
                else if (q->slsb.val[bufnr] != __state)
                        break;
                bufnr = next_buf(bufnr);
        }
        *state = __state;
        return i;
}

inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                  unsigned char *state, int auto_ack)
{
        return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
                                 unsigned char state, int count)
{
        int i;

        BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
        BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);

        for (i = 0; i < count; i++) {
                xchg(&q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
        return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
                                unsigned char state)
{
        return set_buf_states(q, bufnr, state, 1);
}

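/*
 * SLSB state names encode the owner of a buffer: SLSB_P_* states are
 * owned by the program, SLSB_CU_* states by the control unit (adapter).
 */
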
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
        for_each_output_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
}

static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                          unsigned int input)
{
        int cc;

        if (!need_siga_sync(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
        qdio_perf_stat_inc(&perf_stats.siga_sync);

        cc = do_siga_sync(q->irq_ptr->schid, output, input);
        if (cc)
                DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
        return cc;
}

inline int qdio_siga_sync_q(struct qdio_q *q)
{
        if (q->is_input_q)
                return qdio_siga_sync(q, 0, q->mask);
        else
                return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
        return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
        return qdio_siga_sync(q, ~0U, ~0U);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
        unsigned long schid;
        unsigned int fc = 0;
        u64 start_time = 0;
        int cc;

        if (q->u.out.use_enh_siga)
                fc = 3;

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= 0x80;
        } else
                schid = *((u32 *)&q->irq_ptr->schid);

again:
        cc = do_siga_output(schid, q->mask, busy_bit, fc);

        /* HiperSockets busy condition */
        if (*busy_bit) {
                WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

                if (!start_time) {
                        start_time = get_usecs();
                        goto again;
                }
                if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
        qdio_perf_stat_inc(&perf_stats.siga_in);

        cc = do_siga_input(q->irq_ptr->schid, q->mask);
        if (cc)
                DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
        return cc;
}

/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
        if (pci_out_supported(q)) {
                if (need_siga_sync_thinint(q))
                        qdio_siga_sync_all(q);
                else if (need_siga_sync_out_thinint(q))
                        qdio_siga_sync_out(q);
        } else
                qdio_siga_sync_q(q);
}

inline void qdio_stop_polling(struct qdio_q *q)
{
        if (!q->u.in.polling)
                return;

        q->u.in.polling = 0;
        qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

        /* show the card that we are not polling anymore */
        if (is_qebsm(q)) {
                set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = 0;
        } else
                set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
}

static void announce_buffer_error(struct qdio_q *q, int count)
{
        q->qdio_error |= QDIO_ERROR_SLSB_STATE;

        /* special handling for "no target buffer empty" */
        if ((!q->is_input_q &&
            (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
                qdio_perf_stat_inc(&perf_stats.outbound_target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
                              q->first_to_check);
                return;
        }

        DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
        DBF_ERROR("F14:%2x F15:%2x",
                  q->sbal[q->first_to_check]->element[14].flags & 0xff,
                  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}

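/*
 * Inbound polling: instead of giving every PRIMED buffer back to the
 * adapter right away, the most recent buffer is left in ACK state so the
 * adapter keeps the initiative; the ACK is removed in qdio_stop_polling
 * or by the next inbound run. (Summary of the scheme implemented below.)
 */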
static inline void inbound_primed(struct qdio_q *q, int count)
{
        int new;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);

        /* for QEBSM the ACK was already set by EQBS */
        if (is_qebsm(q)) {
                if (!q->u.in.polling) {
                        q->u.in.polling = 1;
                        q->u.in.ack_count = count;
                        q->last_move_ftc = q->first_to_check;
                        return;
                }

                /* delete the previous ACKs */
                set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = count;
                q->last_move_ftc = q->first_to_check;
                return;
        }

        /*
         * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
         * or by the next inbound run.
         */
        new = add_buf(q->first_to_check, count - 1);
        if (q->u.in.polling) {
                /* reset the previous ACK but first set the new one */
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
                set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
        } else {
                q->u.in.polling = 1;
                set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK);
        }

        q->last_move_ftc = new;
        count--;
        if (!count)
                return;

        /*
         * Need to change all PRIMED buffers to NOT_INIT, otherwise
         * we lose the initiative in the thinint code.
         */
        set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT,
                       count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state;

        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        /*
         * No siga-sync here: either a PCI interrupt or the sync after a
         * thin interrupt will sync the queues.
         */

        /* need to set count to 1 for non-qebsm */
        if (!is_qebsm(q))
                count = 1;

check_next:
        if (q->first_to_check == stop)
                goto out;

        count = get_buf_states(q, q->first_to_check, &state, count, 1);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                /*
                 * No siga-sync needed for non-qebsm here, as the inbound queue
                 * will be synced on the next siga-r, or
                 * tiqdio_is_inbound_q_done will do the siga-sync.
                 */
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                goto check_next;
        case SLSB_P_INPUT_ERROR:
                announce_buffer_error(q, count);
                /* process the buffer, the upper layer will take care of it */
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                break;
        case SLSB_CU_INPUT_EMPTY:
        case SLSB_P_INPUT_NOT_INIT:
        case SLSB_P_INPUT_ACK:
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
                break;
        default:
                BUG();
        }
out:
        return q->first_to_check;
}

int qdio_inbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_inbound_buffer_frontier(q);

        if ((bufnr != q->last_move_ftc) || q->qdio_error) {
                if (!need_siga_sync(q) && !pci_out_supported(q))
                        q->u.in.timestamp = get_usecs();

                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
                return 1;
        } else
                return 0;
}

static int qdio_inbound_q_done(struct qdio_q *q)
{
        unsigned char state = 0;

        if (!atomic_read(&q->nr_buf_used))
                return 1;

        /*
         * We need the siga-sync here to synchronize with the adapter, as it
         * does a kind of PCI avoidance.
         */
        qdio_siga_sync_q(q);

        get_buf_state(q, q->first_to_check, &state, 0);
        if (state == SLSB_P_INPUT_PRIMED)
                /* we got something to do */
                return 0;

        /* on VM we don't poll, so the queue is always done here */
        if (need_siga_sync(q) || pci_out_supported(q))
                return 1;

        /*
         * At this point we know that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
        if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
                              q->first_to_check);
                return 1;
        } else {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
                              q->first_to_check);
                return 0;
        }
}

void qdio_kick_inbound_handler(struct qdio_q *q)
{
        int count, start, end;

        qdio_perf_stat_inc(&perf_stats.inbound_handler);

        start = q->first_to_kick;
        end = q->first_to_check;
        if (end >= start)
                count = end - start;
        else
                count = end + QDIO_MAX_BUFFERS_PER_Q - start;
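        /* e.g. start = 120, end = 5: count = 5 + 128 - 120 = 13 */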

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
                   start, count, q->irq_ptr->int_parm);

        /* for the next time */
        q->first_to_kick = q->first_to_check;
        q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
        qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_inbound_handler(q);

        if (!qdio_inbound_q_done(q))
                /* means poll time is not yet over */
                goto again;

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q))
                goto again;
}

/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state;

        if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
            (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
                qdio_siga_sync_q(q);

        /*
         * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        /* need to set count to 1 for non-qebsm */
        if (!is_qebsm(q))
                count = 1;

check_next:
        if (q->first_to_check == stop)
                return q->first_to_check;

        count = get_buf_states(q, q->first_to_check, &state, count, 0);
        if (!count)
                return q->first_to_check;

        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);

                atomic_sub(count, &q->nr_buf_used);
                q->first_to_check = add_buf(q->first_to_check, count);
                /*
                 * We fetch all buffer states at once. get_buf_states may
                 * return count < stop. For QEBSM we do not loop.
                 */
                if (is_qebsm(q))
                        break;
                goto check_next;
        case SLSB_P_OUTPUT_ERROR:
                announce_buffer_error(q, count);
                /* process the buffer, the upper layer will take care of it */
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                break;
        case SLSB_CU_OUTPUT_PRIMED:
                /* the adapter has not fetched the output yet */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
                break;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                break;
        default:
                BUG();
        }
        return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
        return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_outbound_buffer_frontier(q);

        if ((bufnr != q->last_move_ftc) || q->qdio_error) {
                q->last_move_ftc = bufnr;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
                return 1;
        } else
                return 0;
}

static void qdio_kick_outbound_q(struct qdio_q *q)
{
        unsigned int busy_bit;
        int cc;

        if (!need_siga_out(q))
                return;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
        qdio_perf_stat_inc(&perf_stats.siga_out);

        cc = qdio_siga_output(q, &busy_bit);
        switch (cc) {
        case 0:
                break;
        case 2:
                if (busy_bit) {
                        DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
                        q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY;
                } else {
                        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d",
                                      q->nr);
                        q->qdio_error = cc;
                }
                break;
        case 1:
        case 3:
                DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
                q->qdio_error = cc;
                break;
        }
}

static void qdio_kick_outbound_handler(struct qdio_q *q)
{
        int start, end, count;

        start = q->first_to_kick;
        end = q->last_move_ftc;
        if (end >= start)
                count = end - start;
        else
                count = end + QDIO_MAX_BUFFERS_PER_Q - start;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);

        /* for the next time: */
        q->first_to_kick = q->last_move_ftc;
        q->qdio_error = 0;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
        unsigned long flags;

        qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
        spin_lock_irqsave(&q->lock, flags);

        BUG_ON(atomic_read(&q->nr_buf_used) < 0);

        if (qdio_outbound_q_moved(q))
                qdio_kick_outbound_handler(q);

        spin_unlock_irqrestore(&q->lock, flags);

        if (queue_type(q) == QDIO_ZFCP_QFMT) {
                if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
                        tasklet_schedule(&q->tasklet);
                return;
        }

        /* bail out for HiperSockets unicast queues */
        if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
                return;

        if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
            (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
                tasklet_schedule(&q->tasklet);
                return;
        }

        if (q->u.out.pci_out_enabled)
                return;

        /*
         * Now we know that queue type is either qeth without pci enabled
         * or HiperSockets multicast. Make sure the buffer switch from PRIMED
         * to EMPTY is noticed and the outbound handler is called after some
         * time.
         */
        if (qdio_outbound_q_done(q))
                del_timer(&q->u.out.timer);
        else {
                if (!timer_pending(&q->u.out.timer)) {
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
                        qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
                }
        }
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        tasklet_schedule(&q->tasklet);
}

/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
        struct qdio_q *out;
        int i;

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(q->irq_ptr, out, i)
                if (!qdio_outbound_q_done(out))
                        tasklet_schedule(&out->tasklet);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
{
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

        irq_ptr->state = state;
        mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
        if (irb->esw.esw0.erw.cons) {
                DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
                DBF_ERROR_HEX(irb, 64);
                DBF_ERROR_HEX(irb->ecw, 64);
        }
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
        int i;
        struct qdio_q *q;

        qdio_perf_stat_inc(&perf_stats.pci_int);

        for_each_input_queue(irq_ptr, q, i)
                tasklet_schedule(&q->tasklet);

        if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
                return;

        for_each_output_queue(irq_ptr, q, i) {
                if (qdio_outbound_q_done(q))
                        continue;

                if (!siga_syncs_out_pci(q))
                        qdio_siga_sync_q(q);

                tasklet_schedule(&q->tasklet);
        }
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
                                unsigned long intparm, int cstat, int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;

        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        if (irq_ptr->nr_input_qs) {
                q = irq_ptr->input_qs[0];
        } else if (irq_ptr->nr_output_qs) {
                q = irq_ptr->output_qs[0];
        } else {
                dump_stack();
                goto no_handler;
        }
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                   0, -1, -1, irq_ptr->int_parm);
no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_call_shutdown(struct work_struct *work)
{
        struct ccw_device_private *priv;
        struct ccw_device *cdev;

        priv = container_of(work, struct ccw_device_private, kick_work);
        cdev = priv->cdev;
        qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
        put_device(&cdev->dev);
}

static void qdio_int_error(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
                break;
        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
                if (get_device(&cdev->dev)) {
                        /* Can't call shutdown from interrupt context. */
                        PREPARE_WORK(&cdev->private->kick_work,
                                     qdio_call_shutdown);
                        queue_work(ccw_device_work, &cdev->private->kick_work);
                }
                break;
        default:
                WARN_ON(1);
        }
        wake_up(&cdev->private->wait_q);
}

static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
                                       int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
                DBF_ERROR("EQ:ck con");
                goto error;
        }

        if (!(dstat & DEV_STAT_DEV_END)) {
                DBF_ERROR("EQ:no dev");
                goto error;
        }

        if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
                DBF_ERROR("EQ: bad io");
                goto error;
        }
        return 0;
error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
        return 1;
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
                                      int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
        if (!qdio_establish_check_errors(cdev, cstat, dstat))
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int cstat, dstat;

        qdio_perf_stat_inc(&perf_stats.qdio_int);

        if (!intparm || !irq_ptr) {
                DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
                return;
        }

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
                        return;
                case -ETIMEDOUT:
                        DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
                        qdio_int_error(cdev);
                        return;
                default:
                        WARN_ON(1);
                        return;
                }
        }
        qdio_irq_check_sense(irq_ptr, irb);

        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
                qdio_establish_handle_irq(cdev, cstat, dstat);
                break;

        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
                break;

        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                if (cstat & SCHN_STAT_PCI) {
                        qdio_int_handler_pci(irq_ptr);
                        /* no state change so no need to wake up wait_q */
                        return;
                }
                if ((cstat & ~SCHN_STAT_PCI) || dstat) {
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                        break;
                }
        default:
                WARN_ON(1);
        }
        wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
{
        if (!cdev || !cdev->private)
                return -EINVAL;

        DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
        return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int rc;

        if (!irq_ptr)
                return -ENODEV;

        rc = qdio_shutdown(cdev, how);
        if (rc == 0)
                rc = qdio_free(cdev);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                tasklet_disable(&q->tasklet);

        for_each_output_queue(irq_ptr, q, i) {
                tasklet_disable(&q->tasklet);
                del_timer(&q->u.out.timer);
        }
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int rc;
        unsigned long flags;

        if (!irq_ptr)
                return -ENODEV;

        DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

        mutex_lock(&irq_ptr->setup_mutex);
        /*
         * Subchannel was already shot down. We cannot prevent being called
         * twice since cio may trigger a shutdown asynchronously.
         */
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                mutex_unlock(&irq_ptr->setup_mutex);
                return 0;
        }

        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
        qdio_shutdown_debug_entries(irq_ptr, cdev);

        /* cleanup subchannel */
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
        else
                /* default behaviour is halt */
                rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
        if (rc) {
                DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4d", rc);
                goto no_cleanup;
        }

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
        qdio_shutdown_thinint(irq_ptr);

        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler)
                cdev->handler = irq_ptr->orig_handler;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
        if (rc)
                return rc;
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;

        DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
        mutex_lock(&irq_ptr->setup_mutex);

        if (irq_ptr->debug_area != NULL) {
                debug_unregister(irq_ptr->debug_area);
                irq_ptr->debug_area = NULL;
        }
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);

        qdio_release_memory(irq_ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
        int rc;

        rc = qdio_allocate(init_data);
        if (rc)
                return rc;

        rc = qdio_establish(init_data);
        if (rc)
                qdio_free(init_data->cdev);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;

        DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
                return -EINVAL;

        if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
            (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if ((!init_data->input_sbal_addr_array) ||
            (!init_data->output_sbal_addr_array))
                return -EINVAL;

        /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
        irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr)
                goto out_err;

        mutex_init(&irq_ptr->setup_mutex);
        qdio_allocate_dbf(init_data, irq_ptr);

        /*
         * Allocate a page for the chsc calls in qdio_establish.
         * Must be pre-allocated since a zfcp recovery will call
         * qdio_establish. In case of low memory and swap on a zfcp disk
         * we may not be able to allocate memory otherwise.
         */
        irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
        if (!irq_ptr->chsc_page)
                goto out_rel;

        /* qdr is used in ccw1.cda which is u32 */
        irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr->qdr)
                goto out_rel;
        WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

        if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
                             init_data->no_output_qs))
                goto out_rel;

        init_data->cdev->private->qdio_data = irq_ptr;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        return 0;
out_rel:
        qdio_release_memory(irq_ptr);
out_err:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;
        struct ccw_device *cdev = init_data->cdev;
        unsigned long saveflags;
        int rc;

        DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);

        rc = qdio_establish_thinint(irq_ptr);
        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options_mask(cdev, 0);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        }

        qdio_setup_ssqd_info(irq_ptr);
        DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
        DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

        /* qebsm is now setup if available, initialize buffer states */
        qdio_init_buf_states(irq_ptr);

        mutex_unlock(&irq_ptr->setup_mutex);
        qdio_print_subchannel_info(irq_ptr, cdev);
        qdio_setup_debug_entries(irq_ptr, cdev);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr;
        int rc;
        unsigned long saveflags;

        DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc)
                goto out;

        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);

        /* wait for subchannel to become active */
        msleep(5);

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        default:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
                rc = 0;
        }
out:
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

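/*
 * Worked example for the wrap-around case below: start = 125, count = 6
 * gives end = add_buf(125, 6) = 3, so bufnr 126 or 2 is "in between"
 * while bufnr 10 is not.
 */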
static inline int buf_in_between(int bufnr, int start, int count)
{
        int end = add_buf(start, count);

        if (end > start) {
                if (bufnr >= start && bufnr < end)
                        return 1;
                else
                        return 0;
        }

        /* wrap-around case */
        if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
            (bufnr < end))
                return 1;
        else
                return 0;
}

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
                           int bufnr, int count)
{
        int used, cc, diff;

        if (!q->u.in.polling)
                goto set;

        /* protect against stop polling setting an ACK for an emptied slsb */
        if (count == QDIO_MAX_BUFFERS_PER_Q) {
                /* overwriting everything, just delete polling status */
                q->u.in.polling = 0;
                q->u.in.ack_count = 0;
                goto set;
        } else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
                if (is_qebsm(q)) {
                        /* partial overwrite, just update last_move_ftc */
                        diff = add_buf(bufnr, count);
                        diff = sub_buf(diff, q->last_move_ftc);
                        q->u.in.ack_count -= diff;
                        if (q->u.in.ack_count <= 0) {
                                q->u.in.polling = 0;
                                q->u.in.ack_count = 0;
                                /* TODO: must we set last_move_ftc to something meaningful? */
                                goto set;
                        }
                        q->last_move_ftc = add_buf(q->last_move_ftc, diff);
                } else
                        /* the only ACK will be deleted, so stop polling */
                        q->u.in.polling = 0;
        }

set:
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

        used = atomic_add_return(count, &q->nr_buf_used) - count;
        BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

        /* no need to signal as long as the adapter had free buffers */
        if (used)
                return;

        if (need_siga_in(q)) {
                cc = qdio_siga_input(q);
                if (cc)
                        q->qdio_error = cc;
        }
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
                            int bufnr, int count)
{
        unsigned char state;
        int used;

        qdio_perf_stat_inc(&perf_stats.outbound_handler);

        count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
        used = atomic_add_return(count, &q->nr_buf_used);
        BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

        if (callflags & QDIO_FLAG_PCI_OUT)
                q->u.out.pci_out_enabled = 1;
        else
                q->u.out.pci_out_enabled = 0;

        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                if (multicast_outbound(q))
                        qdio_kick_outbound_q(q);
                else if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
                         (count > 1) &&
                         (count <= q->irq_ptr->ssqd_desc.mmwc)) {
                        /* exploit enhanced SIGA */
                        q->u.out.use_enh_siga = 1;
                        qdio_kick_outbound_q(q);
                } else {
                        /*
                         * One siga-w per buffer required for unicast
                         * HiperSockets.
                         */
                        q->u.out.use_enh_siga = 0;
                        while (count--)
                                qdio_kick_outbound_q(q);
                }

                /* report CC=2 conditions synchronously */
                if (q->qdio_error)
                        __qdio_outbound_processing(q);
                goto out;
        }

        if (need_siga_sync(q)) {
                qdio_siga_sync_q(q);
                goto out;
        }

        /* try to fast requeue buffers */
        get_buf_state(q, prev_buf(bufnr), &state, 0);
        if (state != SLSB_CU_OUTPUT_PRIMED)
                qdio_kick_outbound_q(q);
        else {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
                qdio_perf_stat_inc(&perf_stats.fast_requeue);
        }
out:
        /* Fixme: could wait forever if called from process context */
        tasklet_schedule(&q->tasklet);
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
            int q_nr, int bufnr, int count)
{
        struct qdio_irq *irq_ptr;

        if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
            (count > QDIO_MAX_BUFFERS_PER_Q) ||
            (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if (!count)
                return 0;

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (callflags & QDIO_FLAG_SYNC_INPUT)
                DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
        else
                DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);

        if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
                return -EBUSY;

        if (callflags & QDIO_FLAG_SYNC_INPUT)
                handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
                               count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
                handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
                                count);
        else
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
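
/*
 * Illustrative usage by an upper-layer driver (not part of this file):
 * returning one processed inbound buffer to the adapter would look like
 *   do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 */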

static int __init init_QDIO(void)
{
        int rc;

        rc = qdio_setup_init();
        if (rc)
                return rc;
        rc = tiqdio_allocate_memory();
        if (rc)
                goto out_cache;
        rc = qdio_debug_init();
        if (rc)
                goto out_ti;
        rc = qdio_setup_perf_stats();
        if (rc)
                goto out_debug;
        rc = tiqdio_register_thinints();
        if (rc)
                goto out_perf;
        return 0;

out_perf:
        qdio_remove_perf_stats();
out_debug:
        qdio_debug_exit();
out_ti:
        tiqdio_free_memory();
out_cache:
        qdio_setup_exit();
        return rc;
}

static void __exit exit_QDIO(void)
{
        tiqdio_unregister_thinints();
        tiqdio_free_memory();
        qdio_remove_perf_stats();
        qdio_debug_exit();
        qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);