[linux-2.6-omap-h63xx.git] drivers/scsi/qla2xxx/qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: interrupt number
24  * @dev_id: response queue pointer
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33         scsi_qla_host_t *vha;
34         struct qla_hw_data *ha;
35         struct device_reg_2xxx __iomem *reg;
36         int             status;
37         unsigned long   iter;
38         uint16_t        hccr;
39         uint16_t        mb[4];
40         struct rsp_que *rsp;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock(&ha->hardware_lock);
54         vha = qla2x00_get_rsp_host(rsp);
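            /*
             * Service a bounded number of events per invocation so a wedged
             * RISC cannot pin this handler indefinitely.
             */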
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
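                    /*
                     * Semaphore bit 0 set: the RISC has posted mailbox or
                     * asynchronous-event data; clear: new response entries.
                     */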
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
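                            /*
                             * mb[0] 0x4000-0x7fff is a mailbox command status;
                             * 0x8000-0xbfff is an asynchronous event code.
                             */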
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock(&ha->hardware_lock);
106
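            /* Wake any caller sleeping on this mailbox command's completion. */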
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: response queue pointer
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137
138         rsp = (struct rsp_que *) dev_id;
139         if (!rsp) {
140                 printk(KERN_INFO
141                     "%s(): NULL response queue pointer\n", __func__);
142                 return (IRQ_NONE);
143         }
144
145         ha = rsp->hw;
146         reg = &ha->iobase->isp;
147         status = 0;
148
149         spin_lock(&ha->hardware_lock);
150         vha = qla2x00_get_rsp_host(rsp);
151         for (iter = 50; iter--; ) {
152                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153                 if (stat & HSR_RISC_PAUSED) {
154                         if (pci_channel_offline(ha->pdev))
155                                 break;
156
157                         hccr = RD_REG_WORD(&reg->hccr);
158                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
159                                 qla_printk(KERN_INFO, ha, "Parity error -- "
160                                     "HCCR=%x, Dumping firmware!\n", hccr);
161                         else
162                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
163                                     "HCCR=%x, Dumping firmware!\n", hccr);
164
165                         /*
166                          * Issue a "HARD" reset in order for the RISC
167                          * interrupt bit to be cleared.  Schedule a big
168                          * hammer to get out of the RISC PAUSED state.
169                          */
170                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171                         RD_REG_WORD(&reg->hccr);
172
173                         ha->isp_ops->fw_dump(vha, 1);
174                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175                         break;
176                 } else if ((stat & HSR_RISC_INT) == 0)
177                         break;
178
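                    /*
                     * The low byte of host_status identifies the interrupt
                     * source; the high word carries mailbox 0 (completion
                     * status or event code).
                     */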
179                 switch (stat & 0xff) {
180                 case 0x1:
181                 case 0x2:
182                 case 0x10:
183                 case 0x11:
184                         qla2x00_mbx_completion(vha, MSW(stat));
185                         status |= MBX_INTERRUPT;
186
187                         /* Release mailbox registers. */
188                         WRT_REG_WORD(&reg->semaphore, 0);
189                         break;
190                 case 0x12:
191                         mb[0] = MSW(stat);
192                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195                         qla2x00_async_event(vha, rsp, mb);
196                         break;
197                 case 0x13:
198                         qla2x00_process_response_queue(rsp);
199                         break;
200                 case 0x15:
201                         mb[0] = MBA_CMPLT_1_16BIT;
202                         mb[1] = MSW(stat);
203                         qla2x00_async_event(vha, rsp, mb);
204                         break;
205                 case 0x16:
206                         mb[0] = MBA_SCSI_COMPLETION;
207                         mb[1] = MSW(stat);
208                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209                         qla2x00_async_event(vha, rsp, mb);
210                         break;
211                 default:
212                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
213                             "(%d).\n",
214                             vha->host_no, stat & 0xff));
215                         break;
216                 }
217                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218                 RD_REG_WORD_RELAXED(&reg->hccr);
219         }
220         spin_unlock(&ha->hardware_lock);
221
222         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
224                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
225                 complete(&ha->mbx_intr_comp);
226         }
227
228         return (IRQ_HANDLED);
229 }
230
231 /**
232  * qla2x00_mbx_completion() - Process mailbox command completions.
233  * @vha: SCSI driver HA context
234  * @mb0: Mailbox0 register
235  */
236 static void
237 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
238 {
239         uint16_t        cnt;
240         uint16_t __iomem *wptr;
241         struct qla_hw_data *ha = vha->hw;
242         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
243
244         /* Load return mailbox registers. */
245         ha->flags.mbox_int = 1;
246         ha->mailbox_out[0] = mb0;
247         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
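            /*
             * ISP2200 maps mailbox registers 8 and above at a different
             * offset (hence the re-seek at cnt == 8); mailboxes 4 and 5 are
             * read via the debounce helper in case their values change
             * mid-read.
             */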
249         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
250                 if (IS_QLA2200(ha) && cnt == 8)
251                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252                 if (cnt == 4 || cnt == 5)
253                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254                 else
255                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
256
257                 wptr++;
258         }
259
260         if (ha->mcp) {
261                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
262                     __func__, vha->host_no, ha->mcp->mb[0]));
263         } else {
264                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
265                     __func__, vha->host_no));
266         }
267 }
268
269 /**
270  * qla2x00_async_event() - Process asynchronous events.
271  * @vha: SCSI driver HA context
272  * @mb: Mailbox registers (0 - 3)
273  */
274 void
275 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276 {
277 #define LS_UNKNOWN      2
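            /*
             * Firmware link-speed code (mb[1]) to Gbps string; LS_UNKNOWN
             * covers codes with no defined rate.
             */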
278         static char     *link_speeds[5] = { "1", "2", "?", "4", "8" };
279         char            *link_speed;
280         uint16_t        handle_cnt;
281         uint16_t        cnt;
282         uint32_t        handles[5];
283         struct qla_hw_data *ha = vha->hw;
284         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
285         uint32_t        rscn_entry, host_pid;
286         uint8_t         rscn_queue_index;
287         unsigned long   flags;
288
289         /* Setup to process RIO completion. */
290         handle_cnt = 0;
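            /*
             * Reduced Interrupt Operation (RIO): one event may carry up to
             * five completed command handles in the mailboxes.  Unpack them
             * and normalize mb[0] to MBA_SCSI_COMPLETION for the switch below.
             */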
291         switch (mb[0]) {
292         case MBA_SCSI_COMPLETION:
293                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
294                 handle_cnt = 1;
295                 break;
296         case MBA_CMPLT_1_16BIT:
297                 handles[0] = mb[1];
298                 handle_cnt = 1;
299                 mb[0] = MBA_SCSI_COMPLETION;
300                 break;
301         case MBA_CMPLT_2_16BIT:
302                 handles[0] = mb[1];
303                 handles[1] = mb[2];
304                 handle_cnt = 2;
305                 mb[0] = MBA_SCSI_COMPLETION;
306                 break;
307         case MBA_CMPLT_3_16BIT:
308                 handles[0] = mb[1];
309                 handles[1] = mb[2];
310                 handles[2] = mb[3];
311                 handle_cnt = 3;
312                 mb[0] = MBA_SCSI_COMPLETION;
313                 break;
314         case MBA_CMPLT_4_16BIT:
315                 handles[0] = mb[1];
316                 handles[1] = mb[2];
317                 handles[2] = mb[3];
318                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
319                 handle_cnt = 4;
320                 mb[0] = MBA_SCSI_COMPLETION;
321                 break;
322         case MBA_CMPLT_5_16BIT:
323                 handles[0] = mb[1];
324                 handles[1] = mb[2];
325                 handles[2] = mb[3];
326                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
327                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
328                 handle_cnt = 5;
329                 mb[0] = MBA_SCSI_COMPLETION;
330                 break;
331         case MBA_CMPLT_2_32BIT:
332                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
333                 handles[1] = le32_to_cpu(
334                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
335                     RD_MAILBOX_REG(ha, reg, 6));
336                 handle_cnt = 2;
337                 mb[0] = MBA_SCSI_COMPLETION;
338                 break;
339         default:
340                 break;
341         }
342
343         switch (mb[0]) {
344         case MBA_SCSI_COMPLETION:       /* Fast Post */
345                 if (!vha->flags.online)
346                         break;
347
348                 for (cnt = 0; cnt < handle_cnt; cnt++)
349                         qla2x00_process_completed_request(vha, rsp->req,
350                                 handles[cnt]);
351                 break;
352
353         case MBA_RESET:                 /* Reset */
354                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355                         vha->host_no));
356
357                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
358                 break;
359
360         case MBA_SYSTEM_ERR:            /* System Error */
361                 qla_printk(KERN_INFO, ha,
362                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363                     mb[1], mb[2], mb[3]);
364
365                 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
366                 ha->isp_ops->fw_dump(vha, 1);
367
368                 if (IS_FWI2_CAPABLE(ha)) {
369                         if (mb[1] == 0 && mb[2] == 0) {
370                                 qla_printk(KERN_ERR, ha,
371                                     "Unrecoverable Hardware Error: adapter "
372                                     "marked OFFLINE!\n");
373                                 vha->flags.online = 0;
374                         } else
375                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
376                 } else if (mb[1] == 0) {
377                         qla_printk(KERN_INFO, ha,
378                             "Unrecoverable Hardware Error: adapter marked "
379                             "OFFLINE!\n");
380                         vha->flags.online = 0;
381                 } else
382                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
383                 break;
384
385         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
386                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
387                     vha->host_no));
388                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
389
390                 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
391                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
392                 break;
393
394         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
395                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
396                     vha->host_no));
397                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398
399                 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
400                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
401                 break;
402
403         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
404                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
405                     vha->host_no));
406                 break;
407
408         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
409                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
410                     mb[1]));
411                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
412
413                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
414                         atomic_set(&vha->loop_state, LOOP_DOWN);
415                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
416                         qla2x00_mark_all_devices_lost(vha, 1);
417                 }
418
419                 if (vha->vp_idx) {
420                         atomic_set(&vha->vp_state, VP_FAILED);
421                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
422                 }
423
424                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
425                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
426
427                 vha->flags.management_server_logged_in = 0;
428                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
429                 break;
430
431         case MBA_LOOP_UP:               /* Loop Up Event */
432                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
433                         link_speed = link_speeds[0];
434                         ha->link_data_rate = PORT_SPEED_1GB;
435                 } else {
436                         link_speed = link_speeds[LS_UNKNOWN];
437                         if (mb[1] < 5)
438                                 link_speed = link_speeds[mb[1]];
439                         ha->link_data_rate = mb[1];
440                 }
441
442                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
443                     vha->host_no, link_speed));
444                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
445                     link_speed);
446
447                 vha->flags.management_server_logged_in = 0;
448                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
449                 break;
450
451         case MBA_LOOP_DOWN:             /* Loop Down Event */
452                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
453                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
454                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
455                     mb[1], mb[2], mb[3]);
456
457                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
458                         atomic_set(&vha->loop_state, LOOP_DOWN);
459                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
460                         vha->device_flags |= DFLG_NO_CABLE;
461                         qla2x00_mark_all_devices_lost(vha, 1);
462                 }
463
464                 if (vha->vp_idx) {
465                         atomic_set(&vha->vp_state, VP_FAILED);
466                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
467                 }
468
469                 vha->flags.management_server_logged_in = 0;
470                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
471                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
472                 break;
473
474         case MBA_LIP_RESET:             /* LIP reset occurred */
475                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
476                     vha->host_no, mb[1]));
477                 qla_printk(KERN_INFO, ha,
478                     "LIP reset occurred (%x).\n", mb[1]);
479
480                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
481                         atomic_set(&vha->loop_state, LOOP_DOWN);
482                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
483                         qla2x00_mark_all_devices_lost(vha, 1);
484                 }
485
486                 if (vha->vp_idx) {
487                         atomic_set(&vha->vp_state, VP_FAILED);
488                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
489                 }
490
491                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
492
493                 ha->operating_mode = LOOP;
494                 vha->flags.management_server_logged_in = 0;
495                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
496                 break;
497
498         case MBA_POINT_TO_POINT:        /* Point-to-Point */
499                 if (IS_QLA2100(ha))
500                         break;
501
502                 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
503                     vha->host_no));
504
505                 /*
506                  * Until there's a transition from loop down to loop up, treat
507                  * this as loop down only.
508                  */
509                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
510                         atomic_set(&vha->loop_state, LOOP_DOWN);
511                         if (!atomic_read(&vha->loop_down_timer))
512                                 atomic_set(&vha->loop_down_timer,
513                                     LOOP_DOWN_TIME);
514                         qla2x00_mark_all_devices_lost(vha, 1);
515                 }
516
517                 if (vha->vp_idx) {
518                         atomic_set(&vha->vp_state, VP_FAILED);
519                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
520                 }
521
522                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
523                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
524
525                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
526                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
527
528                 ha->flags.gpsc_supported = 1;
529                 vha->flags.management_server_logged_in = 0;
530                 break;
531
532         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
533                 if (IS_QLA2100(ha))
534                         break;
535
536                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
537                     "received.\n",
538                     vha->host_no));
539                 qla_printk(KERN_INFO, ha,
540                     "Configuration change detected: value=%x.\n", mb[1]);
541
542                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
543                         atomic_set(&vha->loop_state, LOOP_DOWN);
544                         if (!atomic_read(&vha->loop_down_timer))
545                                 atomic_set(&vha->loop_down_timer,
546                                     LOOP_DOWN_TIME);
547                         qla2x00_mark_all_devices_lost(vha, 1);
548                 }
549
550                 if (vha->vp_idx) {
551                         atomic_set(&vha->vp_state, VP_FAILED);
552                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
553                 }
554
555                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
556                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
557                 break;
558
559         case MBA_PORT_UPDATE:           /* Port database update */
560                 /* Only handle SCNs for our Vport index. */
561                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
562                         break;
563
564                 /*
565                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
566                  * event etc. earlier indicating loop is down) then process
567                  * it.  Otherwise ignore it and wait for RSCN to come in.
568                  */
569                 atomic_set(&vha->loop_down_timer, 0);
570                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
571                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
572                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
573                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
574                             mb[2], mb[3]));
575                         break;
576                 }
577
578                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
579                     vha->host_no));
580                 DEBUG(printk(KERN_INFO
581                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
582                     vha->host_no, mb[1], mb[2], mb[3]));
583
584                 /*
585                  * Mark all devices as missing so we will login again.
586                  */
587                 atomic_set(&vha->loop_state, LOOP_UP);
588
589                 qla2x00_mark_all_devices_lost(vha, 1);
590
591                 vha->flags.rscn_queue_overflow = 1;
592
593                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
594                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
595                 break;
596
597         case MBA_RSCN_UPDATE:           /* State Change Registration */
598                 /* Check if the Vport has issued a SCR */
599                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
600                         break;
601                 /* Only handle SCNs for our Vport index. */
602                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
603                         break;
604                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
605                     vha->host_no));
606                 DEBUG(printk(KERN_INFO
607                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
608                     vha->host_no, mb[1], mb[2], mb[3]));
609
610                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
611                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
612                                 | vha->d_id.b.al_pa;
613                 if (rscn_entry == host_pid) {
614                         DEBUG(printk(KERN_INFO
615                             "scsi(%ld): Ignoring RSCN update to local host "
616                             "port ID (%06x)\n",
617                             vha->host_no, host_pid));
618                         break;
619                 }
620
621                 /* Ignore reserved bits from RSCN-payload. */
622                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
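                    /*
                     * Stage the entry in the RSCN ring buffer; if the next
                     * slot would collide with the consumer, flag an overflow
                     * rather than silently dropping the event.
                     */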
623                 rscn_queue_index = vha->rscn_in_ptr + 1;
624                 if (rscn_queue_index == MAX_RSCN_COUNT)
625                         rscn_queue_index = 0;
626                 if (rscn_queue_index != vha->rscn_out_ptr) {
627                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
628                         vha->rscn_in_ptr = rscn_queue_index;
629                 } else {
630                         vha->flags.rscn_queue_overflow = 1;
631                 }
632
633                 atomic_set(&vha->loop_state, LOOP_UPDATE);
634                 atomic_set(&vha->loop_down_timer, 0);
635                 vha->flags.management_server_logged_in = 0;
636
637                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
638                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
639                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
640                 break;
641
642         /* case MBA_RIO_RESPONSE: */
643         case MBA_ZIO_RESPONSE:
644                 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
645                     vha->host_no));
646                 DEBUG(printk(KERN_INFO
647                     "scsi(%ld): [R|Z]IO update completion.\n",
648                     vha->host_no));
649
650                 if (IS_FWI2_CAPABLE(ha))
651                         qla24xx_process_response_queue(rsp);
652                 else
653                         qla2x00_process_response_queue(rsp);
654                 break;
655
656         case MBA_DISCARD_RND_FRAME:
657                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
658                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
659                 break;
660
661         case MBA_TRACE_NOTIFICATION:
662                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
663                     vha->host_no, mb[1], mb[2]));
664                 break;
665
666         case MBA_ISP84XX_ALERT:
667                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
668                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
669
670                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
671                 switch (mb[1]) {
672                 case A84_PANIC_RECOVERY:
673                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
674                             "%04x %04x\n", mb[2], mb[3]);
675                         break;
676                 case A84_OP_LOGIN_COMPLETE:
677                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
678                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
679                             "firmware version %x\n", ha->cs84xx->op_fw_version));
680                         break;
681                 case A84_DIAG_LOGIN_COMPLETE:
682                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
683                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
684                             "diagnostic firmware version %x\n",
685                             ha->cs84xx->diag_fw_version));
686                         break;
687                 case A84_GOLD_LOGIN_COMPLETE:
688                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
689                         ha->cs84xx->fw_update = 1;
690                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
691                             "firmware version %x\n",
692                             ha->cs84xx->gold_fw_version));
693                         break;
694                 default:
695                         qla_printk(KERN_ERR, ha,
696                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
697                             mb[1], mb[2], mb[3]);
698                 }
699                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
700                 break;
701         }
702
703         if (!vha->vp_idx && ha->num_vhosts)
704                 qla2x00_alert_all_vps(rsp, mb);
705 }
706
707 static void
708 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
709 {
710         fc_port_t *fcport = data;
711         struct scsi_qla_host *vha = fcport->vha;
712         struct qla_hw_data *ha = vha->hw;
713         struct req_que *req = NULL;
714
715         req = ha->req_q_map[vha->req_ques[0]];
716         if (!req)
717                 return;
718         if (req->max_q_depth <= sdev->queue_depth)
719                 return;
720
721         if (sdev->ordered_tags)
722                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
723                     sdev->queue_depth + 1);
724         else
725                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
726                     sdev->queue_depth + 1);
727
728         fcport->last_ramp_up = jiffies;
729
730         DEBUG2(qla_printk(KERN_INFO, ha,
731             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
732             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
733             sdev->queue_depth));
734 }
735
736 static void
737 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
738 {
739         fc_port_t *fcport = data;
740
741         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
742                 return;
743
744         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
745             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
746             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
747             sdev->queue_depth));
748 }
749
750 static inline void
751 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
752                                                                 srb_t *sp)
753 {
754         fc_port_t *fcport;
755         struct scsi_device *sdev;
756
757         sdev = sp->cmd->device;
758         if (sdev->queue_depth >= req->max_q_depth)
759                 return;
760
761         fcport = sp->fcport;
762         if (time_before(jiffies,
763             fcport->last_ramp_up + ql2xqfullrampup * HZ))
764                 return;
765         if (time_before(jiffies,
766             fcport->last_queue_full + ql2xqfullrampup * HZ))
767                 return;
768
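            /* Both hold-off periods expired; bump every device on this target. */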
769         starget_for_each_device(sdev->sdev_target, fcport,
770             qla2x00_adjust_sdev_qdepth_up);
771 }
772
773 /**
774  * qla2x00_process_completed_request() - Process a Fast Post response.
775  * @vha: SCSI driver HA context
776  * @index: SRB index
777  */
778 static void
779 qla2x00_process_completed_request(struct scsi_qla_host *vha,
780                                 struct req_que *req, uint32_t index)
781 {
782         srb_t *sp;
783         struct qla_hw_data *ha = vha->hw;
784
785         /* Validate handle. */
786         if (index >= MAX_OUTSTANDING_COMMANDS) {
787                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
788                     vha->host_no, index));
789                 qla_printk(KERN_WARNING, ha,
790                     "Invalid SCSI completion handle %d.\n", index);
791
792                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
793                 return;
794         }
795
796         sp = req->outstanding_cmds[index];
797         if (sp) {
798                 /* Free outstanding command slot. */
799                 req->outstanding_cmds[index] = NULL;
800
801                 CMD_COMPL_STATUS(sp->cmd) = 0L;
802                 CMD_SCSI_STATUS(sp->cmd) = 0L;
803
804                 /* Save ISP completion status */
805                 sp->cmd->result = DID_OK << 16;
806
807                 qla2x00_ramp_up_queue_depth(vha, req, sp);
808                 qla2x00_sp_compl(ha, sp);
809         } else {
810                 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
811                     vha->host_no));
812                 qla_printk(KERN_WARNING, ha,
813                     "Invalid ISP SCSI completion handle\n");
814
815                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
816         }
817 }
818
819 /**
820  * qla2x00_process_response_queue() - Process response queue entries.
821  * @rsp: response queue pointer
822  */
823 void
824 qla2x00_process_response_queue(struct rsp_que *rsp)
825 {
826         struct scsi_qla_host *vha;
827         struct qla_hw_data *ha = rsp->hw;
828         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
829         sts_entry_t     *pkt;
830         uint16_t        handle_cnt;
831         uint16_t        cnt;
832
833         vha = qla2x00_get_rsp_host(rsp);
834
835         if (!vha->flags.online)
836                 return;
837
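            /*
             * Consume entries until one still stamped RESPONSE_PROCESSED is
             * found; each handled entry is re-stamped below so a wrapped ring
             * is not processed twice.
             */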
838         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
839                 pkt = (sts_entry_t *)rsp->ring_ptr;
840
841                 rsp->ring_index++;
842                 if (rsp->ring_index == rsp->length) {
843                         rsp->ring_index = 0;
844                         rsp->ring_ptr = rsp->ring;
845                 } else {
846                         rsp->ring_ptr++;
847                 }
848
849                 if (pkt->entry_status != 0) {
850                         DEBUG3(printk(KERN_INFO
851                             "scsi(%ld): Process error entry.\n", vha->host_no));
852
853                         qla2x00_error_entry(vha, rsp, pkt);
854                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
855                         wmb();
856                         continue;
857                 }
858
859                 switch (pkt->entry_type) {
860                 case STATUS_TYPE:
861                         qla2x00_status_entry(vha, rsp, pkt);
862                         break;
863                 case STATUS_TYPE_21:
864                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
865                         for (cnt = 0; cnt < handle_cnt; cnt++) {
866                                 qla2x00_process_completed_request(vha, rsp->req,
867                                     ((sts21_entry_t *)pkt)->handle[cnt]);
868                         }
869                         break;
870                 case STATUS_TYPE_22:
871                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
872                         for (cnt = 0; cnt < handle_cnt; cnt++) {
873                                 qla2x00_process_completed_request(vha, rsp->req,
874                                     ((sts22_entry_t *)pkt)->handle[cnt]);
875                         }
876                         break;
877                 case STATUS_CONT_TYPE:
878                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
879                         break;
880                 default:
881                         /* Type Not Supported. */
882                         DEBUG4(printk(KERN_WARNING
883                             "scsi(%ld): Received unknown response pkt type %x "
884                             "entry status=%x.\n",
885                             vha->host_no, pkt->entry_type, pkt->entry_status));
886                         break;
887                 }
888                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
889                 wmb();
890         }
891
892         /* Adjust ring index */
893         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
894 }
895
896 static inline void
897 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898 {
899         struct scsi_cmnd *cp = sp->cmd;
900
901         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
902                 sense_len = SCSI_SENSE_BUFFERSIZE;
903
904         CMD_ACTUAL_SNSLEN(cp) = sense_len;
905         sp->request_sense_length = sense_len;
906         sp->request_sense_ptr = cp->sense_buffer;
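            /*
             * Only the first 32 bytes of sense data fit in the status IOCB;
             * any remainder arrives via status-continuation entries, tracked
             * through request_sense_ptr/length and the vha's status_srb.
             */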
907         if (sp->request_sense_length > 32)
908                 sense_len = 32;
909
910         memcpy(cp->sense_buffer, sense_data, sense_len);
911
912         sp->request_sense_ptr += sense_len;
913         sp->request_sense_length -= sense_len;
914         if (sp->request_sense_length != 0)
915                 sp->fcport->vha->status_srb = sp;
916
917         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
918             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
919             cp->device->channel, cp->device->id, cp->device->lun, cp,
920             cp->serial_number));
921         if (sense_len)
922                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
923                     CMD_ACTUAL_SNSLEN(cp)));
924 }
925
926 /**
927  * qla2x00_status_entry() - Process a Status IOCB entry.
928  * @vha: SCSI driver HA context
929  * @pkt: Entry pointer
930  */
931 static void
932 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
933 {
934         srb_t           *sp;
935         fc_port_t       *fcport;
936         struct scsi_cmnd *cp;
937         sts_entry_t *sts;
938         struct sts_entry_24xx *sts24;
939         uint16_t        comp_status;
940         uint16_t        scsi_status;
941         uint8_t         lscsi_status;
942         int32_t         resid;
943         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
944         uint8_t         *rsp_info, *sense_data;
945         struct qla_hw_data *ha = vha->hw;
946         struct req_que *req = rsp->req;
947
948         sts = (sts_entry_t *) pkt;
949         sts24 = (struct sts_entry_24xx *) pkt;
950         if (IS_FWI2_CAPABLE(ha)) {
951                 comp_status = le16_to_cpu(sts24->comp_status);
952                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
953         } else {
954                 comp_status = le16_to_cpu(sts->comp_status);
955                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
956         }
957
958         /* Fast path completion. */
959         if (comp_status == CS_COMPLETE && scsi_status == 0) {
960                 qla2x00_process_completed_request(vha, req, sts->handle);
961
962                 return;
963         }
964
965         /* Validate handle. */
966         if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
967                 sp = req->outstanding_cmds[sts->handle];
968                 req->outstanding_cmds[sts->handle] = NULL;
969         } else
970                 sp = NULL;
971
972         if (sp == NULL) {
973                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
974                     vha->host_no));
975                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
976
977                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
978                 qla2xxx_wake_dpc(vha);
979                 return;
980         }
981         cp = sp->cmd;
982         if (cp == NULL) {
983                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
984                     "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
985                 qla_printk(KERN_WARNING, ha,
986                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
987
988                 return;
989         }
990
991         lscsi_status = scsi_status & STATUS_MASK;
992         CMD_ENTRY_STATUS(cp) = sts->entry_status;
993         CMD_COMPL_STATUS(cp) = comp_status;
994         CMD_SCSI_STATUS(cp) = scsi_status;
995
996         fcport = sp->fcport;
997
998         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
999         if (IS_FWI2_CAPABLE(ha)) {
1000                 sense_len = le32_to_cpu(sts24->sense_len);
1001                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1002                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1003                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1004                 rsp_info = sts24->data;
1005                 sense_data = sts24->data;
1006                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1007         } else {
1008                 sense_len = le16_to_cpu(sts->req_sense_length);
1009                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1010                 resid_len = le32_to_cpu(sts->residual_length);
1011                 rsp_info = sts->rsp_info;
1012                 sense_data = sts->req_sense_data;
1013         }
1014
1015         /* Check for any FCP transport errors. */
1016         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1017                 /* Sense data lies beyond any FCP RESPONSE data. */
1018                 if (IS_FWI2_CAPABLE(ha))
1019                         sense_data += rsp_info_len;
1020                 if (rsp_info_len > 3 && rsp_info[3]) {
1021                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1022                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1023                             "retrying command\n", vha->host_no,
1024                             cp->device->channel, cp->device->id,
1025                             cp->device->lun, rsp_info_len, rsp_info[0],
1026                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1027                             rsp_info[5], rsp_info[6], rsp_info[7]));
1028
1029                         cp->result = DID_BUS_BUSY << 16;
1030                         qla2x00_sp_compl(ha, sp);
1031                         return;
1032                 }
1033         }
1034
1035         /* Check for overrun. */
1036         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1037             scsi_status & SS_RESIDUAL_OVER)
1038                 comp_status = CS_DATA_OVERRUN;
1039
1040         /*
1041          * Based on host and SCSI status, generate a status code for Linux.
1042          */
1043         switch (comp_status) {
1044         case CS_COMPLETE:
1045         case CS_QUEUE_FULL:
1046                 if (scsi_status == 0) {
1047                         cp->result = DID_OK << 16;
1048                         break;
1049                 }
1050                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1051                         resid = resid_len;
1052                         scsi_set_resid(cp, resid);
1053                         CMD_RESID_LEN(cp) = resid;
1054
1055                         if (!lscsi_status &&
1056                             ((unsigned)(scsi_bufflen(cp) - resid) <
1057                              cp->underflow)) {
1058                                 qla_printk(KERN_INFO, ha,
1059                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1060                                            "detected (%x of %x bytes)...returning "
1061                                            "error status.\n", vha->host_no,
1062                                            cp->device->channel, cp->device->id,
1063                                            cp->device->lun, resid,
1064                                            scsi_bufflen(cp));
1065
1066                                 cp->result = DID_ERROR << 16;
1067                                 break;
1068                         }
1069                 }
1070                 cp->result = DID_OK << 16 | lscsi_status;
1071
1072                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1073                         DEBUG2(printk(KERN_INFO
1074                             "scsi(%ld): QUEUE FULL status detected "
1075                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1076                             scsi_status));
1077
1078                         /* Adjust queue depth for all luns on the port. */
1079                         fcport->last_queue_full = jiffies;
1080                         starget_for_each_device(cp->device->sdev_target,
1081                             fcport, qla2x00_adjust_sdev_qdepth_down);
1082                         break;
1083                 }
1084                 if (lscsi_status != SS_CHECK_CONDITION)
1085                         break;
1086
1087                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1088                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1089                         break;
1090
1091                 qla2x00_handle_sense(sp, sense_data, sense_len);
1092                 break;
1093
1094         case CS_DATA_UNDERRUN:
1095                 resid = resid_len;
1096                 /* Use F/W calculated residual length. */
1097                 if (IS_FWI2_CAPABLE(ha)) {
1098                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1099                                 lscsi_status = 0;
1100                         } else if (resid != fw_resid_len) {
1101                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1102                                 lscsi_status = 0;
1103                         }
1104                         resid = fw_resid_len;
1105                 }
1106
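                    /*
                     * If SS_RESIDUAL_UNDER survived the checks above, record
                     * the trusted residual; a cleared flag is handled below as
                     * dropped frames or a benign completion.
                     */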
1107                 if (scsi_status & SS_RESIDUAL_UNDER) {
1108                         scsi_set_resid(cp, resid);
1109                         CMD_RESID_LEN(cp) = resid;
1110                 } else {
1111                         DEBUG2(printk(KERN_INFO
1112                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1113                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1114                             "os_underflow=0x%x\n", vha->host_no,
1115                             cp->device->id, cp->device->lun, comp_status,
1116                             scsi_status, resid_len, resid, cp->cmnd[0],
1117                             cp->underflow));
1118
1119                 }
1120
1121                 /*
1122                  * Check to see if SCSI Status is non zero. If so report SCSI
1123                  * Status.
1124                  */
1125                 if (lscsi_status != 0) {
1126                         cp->result = DID_OK << 16 | lscsi_status;
1127
1128                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1129                                 DEBUG2(printk(KERN_INFO
1130                                     "scsi(%ld): QUEUE FULL status detected "
1131                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1132                                     scsi_status));
1133
1134                                 /*
1135                                  * Adjust queue depth for all luns on the
1136                                  * port.
1137                                  */
1138                                 fcport->last_queue_full = jiffies;
1139                                 starget_for_each_device(
1140                                     cp->device->sdev_target, fcport,
1141                                     qla2x00_adjust_sdev_qdepth_down);
1142                                 break;
1143                         }
1144                         if (lscsi_status != SS_CHECK_CONDITION)
1145                                 break;
1146
1147                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1148                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1149                                 break;
1150
1151                         qla2x00_handle_sense(sp, sense_data, sense_len);
1152                 } else {
1153                         /*
1154                          * If RISC reports underrun and target does not report
1155                          * it then we must have a lost frame, so tell upper
1156                          * layer to retry it by reporting a bus busy.
1157                          */
1158                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1159                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1160                                               "frame(s) detected (%x of %x bytes)..."
1161                                               "retrying command.\n",
1162                                         vha->host_no, cp->device->channel,
1163                                         cp->device->id, cp->device->lun, resid,
1164                                         scsi_bufflen(cp)));
1165
1166                                 cp->result = DID_BUS_BUSY << 16;
1167                                 break;
1168                         }
1169
1170                         /* Handle mid-layer underflow */
1171                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1172                             cp->underflow) {
1173                                 qla_printk(KERN_INFO, ha,
1174                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1175                                            "detected (%x of %x bytes)...returning "
1176                                            "error status.\n", vha->host_no,
1177                                            cp->device->channel, cp->device->id,
1178                                            cp->device->lun, resid,
1179                                            scsi_bufflen(cp));
1180
1181                                 cp->result = DID_ERROR << 16;
1182                                 break;
1183                         }
1184
1185                         /* Everybody online, looking good... */
1186                         cp->result = DID_OK << 16;
1187                 }
1188                 break;
1189
1190         case CS_DATA_OVERRUN:
1191                 DEBUG2(printk(KERN_INFO
1192                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1193                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1194                     scsi_status));
1195                 DEBUG2(printk(KERN_INFO
1196                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1197                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1198                     cp->cmnd[4], cp->cmnd[5]));
1199                 DEBUG2(printk(KERN_INFO
1200                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1201                     "status!\n",
1202                     cp->serial_number, scsi_bufflen(cp), resid_len));
1203
1204                 cp->result = DID_ERROR << 16;
1205                 break;
1206
1207         case CS_PORT_LOGGED_OUT:
1208         case CS_PORT_CONFIG_CHG:
1209         case CS_PORT_BUSY:
1210         case CS_INCOMPLETE:
1211         case CS_PORT_UNAVAILABLE:
1212                 /*
1213                  * If the port is in Target Down state, return all IOs for this
1214                  * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1215                  * retry_queue.
1216                  */
1217                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1218                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1219                     vha->host_no, cp->device->id, cp->device->lun,
1220                     cp->serial_number, comp_status,
1221                     atomic_read(&fcport->state)));
1222
1223                 /*
1224                  * We are going to have the fc class block the rport
1225                  * while we try to recover so instruct the mid layer
1226                  * to requeue until the class decides how to handle this.
1227                  */
1228                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1229                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1230                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1231                 break;
1232
1233         case CS_RESET:
1234                 DEBUG2(printk(KERN_INFO
1235                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1236                     vha->host_no, comp_status, scsi_status));
1237
1238                 cp->result = DID_RESET << 16;
1239                 break;
1240
1241         case CS_ABORTED:
1242                 /*
1243                  * hv2.19.12 - DID_ABORT does not retry the request.  If we
1244                  * aborted this request then abort; otherwise it must be a
1245                  * reset.
1246                  */
1247                 DEBUG2(printk(KERN_INFO
1248                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1249                     vha->host_no, comp_status, scsi_status));
1250
1251                 cp->result = DID_RESET << 16;
1252                 break;
1253
1254         case CS_TIMEOUT:
1255                 /*
1256                  * We are going to have the fc class block the rport
1257                  * while we try to recover so instruct the mid layer
1258                  * to requeue until the class decides how to handle this.
1259                  */
1260                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1261
1262                 if (IS_FWI2_CAPABLE(ha)) {
1263                         DEBUG2(printk(KERN_INFO
1264                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1265                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1266                             cp->device->id, cp->device->lun, comp_status,
1267                             scsi_status));
1268                         break;
1269                 }
1270                 DEBUG2(printk(KERN_INFO
1271                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1272                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1273                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1274                     le16_to_cpu(sts->status_flags)));
1275
1276                 /* Check to see if logout occurred. */
1277                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1278                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1279                 break;
1280
1281         default:
1282                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1283                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1284                 qla_printk(KERN_INFO, ha,
1285                     "Unknown status detected 0x%x-0x%x.\n",
1286                     comp_status, scsi_status);
1287
1288                 cp->result = DID_ERROR << 16;
1289                 break;
1290         }
1291
1292         /* Place command on done queue. */
1293         if (vha->status_srb == NULL)
1294                 qla2x00_sp_compl(ha, sp);
1295 }
1296
1297 /**
1298  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1299  * @vha: SCSI driver HA context
1300  * @pkt: Entry pointer
1301  *
1302  * Extended sense data.
1303  */
1304 static void
1305 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1306 {
1307         uint8_t         sense_sz = 0;
1308         struct qla_hw_data *ha = vha->hw;
1309         srb_t           *sp = vha->status_srb;
1310         struct scsi_cmnd *cp;
1311
1312         if (sp != NULL && sp->request_sense_length != 0) {
1313                 cp = sp->cmd;
1314                 if (cp == NULL) {
1315                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1316                             "sp=%p.\n", __func__, sp));
1317                         qla_printk(KERN_INFO, ha,
1318                             "cmd is NULL: already returned to OS (sp=%p)\n",
1319                             sp);
1320
1321                         vha->status_srb = NULL;
1322                         return;
1323                 }
1324
1325                 if (sp->request_sense_length > sizeof(pkt->data)) {
1326                         sense_sz = sizeof(pkt->data);
1327                 } else {
1328                         sense_sz = sp->request_sense_length;
1329                 }
1330
1331                 /* Move sense data. */
1332                 if (IS_FWI2_CAPABLE(ha))
1333                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1334                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1335                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1336
1337                 sp->request_sense_ptr += sense_sz;
1338                 sp->request_sense_length -= sense_sz;
1339
1340                 /* Place command on done queue. */
1341                 if (sp->request_sense_length == 0) {
1342                         vha->status_srb = NULL;
1343                         qla2x00_sp_compl(ha, sp);
1344                 }
1345         }
1346 }
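
/*
 * Note: when the firmware cannot fit the whole sense buffer in the
 * status IOCB, qla2x00_status_entry() leaves the SRB in vha->status_srb
 * and defers qla2x00_sp_compl().  Each Status Continuation entry
 * handled above then copies up to sizeof(pkt->data) further sense
 * bytes, with request_sense_ptr/request_sense_length tracking progress.
 * For illustration only (sizes are hypothetical): with 90 sense bytes
 * still outstanding and a 60-byte continuation payload, the first
 * continuation moves 60 bytes and the second moves the remaining 30,
 * after which the command is finally completed.
 */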
1347
1348 /**
1349  * qla2x00_error_entry() - Process an error entry.
1350  * @vha: SCSI driver HA context
1351  * @pkt: Entry pointer
1352  */
1353 static void
1354 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1355 {
1356         srb_t *sp;
1357         struct qla_hw_data *ha = vha->hw;
1358         struct req_que *req = rsp->req;
1359 #if defined(QL_DEBUG_LEVEL_2)
1360         if (pkt->entry_status & RF_INV_E_ORDER)
1361                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1362         else if (pkt->entry_status & RF_INV_E_COUNT)
1363                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1364         else if (pkt->entry_status & RF_INV_E_PARAM)
1365                 qla_printk(KERN_ERR, ha,
1366                     "%s: Invalid Entry Parameter\n", __func__);
1367         else if (pkt->entry_status & RF_INV_E_TYPE)
1368                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1369         else if (pkt->entry_status & RF_BUSY)
1370                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1371         else
1372                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1373 #endif
1374
1375         /* Validate handle. */
1376         if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1377                 sp = req->outstanding_cmds[pkt->handle];
1378         else
1379                 sp = NULL;
1380
1381         if (sp) {
1382                 /* Free outstanding command slot. */
1383                 req->outstanding_cmds[pkt->handle] = NULL;
1384
1385                 /* Bad payload or header */
1386                 if (pkt->entry_status &
1387                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1388                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1389                         sp->cmd->result = DID_ERROR << 16;
1390                 } else if (pkt->entry_status & RF_BUSY) {
1391                         sp->cmd->result = DID_BUS_BUSY << 16;
1392                 } else {
1393                         sp->cmd->result = DID_ERROR << 16;
1394                 }
1395                 qla2x00_sp_compl(ha, sp);
1396
1397         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1398             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1399                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1400                     vha->host_no));
1401                 qla_printk(KERN_WARNING, ha,
1402                     "Error entry - invalid handle\n");
1403
1404                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1405                 qla2xxx_wake_dpc(vha);
1406         }
1407 }
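
/*
 * Note: the handle carried in an error entry indexes the request
 * queue's outstanding_cmds[] array.  When the handle is valid the
 * affected command is failed individually (DID_ERROR or DID_BUS_BUSY
 * above); an out-of-range handle on a command-type IOCB suggests the
 * driver and firmware have lost synchronization, so a full ISP abort is
 * scheduled via ISP_ABORT_NEEDED and the DPC thread is woken to carry
 * it out.
 */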
1408
1409 /**
1410  * qla24xx_mbx_completion() - Process mailbox command completions.
1411  * @vha: SCSI driver HA context
1412  * @mb0: Mailbox0 register
1413  */
1414 static void
1415 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1416 {
1417         uint16_t        cnt;
1418         uint16_t __iomem *wptr;
1419         struct qla_hw_data *ha = vha->hw;
1420         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1421
1422         /* Load return mailbox registers. */
1423         ha->flags.mbox_int = 1;
1424         ha->mailbox_out[0] = mb0;
1425         wptr = (uint16_t __iomem *)&reg->mailbox1;
1426
1427         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1428                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1429                 wptr++;
1430         }
1431
1432         if (ha->mcp) {
1433                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1434                     __func__, vha->host_no, ha->mcp->mb[0]));
1435         } else {
1436                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1437                     __func__, vha->host_no));
1438         }
1439 }
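
/*
 * Note: as the interrupt handlers below show, mailbox 0 is taken from
 * the upper half of host_status and passed in as mb0; mailboxes
 * 1..mbx_count-1 are then read from the consecutive 16-bit registers
 * starting at reg->mailbox1.  A NULL ha->mcp here means a mailbox
 * completion arrived with no mailbox command outstanding, hence the
 * "MBX pointer ERROR" debug message.
 */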
1440
1441 /**
1442  * qla24xx_process_response_queue() - Process response queue entries.
1443  * @rsp: response queue pointer
1444  */
1445 void
1446 qla24xx_process_response_queue(struct rsp_que *rsp)
1447 {
1448         struct qla_hw_data *ha = rsp->hw;
1449         struct sts_entry_24xx *pkt;
1450         struct scsi_qla_host *vha;
1451
1452         vha = qla2x00_get_rsp_host(rsp);
1453
1454         if (!vha->flags.online)
1455                 return;
1456
1457         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1458                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1459
1460                 rsp->ring_index++;
1461                 if (rsp->ring_index == rsp->length) {
1462                         rsp->ring_index = 0;
1463                         rsp->ring_ptr = rsp->ring;
1464                 } else {
1465                         rsp->ring_ptr++;
1466                 }
1467
1468                 if (pkt->entry_status != 0) {
1469                         DEBUG3(printk(KERN_INFO
1470                             "scsi(%ld): Process error entry.\n", vha->host_no));
1471
1472                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1473                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1474                         wmb();
1475                         continue;
1476                 }
1477
1478                 switch (pkt->entry_type) {
1479                 case STATUS_TYPE:
1480                         qla2x00_status_entry(vha, rsp, pkt);
1481                         break;
1482                 case STATUS_CONT_TYPE:
1483                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1484                         break;
1485                 case VP_RPT_ID_IOCB_TYPE:
1486                         qla24xx_report_id_acquisition(vha,
1487                             (struct vp_rpt_id_entry_24xx *)pkt);
1488                         break;
1489                 default:
1490                         /* Type Not Supported. */
1491                         DEBUG4(printk(KERN_WARNING
1492                             "scsi(%ld): Received unknown response pkt type %x "
1493                             "entry status=%x.\n",
1494                             vha->host_no, pkt->entry_type, pkt->entry_status));
1495                         break;
1496                 }
1497                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1498                 wmb();
1499         }
1500
1501         /* Adjust ring index */
1502         ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1503 }
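
/*
 * Note: response-ring consumption follows one pattern throughout --
 * advance ring_index (wrapping to the start of the ring when it reaches
 * rsp->length), dispatch the entry by type, then stamp the entry with
 * RESPONSE_PROCESSED and issue wmb(), presumably so the signature write
 * is ordered ahead of the final ring-out register update done through
 * wrt_rsp_reg().  The signature test in the while condition is what
 * detects entries that have not yet been processed on the next pass.
 */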
1504
1505 static void
1506 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1507 {
1508         int rval;
1509         uint32_t cnt;
1510         struct qla_hw_data *ha = vha->hw;
1511         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1512
1513         if (!IS_QLA25XX(ha))
1514                 return;
1515
1516         rval = QLA_SUCCESS;
1517         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1518         RD_REG_DWORD(&reg->iobase_addr);
1519         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1520         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1521             rval == QLA_SUCCESS; cnt--) {
1522                 if (cnt) {
1523                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1524                         udelay(10);
1525                 } else
1526                         rval = QLA_FUNCTION_TIMEOUT;
1527         }
1528         if (rval == QLA_SUCCESS)
1529                 goto next_test;
1530
1531         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1532         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1533             rval == QLA_SUCCESS; cnt--) {
1534                 if (cnt) {
1535                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1536                         udelay(10);
1537                 } else
1538                         rval = QLA_FUNCTION_TIMEOUT;
1539         }
1540         if (rval != QLA_SUCCESS)
1541                 goto done;
1542
1543 next_test:
1544         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1545                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1546
1547 done:
1548         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1549         RD_REG_DWORD(&reg->iobase_window);
1550 }
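
/*
 * Note: this check only runs on ISP25xx parts and appears to use the
 * chip's windowed register access -- select a window through
 * iobase_addr/iobase_window, poll BIT_0 with a bounded udelay() loop,
 * then sample iobase_c8.  The meaning of windows 0x7C00/0x0001/0x0003
 * and of the "0x55AA" marker is firmware-diagnostic detail; the routine
 * only logs the condition and restores the window to 0 before leaving.
 */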
1551
1552 /**
1553  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and later FWI2-capable ISPs.
1554  * @irq: interrupt number
1555  * @dev_id: SCSI driver HA context
1556  *
1557  * Called by system whenever the host adapter generates an interrupt.
1558  *
1559  * Returns handled flag.
1560  */
1561 irqreturn_t
1562 qla24xx_intr_handler(int irq, void *dev_id)
1563 {
1564         scsi_qla_host_t *vha;
1565         struct qla_hw_data *ha;
1566         struct device_reg_24xx __iomem *reg;
1567         int             status;
1568         unsigned long   iter;
1569         uint32_t        stat;
1570         uint32_t        hccr;
1571         uint16_t        mb[4];
1572         struct rsp_que *rsp;
1573
1574         rsp = (struct rsp_que *) dev_id;
1575         if (!rsp) {
1576                 printk(KERN_INFO
1577                     "%s(): NULL response queue pointer\n", __func__);
1578                 return IRQ_NONE;
1579         }
1580
1581         ha = rsp->hw;
1582         reg = &ha->iobase->isp24;
1583         status = 0;
1584
1585         spin_lock(&ha->hardware_lock);
1586         vha = qla2x00_get_rsp_host(rsp);
1587         for (iter = 50; iter--; ) {
1588                 stat = RD_REG_DWORD(&reg->host_status);
1589                 if (stat & HSRX_RISC_PAUSED) {
1590                         if (pci_channel_offline(ha->pdev))
1591                                 break;
1592
1593                         if (ha->hw_event_pause_errors == 0)
1594                                 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1595                                     0, MSW(stat), LSW(stat));
1596                         else if (ha->hw_event_pause_errors < 0xffffffff)
1597                                 ha->hw_event_pause_errors++;
1598
1599                         hccr = RD_REG_DWORD(&reg->hccr);
1600
1601                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1602                             "Dumping firmware!\n", hccr);
1603
1604                         qla2xxx_check_risc_status(vha);
1605
1606                         ha->isp_ops->fw_dump(vha, 1);
1607                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1608                         break;
1609                 } else if ((stat & HSRX_RISC_INT) == 0)
1610                         break;
1611
1612                 switch (stat & 0xff) {
1613                 case 0x1:
1614                 case 0x2:
1615                 case 0x10:
1616                 case 0x11:
1617                         qla24xx_mbx_completion(vha, MSW(stat));
1618                         status |= MBX_INTERRUPT;
1619
1620                         break;
1621                 case 0x12:
1622                         mb[0] = MSW(stat);
1623                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1624                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1625                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1626                         qla2x00_async_event(vha, rsp, mb);
1627                         break;
1628                 case 0x13:
1629                 case 0x14:
1630                         qla24xx_process_response_queue(rsp);
1631                         break;
1632                 default:
1633                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1634                             "(%d).\n",
1635                             vha->host_no, stat & 0xff));
1636                         break;
1637                 }
1638                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1639                 RD_REG_DWORD_RELAXED(&reg->hccr);
1640         }
1641         spin_unlock(&ha->hardware_lock);
1642
1643         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1644             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1645                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1646                 complete(&ha->mbx_intr_comp);
1647         }
1648
1649         return IRQ_HANDLED;
1650 }
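
/*
 * Note: the handler drains at most 50 interrupt causes per invocation
 * (the iter counter), presumably so a misbehaving ISP cannot keep the
 * CPU in hard-IRQ context indefinitely.  If a mailbox command is
 * waiting (MBX_INTR_WAIT set) and a mailbox interrupt was consumed, the
 * sleeping issuer is signalled through ha->mbx_intr_comp after the
 * hardware lock has been dropped.
 */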
1651
1652 static irqreturn_t
1653 qla24xx_msix_rsp_q(int irq, void *dev_id)
1654 {
1655         struct qla_hw_data *ha;
1656         struct rsp_que *rsp;
1657         struct device_reg_24xx __iomem *reg;
1658
1659         rsp = (struct rsp_que *) dev_id;
1660         if (!rsp) {
1661                 printk(KERN_INFO
1662                     "%s(): NULL response queue pointer\n", __func__);
1663                 return IRQ_NONE;
1664         }
1665         ha = rsp->hw;
1666         reg = &ha->iobase->isp24;
1667
1668         spin_lock_irq(&ha->hardware_lock);
1669
1670         qla24xx_process_response_queue(rsp);
1671         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1672
1673         spin_unlock_irq(&ha->hardware_lock);
1674
1675         return IRQ_HANDLED;
1676 }
1677
1678 static irqreturn_t
1679 qla25xx_msix_rsp_q(int irq, void *dev_id)
1680 {
1681         struct qla_hw_data *ha;
1682         struct rsp_que *rsp;
1683         struct device_reg_24xx __iomem *reg;
1684         uint16_t msix_disabled_hccr = 0;
1685
1686         rsp = (struct rsp_que *) dev_id;
1687         if (!rsp) {
1688                 printk(KERN_INFO
1689                         "%s(): NULL response queue pointer\n", __func__);
1690                 return IRQ_NONE;
1691         }
1692         ha = rsp->hw;
1693         reg = &ha->iobase->isp24;
1694
1695         spin_lock_irq(&ha->hardware_lock);
1696
1697         msix_disabled_hccr = rsp->options;
1698         if (!rsp->id)
1699                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1700         else
1701                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1702
1703         qla24xx_process_response_queue(rsp);
1704
1705         if (!msix_disabled_hccr)
1706                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1707
1708         spin_unlock_irq(&ha->hardware_lock);
1709
1710         return IRQ_HANDLED;
1711 }
1712
1713 static irqreturn_t
1714 qla24xx_msix_default(int irq, void *dev_id)
1715 {
1716         scsi_qla_host_t *vha;
1717         struct qla_hw_data *ha;
1718         struct rsp_que *rsp;
1719         struct device_reg_24xx __iomem *reg;
1720         int             status;
1721         uint32_t        stat;
1722         uint32_t        hccr;
1723         uint16_t        mb[4];
1724
1725         rsp = (struct rsp_que *) dev_id;
1726         if (!rsp) {
1727                 DEBUG(printk(
1728                     "%s(): NULL response queue pointer\n", __func__));
1729                 return IRQ_NONE;
1730         }
1731         ha = rsp->hw;
1732         reg = &ha->iobase->isp24;
1733         status = 0;
1734
1735         spin_lock_irq(&ha->hardware_lock);
1736         vha = qla2x00_get_rsp_host(rsp);
1737         do {
1738                 stat = RD_REG_DWORD(&reg->host_status);
1739                 if (stat & HSRX_RISC_PAUSED) {
1740                         if (pci_channel_offline(ha->pdev))
1741                                 break;
1742
1743                         if (ha->hw_event_pause_errors == 0)
1744                                 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1745                                     0, MSW(stat), LSW(stat));
1746                         else if (ha->hw_event_pause_errors < 0xffffffff)
1747                                 ha->hw_event_pause_errors++;
1748
1749                         hccr = RD_REG_DWORD(&reg->hccr);
1750
1751                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1752                             "Dumping firmware!\n", hccr);
1753
1754                         qla2xxx_check_risc_status(vha);
1755
1756                         ha->isp_ops->fw_dump(vha, 1);
1757                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1758                         break;
1759                 } else if ((stat & HSRX_RISC_INT) == 0)
1760                         break;
1761
1762                 switch (stat & 0xff) {
1763                 case 0x1:
1764                 case 0x2:
1765                 case 0x10:
1766                 case 0x11:
1767                         qla24xx_mbx_completion(vha, MSW(stat));
1768                         status |= MBX_INTERRUPT;
1769
1770                         break;
1771                 case 0x12:
1772                         mb[0] = MSW(stat);
1773                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1774                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1775                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1776                         qla2x00_async_event(vha, rsp, mb);
1777                         break;
1778                 case 0x13:
1779                 case 0x14:
1780                         qla24xx_process_response_queue(rsp);
1781                         break;
1782                 default:
1783                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1784                             "(%d).\n",
1785                             vha->host_no, stat & 0xff));
1786                         break;
1787                 }
1788                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1789         } while (0);
1790         spin_unlock_irq(&ha->hardware_lock);
1791
1792         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1793             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1794                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1795                 complete(&ha->mbx_intr_comp);
1796         }
1797
1798         return IRQ_HANDLED;
1799 }
1800
1801 /* Interrupt handling helpers. */
1802
1803 struct qla_init_msix_entry {
1804         uint16_t entry;
1805         uint16_t index;
1806         const char *name;
1807         irq_handler_t handler;
1808 };
1809
1810 static struct qla_init_msix_entry base_queue = {
1811         .entry = 0,
1812         .index = 0,
1813         .name = "qla2xxx (default)",
1814         .handler = qla24xx_msix_default,
1815 };
1816
1817 static struct qla_init_msix_entry base_rsp_queue = {
1818         .entry = 1,
1819         .index = 1,
1820         .name = "qla2xxx (rsp_q)",
1821         .handler = qla24xx_msix_rsp_q,
1822 };
1823
1824 static struct qla_init_msix_entry multi_rsp_queue = {
1825         .entry = 1,
1826         .index = 1,
1827         .name = "qla2xxx (multi_q)",
1828         .handler = qla25xx_msix_rsp_q,
1829 };
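
/*
 * Note: these templates describe the MSI-X layout programmed below --
 * vector 0 always runs qla24xx_msix_default (mailbox completions,
 * asynchronous events and the base response queue), while vector 1 runs
 * either the single-queue handler (base_rsp_queue) or the multi-queue
 * variant (multi_rsp_queue) when multiple queues are available and
 * ha->mqiobase is mapped.
 */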
1830
1831 static void
1832 qla24xx_disable_msix(struct qla_hw_data *ha)
1833 {
1834         int i;
1835         struct qla_msix_entry *qentry;
1836
1837         for (i = 0; i < ha->msix_count; i++) {
1838                 qentry = &ha->msix_entries[i];
1839                 if (qentry->have_irq)
1840                         free_irq(qentry->vector, qentry->rsp);
1841         }
1842         pci_disable_msix(ha->pdev);
1843         kfree(ha->msix_entries);
1844         ha->msix_entries = NULL;
1845         ha->flags.msix_enabled = 0;
1846 }
1847
1848 static int
1849 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1850 {
1851         int i, ret;
1852         struct msix_entry *entries;
1853         struct qla_msix_entry *qentry;
1854         struct qla_init_msix_entry *msix_queue;
1855
1856         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1857                                         GFP_KERNEL);
1858         if (!entries)
1859                 return -ENOMEM;
1860
1861         for (i = 0; i < ha->msix_count; i++)
1862                 entries[i].entry = i;
1863
1864         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1865         if (ret) {
1866                 qla_printk(KERN_WARNING, ha,
1867                         "MSI-X: Failed to enable support -- %d/%d\n"
1868                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
1869                 ha->msix_count = ret;
1870                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1871                 if (ret) {
1872                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1873                                 " support, giving up -- %d/%d\n",
1874                                 ha->msix_count, ret);
1875                         goto msix_out;
1876                 }
1877                 ha->max_queues = ha->msix_count - 1;
1878         }
1879         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1880                                 ha->msix_count, GFP_KERNEL);
1881         if (!ha->msix_entries) {
1882                 ret = -ENOMEM;
1883                 goto msix_out;
1884         }
1885         ha->flags.msix_enabled = 1;
1886
1887         for (i = 0; i < ha->msix_count; i++) {
1888                 qentry = &ha->msix_entries[i];
1889                 qentry->vector = entries[i].vector;
1890                 qentry->entry = entries[i].entry;
1891                 qentry->have_irq = 0;
1892                 qentry->rsp = NULL;
1893         }
1894
1895         /* Enable MSI-X for AENs for queue 0 */
1896         qentry = &ha->msix_entries[0];
1897         ret = request_irq(qentry->vector, base_queue.handler, 0,
1898                                         base_queue.name, rsp);
1899         if (ret) {
1900                 qla_printk(KERN_WARNING, ha,
1901                         "MSI-X: Unable to register handler -- %x/%d.\n",
1902                         qentry->vector, ret);
1903                 qla24xx_disable_msix(ha);
1904                 goto msix_out;
1905         }
1906         qentry->have_irq = 1;
1907         qentry->rsp = rsp;
1908
1909         /* Enable MSI-X vector for response queue update for queue 0 */
1910         if (ha->max_queues > 1 && ha->mqiobase) {
1911                 ha->mqenable = 1;
1912                 msix_queue = &multi_rsp_queue;
1913                 qla_printk(KERN_INFO, ha,
1914                                 "MQ enabled, number of queue resources: %d.\n",
1915                                 ha->max_queues);
1916         } else {
1917                 ha->mqenable = 0;
1918                 msix_queue = &base_rsp_queue;
1919         }
1920
1921         qentry = &ha->msix_entries[1];
1922         ret = request_irq(qentry->vector, msix_queue->handler, 0,
1923                                                 msix_queue->name, rsp);
1924         if (ret) {
1925                 qla_printk(KERN_WARNING, ha,
1926                         "MSI-X: Unable to register handler -- %x/%d.\n",
1927                         qentry->vector, ret);
1928                 qla24xx_disable_msix(ha);
1929                 ha->mqenable = 0;
1930                 goto msix_out;
1931         }
1932         qentry->have_irq = 1;
1933         qentry->rsp = rsp;
1934
1935 msix_out:
1936         kfree(entries);
1937         return ret;
1938 }
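
/*
 * Illustrative sketch only, not built with the driver: the
 * negotiate-and-retry idiom used by qla24xx_enable_msix() above,
 * written out generically.  It assumes the pci_enable_msix() contract
 * of this kernel generation: 0 on success, a negative errno on hard
 * failure, and a positive count of vectors the system could actually
 * provide when the request was too large.
 */
#if 0
static int example_enable_msix(struct pci_dev *pdev,
    struct msix_entry *entries, int nvec)
{
	int rc = pci_enable_msix(pdev, entries, nvec);

	if (rc > 0)	/* only 'rc' vectors available -- retry with those */
		rc = pci_enable_msix(pdev, entries, rc);
	return rc;	/* 0 on success, negative errno on failure */
}
#endif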
1939
1940 int
1941 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1942 {
1943         int ret;
1944         device_reg_t __iomem *reg = ha->iobase;
1945
1946         /* If possible, enable MSI-X. */
1947         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1948                 goto skip_msix;
1949
1950         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1951                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1952                 DEBUG2(qla_printk(KERN_WARNING, ha,
1953                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1954                         ha->pdev->revision, ha->fw_attributes));
1955
1956                 goto skip_msix;
1957         }
1958
1959         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1960             (ha->pdev->subsystem_device == 0x7040 ||
1961                 ha->pdev->subsystem_device == 0x7041 ||
1962                 ha->pdev->subsystem_device == 0x1705)) {
1963                 DEBUG2(qla_printk(KERN_WARNING, ha,
1964                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1965                     ha->pdev->subsystem_vendor,
1966                     ha->pdev->subsystem_device));
1967
1968                 goto skip_msi;
1969         }
1970
1971         ret = qla24xx_enable_msix(ha, rsp);
1972         if (!ret) {
1973                 DEBUG2(qla_printk(KERN_INFO, ha,
1974                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1975                     ha->fw_attributes));
1976                 goto clear_risc_ints;
1977         }
1978         qla_printk(KERN_WARNING, ha,
1979             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
1980 skip_msix:
1981
1982         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1983                 goto skip_msi;
1984
1985         ret = pci_enable_msi(ha->pdev);
1986         if (!ret) {
1987                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1988                 ha->flags.msi_enabled = 1;
1989         }
1990 skip_msi:
1991
1992         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1993             IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1994         if (ret) {
1995                 qla_printk(KERN_WARNING, ha,
1996                     "Failed to reserve interrupt %d; already in use.\n",
1997                     ha->pdev->irq);
1998                 goto fail;
1999         }
2000         ha->flags.inta_enabled = 1;
2001 clear_risc_ints:
2002
2003         spin_lock_irq(&ha->hardware_lock);
2004         if (IS_FWI2_CAPABLE(ha)) {
2005                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2006                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2007         } else {
2008                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2009                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2010                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2011         }
2012         spin_unlock_irq(&ha->hardware_lock);
2013
2014 fail:
2015         return ret;
2016 }
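
/*
 * Note: interrupt setup degrades gracefully -- MSI-X is attempted first
 * on the chips that support it (quirk checks drop early ISP2432
 * revisions or firmware modes back to MSI, and certain HP subsystem IDs
 * all the way back to line interrupts), then a single MSI vector, and
 * finally the legacy INTx path; the request_irq() call above serves
 * both the MSI and INTx cases.  Whatever mode ends up active, any
 * latched RISC/host interrupt state is cleared before returning.
 */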
2017
2018 void
2019 qla2x00_free_irqs(scsi_qla_host_t *vha)
2020 {
2021         struct qla_hw_data *ha = vha->hw;
2022         struct rsp_que *rsp = ha->rsp_q_map[0];
2023
2024         if (ha->flags.msix_enabled)
2025                 qla24xx_disable_msix(ha);
2026         else if (ha->flags.inta_enabled) {
2027                 free_irq(ha->pdev->irq, rsp);
2028                 pci_disable_msi(ha->pdev);
2029         }
2030 }
2031
2032 static struct scsi_qla_host *
2033 qla2x00_get_rsp_host(struct rsp_que *rsp)
2034 {
2035         srb_t *sp;
2036         struct qla_hw_data *ha = rsp->hw;
2037         struct scsi_qla_host *vha = NULL;
2038         struct sts_entry_24xx *pkt;
2039         struct req_que *req;
2040
2041         if (rsp->id) {
2042                 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2043                 req = rsp->req;
2044                 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045                         sp = req->outstanding_cmds[pkt->handle];
2046                         if (sp)
2047                                 vha = sp->vha;
2048                 }
2049         }
2050         if (!vha)
2051                 /* handle it in base queue */
2052                 vha = pci_get_drvdata(ha->pdev);
2053
2054         return vha;
2055 }
2056
2057 int qla25xx_request_irq(struct rsp_que *rsp)
2058 {
2059         struct qla_hw_data *ha = rsp->hw;
2060         struct qla_init_msix_entry *intr = &multi_rsp_queue;
2061         struct qla_msix_entry *msix = rsp->msix;
2062         int ret;
2063
2064         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2065         if (ret) {
2066                 qla_printk(KERN_WARNING, ha,
2067                         "MSI-X: Unable to register handler -- %x/%d.\n",
2068                         msix->vector, ret);
2069                 return ret;
2070         }
2071         msix->have_irq = 1;
2072         msix->rsp = rsp;
2073         return ret;
2074 }
2075
2076 void
2077 qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2078 {
2079         device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2080         WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2081 }
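
/*
 * Note: with multi-queue enabled each response queue owns a register
 * page inside the MQ BAR, so queue "id" is written through
 * mqiobase + QLA_QUE_PAGE * id.  For illustration only: if QLA_QUE_PAGE
 * were 0x1000, queue 2 would use the page at offset 0x2000 (the real
 * value comes from qla_def.h).
 */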
2082
2083 void
2084 qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2085 {
2086         device_reg_t __iomem *reg = (void *) ha->iobase;
2087         WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2088 }
2089