drivers/scsi/qla2xxx/qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: interrupt number
24  * @dev_id: SCSI driver HA context
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33         scsi_qla_host_t *vha;
34         struct qla_hw_data *ha;
35         struct device_reg_2xxx __iomem *reg;
36         int             status;
37         unsigned long   iter;
38         uint16_t        hccr;
39         uint16_t        mb[4];
40         struct rsp_que *rsp;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock(&ha->hardware_lock);
54         vha = qla2x00_get_rsp_host(rsp);
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
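                        /*
                         * Mailbox0 values in 0x4000-0x7fff indicate a mailbox
                         * command completion; 0x8000-0xbfff indicate an
                         * asynchronous event (matching the range checks below).
                         */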
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock(&ha->hardware_lock);
106
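        /*
         * If a mailbox command is sleeping on mbx_intr_comp, wake it now
         * that the hardware lock has been released.
         */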
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: SCSI driver HA context
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137
138         rsp = (struct rsp_que *) dev_id;
139         if (!rsp) {
140                 printk(KERN_INFO
141                     "%s(): NULL response queue pointer\n", __func__);
142                 return (IRQ_NONE);
143         }
144
145         ha = rsp->hw;
146         reg = &ha->iobase->isp;
147         status = 0;
148
149         spin_lock(&ha->hardware_lock);
150         vha = qla2x00_get_rsp_host(rsp);
151         for (iter = 50; iter--; ) {
152                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153                 if (stat & HSR_RISC_PAUSED) {
154                         if (pci_channel_offline(ha->pdev))
155                                 break;
156
157                         hccr = RD_REG_WORD(&reg->hccr);
158                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
159                                 qla_printk(KERN_INFO, ha, "Parity error -- "
160                                     "HCCR=%x, Dumping firmware!\n", hccr);
161                         else
162                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
163                                     "HCCR=%x, Dumping firmware!\n", hccr);
164
165                         /*
166                          * Issue a "HARD" reset in order for the RISC
167                          * interrupt bit to be cleared.  Schedule a big
168                          * hammer to get out of the RISC PAUSED state.
169                          */
170                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171                         RD_REG_WORD(&reg->hccr);
172
173                         ha->isp_ops->fw_dump(vha, 1);
174                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175                         break;
176                 } else if ((stat & HSR_RISC_INT) == 0)
177                         break;
178
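                /*
                 * The low byte of host_status selects the interrupt type;
                 * the upper 16 bits (MSW) carry mailbox register 0.
                 */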
179                 switch (stat & 0xff) {
180                 case 0x1:
181                 case 0x2:
182                 case 0x10:
183                 case 0x11:
184                         qla2x00_mbx_completion(vha, MSW(stat));
185                         status |= MBX_INTERRUPT;
186
187                         /* Release mailbox registers. */
188                         WRT_REG_WORD(&reg->semaphore, 0);
189                         break;
190                 case 0x12:
191                         mb[0] = MSW(stat);
192                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195                         qla2x00_async_event(vha, rsp, mb);
196                         break;
197                 case 0x13:
198                         qla2x00_process_response_queue(rsp);
199                         break;
200                 case 0x15:
201                         mb[0] = MBA_CMPLT_1_16BIT;
202                         mb[1] = MSW(stat);
203                         qla2x00_async_event(vha, rsp, mb);
204                         break;
205                 case 0x16:
206                         mb[0] = MBA_SCSI_COMPLETION;
207                         mb[1] = MSW(stat);
208                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209                         qla2x00_async_event(vha, rsp, mb);
210                         break;
211                 default:
212                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
213                             "(%d).\n",
214                             vha->host_no, stat & 0xff));
215                         break;
216                 }
217                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218                 RD_REG_WORD_RELAXED(&reg->hccr);
219         }
220         spin_unlock(&ha->hardware_lock);
221
222         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
224                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
225                 complete(&ha->mbx_intr_comp);
226         }
227
228         return (IRQ_HANDLED);
229 }
230
231 /**
232  * qla2x00_mbx_completion() - Process mailbox command completions.
233  * @ha: SCSI driver HA context
234  * @mb0: Mailbox0 register
235  */
236 static void
237 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
238 {
239         uint16_t        cnt;
240         uint16_t __iomem *wptr;
241         struct qla_hw_data *ha = vha->hw;
242         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
243
244         /* Load return mailbox registers. */
245         ha->flags.mbox_int = 1;
246         ha->mailbox_out[0] = mb0;
247         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
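        /*
         * Copy the remaining mailbox registers.  The ISP2200 keeps register 8
         * and up in a second bank, and registers 4 and 5 are debounced before
         * being read.
         */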
249         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
250                 if (IS_QLA2200(ha) && cnt == 8)
251                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252                 if (cnt == 4 || cnt == 5)
253                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254                 else
255                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
256
257                 wptr++;
258         }
259
260         if (ha->mcp) {
261                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
262                     __func__, vha->host_no, ha->mcp->mb[0]));
263         } else {
264                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
265                     __func__, vha->host_no));
266         }
267 }
268
269 /**
270  * qla2x00_async_event() - Process asynchronous events.
271  * @vha: SCSI driver HA context
272  * @mb: Mailbox registers (0 - 3)
273  */
274 void
275 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276 {
277 #define LS_UNKNOWN      2
278         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
279         char            *link_speed;
280         uint16_t        handle_cnt;
281         uint16_t        cnt;
282         uint32_t        handles[5];
283         struct qla_hw_data *ha = vha->hw;
284         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
285         uint32_t        rscn_entry, host_pid;
286         uint8_t         rscn_queue_index;
287         unsigned long   flags;
288
289         /* Setup to process RIO completion. */
290         handle_cnt = 0;
291         if (IS_QLA81XX(ha))
292                 goto skip_rio;
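        /*
         * Reduced Interrupt Operation (RIO) completions pack up to five
         * command handles into the mailbox registers; collect them here and
         * normalize mb[0] to MBA_SCSI_COMPLETION for the handler below.
         */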
293         switch (mb[0]) {
294         case MBA_SCSI_COMPLETION:
295                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
296                 handle_cnt = 1;
297                 break;
298         case MBA_CMPLT_1_16BIT:
299                 handles[0] = mb[1];
300                 handle_cnt = 1;
301                 mb[0] = MBA_SCSI_COMPLETION;
302                 break;
303         case MBA_CMPLT_2_16BIT:
304                 handles[0] = mb[1];
305                 handles[1] = mb[2];
306                 handle_cnt = 2;
307                 mb[0] = MBA_SCSI_COMPLETION;
308                 break;
309         case MBA_CMPLT_3_16BIT:
310                 handles[0] = mb[1];
311                 handles[1] = mb[2];
312                 handles[2] = mb[3];
313                 handle_cnt = 3;
314                 mb[0] = MBA_SCSI_COMPLETION;
315                 break;
316         case MBA_CMPLT_4_16BIT:
317                 handles[0] = mb[1];
318                 handles[1] = mb[2];
319                 handles[2] = mb[3];
320                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
321                 handle_cnt = 4;
322                 mb[0] = MBA_SCSI_COMPLETION;
323                 break;
324         case MBA_CMPLT_5_16BIT:
325                 handles[0] = mb[1];
326                 handles[1] = mb[2];
327                 handles[2] = mb[3];
328                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
329                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
330                 handle_cnt = 5;
331                 mb[0] = MBA_SCSI_COMPLETION;
332                 break;
333         case MBA_CMPLT_2_32BIT:
334                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
335                 handles[1] = le32_to_cpu(
336                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
337                     RD_MAILBOX_REG(ha, reg, 6));
338                 handle_cnt = 2;
339                 mb[0] = MBA_SCSI_COMPLETION;
340                 break;
341         default:
342                 break;
343         }
344 skip_rio:
345         switch (mb[0]) {
346         case MBA_SCSI_COMPLETION:       /* Fast Post */
347                 if (!vha->flags.online)
348                         break;
349
350                 for (cnt = 0; cnt < handle_cnt; cnt++)
351                         qla2x00_process_completed_request(vha, rsp->req,
352                                 handles[cnt]);
353                 break;
354
355         case MBA_RESET:                 /* Reset */
356                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
357                         vha->host_no));
358
359                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
360                 break;
361
362         case MBA_SYSTEM_ERR:            /* System Error */
363                 qla_printk(KERN_INFO, ha,
364                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
365                     mb[1], mb[2], mb[3]);
366
367                 ha->isp_ops->fw_dump(vha, 1);
368
369                 if (IS_FWI2_CAPABLE(ha)) {
370                         if (mb[1] == 0 && mb[2] == 0) {
371                                 qla_printk(KERN_ERR, ha,
372                                     "Unrecoverable Hardware Error: adapter "
373                                     "marked OFFLINE!\n");
374                                 vha->flags.online = 0;
375                         } else
376                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
377                 } else if (mb[1] == 0) {
378                         qla_printk(KERN_INFO, ha,
379                             "Unrecoverable Hardware Error: adapter marked "
380                             "OFFLINE!\n");
381                         vha->flags.online = 0;
382                 } else
383                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
384                 break;
385
386         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
387                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
388                     vha->host_no));
389                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
390
391                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
392                 break;
393
394         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
395                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
396                     vha->host_no));
397                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398
399                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
400                 break;
401
402         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
403                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
404                     vha->host_no));
405                 break;
406
407         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
408                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
409                     mb[1]));
410                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
411
412                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
413                         atomic_set(&vha->loop_state, LOOP_DOWN);
414                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
415                         qla2x00_mark_all_devices_lost(vha, 1);
416                 }
417
418                 if (vha->vp_idx) {
419                         atomic_set(&vha->vp_state, VP_FAILED);
420                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
421                 }
422
423                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
424                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
425
426                 vha->flags.management_server_logged_in = 0;
427                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
428                 break;
429
430         case MBA_LOOP_UP:               /* Loop Up Event */
431                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
432                         link_speed = link_speeds[0];
433                         ha->link_data_rate = PORT_SPEED_1GB;
434                 } else {
435                         link_speed = link_speeds[LS_UNKNOWN];
436                         if (mb[1] < 5)
437                                 link_speed = link_speeds[mb[1]];
438                         else if (mb[1] == 0x13)
439                                 link_speed = link_speeds[5];
440                         ha->link_data_rate = mb[1];
441                 }
442
443                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
444                     vha->host_no, link_speed));
445                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
446                     link_speed);
447
448                 vha->flags.management_server_logged_in = 0;
449                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
450                 break;
451
452         case MBA_LOOP_DOWN:             /* Loop Down Event */
453                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
454                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
455                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
456                     mb[1], mb[2], mb[3]);
457
458                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
459                         atomic_set(&vha->loop_state, LOOP_DOWN);
460                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
461                         vha->device_flags |= DFLG_NO_CABLE;
462                         qla2x00_mark_all_devices_lost(vha, 1);
463                 }
464
465                 if (vha->vp_idx) {
466                         atomic_set(&vha->vp_state, VP_FAILED);
467                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
468                 }
469
470                 vha->flags.management_server_logged_in = 0;
471                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
472                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
473                 break;
474
475         case MBA_LIP_RESET:             /* LIP reset occurred */
476                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
477                     vha->host_no, mb[1]));
478                 qla_printk(KERN_INFO, ha,
479                     "LIP reset occurred (%x).\n", mb[1]);
480
481                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
482                         atomic_set(&vha->loop_state, LOOP_DOWN);
483                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
484                         qla2x00_mark_all_devices_lost(vha, 1);
485                 }
486
487                 if (vha->vp_idx) {
488                         atomic_set(&vha->vp_state, VP_FAILED);
489                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
490                 }
491
492                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
493
494                 ha->operating_mode = LOOP;
495                 vha->flags.management_server_logged_in = 0;
496                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
497                 break;
498
499         /* case MBA_DCBX_COMPLETE: */
500         case MBA_POINT_TO_POINT:        /* Point-to-Point */
501                 if (IS_QLA2100(ha))
502                         break;
503
504                 if (IS_QLA81XX(ha))
505                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
506                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
507                 else
508                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
509                             "received.\n", vha->host_no));
510
511                 /*
512                  * Until there's a transition from loop down to loop up, treat
513                  * this as loop down only.
514                  */
515                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
516                         atomic_set(&vha->loop_state, LOOP_DOWN);
517                         if (!atomic_read(&vha->loop_down_timer))
518                                 atomic_set(&vha->loop_down_timer,
519                                     LOOP_DOWN_TIME);
520                         qla2x00_mark_all_devices_lost(vha, 1);
521                 }
522
523                 if (vha->vp_idx) {
524                         atomic_set(&vha->vp_state, VP_FAILED);
525                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
526                 }
527
528                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
529                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
530
531                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
532                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
533
534                 ha->flags.gpsc_supported = 1;
535                 vha->flags.management_server_logged_in = 0;
536                 break;
537
538         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
539                 if (IS_QLA2100(ha))
540                         break;
541
542                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
543                     "received.\n",
544                     vha->host_no));
545                 qla_printk(KERN_INFO, ha,
546                     "Configuration change detected: value=%x.\n", mb[1]);
547
548                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
549                         atomic_set(&vha->loop_state, LOOP_DOWN);
550                         if (!atomic_read(&vha->loop_down_timer))
551                                 atomic_set(&vha->loop_down_timer,
552                                     LOOP_DOWN_TIME);
553                         qla2x00_mark_all_devices_lost(vha, 1);
554                 }
555
556                 if (vha->vp_idx) {
557                         atomic_set(&vha->vp_state, VP_FAILED);
558                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
559                 }
560
561                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
562                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
563                 break;
564
565         case MBA_PORT_UPDATE:           /* Port database update */
566                 /* Only handle SCNs for our Vport index. */
567                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
568                         break;
569
570                 /*
571                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
572                  * event etc. earlier indicating loop is down) then process
573                  * it.  Otherwise ignore it and wait for RSCN to come in.
574                  */
575                 atomic_set(&vha->loop_down_timer, 0);
576                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
577                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
578                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
579                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
580                             mb[2], mb[3]));
581                         break;
582                 }
583
584                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
585                     vha->host_no));
586                 DEBUG(printk(KERN_INFO
587                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
588                     vha->host_no, mb[1], mb[2], mb[3]));
589
590                 /*
591                  * Mark all devices as missing so we will login again.
592                  */
593                 atomic_set(&vha->loop_state, LOOP_UP);
594
595                 qla2x00_mark_all_devices_lost(vha, 1);
596
597                 vha->flags.rscn_queue_overflow = 1;
598
599                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
600                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
601                 break;
602
603         case MBA_RSCN_UPDATE:           /* State Change Registration */
604                 /* Check if the Vport has issued a SCR */
605                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
606                         break;
607                 /* Only handle SCNs for our Vport index. */
608                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
609                         break;
610                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
611                     vha->host_no));
612                 DEBUG(printk(KERN_INFO
613                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
614                     vha->host_no, mb[1], mb[2], mb[3]));
615
616                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
617                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
618                                 | vha->d_id.b.al_pa;
619                 if (rscn_entry == host_pid) {
620                         DEBUG(printk(KERN_INFO
621                             "scsi(%ld): Ignoring RSCN update to local host "
622                             "port ID (%06x)\n",
623                             vha->host_no, host_pid));
624                         break;
625                 }
626
627                 /* Ignore reserved bits from RSCN-payload. */
628                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
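                /*
                 * Queue the RSCN in the per-host ring buffer.  If the buffer
                 * is full only the overflow flag is set and the entry is
                 * dropped; the consumer presumably falls back to a full
                 * rescan.
                 */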
629                 rscn_queue_index = vha->rscn_in_ptr + 1;
630                 if (rscn_queue_index == MAX_RSCN_COUNT)
631                         rscn_queue_index = 0;
632                 if (rscn_queue_index != vha->rscn_out_ptr) {
633                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
634                         vha->rscn_in_ptr = rscn_queue_index;
635                 } else {
636                         vha->flags.rscn_queue_overflow = 1;
637                 }
638
639                 atomic_set(&vha->loop_state, LOOP_UPDATE);
640                 atomic_set(&vha->loop_down_timer, 0);
641                 vha->flags.management_server_logged_in = 0;
642
643                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
644                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
645                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
646                 break;
647
648         /* case MBA_RIO_RESPONSE: */
649         case MBA_ZIO_RESPONSE:
650                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
651                     vha->host_no));
652
653                 if (IS_FWI2_CAPABLE(ha))
654                         qla24xx_process_response_queue(rsp);
655                 else
656                         qla2x00_process_response_queue(rsp);
657                 break;
658
659         case MBA_DISCARD_RND_FRAME:
660                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
661                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
662                 break;
663
664         case MBA_TRACE_NOTIFICATION:
665                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
666                 vha->host_no, mb[1], mb[2]));
667                 break;
668
669         case MBA_ISP84XX_ALERT:
670                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
671                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
672
673                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
674                 switch (mb[1]) {
675                 case A84_PANIC_RECOVERY:
676                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
677                             "%04x %04x\n", mb[2], mb[3]);
678                         break;
679                 case A84_OP_LOGIN_COMPLETE:
680                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
681                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
682                             "firmware version %x\n", ha->cs84xx->op_fw_version));
683                         break;
684                 case A84_DIAG_LOGIN_COMPLETE:
685                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
686                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
687                             "diagnostic firmware version %x\n",
688                             ha->cs84xx->diag_fw_version));
689                         break;
690                 case A84_GOLD_LOGIN_COMPLETE:
691                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
692                         ha->cs84xx->fw_update = 1;
693                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
694                             "firmware version %x\n",
695                             ha->cs84xx->gold_fw_version));
696                         break;
697                 default:
698                         qla_printk(KERN_ERR, ha,
699                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
700                             mb[1], mb[2], mb[3]);
701                 }
702                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
703                 break;
704         case MBA_DCBX_START:
705                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
706                     vha->host_no, mb[1], mb[2], mb[3]));
707                 break;
708         case MBA_DCBX_PARAM_UPDATE:
709                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
710                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
711                 break;
712         case MBA_FCF_CONF_ERR:
713                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
714                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
715                 break;
716         case MBA_IDC_COMPLETE:
717                 DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
718                     "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2],
719                     mb[3]));
720                 break;
721         case MBA_IDC_NOTIFY:
722                 DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
723                     "Request Notification -- %04x %04x %04x\n", vha->host_no,
724                     mb[1], mb[2], mb[3]));
725                 /**** Mailbox registers 4 - 7 valid!!! */
726                 break;
727         case MBA_IDC_TIME_EXT:
728                 DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
729                     "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1],
730                     mb[2], mb[3]));
731                 /**** Mailbox registers 4 - 7 valid!!! */
732                 break;
733         }
734
735         if (!vha->vp_idx && ha->num_vhosts)
736                 qla2x00_alert_all_vps(rsp, mb);
737 }
738
739 static void
740 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
741 {
742         fc_port_t *fcport = data;
743         struct scsi_qla_host *vha = fcport->vha;
744         struct qla_hw_data *ha = vha->hw;
745         struct req_que *req = NULL;
746
747         req = ha->req_q_map[vha->req_ques[0]];
748         if (!req)
749                 return;
750         if (req->max_q_depth <= sdev->queue_depth)
751                 return;
752
753         if (sdev->ordered_tags)
754                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
755                     sdev->queue_depth + 1);
756         else
757                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
758                     sdev->queue_depth + 1);
759
760         fcport->last_ramp_up = jiffies;
761
762         DEBUG2(qla_printk(KERN_INFO, ha,
763             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
764             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
765             sdev->queue_depth));
766 }
767
768 static void
769 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
770 {
771         fc_port_t *fcport = data;
772
773         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
774                 return;
775
776         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
777             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
778             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
779             sdev->queue_depth));
780 }
781
782 static inline void
783 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
784                                                                 srb_t *sp)
785 {
786         fc_port_t *fcport;
787         struct scsi_device *sdev;
788
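        /*
         * Only ramp the queue depth back up if ql2xqfullrampup seconds have
         * passed since both the last ramp-up and the last QUEUE FULL seen on
         * this port.
         */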
789         sdev = sp->cmd->device;
790         if (sdev->queue_depth >= req->max_q_depth)
791                 return;
792
793         fcport = sp->fcport;
794         if (time_before(jiffies,
795             fcport->last_ramp_up + ql2xqfullrampup * HZ))
796                 return;
797         if (time_before(jiffies,
798             fcport->last_queue_full + ql2xqfullrampup * HZ))
799                 return;
800
801         starget_for_each_device(sdev->sdev_target, fcport,
802             qla2x00_adjust_sdev_qdepth_up);
803 }
804
805 /**
806  * qla2x00_process_completed_request() - Process a Fast Post response.
807  * @vha: SCSI driver HA context
808  * @index: SRB index
809  */
810 static void
811 qla2x00_process_completed_request(struct scsi_qla_host *vha,
812                                 struct req_que *req, uint32_t index)
813 {
814         srb_t *sp;
815         struct qla_hw_data *ha = vha->hw;
816
817         /* Validate handle. */
818         if (index >= MAX_OUTSTANDING_COMMANDS) {
819                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
820                     vha->host_no, index));
821                 qla_printk(KERN_WARNING, ha,
822                     "Invalid SCSI completion handle %d.\n", index);
823
824                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
825                 return;
826         }
827
828         sp = req->outstanding_cmds[index];
829         if (sp) {
830                 /* Free outstanding command slot. */
831                 req->outstanding_cmds[index] = NULL;
832
833                 CMD_COMPL_STATUS(sp->cmd) = 0L;
834                 CMD_SCSI_STATUS(sp->cmd) = 0L;
835
836                 /* Save ISP completion status */
837                 sp->cmd->result = DID_OK << 16;
838
839                 qla2x00_ramp_up_queue_depth(vha, req, sp);
840                 qla2x00_sp_compl(ha, sp);
841         } else {
842                 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
843                     vha->host_no));
844                 qla_printk(KERN_WARNING, ha,
845                     "Invalid ISP SCSI completion handle\n");
846
847                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
848         }
849 }
850
851 /**
852  * qla2x00_process_response_queue() - Process response queue entries.
853  * @rsp: response queue
854  */
855 void
856 qla2x00_process_response_queue(struct rsp_que *rsp)
857 {
858         struct scsi_qla_host *vha;
859         struct qla_hw_data *ha = rsp->hw;
860         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
861         sts_entry_t     *pkt;
862         uint16_t        handle_cnt;
863         uint16_t        cnt;
864
865         vha = qla2x00_get_rsp_host(rsp);
866
867         if (!vha->flags.online)
868                 return;
869
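        /*
         * Walk the response ring until an entry still carrying the
         * RESPONSE_PROCESSED signature is reached; the new out-pointer is
         * written back to the ISP after the loop.
         */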
870         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
871                 pkt = (sts_entry_t *)rsp->ring_ptr;
872
873                 rsp->ring_index++;
874                 if (rsp->ring_index == rsp->length) {
875                         rsp->ring_index = 0;
876                         rsp->ring_ptr = rsp->ring;
877                 } else {
878                         rsp->ring_ptr++;
879                 }
880
881                 if (pkt->entry_status != 0) {
882                         DEBUG3(printk(KERN_INFO
883                             "scsi(%ld): Process error entry.\n", vha->host_no));
884
885                         qla2x00_error_entry(vha, rsp, pkt);
886                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
887                         wmb();
888                         continue;
889                 }
890
891                 switch (pkt->entry_type) {
892                 case STATUS_TYPE:
893                         qla2x00_status_entry(vha, rsp, pkt);
894                         break;
895                 case STATUS_TYPE_21:
896                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
897                         for (cnt = 0; cnt < handle_cnt; cnt++) {
898                                 qla2x00_process_completed_request(vha, rsp->req,
899                                     ((sts21_entry_t *)pkt)->handle[cnt]);
900                         }
901                         break;
902                 case STATUS_TYPE_22:
903                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
904                         for (cnt = 0; cnt < handle_cnt; cnt++) {
905                                 qla2x00_process_completed_request(vha, rsp->req,
906                                     ((sts22_entry_t *)pkt)->handle[cnt]);
907                         }
908                         break;
909                 case STATUS_CONT_TYPE:
910                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
911                         break;
912                 default:
913                         /* Type Not Supported. */
914                         DEBUG4(printk(KERN_WARNING
915                             "scsi(%ld): Received unknown response pkt type %x "
916                             "entry status=%x.\n",
917                             vha->host_no, pkt->entry_type, pkt->entry_status));
918                         break;
919                 }
920                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
921                 wmb();
922         }
923
924         /* Adjust ring index */
925         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
926 }
927
928 static inline void
929 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
930 {
931         struct scsi_cmnd *cp = sp->cmd;
932
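        /*
         * Copy at most 32 bytes of sense data now; any remainder arrives in
         * Status Continuation entries, so the srb is remembered in
         * vha->status_srb until the transfer completes.
         */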
933         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
934                 sense_len = SCSI_SENSE_BUFFERSIZE;
935
936         CMD_ACTUAL_SNSLEN(cp) = sense_len;
937         sp->request_sense_length = sense_len;
938         sp->request_sense_ptr = cp->sense_buffer;
939         if (sp->request_sense_length > 32)
940                 sense_len = 32;
941
942         memcpy(cp->sense_buffer, sense_data, sense_len);
943
944         sp->request_sense_ptr += sense_len;
945         sp->request_sense_length -= sense_len;
946         if (sp->request_sense_length != 0)
947                 sp->fcport->vha->status_srb = sp;
948
949         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
950             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
951             cp->device->channel, cp->device->id, cp->device->lun, cp,
952             cp->serial_number));
953         if (sense_len)
954                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
955                     CMD_ACTUAL_SNSLEN(cp)));
956 }
957
958 /**
959  * qla2x00_status_entry() - Process a Status IOCB entry.
960  * @vha: SCSI driver HA context
961  * @pkt: Entry pointer
962  */
963 static void
964 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
965 {
966         srb_t           *sp;
967         fc_port_t       *fcport;
968         struct scsi_cmnd *cp;
969         sts_entry_t *sts;
970         struct sts_entry_24xx *sts24;
971         uint16_t        comp_status;
972         uint16_t        scsi_status;
973         uint8_t         lscsi_status;
974         int32_t         resid;
975         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
976         uint8_t         *rsp_info, *sense_data;
977         struct qla_hw_data *ha = vha->hw;
978         struct req_que *req = rsp->req;
979
980         sts = (sts_entry_t *) pkt;
981         sts24 = (struct sts_entry_24xx *) pkt;
982         if (IS_FWI2_CAPABLE(ha)) {
983                 comp_status = le16_to_cpu(sts24->comp_status);
984                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
985         } else {
986                 comp_status = le16_to_cpu(sts->comp_status);
987                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
988         }
989
990         /* Fast path completion. */
991         if (comp_status == CS_COMPLETE && scsi_status == 0) {
992                 qla2x00_process_completed_request(vha, req, sts->handle);
993
994                 return;
995         }
996
997         /* Validate handle. */
998         if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
999                 sp = req->outstanding_cmds[sts->handle];
1000                 req->outstanding_cmds[sts->handle] = NULL;
1001         } else
1002                 sp = NULL;
1003
1004         if (sp == NULL) {
1005                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
1006                     vha->host_no));
1007                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
1008
1009                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1010                 qla2xxx_wake_dpc(vha);
1011                 return;
1012         }
1013         cp = sp->cmd;
1014         if (cp == NULL) {
1015                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1016                     "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
1017                 qla_printk(KERN_WARNING, ha,
1018                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
1019
1020                 return;
1021         }
1022
1023         lscsi_status = scsi_status & STATUS_MASK;
1024         CMD_ENTRY_STATUS(cp) = sts->entry_status;
1025         CMD_COMPL_STATUS(cp) = comp_status;
1026         CMD_SCSI_STATUS(cp) = scsi_status;
1027
1028         fcport = sp->fcport;
1029
1030         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1031         if (IS_FWI2_CAPABLE(ha)) {
1032                 sense_len = le32_to_cpu(sts24->sense_len);
1033                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1034                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1035                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1036                 rsp_info = sts24->data;
1037                 sense_data = sts24->data;
1038                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1039         } else {
1040                 sense_len = le16_to_cpu(sts->req_sense_length);
1041                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1042                 resid_len = le32_to_cpu(sts->residual_length);
1043                 rsp_info = sts->rsp_info;
1044                 sense_data = sts->req_sense_data;
1045         }
1046
1047         /* Check for any FCP transport errors. */
1048         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1049                 /* Sense data lies beyond any FCP RESPONSE data. */
1050                 if (IS_FWI2_CAPABLE(ha))
1051                         sense_data += rsp_info_len;
1052                 if (rsp_info_len > 3 && rsp_info[3]) {
1053                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1054                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1055                             "retrying command\n", vha->host_no,
1056                             cp->device->channel, cp->device->id,
1057                             cp->device->lun, rsp_info_len, rsp_info[0],
1058                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1059                             rsp_info[5], rsp_info[6], rsp_info[7]));
1060
1061                         cp->result = DID_BUS_BUSY << 16;
1062                         qla2x00_sp_compl(ha, sp);
1063                         return;
1064                 }
1065         }
1066
1067         /* Check for overrun. */
1068         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1069             scsi_status & SS_RESIDUAL_OVER)
1070                 comp_status = CS_DATA_OVERRUN;
1071
1072         /*
1073          * Based on Host and scsi status generate status code for Linux
1074          */
1075         switch (comp_status) {
1076         case CS_COMPLETE:
1077         case CS_QUEUE_FULL:
1078                 if (scsi_status == 0) {
1079                         cp->result = DID_OK << 16;
1080                         break;
1081                 }
1082                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1083                         resid = resid_len;
1084                         scsi_set_resid(cp, resid);
1085                         CMD_RESID_LEN(cp) = resid;
1086
1087                         if (!lscsi_status &&
1088                             ((unsigned)(scsi_bufflen(cp) - resid) <
1089                              cp->underflow)) {
1090                                 qla_printk(KERN_INFO, ha,
1091                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1092                                            "detected (%x of %x bytes)...returning "
1093                                            "error status.\n", vha->host_no,
1094                                            cp->device->channel, cp->device->id,
1095                                            cp->device->lun, resid,
1096                                            scsi_bufflen(cp));
1097
1098                                 cp->result = DID_ERROR << 16;
1099                                 break;
1100                         }
1101                 }
1102                 cp->result = DID_OK << 16 | lscsi_status;
1103
1104                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1105                         DEBUG2(printk(KERN_INFO
1106                             "scsi(%ld): QUEUE FULL status detected "
1107                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1108                             scsi_status));
1109
1110                         /* Adjust queue depth for all luns on the port. */
1111                         fcport->last_queue_full = jiffies;
1112                         starget_for_each_device(cp->device->sdev_target,
1113                             fcport, qla2x00_adjust_sdev_qdepth_down);
1114                         break;
1115                 }
1116                 if (lscsi_status != SS_CHECK_CONDITION)
1117                         break;
1118
1119                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1120                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1121                         break;
1122
1123                 qla2x00_handle_sense(sp, sense_data, sense_len);
1124                 break;
1125
1126         case CS_DATA_UNDERRUN:
1127                 resid = resid_len;
1128                 /* Use F/W calculated residual length. */
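                /*
                 * Note: when the firmware and IOCB residuals disagree,
                 * SS_RESIDUAL_UNDER is cleared so the dropped-frame check
                 * below treats this as a lost frame (a reading of the logic,
                 * not documented in the source).
                 */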
1129                 if (IS_FWI2_CAPABLE(ha)) {
1130                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1131                                 lscsi_status = 0;
1132                         } else if (resid != fw_resid_len) {
1133                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1134                                 lscsi_status = 0;
1135                         }
1136                         resid = fw_resid_len;
1137                 }
1138
1139                 if (scsi_status & SS_RESIDUAL_UNDER) {
1140                         scsi_set_resid(cp, resid);
1141                         CMD_RESID_LEN(cp) = resid;
1142                 } else {
1143                         DEBUG2(printk(KERN_INFO
1144                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1145                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1146                             "os_underflow=0x%x\n", vha->host_no,
1147                             cp->device->id, cp->device->lun, comp_status,
1148                             scsi_status, resid_len, resid, cp->cmnd[0],
1149                             cp->underflow));
1150
1151                 }
1152
1153                 /*
1154                  * Check to see if SCSI Status is non zero. If so report SCSI
1155                  * Status.
1156                  */
1157                 if (lscsi_status != 0) {
1158                         cp->result = DID_OK << 16 | lscsi_status;
1159
1160                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1161                                 DEBUG2(printk(KERN_INFO
1162                                     "scsi(%ld): QUEUE FULL status detected "
1163                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1164                                     scsi_status));
1165
1166                                 /*
1167                                  * Adjust queue depth for all luns on the
1168                                  * port.
1169                                  */
1170                                 fcport->last_queue_full = jiffies;
1171                                 starget_for_each_device(
1172                                     cp->device->sdev_target, fcport,
1173                                     qla2x00_adjust_sdev_qdepth_down);
1174                                 break;
1175                         }
1176                         if (lscsi_status != SS_CHECK_CONDITION)
1177                                 break;
1178
1179                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1180                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1181                                 break;
1182
1183                         qla2x00_handle_sense(sp, sense_data, sense_len);
1184                 } else {
1185                         /*
1186                          * If RISC reports underrun and target does not report
1187                          * it then we must have a lost frame, so tell upper
1188                          * layer to retry it by reporting a bus busy.
1189                          */
1190                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1191                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1192                                               "frame(s) detected (%x of %x bytes)..."
1193                                               "retrying command.\n",
1194                                         vha->host_no, cp->device->channel,
1195                                         cp->device->id, cp->device->lun, resid,
1196                                         scsi_bufflen(cp)));
1197
1198                                 cp->result = DID_BUS_BUSY << 16;
1199                                 break;
1200                         }
1201
1202                         /* Handle mid-layer underflow */
1203                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1204                             cp->underflow) {
1205                                 qla_printk(KERN_INFO, ha,
1206                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1207                                            "detected (%x of %x bytes)...returning "
1208                                            "error status.\n", vha->host_no,
1209                                            cp->device->channel, cp->device->id,
1210                                            cp->device->lun, resid,
1211                                            scsi_bufflen(cp));
1212
1213                                 cp->result = DID_ERROR << 16;
1214                                 break;
1215                         }
1216
1217                         /* Everybody online, looking good... */
1218                         cp->result = DID_OK << 16;
1219                 }
1220                 break;
1221
1222         case CS_DATA_OVERRUN:
1223                 DEBUG2(printk(KERN_INFO
1224                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1225                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1226                     scsi_status));
1227                 DEBUG2(printk(KERN_INFO
1228                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1229                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1230                     cp->cmnd[4], cp->cmnd[5]));
1231                 DEBUG2(printk(KERN_INFO
1232                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1233                     "status!\n",
1234                     cp->serial_number, scsi_bufflen(cp), resid_len));
1235
1236                 cp->result = DID_ERROR << 16;
1237                 break;
1238
1239         case CS_PORT_LOGGED_OUT:
1240         case CS_PORT_CONFIG_CHG:
1241         case CS_PORT_BUSY:
1242         case CS_INCOMPLETE:
1243         case CS_PORT_UNAVAILABLE:
1244                 /*
1245                  * If the port is in Target Down state, return all IOs for this
1246                  * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1247                  * retry_queue.
1248                  */
1249                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1250                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1251                     vha->host_no, cp->device->id, cp->device->lun,
1252                     cp->serial_number, comp_status,
1253                     atomic_read(&fcport->state)));
1254
1255                 /*
1256                  * We are going to have the fc class block the rport
1257                  * while we try to recover so instruct the mid layer
1258                  * to requeue until the class decides how to handle this.
1259                  */
1260                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1261                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1262                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1263                 break;
1264
1265         case CS_RESET:
1266                 DEBUG2(printk(KERN_INFO
1267                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1268                     vha->host_no, comp_status, scsi_status));
1269
1270                 cp->result = DID_RESET << 16;
1271                 break;
1272
1273         case CS_ABORTED:
1274                 /*
1275                  * hv2.19.12 - DID_ABORT does not retry the request.  If we
1276                  * aborted this request then abort; otherwise it must be a
1277                  * reset.
1278                  */
1279                 DEBUG2(printk(KERN_INFO
1280                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1281                     vha->host_no, comp_status, scsi_status));
1282
1283                 cp->result = DID_RESET << 16;
1284                 break;
1285
1286         case CS_TIMEOUT:
1287                 /*
1288                  * We are going to have the fc class block the rport
1289                  * while we try to recover, so instruct the mid-layer
1290                  * to requeue until the class decides how to handle this.
1291                  */
1292                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1293
1294                 if (IS_FWI2_CAPABLE(ha)) {
1295                         DEBUG2(printk(KERN_INFO
1296                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1297                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1298                             cp->device->id, cp->device->lun, comp_status,
1299                             scsi_status));
1300                         break;
1301                 }
1302                 DEBUG2(printk(KERN_INFO
1303                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1304                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1305                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1306                     le16_to_cpu(sts->status_flags)));
1307
1308                 /* Check to see if logout occurred. */
1309                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1310                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1311                 break;
1312
1313         default:
1314                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1315                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1316                 qla_printk(KERN_INFO, ha,
1317                     "Unknown status detected 0x%x-0x%x.\n",
1318                     comp_status, scsi_status);
1319
1320                 cp->result = DID_ERROR << 16;
1321                 break;
1322         }
1323
1324         /* Place command on done queue. */
1325         if (vha->status_srb == NULL)
1326                 qla2x00_sp_compl(ha, sp);
1327 }
1328
1329 /**
1330  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1331  * @vha: SCSI driver HA context
1332  * @pkt: Entry pointer
1333  *
1334  * Copies extended sense data into the outstanding command's sense buffer.
1335  */
1336 static void
1337 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1338 {
1339         uint8_t         sense_sz = 0;
1340         struct qla_hw_data *ha = vha->hw;
1341         srb_t           *sp = vha->status_srb;
1342         struct scsi_cmnd *cp;
1343
1344         if (sp != NULL && sp->request_sense_length != 0) {
1345                 cp = sp->cmd;
1346                 if (cp == NULL) {
1347                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1348                             "sp=%p.\n", __func__, sp));
1349                         qla_printk(KERN_INFO, ha,
1350                             "cmd is NULL: already returned to OS (sp=%p)\n",
1351                             sp);
1352
1353                         vha->status_srb = NULL;
1354                         return;
1355                 }
1356
1357                 if (sp->request_sense_length > sizeof(pkt->data)) {
1358                         sense_sz = sizeof(pkt->data);
1359                 } else {
1360                         sense_sz = sp->request_sense_length;
1361                 }
1362
1363                 /* Move sense data. */
1364                 if (IS_FWI2_CAPABLE(ha))
1365                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1366                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1367                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1368
1369                 sp->request_sense_ptr += sense_sz;
1370                 sp->request_sense_length -= sense_sz;
1371
1372                 /* Place command on done queue. */
1373                 if (sp->request_sense_length == 0) {
1374                         vha->status_srb = NULL;
1375                         qla2x00_sp_compl(ha, sp);
1376                 }
1377         }
1378 }
1379
1380 /**
1381  * qla2x00_error_entry() - Process an error entry.
1382  * @vha: SCSI driver HA context
 * @rsp: Response queue
1383  * @pkt: Entry pointer
1384  */
1385 static void
1386 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1387 {
1388         srb_t *sp;
1389         struct qla_hw_data *ha = vha->hw;
1390         struct req_que *req = rsp->req;
1391 #if defined(QL_DEBUG_LEVEL_2)
1392         if (pkt->entry_status & RF_INV_E_ORDER)
1393                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1394         else if (pkt->entry_status & RF_INV_E_COUNT)
1395                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1396         else if (pkt->entry_status & RF_INV_E_PARAM)
1397                 qla_printk(KERN_ERR, ha,
1398                     "%s: Invalid Entry Parameter\n", __func__);
1399         else if (pkt->entry_status & RF_INV_E_TYPE)
1400                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1401         else if (pkt->entry_status & RF_BUSY)
1402                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1403         else
1404                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1405 #endif
1406
1407         /* Validate handle. */
1408         if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1409                 sp = req->outstanding_cmds[pkt->handle];
1410         else
1411                 sp = NULL;
1412
1413         if (sp) {
1414                 /* Free outstanding command slot. */
1415                 req->outstanding_cmds[pkt->handle] = NULL;
1416
1417                 /* Bad payload or header */
1418                 if (pkt->entry_status &
1419                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1420                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1421                         sp->cmd->result = DID_ERROR << 16;
1422                 } else if (pkt->entry_status & RF_BUSY) {
1423                         sp->cmd->result = DID_BUS_BUSY << 16;
1424                 } else {
1425                         sp->cmd->result = DID_ERROR << 16;
1426                 }
1427                 qla2x00_sp_compl(ha, sp);
1428
1429         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1430             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1431                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1432                     vha->host_no));
1433                 qla_printk(KERN_WARNING, ha,
1434                     "Error entry - invalid handle\n");
1435
1436                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1437                 qla2xxx_wake_dpc(vha);
1438         }
1439 }
1440
1441 /**
1442  * qla24xx_mbx_completion() - Process mailbox command completions.
1443  * @vha: SCSI driver HA context
1444  * @mb0: Mailbox0 register
1445  */
1446 static void
1447 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1448 {
1449         uint16_t        cnt;
1450         uint16_t __iomem *wptr;
1451         struct qla_hw_data *ha = vha->hw;
1452         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1453
1454         /* Load return mailbox registers. */
1455         ha->flags.mbox_int = 1;
1456         ha->mailbox_out[0] = mb0;
1457         wptr = (uint16_t __iomem *)&reg->mailbox1;
1458
1459         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1460                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1461                 wptr++;
1462         }
1463
1464         if (ha->mcp) {
1465                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1466                     __func__, vha->host_no, ha->mcp->mb[0]));
1467         } else {
1468                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1469                     __func__, vha->host_no));
1470         }
1471 }
1472
1473 /**
1474  * qla24xx_process_response_queue() - Process response queue entries.
1475  * @rsp: Response queue
1476  */
1477 void
1478 qla24xx_process_response_queue(struct rsp_que *rsp)
1479 {
1480         struct qla_hw_data *ha = rsp->hw;
1481         struct sts_entry_24xx *pkt;
1482         struct scsi_qla_host *vha;
1483
1484         vha = qla2x00_get_rsp_host(rsp);
1485
1486         if (!vha->flags.online)
1487                 return;
1488
1489         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1490                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1491
1492                 rsp->ring_index++;
1493                 if (rsp->ring_index == rsp->length) {
1494                         rsp->ring_index = 0;
1495                         rsp->ring_ptr = rsp->ring;
1496                 } else {
1497                         rsp->ring_ptr++;
1498                 }
1499
1500                 if (pkt->entry_status != 0) {
1501                         DEBUG3(printk(KERN_INFO
1502                             "scsi(%ld): Process error entry.\n", vha->host_no));
1503
1504                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1505                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1506                         wmb();
1507                         continue;
1508                 }
1509
1510                 switch (pkt->entry_type) {
1511                 case STATUS_TYPE:
1512                         qla2x00_status_entry(vha, rsp, pkt);
1513                         break;
1514                 case STATUS_CONT_TYPE:
1515                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1516                         break;
1517                 case VP_RPT_ID_IOCB_TYPE:
1518                         qla24xx_report_id_acquisition(vha,
1519                             (struct vp_rpt_id_entry_24xx *)pkt);
1520                         break;
1521                 default:
1522                         /* Type Not Supported. */
1523                         DEBUG4(printk(KERN_WARNING
1524                             "scsi(%ld): Received unknown response pkt type %x "
1525                             "entry status=%x.\n",
1526                             vha->host_no, pkt->entry_type, pkt->entry_status));
1527                         break;
1528                 }
1529                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1530                 wmb();
1531         }
1532
1533         /* Adjust ring index */
1534         ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1535 }
1536
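/*
 * qla2xxx_check_risc_status() - Extra RISC sanity check after a reported pause.
 * @vha: SCSI driver HA context
 *
 * Only runs on ISP25xx/ISP81xx parts.  Selects the 0x7C00 I/O base window,
 * polls the window register for readiness and, if BIT_3 of iobase_c8 is set,
 * logs the additional 0x55AA code before restoring the window.
 */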
1537 static void
1538 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1539 {
1540         int rval;
1541         uint32_t cnt;
1542         struct qla_hw_data *ha = vha->hw;
1543         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1544
1545         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1546                 return;
1547
1548         rval = QLA_SUCCESS;
1549         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1550         RD_REG_DWORD(&reg->iobase_addr);
1551         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1552         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1553             rval == QLA_SUCCESS; cnt--) {
1554                 if (cnt) {
1555                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1556                         udelay(10);
1557                 } else
1558                         rval = QLA_FUNCTION_TIMEOUT;
1559         }
1560         if (rval == QLA_SUCCESS)
1561                 goto next_test;
1562
1563         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1564         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1565             rval == QLA_SUCCESS; cnt--) {
1566                 if (cnt) {
1567                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1568                         udelay(10);
1569                 } else
1570                         rval = QLA_FUNCTION_TIMEOUT;
1571         }
1572         if (rval != QLA_SUCCESS)
1573                 goto done;
1574
1575 next_test:
1576         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1577                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1578
1579 done:
1580         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1581         RD_REG_DWORD(&reg->iobase_window);
1582 }
1583
1584 /**
1585  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and later adapters.
1586  * @irq: interrupt number
1587  * @dev_id: response queue pointer
1588  *
1589  * Called by system whenever the host adapter generates an interrupt.
1590  *
1591  * Returns handled flag.
1592  */
1593 irqreturn_t
1594 qla24xx_intr_handler(int irq, void *dev_id)
1595 {
1596         scsi_qla_host_t *vha;
1597         struct qla_hw_data *ha;
1598         struct device_reg_24xx __iomem *reg;
1599         int             status;
1600         unsigned long   iter;
1601         uint32_t        stat;
1602         uint32_t        hccr;
1603         uint16_t        mb[4];
1604         struct rsp_que *rsp;
1605
1606         rsp = (struct rsp_que *) dev_id;
1607         if (!rsp) {
1608                 printk(KERN_INFO
1609                     "%s(): NULL response queue pointer\n", __func__);
1610                 return IRQ_NONE;
1611         }
1612
1613         ha = rsp->hw;
1614         reg = &ha->iobase->isp24;
1615         status = 0;
1616
1617         spin_lock(&ha->hardware_lock);
1618         vha = qla2x00_get_rsp_host(rsp);
1619         for (iter = 50; iter--; ) {
1620                 stat = RD_REG_DWORD(&reg->host_status);
1621                 if (stat & HSRX_RISC_PAUSED) {
1622                         if (pci_channel_offline(ha->pdev))
1623                                 break;
1624
1625                         hccr = RD_REG_DWORD(&reg->hccr);
1626
1627                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1628                             "Dumping firmware!\n", hccr);
1629
1630                         qla2xxx_check_risc_status(vha);
1631
1632                         ha->isp_ops->fw_dump(vha, 1);
1633                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1634                         break;
1635                 } else if ((stat & HSRX_RISC_INT) == 0)
1636                         break;
1637
1638                 switch (stat & 0xff) {
1639                 case 0x1:
1640                 case 0x2:
1641                 case 0x10:
1642                 case 0x11:
1643                         qla24xx_mbx_completion(vha, MSW(stat));
1644                         status |= MBX_INTERRUPT;
1645
1646                         break;
1647                 case 0x12:
1648                         mb[0] = MSW(stat);
1649                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1650                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1651                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1652                         qla2x00_async_event(vha, rsp, mb);
1653                         break;
1654                 case 0x13:
1655                 case 0x14:
1656                         qla24xx_process_response_queue(rsp);
1657                         break;
1658                 default:
1659                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1660                             "(%d).\n",
1661                             vha->host_no, stat & 0xff));
1662                         break;
1663                 }
1664                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1665                 RD_REG_DWORD_RELAXED(&reg->hccr);
1666         }
1667         spin_unlock(&ha->hardware_lock);
1668
1669         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1670             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1671                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1672                 complete(&ha->mbx_intr_comp);
1673         }
1674
1675         return IRQ_HANDLED;
1676 }
1677
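/**
 * qla24xx_msix_rsp_q() - MSI-X handler for the response queue vector.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Drains the response queue and clears the RISC interrupt while holding the
 * hardware lock.
 */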
1678 static irqreturn_t
1679 qla24xx_msix_rsp_q(int irq, void *dev_id)
1680 {
1681         struct qla_hw_data *ha;
1682         struct rsp_que *rsp;
1683         struct device_reg_24xx __iomem *reg;
1684
1685         rsp = (struct rsp_que *) dev_id;
1686         if (!rsp) {
1687                 printk(KERN_INFO
1688                     "%s(): NULL response queue pointer\n", __func__);
1689                 return IRQ_NONE;
1690         }
1691         ha = rsp->hw;
1692         reg = &ha->iobase->isp24;
1693
1694         spin_lock_irq(&ha->hardware_lock);
1695
1696         qla24xx_process_response_queue(rsp);
1697         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1698
1699         spin_unlock_irq(&ha->hardware_lock);
1700
1701         return IRQ_HANDLED;
1702 }
1703
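/**
 * qla25xx_msix_rsp_q() - MSI-X handler for a multi-queue response queue.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Like qla24xx_msix_rsp_q(), but the RISC interrupt is only cleared when the
 * queue's interrupt-disable option bit (BIT_22 for queue 0, BIT_6 otherwise)
 * is not set.
 */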
1704 static irqreturn_t
1705 qla25xx_msix_rsp_q(int irq, void *dev_id)
1706 {
1707         struct qla_hw_data *ha;
1708         struct rsp_que *rsp;
1709         struct device_reg_24xx __iomem *reg;
1710         uint16_t msix_disabled_hccr = 0;
1711
1712         rsp = (struct rsp_que *) dev_id;
1713         if (!rsp) {
1714                 printk(KERN_INFO
1715                         "%s(): NULL response queue pointer\n", __func__);
1716                 return IRQ_NONE;
1717         }
1718         ha = rsp->hw;
1719         reg = &ha->iobase->isp24;
1720
1721         spin_lock_irq(&ha->hardware_lock);
1722
1723         msix_disabled_hccr = rsp->options;
1724         if (!rsp->id)
1725                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1726         else
1727                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1728
1729         qla24xx_process_response_queue(rsp);
1730
1731         if (!msix_disabled_hccr)
1732                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1733
1734         spin_unlock_irq(&ha->hardware_lock);
1735
1736         return IRQ_HANDLED;
1737 }
1738
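/**
 * qla24xx_msix_default() - MSI-X handler for the default (mailbox/AEN) vector.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Single-pass version of qla24xx_intr_handler(): handles a paused RISC,
 * mailbox completions, asynchronous events and response queue updates, then
 * completes any mailbox command waiting on the interrupt.
 */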
1739 static irqreturn_t
1740 qla24xx_msix_default(int irq, void *dev_id)
1741 {
1742         scsi_qla_host_t *vha;
1743         struct qla_hw_data *ha;
1744         struct rsp_que *rsp;
1745         struct device_reg_24xx __iomem *reg;
1746         int             status;
1747         uint32_t        stat;
1748         uint32_t        hccr;
1749         uint16_t        mb[4];
1750
1751         rsp = (struct rsp_que *) dev_id;
1752         if (!rsp) {
1753                 DEBUG(printk(
1754                     "%s(): NULL response queue pointer\n", __func__));
1755                 return IRQ_NONE;
1756         }
1757         ha = rsp->hw;
1758         reg = &ha->iobase->isp24;
1759         status = 0;
1760
1761         spin_lock_irq(&ha->hardware_lock);
1762         vha = qla2x00_get_rsp_host(rsp);
1763         do {
1764                 stat = RD_REG_DWORD(&reg->host_status);
1765                 if (stat & HSRX_RISC_PAUSED) {
1766                         if (pci_channel_offline(ha->pdev))
1767                                 break;
1768
1769                         hccr = RD_REG_DWORD(&reg->hccr);
1770
1771                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1772                             "Dumping firmware!\n", hccr);
1773
1774                         qla2xxx_check_risc_status(vha);
1775
1776                         ha->isp_ops->fw_dump(vha, 1);
1777                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1778                         break;
1779                 } else if ((stat & HSRX_RISC_INT) == 0)
1780                         break;
1781
1782                 switch (stat & 0xff) {
1783                 case 0x1:
1784                 case 0x2:
1785                 case 0x10:
1786                 case 0x11:
1787                         qla24xx_mbx_completion(vha, MSW(stat));
1788                         status |= MBX_INTERRUPT;
1789
1790                         break;
1791                 case 0x12:
1792                         mb[0] = MSW(stat);
1793                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1794                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1795                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1796                         qla2x00_async_event(vha, rsp, mb);
1797                         break;
1798                 case 0x13:
1799                 case 0x14:
1800                         qla24xx_process_response_queue(rsp);
1801                         break;
1802                 default:
1803                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1804                             "(%d).\n",
1805                             vha->host_no, stat & 0xff));
1806                         break;
1807                 }
1808                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1809         } while (0);
1810         spin_unlock_irq(&ha->hardware_lock);
1811
1812         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1813             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1814                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1815                 complete(&ha->mbx_intr_comp);
1816         }
1817
1818         return IRQ_HANDLED;
1819 }
1820
1821 /* Interrupt handling helpers. */
1822
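/*
 * Template descriptions of the two MSI-X vectors the driver registers: the
 * default vector for mailbox completions and asynchronous events, and one
 * response queue vector (with a multi-queue variant used when MQ is enabled).
 */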
1823 struct qla_init_msix_entry {
1824         uint16_t entry;
1825         uint16_t index;
1826         const char *name;
1827         irq_handler_t handler;
1828 };
1829
1830 static struct qla_init_msix_entry base_queue = {
1831         .entry = 0,
1832         .index = 0,
1833         .name = "qla2xxx (default)",
1834         .handler = qla24xx_msix_default,
1835 };
1836
1837 static struct qla_init_msix_entry base_rsp_queue = {
1838         .entry = 1,
1839         .index = 1,
1840         .name = "qla2xxx (rsp_q)",
1841         .handler = qla24xx_msix_rsp_q,
1842 };
1843
1844 static struct qla_init_msix_entry multi_rsp_queue = {
1845         .entry = 1,
1846         .index = 1,
1847         .name = "qla2xxx (multi_q)",
1848         .handler = qla25xx_msix_rsp_q,
1849 };
1850
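/* Free every registered MSI-X vector and release the MSI-X entry table. */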
1851 static void
1852 qla24xx_disable_msix(struct qla_hw_data *ha)
1853 {
1854         int i;
1855         struct qla_msix_entry *qentry;
1856
1857         for (i = 0; i < ha->msix_count; i++) {
1858                 qentry = &ha->msix_entries[i];
1859                 if (qentry->have_irq)
1860                         free_irq(qentry->vector, qentry->rsp);
1861         }
1862         pci_disable_msix(ha->pdev);
1863         kfree(ha->msix_entries);
1864         ha->msix_entries = NULL;
1865         ha->flags.msix_enabled = 0;
1866 }
1867
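/*
 * Allocate and register the MSI-X vectors: vector 0 handles mailbox
 * completions and asynchronous events, vector 1 handles response queue
 * updates (using the multi-queue handler when the adapter exposes an MQ
 * I/O base and more than one queue).  If the initial allocation fails,
 * retry with the vector count reported by the PCI core.
 */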
1868 static int
1869 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1870 {
1871         int i, ret;
1872         struct msix_entry *entries;
1873         struct qla_msix_entry *qentry;
1874         struct qla_init_msix_entry *msix_queue;
1875
1876         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1877                                         GFP_KERNEL);
1878         if (!entries)
1879                 return -ENOMEM;
1880
1881         for (i = 0; i < ha->msix_count; i++)
1882                 entries[i].entry = i;
1883
1884         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1885         if (ret) {
1886                 qla_printk(KERN_WARNING, ha,
1887                         "MSI-X: Failed to enable support -- %d/%d\n"
1888                         " Retrying with %d vectors.\n", ha->msix_count, ret, ret);
1889                 ha->msix_count = ret;
1890                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1891                 if (ret) {
1892                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1893                                 " support, giving up -- %d/%d\n",
1894                                 ha->msix_count, ret);
1895                         goto msix_out;
1896                 }
1897                 ha->max_queues = ha->msix_count - 1;
1898         }
1899         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1900                                 ha->msix_count, GFP_KERNEL);
1901         if (!ha->msix_entries) {
1902                 ret = -ENOMEM;
1903                 goto msix_out;
1904         }
1905         ha->flags.msix_enabled = 1;
1906
1907         for (i = 0; i < ha->msix_count; i++) {
1908                 qentry = &ha->msix_entries[i];
1909                 qentry->vector = entries[i].vector;
1910                 qentry->entry = entries[i].entry;
1911                 qentry->have_irq = 0;
1912                 qentry->rsp = NULL;
1913         }
1914
1915         /* Enable MSI-X for AENs for queue 0 */
1916         qentry = &ha->msix_entries[0];
1917         ret = request_irq(qentry->vector, base_queue.handler, 0,
1918                                         base_queue.name, rsp);
1919         if (ret) {
1920                 qla_printk(KERN_WARNING, ha,
1921                         "MSI-X: Unable to register handler -- %x/%d.\n",
1922                         qentry->vector, ret);
1923                 qla24xx_disable_msix(ha);
1924                 goto msix_out;
1925         }
1926         qentry->have_irq = 1;
1927         qentry->rsp = rsp;
1928
1929         /* Enable MSI-X vector for response queue update for queue 0 */
1930         if (ha->max_queues > 1 && ha->mqiobase) {
1931                 ha->mqenable = 1;
1932                 msix_queue = &multi_rsp_queue;
1933                 qla_printk(KERN_INFO, ha,
1934                                 "MQ enabled, number of queue resources: %d.\n",
1935                                 ha->max_queues);
1936         } else {
1937                 ha->mqenable = 0;
1938                 msix_queue = &base_rsp_queue;
1939         }
1940
1941         qentry = &ha->msix_entries[1];
1942         ret = request_irq(qentry->vector, msix_queue->handler, 0,
1943                                                 msix_queue->name, rsp);
1944         if (ret) {
1945                 qla_printk(KERN_WARNING, ha,
1946                         "MSI-X: Unable to register handler -- %x/%d.\n",
1947                         qentry->vector, ret);
1948                 qla24xx_disable_msix(ha);
1949                 ha->mqenable = 0;
1950                 goto msix_out;
1951         }
1952         qentry->have_irq = 1;
1953         qentry->rsp = rsp;
1954
1955 msix_out:
1956         kfree(entries);
1957         return ret;
1958 }
1959
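/*
 * Attach interrupt handling for the adapter, preferring MSI-X, then MSI,
 * then legacy INTa.  Known-problematic ISP2432 revisions are kept off
 * MSI-X and certain HP subsystem IDs off both MSI and MSI-X.  On success
 * the RISC interrupt state is cleared, except on ISP81xx adapters.
 */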
1960 int
1961 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1962 {
1963         int ret;
1964         device_reg_t __iomem *reg = ha->iobase;
1965
1966         /* If possible, enable MSI-X. */
1967         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
1968             !IS_QLA8432(ha) && !IS_QLA8001(ha))
1969                 goto skip_msix;
1970
1971         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1972                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1973                 DEBUG2(qla_printk(KERN_WARNING, ha,
1974                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1975                         ha->pdev->revision, ha->fw_attributes));
1976
1977                 goto skip_msix;
1978         }
1979
1980         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1981             (ha->pdev->subsystem_device == 0x7040 ||
1982                 ha->pdev->subsystem_device == 0x7041 ||
1983                 ha->pdev->subsystem_device == 0x1705)) {
1984                 DEBUG2(qla_printk(KERN_WARNING, ha,
1985                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1986                     ha->pdev->subsystem_vendor,
1987                     ha->pdev->subsystem_device));
1988
1989                 goto skip_msi;
1990         }
1991
1992         ret = qla24xx_enable_msix(ha, rsp);
1993         if (!ret) {
1994                 DEBUG2(qla_printk(KERN_INFO, ha,
1995                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1996                     ha->fw_attributes));
1997                 goto clear_risc_ints;
1998         }
1999         qla_printk(KERN_WARNING, ha,
2000             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
2001 skip_msix:
2002
2003         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2004             !IS_QLA8001(ha))
2005                 goto skip_msi;
2006
2007         ret = pci_enable_msi(ha->pdev);
2008         if (!ret) {
2009                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2010                 ha->flags.msi_enabled = 1;
2011         }
2012 skip_msi:
2013
2014         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2015             IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2016         if (ret) {
2017                 qla_printk(KERN_WARNING, ha,
2018                     "Failed to reserve interrupt %d -- already in use.\n",
2019                     ha->pdev->irq);
2020                 goto fail;
2021         }
2022         ha->flags.inta_enabled = 1;
2023 clear_risc_ints:
2024
2025         /*
2026          * FIXME: Noted that 8014s were being dropped during NK testing.
2027          * Timing deltas during MSI-X/INTa transitions?
2028          */
2029         if (IS_QLA81XX(ha))
2030                 goto fail;
2031         spin_lock_irq(&ha->hardware_lock);
2032         if (IS_FWI2_CAPABLE(ha)) {
2033                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2034                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2035         } else {
2036                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2037                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2038                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2039         }
2040         spin_unlock_irq(&ha->hardware_lock);
2041
2042 fail:
2043         return ret;
2044 }
2045
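/* Tear down whichever interrupt scheme (MSI-X, MSI or INTa) was set up. */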
2046 void
2047 qla2x00_free_irqs(scsi_qla_host_t *vha)
2048 {
2049         struct qla_hw_data *ha = vha->hw;
2050         struct rsp_que *rsp = ha->rsp_q_map[0];
2051
2052         if (ha->flags.msix_enabled)
2053                 qla24xx_disable_msix(ha);
2054         else if (ha->flags.inta_enabled) {
2055                 free_irq(ha->pdev->irq, rsp);
2056                 pci_disable_msi(ha->pdev);
2057         }
2058 }
2059
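/*
 * Resolve the scsi_qla_host that owns the command referenced by the current
 * ring entry of a response queue; falls back to the base (physical) host
 * when the handle cannot be resolved.
 */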
2060 static struct scsi_qla_host *
2061 qla2x00_get_rsp_host(struct rsp_que *rsp)
2062 {
2063         srb_t *sp;
2064         struct qla_hw_data *ha = rsp->hw;
2065         struct scsi_qla_host *vha = NULL;
2066         struct sts_entry_24xx *pkt;
2067         struct req_que *req;
2068
2069         if (rsp->id) {
2070                 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2071                 req = rsp->req;
2072                 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2073                         sp = req->outstanding_cmds[pkt->handle];
2074                         if (sp)
2075                                 vha = sp->fcport->vha;
2076                 }
2077         }
2078         if (!vha)
2079                 /* Handle it in the base queue. */
2080                 vha = pci_get_drvdata(ha->pdev);
2081
2082         return vha;
2083 }
2084
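/* Register the multi-queue MSI-X handler for an additional response queue. */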
2085 int qla25xx_request_irq(struct rsp_que *rsp)
2086 {
2087         struct qla_hw_data *ha = rsp->hw;
2088         struct qla_init_msix_entry *intr = &multi_rsp_queue;
2089         struct qla_msix_entry *msix = rsp->msix;
2090         int ret;
2091
2092         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2093         if (ret) {
2094                 qla_printk(KERN_WARNING, ha,
2095                         "MSI-X: Unable to register handler -- %x/%d.\n",
2096                         msix->vector, ret);
2097                 return ret;
2098         }
2099         msix->have_irq = 1;
2100         msix->rsp = rsp;
2101         return ret;
2102 }
2103
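/*
 * Update the response queue out-pointer.  The ISP25xx multi-queue variant
 * addresses its per-queue register block at mqiobase + QLA_QUE_PAGE * id;
 * the ISP24xx variant below always writes the base I/O register block.
 */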
2104 void
2105 qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2106 {
2107         device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2108         WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2109 }
2110
2111 void
2112 qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2113 {
2114         device_reg_t __iomem *reg = (void *) ha->iobase;
2115         WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2116 }
2117