1 /***************************************************************************
2                           dpti.c  -  description
3                              -------------------
4     begin                : Thu Sep 7 2000
5     copyright            : (C) 2000 by Adaptec
6
7                            July 30, 2001 First version being submitted
8                            for inclusion in the kernel.  V2.4
9
10     See Documentation/scsi/dpti.txt for history, notes, license info
11     and credits
12  ***************************************************************************/
13
14 /***************************************************************************
15  *                                                                         *
16  *   This program is free software; you can redistribute it and/or modify  *
17  *   it under the terms of the GNU General Public License as published by  *
18  *   the Free Software Foundation; either version 2 of the License, or     *
19  *   (at your option) any later version.                                   *
20  *                                                                         *
21  ***************************************************************************/
22 /***************************************************************************
23  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24  - Support 2.6 kernel and DMA-mapping
25  - ioctl fix for raid tools
26  - use schedule_timeout in long loops
27  **************************************************************************/
28
29 /*#define DEBUG 1 */
30 /*#define UARTDELAY 1 */
31
32 /* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33    high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35 #define ADDR32 (0)
36
37 #include <linux/module.h>
38
39 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42 ////////////////////////////////////////////////////////////////
43
44 #include <linux/ioctl.h>        /* For SCSI-Passthrough */
45 #include <asm/uaccess.h>
46
47 #include <linux/stat.h>
48 #include <linux/slab.h>         /* for kmalloc() */
49 #include <linux/pci.h>          /* for PCI support */
50 #include <linux/proc_fs.h>
51 #include <linux/blkdev.h>
52 #include <linux/delay.h>        /* for udelay */
53 #include <linux/interrupt.h>
54 #include <linux/kernel.h>       /* for printk */
55 #include <linux/sched.h>
56 #include <linux/reboot.h>
57 #include <linux/spinlock.h>
58 #include <linux/dma-mapping.h>
59
60 #include <linux/timer.h>
61 #include <linux/string.h>
62 #include <linux/ioport.h>
63 #include <linux/mutex.h>
64
65 #include <asm/processor.h>      /* for boot_cpu_data */
66 #include <asm/pgtable.h>
67 #include <asm/io.h>             /* for virt_to_bus, etc. */
68
69 #include <scsi/scsi.h>
70 #include <scsi/scsi_cmnd.h>
71 #include <scsi/scsi_device.h>
72 #include <scsi/scsi_host.h>
73 #include <scsi/scsi_tcq.h>
74
75 #include "dpt/dptsig.h"
76 #include "dpti.h"
77
78 /*============================================================================
79  * Create a binary signature - this is read by dptsig
80  * Needed for our management apps
81  *============================================================================
82  */
83 static dpt_sig_S DPTI_sig = {
84         {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
85 #ifdef __i386__
86         PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
87 #elif defined(__ia64__)
88         PROC_INTEL, PROC_IA64,
89 #elif defined(__sparc__)
90         PROC_ULTRASPARC, PROC_ULTRASPARC,
91 #elif defined(__alpha__)
92         PROC_ALPHA, PROC_ALPHA,
93 #else
94         (-1),(-1),
95 #endif
96          FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
97         ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
98         DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
99 };
100
101
102
103
104 /*============================================================================
105  * Globals
106  *============================================================================
107  */
108
109 static DEFINE_MUTEX(adpt_configuration_lock);
110
111 static struct i2o_sys_tbl *sys_tbl = NULL;
112 static int sys_tbl_ind = 0;
113 static int sys_tbl_len = 0;
114
115 static adpt_hba* hba_chain = NULL;
116 static int hba_count = 0;
117
118 static const struct file_operations adpt_fops = {
119         .ioctl          = adpt_ioctl,
120         .open           = adpt_open,
121         .release        = adpt_close
122 };
123
124 /* Structures and definitions for synchronous message posting.
125  * See adpt_i2o_post_wait() for description
126  */
127 struct adpt_i2o_post_wait_data
128 {
129         int status;
130         u32 id;
131         adpt_wait_queue_head_t *wq;
132         struct adpt_i2o_post_wait_data *next;
133 };
134
135 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
136 static u32 adpt_post_wait_id = 0;
137 static DEFINE_SPINLOCK(adpt_post_wait_lock);
138
139
140 /*============================================================================
141  *                              Functions
142  *============================================================================
143  */
144
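/*
 * Return the adapter's diagnostic blink-LED code, or 0 if the controller
 * is not flashing one (the flag byte reads 0xbc only when a valid code
 * is present).
 */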
145 static u8 adpt_read_blink_led(adpt_hba* host)
146 {
147         if(host->FwDebugBLEDflag_P != 0) {
148                 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
149                         return readb(host->FwDebugBLEDvalue_P);
150                 }
151         }
152         return 0;
153 }
154
155 /*============================================================================
156  * Scsi host template interface functions
157  *============================================================================
158  */
159
160 static struct pci_device_id dptids[] = {
161         { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
162         { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
163         { 0, }
164 };
165 MODULE_DEVICE_TABLE(pci,dptids);
166
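/*
 * Probe for all DPT/Adaptec I2O controllers on the PCI bus, bring each
 * IOP from INIT through HOLD to OPERATIONAL, read its LCT, register a
 * SCSI host for it, and register the control character device.
 * Returns the number of HBAs successfully set up.
 */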
167 static int adpt_detect(struct scsi_host_template* sht)
168 {
169         struct pci_dev *pDev = NULL;
170         adpt_hba* pHba;
171
172         PINFO("Detecting Adaptec I2O RAID controllers...\n");
173
174         /* search for all Adaptec I2O RAID cards */
175         while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
176                 if(pDev->device == PCI_DPT_DEVICE_ID ||
177                    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
178                         if(adpt_install_hba(sht, pDev) ){
179                                 PERROR("Could not Init an I2O RAID device\n");
180                                 PERROR("Will not try to detect others.\n");
181                                 return hba_count-1;
182                         }
183                         pci_dev_get(pDev);
184                 }
185         }
186
187         /* In INIT state, Activate IOPs */
188         for (pHba = hba_chain; pHba; pHba = pHba->next) {
189                 // Activate does get status, init outbound, and get hrt
190                 if (adpt_i2o_activate_hba(pHba) < 0) {
191                         adpt_i2o_delete_hba(pHba);
192                 }
193         }
194
195
196         /* Active IOPs in HOLD state */
197
198 rebuild_sys_tab:
199         if (hba_chain == NULL) 
200                 return 0;
201
202         /*
203          * If build_sys_table fails, we kill everything and bail
204          * as we can't init the IOPs w/o a system table
205          */     
206         if (adpt_i2o_build_sys_table() < 0) {
207                 adpt_i2o_sys_shutdown();
208                 return 0;
209         }
210
211         PDEBUG("HBA's in HOLD state\n");
212
213         /* If an IOP doesn't come online, we need to rebuild the system table */
214         for (pHba = hba_chain; pHba; pHba = pHba->next) {
215                 if (adpt_i2o_online_hba(pHba) < 0) {
216                         adpt_i2o_delete_hba(pHba);      
217                         goto rebuild_sys_tab;
218                 }
219         }
220
221         /* Active IOPs now in OPERATIONAL state */
222         PDEBUG("HBA's in OPERATIONAL state\n");
223
224         printk("dpti: If you have a lot of devices this could take a few minutes.\n");
225         for (pHba = hba_chain; pHba; pHba = pHba->next) {
226                 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
227                 if (adpt_i2o_lct_get(pHba) < 0){
228                         adpt_i2o_delete_hba(pHba);
229                         continue;
230                 }
231
232                 if (adpt_i2o_parse_lct(pHba) < 0){
233                         adpt_i2o_delete_hba(pHba);
234                         continue;
235                 }
236                 adpt_inquiry(pHba);
237         }
238
239         for (pHba = hba_chain; pHba; pHba = pHba->next) {
240                 if (adpt_scsi_host_alloc(pHba, sht) < 0){
241                         adpt_i2o_delete_hba(pHba);
242                         continue;
243                 }
244                 pHba->initialized = TRUE;
245                 pHba->state &= ~DPTI_STATE_RESET;
246         }
247
248         // Register our control device node
249         // Nodes will need to be created in /dev to access this;
250         // they cannot be created from within the driver.
251         if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
252                 adpt_i2o_sys_shutdown();
253                 return 0;
254         }
255         return hba_count;
256 }
257
258
259 /*
260  * scsi_unregister will be called AFTER we return.
261  */
262 static int adpt_release(struct Scsi_Host *host)
263 {
264         adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
265 //      adpt_i2o_quiesce_hba(pHba);
266         adpt_i2o_delete_hba(pHba);
267         scsi_unregister(host);
268         return 0;
269 }
270
271
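/*
 * Issue a standard SCSI INQUIRY to the adapter itself (via the private
 * I2O_CMD_SCSI_EXEC interface) and use the reply to build the vendor,
 * model and firmware strings in pHba->detail.
 */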
272 static void adpt_inquiry(adpt_hba* pHba)
273 {
274         u32 msg[14]; 
275         u32 *mptr;
276         u32 *lenptr;
277         int direction;
278         int scsidir;
279         u32 len;
280         u32 reqlen;
281         u8* buf;
282         u8  scb[16];
283         s32 rcode;
284
285         memset(msg, 0, sizeof(msg));
286         buf = kmalloc(80,GFP_KERNEL|ADDR32);
287         if(!buf){
288                 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
289                 return;
290         }
291         memset((void*)buf, 0, 36);
292         
293         len = 36;
294         direction = 0x00000000; 
295         scsidir  =0x40000000;   // DATA IN  (iop<--dev)
296
297         reqlen = 14;            // SINGLE SGE
298         /* Stick the headers on */
299         msg[0] = reqlen<<16 | SGL_OFFSET_12;
300         msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
301         msg[2] = 0;
302         msg[3]  = 0;
303         // Adaptec/DPT Private stuff 
304         msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
305         msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
306         /* Direction, disconnect ok | sense data | simple queue , CDBLen */
307         // I2O_SCB_FLAG_ENABLE_DISCONNECT | 
308         // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
309         // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
310         msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
311
312         mptr=msg+7;
313
314         memset(scb, 0, sizeof(scb));
315         // Write SCSI command into the message - always 16 byte block 
316         scb[0] = INQUIRY;
317         scb[1] = 0;
318         scb[2] = 0;
319         scb[3] = 0;
320         scb[4] = 36;
321         scb[5] = 0;
322         // Don't care about the rest of scb
323
324         memcpy(mptr, scb, sizeof(scb));
325         mptr+=4;
326         lenptr=mptr++;          /* Remember me - fill in when we know */
327
328         /* Now fill in the SGList and command */
329         *lenptr = len;
330         *mptr++ = 0xD0000000|direction|len;
331         *mptr++ = virt_to_bus(buf);
332
333         // Send it on its way
334         rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
335         if (rcode != 0) {
336                 sprintf(pHba->detail, "Adaptec I2O RAID");
337                 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
338                 if (rcode != -ETIME && rcode != -EINTR)
339                         kfree(buf);
340         } else {
341                 memset(pHba->detail, 0, sizeof(pHba->detail));
342                 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
343                 memcpy(&(pHba->detail[16]), " Model: ", 8);
344                 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
345                 memcpy(&(pHba->detail[40]), " FW: ", 4);
346                 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
347                 pHba->detail[48] = '\0';        /* precautionary */
348                 kfree(buf);
349         }
350         adpt_i2o_status_get(pHba);
351         return ;
352 }
353
354
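/*
 * Set the queue depth for a newly attached device: tagged devices get
 * (can_queue - 1) simple-tagged commands, everything else is limited to
 * a single untagged command.
 */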
355 static int adpt_slave_configure(struct scsi_device * device)
356 {
357         struct Scsi_Host *host = device->host;
358         adpt_hba* pHba;
359
360         pHba = (adpt_hba *) host->hostdata[0];
361
362         if (host->can_queue && device->tagged_supported) {
363                 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
364                                 host->can_queue - 1);
365         } else {
366                 scsi_adjust_queue_depth(device, 0, 1);
367         }
368         return 0;
369 }
370
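/*
 * queuecommand entry point: translate the SCSI command into an I2O
 * message and post it to the IOP, deferring or failing the command if
 * the controller or the target device is currently being reset.
 */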
371 static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
372 {
373         adpt_hba* pHba = NULL;
374         struct adpt_device* pDev = NULL;        /* dpt per device information */
375
376         cmd->scsi_done = done;
377         /*
378          * SCSI REQUEST_SENSE commands will be executed automatically by the 
379          * Host Adapter for any errors, so they should not be executed 
380          * explicitly unless the Sense Data is zero indicating that no error 
381          * occurred.
382          */
383
384         if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
385                 cmd->result = (DID_OK << 16);
386                 cmd->scsi_done(cmd);
387                 return 0;
388         }
389
390         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
391         if (!pHba) {
392                 return FAILED;
393         }
394
395         rmb();
396         /*
397          * TODO: I need to block here if I am processing ioctl cmds,
398          * but if the outstanding cmds all finish before the ioctl,
399          * the scsi-core will not know to start sending cmds to me again.
400          * I need a way to restart the scsi-core's queues, or I should
401          * block calling scsi_done on the outstanding cmds instead.
402          * For now we don't set the IOCTL state.
403          */
404         if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
405                 pHba->host->last_reset = jiffies;
406                 pHba->host->resetting = 1;
407                 return 1;
408         }
409
410         // TODO: if the cmd->device is offline then I may need to issue a bus rescan
411         // followed by a get_lct to see if the device is still there
412         if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
413                 /*
414                  * First command request for this device.  Set up a pointer
415                  * to the device structure.  This should be a TEST_UNIT_READY
416                  * command from scan_scsis_single.
417                  */
418                 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
419                         // TODO: if any luns are present at this bus/scsi id, then fake a TEST_UNIT_READY and INQUIRY response
420                         // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
421                         cmd->result = (DID_NO_CONNECT << 16);
422                         cmd->scsi_done(cmd);
423                         return 0;
424                 }
425                 cmd->device->hostdata = pDev;
426         }
427         pDev->pScsi_dev = cmd->device;
428
429         /*
430          * If we are being called from when the device is being reset, 
431          * delay processing of the command until later.
432          */
433         if (pDev->state & DPTI_DEV_RESET ) {
434                 return FAILED;
435         }
436         return adpt_scsi_to_i2o(pHba, cmd, pDev);
437 }
438
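/*
 * Report a translated BIOS disk geometry (heads/sectors/cylinders)
 * derived from the capacity, with a fixed geometry for CD-ROMs.
 */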
439 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
440                 sector_t capacity, int geom[])
441 {
442         int heads=-1;
443         int sectors=-1;
444         int cylinders=-1;
445
446         // *** First let's set the default geometry ***
447         
448         // If the capacity is less than 0x2000
449         if (capacity < 0x2000 ) {       // floppy
450                 heads = 18;
451                 sectors = 2;
452         } 
453         // else if between 0x2000 and 0x20000
454         else if (capacity < 0x20000) {
455                 heads = 64;
456                 sectors = 32;
457         }
458         // else if between 0x20000 and 0x40000
459         else if (capacity < 0x40000) {
460                 heads = 65;
461                 sectors = 63;
462         }
463         // else if between 0x40000 and 0x80000
464         else if (capacity < 0x80000) {
465                 heads = 128;
466                 sectors = 63;
467         }
468         // else if greater than 0x80000
469         else {
470                 heads = 255;
471                 sectors = 63;
472         }
473         cylinders = sector_div(capacity, heads * sectors);
474
475         // Special case if CDROM
476         if(sdev->type == 5) {  // CDROM
477                 heads = 252;
478                 sectors = 63;
479                 cylinders = 1111;
480         }
481
482         geom[0] = heads;
483         geom[1] = sectors;
484         geom[2] = cylinders;
485         
486         PDEBUG("adpt_bios_param: exit\n");
487         return 0;
488 }
489
490
491 static const char *adpt_info(struct Scsi_Host *host)
492 {
493         adpt_hba* pHba;
494
495         pHba = (adpt_hba *) host->hostdata[0];
496         return (char *) (pHba->detail);
497 }
498
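/*
 * /proc/scsi read handler: emit driver, controller and per-device
 * information into the supplied buffer, honouring the offset/length
 * windowing the proc_scsi interface expects.  Writes are rejected.
 */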
499 static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
500                   int length, int inout)
501 {
502         struct adpt_device* d;
503         int id;
504         int chan;
505         int len = 0;
506         int begin = 0;
507         int pos = 0;
508         adpt_hba* pHba;
509         int unit;
510
511         *start = buffer;
512         if (inout == TRUE) {
513                 /*
514                  * The user has done a write and wants us to take the
515                  * data in the buffer and do something with it.
516                  * proc_scsiwrite calls us with inout = 1
517                  *
518                  * Read data from buffer (writing to us) - NOT SUPPORTED
519                  */
520                 return -EINVAL;
521         }
522
523         /*
524          * inout = 0 means the user has done a read and wants information
525          * returned, so we write information about the cards into the buffer
526          * proc_scsiread() calls us with inout = 0
527          */
528
529         // Find HBA (host bus adapter) we are looking for
530         mutex_lock(&adpt_configuration_lock);
531         for (pHba = hba_chain; pHba; pHba = pHba->next) {
532                 if (pHba->host == host) {
533                         break;  /* found adapter */
534                 }
535         }
536         mutex_unlock(&adpt_configuration_lock);
537         if (pHba == NULL) {
538                 return 0;
539         }
540         host = pHba->host;
541
542         len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
543         len += sprintf(buffer+len, "%s\n", pHba->detail);
544         len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
545                         pHba->host->host_no, pHba->name, host->irq);
546         len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
547                         host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
548
549         pos = begin + len;
550
551         /* CHECKPOINT */
552         if(pos > offset + length) {
553                 goto stop_output;
554         }
555         if(pos <= offset) {
556                 /*
557                  * If we haven't even written to where we last left
558                  * off (the last time we were called), reset the 
559                  * beginning pointer.
560                  */
561                 len = 0;
562                 begin = pos;
563         }
564         len +=  sprintf(buffer+len, "Devices:\n");
565         for(chan = 0; chan < MAX_CHANNEL; chan++) {
566                 for(id = 0; id < MAX_ID; id++) {
567                         d = pHba->channel[chan].device[id];
568                         while(d){
569                                 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
570                                 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
571                                 pos = begin + len;
572
573
574                                 /* CHECKPOINT */
575                                 if(pos > offset + length) {
576                                         goto stop_output;
577                                 }
578                                 if(pos <= offset) {
579                                         len = 0;
580                                         begin = pos;
581                                 }
582
583                                 unit = d->pI2o_dev->lct_data.tid;
584                                 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
585                                                unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
586                                                scsi_device_online(d->pScsi_dev)? "online":"offline"); 
587                                 pos = begin + len;
588
589                                 /* CHECKPOINT */
590                                 if(pos > offset + length) {
591                                         goto stop_output;
592                                 }
593                                 if(pos <= offset) {
594                                         len = 0;
595                                         begin = pos;
596                                 }
597
598                                 d = d->next_lun;
599                         }
600                 }
601         }
602
603         /*
604          * begin is where we last checked our position with regards to offset
605          * begin is always less than offset.  len is relative to begin.  It
606          * is the number of bytes written past begin
607          *
608          */
609 stop_output:
610         /* stop the output and calculate the correct length */
611         *(buffer + len) = '\0';
612
613         *start = buffer + (offset - begin);     /* Start of wanted data */
614         len -= (offset - begin);
615         if(len > length) {
616                 len = length;
617         } else if(len < 0){
618                 len = 0;
619                 **start = '\0';
620         }
621         return len;
622 }
623
624
625 /*===========================================================================
626  * Error Handling routines
627  *===========================================================================
628  */
629
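/*
 * eh_abort_handler: post an I2O SCSI_ABORT message for the command's
 * target TID and wait synchronously for the IOP to acknowledge it.
 */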
630 static int adpt_abort(struct scsi_cmnd * cmd)
631 {
632         adpt_hba* pHba = NULL;  /* host bus adapter structure */
633         struct adpt_device* dptdevice;  /* dpt per device information */
634         u32 msg[5];
635         int rcode;
636
637         if(cmd->serial_number == 0){
638                 return FAILED;
639         }
640         pHba = (adpt_hba*) cmd->device->host->hostdata[0];
641         printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
642         if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
643                 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
644                 return FAILED;
645         }
646
647         memset(msg, 0, sizeof(msg));
648         msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
649         msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
650         msg[2] = 0;
651         msg[3]= 0; 
652         msg[4] = (u32)cmd;
653         if (pHba->host)
654                 spin_lock_irq(pHba->host->host_lock);
655         rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
656         if (pHba->host)
657                 spin_unlock_irq(pHba->host->host_lock);
658         if (rcode != 0) {
659                 if(rcode == -EOPNOTSUPP ){
660                         printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
661                         return FAILED;
662                 }
663                 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
664                 return FAILED;
665         } 
666         printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
667         return SUCCESS;
668 }
669
670
671 #define I2O_DEVICE_RESET 0x27
672 // This is the same for BLK and SCSI devices
673 // NOTE this is wrong in the i2o.h definitions
674 // This is not currently supported by our adapter but we issue it anyway
675 static int adpt_device_reset(struct scsi_cmnd* cmd)
676 {
677         adpt_hba* pHba;
678         u32 msg[4];
679         u32 rcode;
680         int old_state;
681         struct adpt_device* d = cmd->device->hostdata;
682
683         pHba = (void*) cmd->device->host->hostdata[0];
684         printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
685         if (!d) {
686                 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
687                 return FAILED;
688         }
689         memset(msg, 0, sizeof(msg));
690         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
691         msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
692         msg[2] = 0;
693         msg[3] = 0;
694
695         if (pHba->host)
696                 spin_lock_irq(pHba->host->host_lock);
697         old_state = d->state;
698         d->state |= DPTI_DEV_RESET;
699         rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
700         d->state = old_state;
701         if (pHba->host)
702                 spin_unlock_irq(pHba->host->host_lock);
703         if (rcode != 0) {
704                 if(rcode == -EOPNOTSUPP ){
705                         printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
706                         return FAILED;
707                 }
708                 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
709                 return FAILED;
710         } else {
711                 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
712                 return SUCCESS;
713         }
714 }
715
716
717 #define I2O_HBA_BUS_RESET 0x87
718 // This version of bus reset is called by the eh_error handler
719 static int adpt_bus_reset(struct scsi_cmnd* cmd)
720 {
721         adpt_hba* pHba;
722         u32 msg[4];
723         u32 rcode;
724
725         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
726         memset(msg, 0, sizeof(msg));
727         printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
728         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
729         msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
730         msg[2] = 0;
731         msg[3] = 0;
732         if (pHba->host)
733                 spin_lock_irq(pHba->host->host_lock);
734         rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
735         if (pHba->host)
736                 spin_unlock_irq(pHba->host->host_lock);
737         if (rcode != 0) {
738                 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
739                 return FAILED;
740         } else {
741                 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
742                 return SUCCESS;
743         }
744 }
745
746 // This version of reset is called by the eh_error_handler
747 static int __adpt_reset(struct scsi_cmnd* cmd)
748 {
749         adpt_hba* pHba;
750         int rcode;
751         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
752         printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
753         rcode =  adpt_hba_reset(pHba);
754         if(rcode == 0){
755                 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
756                 return SUCCESS;
757         } else {
758                 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
759                 return FAILED;
760         }
761 }
762
763 static int adpt_reset(struct scsi_cmnd* cmd)
764 {
765         int rc;
766
767         spin_lock_irq(cmd->device->host->host_lock);
768         rc = __adpt_reset(cmd);
769         spin_unlock_irq(cmd->device->host->host_lock);
770
771         return rc;
772 }
773
774 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
775 static int adpt_hba_reset(adpt_hba* pHba)
776 {
777         int rcode;
778
779         pHba->state |= DPTI_STATE_RESET;
780
781         // Activate does get status, init outbound, and get hrt
782         if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
783                 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
784                 adpt_i2o_delete_hba(pHba);
785                 return rcode;
786         }
787
788         if ((rcode=adpt_i2o_build_sys_table()) < 0) {
789                 adpt_i2o_delete_hba(pHba);
790                 return rcode;
791         }
792         PDEBUG("%s: in HOLD state\n",pHba->name);
793
794         if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
795                 adpt_i2o_delete_hba(pHba);      
796                 return rcode;
797         }
798         PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
799
800         if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
801                 adpt_i2o_delete_hba(pHba);
802                 return rcode;
803         }
804
805         if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
806                 adpt_i2o_delete_hba(pHba);
807                 return rcode;
808         }
809         pHba->state &= ~DPTI_STATE_RESET;
810
811         adpt_fail_posted_scbs(pHba);
812         return 0;       /* return success */
813 }
814
815 /*===========================================================================
816  * 
817  *===========================================================================
818  */
819
820
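/*
 * Shut everything down: delete every HBA still on hba_chain and free any
 * adpt_i2o_post_wait_data entries left on the post-wait queue.
 */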
821 static void adpt_i2o_sys_shutdown(void)
822 {
823         adpt_hba *pHba, *pNext;
824         struct adpt_i2o_post_wait_data *p1, *old;
825
826          printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
827          printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
828         /* Delete all IOPs from the controller chain */
829         /* They should have already been released by the
830          * scsi-core
831          */
832         for (pHba = hba_chain; pHba; pHba = pNext) {
833                 pNext = pHba->next;
834                 adpt_i2o_delete_hba(pHba);
835         }
836
837         /* Remove any timedout entries from the wait queue.  */
838 //      spin_lock_irqsave(&adpt_post_wait_lock, flags);
839         /* Nothing should be outstanding at this point so just
840          * free them 
841          */
842         for(p1 = adpt_post_wait_queue; p1;) {
843                 old = p1;
844                 p1 = p1->next;
845                 kfree(old);
846         }
847 //      spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
848         adpt_post_wait_queue = NULL;
849
850          printk(KERN_INFO "Adaptec I2O controllers down.\n");
851 }
852
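/*
 * Set up one controller: enable the PCI device, map its register BARs
 * (handling the split-BAR "Raptor" layout), allocate an adpt_hba, link
 * it onto hba_chain and hook the shared interrupt line.
 */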
853 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
854 {
855
856         adpt_hba* pHba = NULL;
857         adpt_hba* p = NULL;
858         ulong base_addr0_phys = 0;
859         ulong base_addr1_phys = 0;
860         u32 hba_map0_area_size = 0;
861         u32 hba_map1_area_size = 0;
862         void __iomem *base_addr_virt = NULL;
863         void __iomem *msg_addr_virt = NULL;
864
865         int raptorFlag = FALSE;
866
867         if(pci_enable_device(pDev)) {
868                 return -EINVAL;
869         }
870
871         if (pci_request_regions(pDev, "dpt_i2o")) {
872                 PERROR("dpti: adpt_config_hba: pci request region failed\n");
873                 return -EINVAL;
874         }
875
876         pci_set_master(pDev);
877         if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
878                 return -EINVAL;
879
880         base_addr0_phys = pci_resource_start(pDev,0);
881         hba_map0_area_size = pci_resource_len(pDev,0);
882
883         // Check if standard PCI card or single BAR Raptor
884         if(pDev->device == PCI_DPT_DEVICE_ID){
885                 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
886                         // Raptor card with this device id needs 4M
887                         hba_map0_area_size = 0x400000;
888                 } else { // Not Raptor - it is a PCI card
889                         if(hba_map0_area_size > 0x100000 ){ 
890                                 hba_map0_area_size = 0x100000;
891                         }
892                 }
893         } else {// Raptor split BAR config
894                 // Use BAR1 in this configuration
895                 base_addr1_phys = pci_resource_start(pDev,1);
896                 hba_map1_area_size = pci_resource_len(pDev,1);
897                 raptorFlag = TRUE;
898         }
899
900         base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
901         if (!base_addr_virt) {
902                 pci_release_regions(pDev);
903                 PERROR("dpti: adpt_config_hba: io remap failed\n");
904                 return -EINVAL;
905         }
906
907         if(raptorFlag == TRUE) {
908                 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
909                 if (!msg_addr_virt) {
910                         PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
911                         iounmap(base_addr_virt);
912                         pci_release_regions(pDev);
913                         return -EINVAL;
914                 }
915         } else {
916                 msg_addr_virt = base_addr_virt;
917         }
918         
919         // Allocate and zero the data structure
920         pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
921         if (!pHba) {
922                 if (msg_addr_virt != base_addr_virt)
923                         iounmap(msg_addr_virt);
924                 iounmap(base_addr_virt);
925                 pci_release_regions(pDev);
926                 return -ENOMEM;
927         }
928
929         mutex_lock(&adpt_configuration_lock);
930
931         if(hba_chain != NULL){
932                 for(p = hba_chain; p->next; p = p->next);
933                 p->next = pHba;
934         } else {
935                 hba_chain = pHba;
936         }
937         pHba->next = NULL;
938         pHba->unit = hba_count;
939         sprintf(pHba->name, "dpti%d", hba_count);
940         hba_count++;
941         
942         mutex_unlock(&adpt_configuration_lock);
943
944         pHba->pDev = pDev;
945         pHba->base_addr_phys = base_addr0_phys;
946
947         // Set up the Virtual Base Address of the I2O Device
948         pHba->base_addr_virt = base_addr_virt;
949         pHba->msg_addr_virt = msg_addr_virt;
950         pHba->irq_mask = base_addr_virt+0x30;
951         pHba->post_port = base_addr_virt+0x40;
952         pHba->reply_port = base_addr_virt+0x44;
953
954         pHba->hrt = NULL;
955         pHba->lct = NULL;
956         pHba->lct_size = 0;
957         pHba->status_block = NULL;
958         pHba->post_count = 0;
959         pHba->state = DPTI_STATE_RESET;
960         pHba->pDev = pDev;
961         pHba->devices = NULL;
962
963         // Initializing the spinlocks
964         spin_lock_init(&pHba->state_lock);
965         spin_lock_init(&adpt_post_wait_lock);
966
967         if(raptorFlag == 0){
968                 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n", 
969                         hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
970         } else {
971                 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
972                 printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
973                 printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
974         }
975
976         if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
977                 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
978                 adpt_i2o_delete_hba(pHba);
979                 return -EINVAL;
980         }
981
982         return 0;
983 }
984
985
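/*
 * Tear down one controller: unlink it from hba_chain, release its IRQ,
 * mappings and PCI regions, and free every I2O and per-device structure
 * that was allocated for it.
 */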
986 static void adpt_i2o_delete_hba(adpt_hba* pHba)
987 {
988         adpt_hba* p1;
989         adpt_hba* p2;
990         struct i2o_device* d;
991         struct i2o_device* next;
992         int i;
993         int j;
994         struct adpt_device* pDev;
995         struct adpt_device* pNext;
996
997
998         mutex_lock(&adpt_configuration_lock);
999         // scsi_unregister calls our adpt_release which
1000         // does a quiesce
1001         if(pHba->host){
1002                 free_irq(pHba->host->irq, pHba);
1003         }
1004         p2 = NULL;
1005         for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1006                 if(p1 == pHba) {
1007                         if(p2) {
1008                                 p2->next = p1->next;
1009                         } else {
1010                                 hba_chain = p1->next;
1011                         }
1012                         break;
1013                 }
1014         }
1015
1016         hba_count--;
1017         mutex_unlock(&adpt_configuration_lock);
1018
1019         iounmap(pHba->base_addr_virt);
1020         pci_release_regions(pHba->pDev);
1021         if(pHba->msg_addr_virt != pHba->base_addr_virt){
1022                 iounmap(pHba->msg_addr_virt);
1023         }
1024         kfree(pHba->hrt);
1025         kfree(pHba->lct);
1026         kfree(pHba->status_block);
1027         kfree(pHba->reply_pool);
1028
1029         for(d = pHba->devices; d ; d = next){
1030                 next = d->next;
1031                 kfree(d);
1032         }
1033         for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1034                 for(j = 0; j < MAX_ID; j++){
1035                         if(pHba->channel[i].device[j] != NULL){
1036                                 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1037                                         pNext = pDev->next_lun;
1038                                         kfree(pDev);
1039                                 }
1040                         }
1041                 }
1042         }
1043         pci_dev_put(pHba->pDev);
1044         kfree(pHba);
1045
1046         if(hba_count <= 0){
1047                 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);   
1048         }
1049 }
1050
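/*
 * Look up the adpt_device for a given channel/id/lun, walking the
 * next_lun chain when more than one LUN shares the same id.
 */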
1051 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1052 {
1053         struct adpt_device* d;
1054
1055         if(chan < 0 || chan >= MAX_CHANNEL)
1056                 return NULL;
1057         
1058         if( pHba->channel[chan].device == NULL){
1059                 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1060                 return NULL;
1061         }
1062
1063         d = pHba->channel[chan].device[id];
1064         if(!d || d->tid == 0) {
1065                 return NULL;
1066         }
1067
1068         /* If it is the only lun at that address then this should match*/
1069         if(d->scsi_lun == lun){
1070                 return d;
1071         }
1072
1073         /* else we need to look through all the luns */
1074         for(d=d->next_lun ; d ; d = d->next_lun){
1075                 if(d->scsi_lun == lun){
1076                         return d;
1077                 }
1078         }
1079         return NULL;
1080 }
1081
1082
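/*
 * Synchronous message post: tag the message with a transaction id, queue
 * a wait_data node, post the message to the IOP and sleep until
 * adpt_i2o_post_wait_complete() fills in the reply status or the timeout
 * expires.
 */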
1083 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1084 {
1085         // I used my own version of the WAIT_QUEUE_HEAD
1086         // to handle some version differences
1087         // When embedded in the kernel this could go back to the vanilla one
1088         ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1089         int status = 0;
1090         ulong flags = 0;
1091         struct adpt_i2o_post_wait_data *p1, *p2;
1092         struct adpt_i2o_post_wait_data *wait_data =
1093                 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
1094         DECLARE_WAITQUEUE(wait, current);
1095
1096         if (!wait_data)
1097                 return -ENOMEM;
1098
1099         /*
1100          * The spin locking is needed to keep anyone from playing
1101          * with the queue pointers and id while we do the same
1102          */
1103         spin_lock_irqsave(&adpt_post_wait_lock, flags);
1104         // TODO: we need a MORE unique way of getting ids
1105         // to support async LCT get
1106         wait_data->next = adpt_post_wait_queue;
1107         adpt_post_wait_queue = wait_data;
1108         adpt_post_wait_id++;
1109         adpt_post_wait_id &= 0x7fff;
1110         wait_data->id =  adpt_post_wait_id;
1111         spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1112
1113         wait_data->wq = &adpt_wq_i2o_post;
1114         wait_data->status = -ETIMEDOUT;
1115
1116         add_wait_queue(&adpt_wq_i2o_post, &wait);
1117
1118         msg[2] |= 0x80000000 | ((u32)wait_data->id);
1119         timeout *= HZ;
1120         if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1121                 set_current_state(TASK_INTERRUPTIBLE);
1122                 if(pHba->host)
1123                         spin_unlock_irq(pHba->host->host_lock);
1124                 if (!timeout)
1125                         schedule();
1126                 else{
1127                         timeout = schedule_timeout(timeout);
1128                         if (timeout == 0) {
1129                                 // I/O issued, but cannot get result in
1130                                 // specified time. Freeing resources is
1131                                 // dangerous.
1132                                 status = -ETIME;
1133                         }
1134                 }
1135                 if(pHba->host)
1136                         spin_lock_irq(pHba->host->host_lock);
1137         }
1138         remove_wait_queue(&adpt_wq_i2o_post, &wait);
1139
1140         if(status == -ETIMEDOUT){
1141                 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1142                 // We will have to free the wait_data memory during shutdown
1143                 return status;
1144         }
1145
1146         /* Remove the entry from the queue.  */
1147         p2 = NULL;
1148         spin_lock_irqsave(&adpt_post_wait_lock, flags);
1149         for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1150                 if(p1 == wait_data) {
1151                         if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1152                                 status = -EOPNOTSUPP;
1153                         }
1154                         if(p2) {
1155                                 p2->next = p1->next;
1156                         } else {
1157                                 adpt_post_wait_queue = p1->next;
1158                         }
1159                         break;
1160                 }
1161         }
1162         spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1163
1164         kfree(wait_data);
1165
1166         return status;
1167 }
1168
1169
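/*
 * Low-level post: wait (up to 30 seconds) for a free inbound message
 * frame, copy the message into it and write the frame offset to the
 * post port to hand it to the IOP.
 */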
1170 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1171 {
1172
1173         u32 m = EMPTY_QUEUE;
1174         u32 __iomem *msg;
1175         ulong timeout = jiffies + 30*HZ;
1176         do {
1177                 rmb();
1178                 m = readl(pHba->post_port);
1179                 if (m != EMPTY_QUEUE) {
1180                         break;
1181                 }
1182                 if(time_after(jiffies,timeout)){
1183                         printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1184                         return -ETIMEDOUT;
1185                 }
1186                 schedule_timeout_uninterruptible(1);
1187         } while(m == EMPTY_QUEUE);
1188                 
1189         msg = pHba->msg_addr_virt + m;
1190         memcpy_toio(msg, data, len);
1191         wmb();
1192
1193         //post message
1194         writel(m, pHba->post_port);
1195         wmb();
1196
1197         return 0;
1198 }
1199
1200
1201 static void adpt_i2o_post_wait_complete(u32 context, int status)
1202 {
1203         struct adpt_i2o_post_wait_data *p1 = NULL;
1204         /*
1205          * We need to search through the adpt_post_wait
1206          * queue to see if the given message is still
1207          * outstanding.  If not, it means that the IOP
1208          * took longer to respond to the message than we
1209          * had allowed and timer has already expired.
1210          * Not much we can do about that except log
1211          * it for debug purposes, increase timeout, and recompile
1212          *
1213          * Lock needed to keep anyone from moving queue pointers
1214          * around while we're looking through them.
1215          */
1216
1217         context &= 0x7fff;
1218
1219         spin_lock(&adpt_post_wait_lock);
1220         for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1221                 if(p1->id == context) {
1222                         p1->status = status;
1223                         spin_unlock(&adpt_post_wait_lock);
1224                         wake_up_interruptible(p1->wq);
1225                         return;
1226                 }
1227         }
1228         spin_unlock(&adpt_post_wait_lock);
1229         // If this happens we lose commands that probably really completed
1230         printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1231         printk(KERN_DEBUG"      Tasks in wait queue:\n");
1232         for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1233                 printk(KERN_DEBUG"           %d\n",p1->id);
1234         }
1235         return;
1236 }
1237
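/*
 * Reset the IOP: quiesce it (unless this is the first reset), grab a
 * message frame, send I2O_CMD_ADAPTER_RESET and poll the returned status
 * byte until the reset completes or times out.
 */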
1238 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)                   
1239 {
1240         u32 msg[8];
1241         u8* status;
1242         u32 m = EMPTY_QUEUE ;
1243         ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1244
1245         if(pHba->initialized  == FALSE) {       // First time reset should be quick
1246                 timeout = jiffies + (25*HZ);
1247         } else {
1248                 adpt_i2o_quiesce_hba(pHba);
1249         }
1250
1251         do {
1252                 rmb();
1253                 m = readl(pHba->post_port);
1254                 if (m != EMPTY_QUEUE) {
1255                         break;
1256                 }
1257                 if(time_after(jiffies,timeout)){
1258                         printk(KERN_WARNING"Timeout waiting for message!\n");
1259                         return -ETIMEDOUT;
1260                 }
1261                 schedule_timeout_uninterruptible(1);
1262         } while (m == EMPTY_QUEUE);
1263
1264         status = kzalloc(4, GFP_KERNEL|ADDR32);
1265         if(status == NULL) {
1266                 adpt_send_nop(pHba, m);
1267                 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1268                 return -ENOMEM;
1269         }
1270
1271         msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1272         msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1273         msg[2]=0;
1274         msg[3]=0;
1275         msg[4]=0;
1276         msg[5]=0;
1277         msg[6]=virt_to_bus(status);
1278         msg[7]=0;     
1279
1280         memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1281         wmb();
1282         writel(m, pHba->post_port);
1283         wmb();
1284
1285         while(*status == 0){
1286                 if(time_after(jiffies,timeout)){
1287                         printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1288                         kfree(status);
1289                         return -ETIMEDOUT;
1290                 }
1291                 rmb();
1292                 schedule_timeout_uninterruptible(1);
1293         }
1294
1295         if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1296                 PDEBUG("%s: Reset in progress...\n", pHba->name);
1297                 // Here we wait for a message frame to become available,
1298                 // indicating that the reset has finished
1299                 do {
1300                         rmb();
1301                         m = readl(pHba->post_port);
1302                         if (m != EMPTY_QUEUE) {
1303                                 break;
1304                         }
1305                         if(time_after(jiffies,timeout)){
1306                                 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1307                                 return -ETIMEDOUT;
1308                         }
1309                         schedule_timeout_uninterruptible(1);
1310                 } while (m == EMPTY_QUEUE);
1311                 // Flush the offset
1312                 adpt_send_nop(pHba, m);
1313         }
1314         adpt_i2o_status_get(pHba);
1315         if(*status == 0x02 ||
1316                         pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1317                 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1318                                 pHba->name);
1319         } else {
1320                 PDEBUG("%s: Reset completed.\n", pHba->name);
1321         }
1322
1323         kfree(status);
1324 #ifdef UARTDELAY
1325         // This delay is to allow someone attached to the card through the debug UART to 
1326         // set up the dump levels that they want before the rest of the initialization sequence
1327         adpt_delay(20000);
1328 #endif
1329         return 0;
1330 }
1331
1332
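/*
 * Walk the logical configuration table: note the highest channel/id/lun
 * of hidden devices, create i2o_device entries for the visible ones,
 * record the bus adapter ports, and build the per-channel adpt_device
 * tables used by the SCSI layer.
 */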
1333 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1334 {
1335         int i;
1336         int max;
1337         int tid;
1338         struct i2o_device *d;
1339         i2o_lct *lct = pHba->lct;
1340         u8 bus_no = 0;
1341         s16 scsi_id;
1342         s16 scsi_lun;
1343         u32 buf[10]; // larger than 7, or 8 ...
1344         struct adpt_device* pDev; 
1345         
1346         if (lct == NULL) {
1347                 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1348                 return -1;
1349         }
1350         
1351         max = lct->table_size;  
1352         max -= 3;
1353         max /= 9;
1354
1355         for(i=0;i<max;i++) {
1356                 if( lct->lct_entry[i].user_tid != 0xfff){
1357                         /*
1358                          * If we have hidden devices, we need to inform the upper layers about
1359                          * the possible maximum id reference to handle device access when
1360                          * an array is disassembled. This code has no other purpose but to
1361                          * allow us future access to devices that are currently hidden
1362                          * behind arrays, hotspares or have not been configured (JBOD mode).
1363                          */
1364                         if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1365                             lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1366                             lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1367                                 continue;
1368                         }
1369                         tid = lct->lct_entry[i].tid;
1370                         // I2O_DPT_DEVICE_INFO_GROUP_NO;
1371                         if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1372                                 continue;
1373                         }
1374                         bus_no = buf[0]>>16;
1375                         scsi_id = buf[1];
1376                         scsi_lun = (buf[2]>>8 )&0xff;
1377                         if(bus_no >= MAX_CHANNEL) {     // Something wrong skip it
1378                                 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1379                                 continue;
1380                         }
1381                         if (scsi_id >= MAX_ID){
1382                                 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1383                                 continue;
1384                         }
1385                         if(bus_no > pHba->top_scsi_channel){
1386                                 pHba->top_scsi_channel = bus_no;
1387                         }
1388                         if(scsi_id > pHba->top_scsi_id){
1389                                 pHba->top_scsi_id = scsi_id;
1390                         }
1391                         if(scsi_lun > pHba->top_scsi_lun){
1392                                 pHba->top_scsi_lun = scsi_lun;
1393                         }
1394                         continue;
1395                 }
1396                 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1397                 if(d==NULL)
1398                 {
1399                         printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1400                         return -ENOMEM;
1401                 }
1402                 
1403                 d->controller = pHba;
1404                 d->next = NULL;
1405
1406                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1407
1408                 d->flags = 0;
1409                 tid = d->lct_data.tid;
1410                 adpt_i2o_report_hba_unit(pHba, d);
1411                 adpt_i2o_install_device(pHba, d);
1412         }
1413         bus_no = 0;
1414         for(d = pHba->devices; d ; d = d->next) {
1415                 if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1416                    d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1417                         tid = d->lct_data.tid;
1418                         // TODO: get the bus_no from the hrt - but for now they are in order
1419                         //bus_no = 
1420                         if(bus_no > pHba->top_scsi_channel){
1421                                 pHba->top_scsi_channel = bus_no;
1422                         }
1423                         pHba->channel[bus_no].type = d->lct_data.class_id;
1424                         pHba->channel[bus_no].tid = tid;
1425                         if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1426                         {
1427                                 pHba->channel[bus_no].scsi_id = buf[1];
1428                                 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1429                         }
1430                         // TODO remove - this is just until we get from hrt
1431                         bus_no++;
1432                         if(bus_no >= MAX_CHANNEL) {     // Something wrong skip it
1433                                 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1434                                 break;
1435                         }
1436                 }
1437         }
1438
1439         // Setup adpt_device table
1440         for(d = pHba->devices; d ; d = d->next) {
1441                 if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1442                    d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1443                    d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1444
1445                         tid = d->lct_data.tid;
1446                         scsi_id = -1;
1447                         // I2O_DPT_DEVICE_INFO_GROUP_NO;
1448                         if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1449                                 bus_no = buf[0]>>16;
1450                                 scsi_id = buf[1];
1451                                 scsi_lun = (buf[2]>>8 )&0xff;
1452                                 if(bus_no >= MAX_CHANNEL) {     // Something wrong skip it
1453                                         continue;
1454                                 }
1455                                 if (scsi_id >= MAX_ID) {
1456                                         continue;
1457                                 }
1458                                 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1459                                         pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1460                                         if(pDev == NULL) {
1461                                                 return -ENOMEM;
1462                                         }
1463                                         pHba->channel[bus_no].device[scsi_id] = pDev;
1464                                 } else {
1465                                         for( pDev = pHba->channel[bus_no].device[scsi_id];      
1466                                                         pDev->next_lun; pDev = pDev->next_lun){
1467                                         }
1468                                         pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1469                                         if(pDev->next_lun == NULL) {
1470                                                 return -ENOMEM;
1471                                         }
1472                                         pDev = pDev->next_lun;
1473                                 }
1474                                 pDev->tid = tid;
1475                                 pDev->scsi_channel = bus_no;
1476                                 pDev->scsi_id = scsi_id;
1477                                 pDev->scsi_lun = scsi_lun;
1478                                 pDev->pI2o_dev = d;
1479                                 d->owner = pDev;
1480                                 pDev->type = (buf[0])&0xff;
1481                                 pDev->flags = (buf[0]>>8)&0xff;
1482                                 if(scsi_id > pHba->top_scsi_id){
1483                                         pHba->top_scsi_id = scsi_id;
1484                                 }
1485                                 if(scsi_lun > pHba->top_scsi_lun){
1486                                         pHba->top_scsi_lun = scsi_lun;
1487                                 }
1488                         }
1489                         if(scsi_id == -1){
1490                                 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1491                                                 d->lct_data.identity_tag);
1492                         }
1493                 }
1494         }
1495         return 0;
1496 }
1497
1498
1499 /*
1500  *      Each I2O controller has a chain of devices on it - these match
1501  *      the useful parts of the LCT of the board.
1502  */
1503  
1504 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1505 {
1506         mutex_lock(&adpt_configuration_lock);
1507         d->controller=pHba;
1508         d->owner=NULL;
1509         d->next=pHba->devices;
1510         d->prev=NULL;
1511         if (pHba->devices != NULL){
1512                 pHba->devices->prev=d;
1513         }
1514         pHba->devices=d;
1515         *d->dev_name = 0;
1516
1517         mutex_unlock(&adpt_configuration_lock);
1518         return 0;
1519 }
1520
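/*
 * Open the management character device.  The minor number selects the
 * adapter: walk hba_chain under adpt_configuration_lock and mark the
 * matching HBA as in use.  Returns -ENXIO if no adapter has that minor.
 */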
1521 static int adpt_open(struct inode *inode, struct file *file)
1522 {
1523         int minor;
1524         adpt_hba* pHba;
1525
1526         //TODO check for root access
1527         //
1528         minor = iminor(inode);
1529         if (minor >= hba_count) {
1530                 return -ENXIO;
1531         }
1532         mutex_lock(&adpt_configuration_lock);
1533         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1534                 if (pHba->unit == minor) {
1535                         break;  /* found adapter */
1536                 }
1537         }
1538         if (pHba == NULL) {
1539                 mutex_unlock(&adpt_configuration_lock);
1540                 return -ENXIO;
1541         }
1542
1543 //      if(pHba->in_use){
1544         //      mutex_unlock(&adpt_configuration_lock);
1545 //              return -EBUSY;
1546 //      }
1547
1548         pHba->in_use = 1;
1549         mutex_unlock(&adpt_configuration_lock);
1550
1551         return 0;
1552 }
1553
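/*
 * Release the management character device: look the adapter up by minor
 * number again and clear the in_use flag set by adpt_open().
 */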
1554 static int adpt_close(struct inode *inode, struct file *file)
1555 {
1556         int minor;
1557         adpt_hba* pHba;
1558
1559         minor = iminor(inode);
1560         if (minor >= hba_count) {
1561                 return -ENXIO;
1562         }
1563         mutex_lock(&adpt_configuration_lock);
1564         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1565                 if (pHba->unit == minor) {
1566                         break;  /* found adapter */
1567                 }
1568         }
1569         mutex_unlock(&adpt_configuration_lock);
1570         if (pHba == NULL) {
1571                 return -ENXIO;
1572         }
1573
1574         pHba->in_use = 0;
1575
1576         return 0;
1577 }
1578
1579
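/*
 * I2OUSRCMD pass-through.  Copies a user-built I2O message frame into the
 * kernel (the frame length is encoded in the upper 16 bits of the first
 * dword), bounces any simple-SGL data buffers through kernel memory, posts
 * the frame with adpt_i2o_post_wait() under the host lock, and finally
 * copies the SG data and the reply frame back to user space.  Only simple
 * SG elements are accepted and their count must fit in pHba->sg_tablesize.
 */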
1580 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1581 {
1582         u32 msg[MAX_MESSAGE_SIZE];
1583         u32* reply = NULL;
1584         u32 size = 0;
1585         u32 reply_size = 0;
1586         u32 __user *user_msg = arg;
1587         u32 __user * user_reply = NULL;
1588         void *sg_list[pHba->sg_tablesize];
1589         u32 sg_offset = 0;
1590         u32 sg_count = 0;
1591         int sg_index = 0;
1592         u32 i = 0;
1593         u32 rcode = 0;
1594         void *p = NULL;
1595         ulong flags = 0;
1596
1597         memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1598         // get user msg size in u32s 
1599         if(get_user(size, &user_msg[0])){
1600                 return -EFAULT;
1601         }
1602         size = size>>16;
1603
1604         user_reply = &user_msg[size];
1605         if(size > MAX_MESSAGE_SIZE){
1606                 return -EFAULT;
1607         }
1608         size *= 4; // Convert to bytes
1609
1610         /* Copy in the user's I2O command */
1611         if(copy_from_user(msg, user_msg, size)) {
1612                 return -EFAULT;
1613         }
1614         get_user(reply_size, &user_reply[0]);
1615         reply_size = reply_size>>16;
1616         if(reply_size > REPLY_FRAME_SIZE){
1617                 reply_size = REPLY_FRAME_SIZE;
1618         }
1619         reply_size *= 4;
1620         reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1621         if(reply == NULL) {
1622                 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1623                 return -ENOMEM;
1624         }
1625         sg_offset = (msg[0]>>4)&0xf;
1626         msg[2] = 0x40000000; // IOCTL context
1627         msg[3] = (u32)reply;
1628         memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1629         if(sg_offset) {
1630                 // TODO 64bit fix
1631                 struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1632                 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1633                 if (sg_count > pHba->sg_tablesize){
1634                         printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1635                         kfree (reply);
1636                         return -EINVAL;
1637                 }
1638
1639                 for(i = 0; i < sg_count; i++) {
1640                         int sg_size;
1641
1642                         if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1643                                 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1644                                 rcode = -EINVAL;
1645                                 goto cleanup;
1646                         }
1647                         sg_size = sg[i].flag_count & 0xffffff;      
1648                         /* Allocate memory for the transfer */
1649                         p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
1650                         if(!p) {
1651                                 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1652                                                 pHba->name,sg_size,i,sg_count);
1653                                 rcode = -ENOMEM;
1654                                 goto cleanup;
1655                         }
1656                         sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1657                         /* Copy in the user's SG buffer if necessary */
1658                         if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1659                                 // TODO 64bit fix
1660                                 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
1661                                         printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1662                                         rcode = -EFAULT;
1663                                         goto cleanup;
1664                                 }
1665                         }
1666                         //TODO 64bit fix
1667                         sg[i].addr_bus = (u32)virt_to_bus(p);
1668                 }
1669         }
1670
1671         do {
1672                 if(pHba->host)
1673                         spin_lock_irqsave(pHba->host->host_lock, flags);
1674                 // This state stops any new commands from entering the
1675                 // controller while processing the ioctl
1676 //              pHba->state |= DPTI_STATE_IOCTL;
1677 //              We can't set this now - The scsi subsystem sets host_blocked and
1678 //              the queue empties and stops.  We need a way to restart the queue
1679                 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1680                 if (rcode != 0)
1681                         printk("adpt_i2o_passthru: post wait failed %d %p\n",
1682                                         rcode, reply);
1683 //              pHba->state &= ~DPTI_STATE_IOCTL;
1684                 if(pHba->host)
1685                         spin_unlock_irqrestore(pHba->host->host_lock, flags);
1686         } while(rcode == -ETIMEDOUT);  
1687
1688         if(rcode){
1689                 goto cleanup;
1690         }
1691
1692         if(sg_offset) {
1693         /* Copy the scatter/gather buffers back to user space */
1694                 u32 j;
1695                 // TODO 64bit fix
1696                 struct sg_simple_element* sg;
1697                 int sg_size;
1698
1699                 // re-acquire the original message to correctly handle the sg copy operation
1700                 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
1701                 // get user msg size in u32s 
1702                 if(get_user(size, &user_msg[0])){
1703                         rcode = -EFAULT; 
1704                         goto cleanup; 
1705                 }
1706                 size = size>>16;
1707                 size *= 4;
1708                 /* Copy in the user's I2O command */
1709                 if (copy_from_user (msg, user_msg, size)) {
1710                         rcode = -EFAULT;
1711                         goto cleanup;
1712                 }
1713                 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1714
1715                 // TODO 64bit fix
1716                 sg       = (struct sg_simple_element*)(msg + sg_offset);
1717                 for (j = 0; j < sg_count; j++) {
1718                         /* Copy out the SG list to user's buffer if necessary */
1719                         if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1720                                 sg_size = sg[j].flag_count & 0xffffff; 
1721                                 // TODO 64bit fix
1722                                 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
1723                                         printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1724                                         rcode = -EFAULT;
1725                                         goto cleanup;
1726                                 }
1727                         }
1728                 }
1729         } 
1730
1731         /* Copy back the reply to user space */
1732         if (reply_size) {
1733                 // we wrote our own values for context - now restore the user supplied ones
1734                 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1735                         printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1736                         rcode = -EFAULT;
1737                 }
1738                 if(copy_to_user(user_reply, reply, reply_size)) {
1739                         printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1740                         rcode = -EFAULT;
1741                 }
1742         }
1743
1744
1745 cleanup:
1746         if (rcode != -ETIME && rcode != -EINTR)
1747                 kfree (reply);
1748         while(sg_index) {
1749                 if(sg_list[--sg_index]) {
1750                         if (rcode != -ETIME && rcode != -EINTR)
1751                                 kfree(sg_list[sg_index]);
1752                 }
1753         }
1754         return rcode;
1755 }
1756
1757
1758 /*
1759  * This routine returns information about the system.  This does not affect
1760  * any logic, and if the info is wrong it doesn't matter.
1761  */
1762
1763 /* Get all the info we cannot get from kernel services */
1764 static int adpt_system_info(void __user *buffer)
1765 {
1766         sysInfo_S si;
1767
1768         memset(&si, 0, sizeof(si));
1769
1770         si.osType = OS_LINUX;
1771         si.osMajorVersion = 0;
1772         si.osMinorVersion = 0;
1773         si.osRevision = 0;
1774         si.busType = SI_PCI_BUS;
1775         si.processorFamily = DPTI_sig.dsProcessorFamily;
1776
1777 #if defined __i386__ 
1778         adpt_i386_info(&si);
1779 #elif defined (__ia64__)
1780         adpt_ia64_info(&si);
1781 #elif defined(__sparc__)
1782         adpt_sparc_info(&si);
1783 #elif defined (__alpha__)
1784         adpt_alpha_info(&si);
1785 #else
1786         si.processorType = 0xff ;
1787 #endif
1788         if(copy_to_user(buffer, &si, sizeof(si))){
1789                 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1790                 return -EFAULT;
1791         }
1792
1793         return 0;
1794 }
1795
1796 #if defined __ia64__ 
1797 static void adpt_ia64_info(sysInfo_S* si)
1798 {
1799         // This is all the info we need for now
1800         // We will add more info as our new
1801         // management utility requires it
1802         si->processorType = PROC_IA64;
1803 }
1804 #endif
1805
1806
1807 #if defined __sparc__ 
1808 static void adpt_sparc_info(sysInfo_S* si)
1809 {
1810         // This is all the info we need for now
1811         // We will add more info as our new
1812         // management utility requires it
1813         si->processorType = PROC_ULTRASPARC;
1814 }
1815 #endif
1816
1817 #if defined __alpha__ 
1818 static void adpt_alpha_info(sysInfo_S* si)
1819 {
1820         // This is all the info we need for now
1821         // We will add more info as our new
1822         // management utility requires it
1823         si->processorType = PROC_ALPHA;
1824 }
1825 #endif
1826
1827 #if defined __i386__
1828
1829 static void adpt_i386_info(sysInfo_S* si)
1830 {
1831         // This is all the info we need for now
1832         // We will add more info as our new
1833         // management utility requires it
1834         switch (boot_cpu_data.x86) {
1835         case CPU_386:
1836                 si->processorType = PROC_386;
1837                 break;
1838         case CPU_486:
1839                 si->processorType = PROC_486;
1840                 break;
1841         case CPU_586:
1842                 si->processorType = PROC_PENTIUM;
1843                 break;
1844         default:  // Just in case 
1845                 si->processorType = PROC_PENTIUM;
1846                 break;
1847         }
1848 }
1849
1850 #endif
1851
1852
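/*
 * Management ioctl entry point.  Handles DPT_SIGNATURE, I2OUSRCMD
 * (pass-through), DPT_CTRLINFO, DPT_SYSINFO, DPT_BLINKLED, I2ORESETCMD and
 * I2ORESCANCMD, waiting for any pending reset to finish first.
 *
 * Illustrative user-space call (the device node name is an assumption and
 * may differ between installations):
 *
 *	int fd = open("/dev/dpti0", O_RDWR);	// hypothetical node name
 *	uint32_t led;
 *	ioctl(fd, DPT_BLINKLED, &led);
 */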
1853 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1854               ulong arg)
1855 {
1856         int minor;
1857         int error = 0;
1858         adpt_hba* pHba;
1859         ulong flags = 0;
1860         void __user *argp = (void __user *)arg;
1861
1862         minor = iminor(inode);
1863         if (minor >= DPTI_MAX_HBA){
1864                 return -ENXIO;
1865         }
1866         mutex_lock(&adpt_configuration_lock);
1867         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1868                 if (pHba->unit == minor) {
1869                         break;  /* found adapter */
1870                 }
1871         }
1872         mutex_unlock(&adpt_configuration_lock);
1873         if(pHba == NULL){
1874                 return -ENXIO;
1875         }
1876
1877         while((volatile u32) pHba->state & DPTI_STATE_RESET )
1878                 schedule_timeout_uninterruptible(2);
1879
1880         switch (cmd) {
1881         // TODO: handle 3 cases
1882         case DPT_SIGNATURE:
1883                 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1884                         return -EFAULT;
1885                 }
1886                 break;
1887         case I2OUSRCMD:
1888                 return adpt_i2o_passthru(pHba, argp);
1889
1890         case DPT_CTRLINFO:{
1891                 drvrHBAinfo_S HbaInfo;
1892
1893 #define FLG_OSD_PCI_VALID 0x0001
1894 #define FLG_OSD_DMA       0x0002
1895 #define FLG_OSD_I2O       0x0004
1896                 memset(&HbaInfo, 0, sizeof(HbaInfo));
1897                 HbaInfo.drvrHBAnum = pHba->unit;
1898                 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1899                 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1900                 HbaInfo.pciBusNum =  pHba->pDev->bus->number;
1901                 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
1902                 HbaInfo.Interrupt = pHba->pDev->irq; 
1903                 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1904                 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1905                         printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1906                         return -EFAULT;
1907                 }
1908                 break;
1909                 }
1910         case DPT_SYSINFO:
1911                 return adpt_system_info(argp);
1912         case DPT_BLINKLED:{
1913                 u32 value;
1914                 value = (u32)adpt_read_blink_led(pHba);
1915                 if (copy_to_user(argp, &value, sizeof(value))) {
1916                         return -EFAULT;
1917                 }
1918                 break;
1919                 }
1920         case I2ORESETCMD:
1921                 if(pHba->host)
1922                         spin_lock_irqsave(pHba->host->host_lock, flags);
1923                 adpt_hba_reset(pHba);
1924                 if(pHba->host)
1925                         spin_unlock_irqrestore(pHba->host->host_lock, flags);
1926                 break;
1927         case I2ORESCANCMD:
1928                 adpt_rescan(pHba);
1929                 break;
1930         default:
1931                 return -EINVAL;
1932         }
1933
1934         return error;
1935 }
1936
1937
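/*
 * Interrupt handler.  Drains the outbound (reply) FIFO while the interrupt
 * pending bit is set: failed frames have their original transaction context
 * restored and are returned to the IOP with a NOP, ioctl replies are copied
 * into the waiter's buffer, post-wait contexts complete via
 * adpt_i2o_post_wait_complete(), and ordinary SCSI replies are handed to
 * adpt_i2o_to_scsi().  Each consumed MFA is written back to the reply port.
 */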
1938 static irqreturn_t adpt_isr(int irq, void *dev_id)
1939 {
1940         struct scsi_cmnd* cmd;
1941         adpt_hba* pHba = dev_id;
1942         u32 m;
1943         void __iomem *reply;
1944         u32 status=0;
1945         u32 context;
1946         ulong flags = 0;
1947         int handled = 0;
1948
1949         if (pHba == NULL){
1950                 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
1951                 return IRQ_NONE;
1952         }
1953         if(pHba->host)
1954                 spin_lock_irqsave(pHba->host->host_lock, flags);
1955
1956         while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
1957                 m = readl(pHba->reply_port);
1958                 if(m == EMPTY_QUEUE){
1959                         // Try twice then give up
1960                         rmb();
1961                         m = readl(pHba->reply_port);
1962                         if(m == EMPTY_QUEUE){ 
1963                                 // This really should not happen
1964                                 printk(KERN_ERR"dpti: Could not get reply frame\n");
1965                                 goto out;
1966                         }
1967                 }
1968                 reply = bus_to_virt(m);
1969
1970                 if (readl(reply) & MSG_FAIL) {
1971                         u32 old_m = readl(reply+28); 
1972                         void __iomem *msg;
1973                         u32 old_context;
1974                         PDEBUG("%s: Failed message\n",pHba->name);
1975                         if(old_m >= 0x100000){
1976                                 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
1977                                 writel(m,pHba->reply_port);
1978                                 continue;
1979                         }
1980                         // Transaction context is 0 in failed reply frame
1981                         msg = pHba->msg_addr_virt + old_m;
1982                         old_context = readl(msg+12);
1983                         writel(old_context, reply+12);
1984                         adpt_send_nop(pHba, old_m);
1985                 } 
1986                 context = readl(reply+8);
1987                 if(context & 0x40000000){ // IOCTL
1988                         void *p = (void *)readl(reply+12);
1989                         if( p != NULL) {
1990                                 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
1991                         }
1992                         // All IOCTLs will also be post wait
1993                 }
1994                 if(context & 0x80000000){ // Post wait message
1995                         status = readl(reply+16);
1996                         if(status  >> 24){
1997                                 status &=  0xffff; /* Get detail status */
1998                         } else {
1999                                 status = I2O_POST_WAIT_OK;
2000                         }
2001                         if(!(context & 0x40000000)) {
2002                                 cmd = (struct scsi_cmnd*) readl(reply+12); 
2003                                 if(cmd != NULL) {
2004                                         printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2005                                 }
2006                         }
2007                         adpt_i2o_post_wait_complete(context, status);
2008                 } else { // SCSI message
2009                         cmd = (struct scsi_cmnd*) readl(reply+12); 
2010                         if(cmd != NULL){
2011                                 if(cmd->serial_number != 0) { // If not timed out
2012                                         adpt_i2o_to_scsi(reply, cmd);
2013                                 }
2014                         }
2015                 }
2016                 writel(m, pHba->reply_port);
2017                 wmb();
2018                 rmb();
2019         }
2020         handled = 1;
2021 out:    if(pHba->host)
2022                 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2023         return IRQ_RETVAL(handled);
2024 }
2025
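/*
 * Build a DPT-private I2O_CMD_SCSI_EXEC message for a SCSI command and post
 * it.  The SCB flags encode the data direction, the CDB is copied into a
 * fixed 16-byte slot, and the DMA-mapped scatterlist is appended as simple
 * SG elements, with the final element flagged as end-of-buffer/end-of-list.
 */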
2026 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2027 {
2028         int i;
2029         u32 msg[MAX_MESSAGE_SIZE];
2030         u32* mptr;
2031         u32 *lenptr;
2032         int direction;
2033         int scsidir;
2034         int nseg;
2035         u32 len;
2036         u32 reqlen;
2037         s32 rcode;
2038
2039         memset(msg, 0 , sizeof(msg));
2040         len = scsi_bufflen(cmd);
2041         direction = 0x00000000; 
2042         
2043         scsidir = 0x00000000;                   // DATA NO XFER
2044         if(len) {
2045                 /*
2046                  * Set SCBFlags to indicate if data is being transferred
2047                  * in or out, or no data transfer
2048                  * Note:  Do not have to verify index is less than 0 since
2049                  * cmd->cmnd[0] is an unsigned char
2050                  */
2051                 switch(cmd->sc_data_direction){
2052                 case DMA_FROM_DEVICE:
2053                         scsidir  =0x40000000;   // DATA IN  (iop<--dev)
2054                         break;
2055                 case DMA_TO_DEVICE:
2056                         direction=0x04000000;   // SGL OUT
2057                         scsidir  =0x80000000;   // DATA OUT (iop-->dev)
2058                         break;
2059                 case DMA_NONE:
2060                         break;
2061                 case DMA_BIDIRECTIONAL:
2062                         scsidir  =0x40000000;   // DATA IN  (iop<--dev)
2063                         // Assume In - and continue;
2064                         break;
2065                 default:
2066                         printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2067                              pHba->name, cmd->cmnd[0]);
2068                         cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2069                         cmd->scsi_done(cmd);
2070                         return  0;
2071                 }
2072         }
2073         // msg[0] is set later
2074         // I2O_CMD_SCSI_EXEC
2075         msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2076         msg[2] = 0;
2077         msg[3] = (u32)cmd;      /* We want the SCSI control block back */
2078         // Our cards use the transaction context as the tag for queueing
2079         // Adaptec/DPT Private stuff 
2080         msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2081         msg[5] = d->tid;
2082         /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2083         // I2O_SCB_FLAG_ENABLE_DISCONNECT | 
2084         // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
2085         // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2086         msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2087
2088         mptr=msg+7;
2089
2090         // Write SCSI command into the message - always 16 byte block 
2091         memset(mptr, 0,  16);
2092         memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2093         mptr+=4;
2094         lenptr=mptr++;          /* Remember me - fill in when we know */
2095         reqlen = 14;            // SINGLE SGE
2096         /* Now fill in the SGList and command */
2097
2098         nseg = scsi_dma_map(cmd);
2099         BUG_ON(nseg < 0);
2100         if (nseg) {
2101                 struct scatterlist *sg;
2102
2103                 len = 0;
2104                 scsi_for_each_sg(cmd, sg, nseg, i) {
2105                         *mptr++ = direction|0x10000000|sg_dma_len(sg);
2106                         len+=sg_dma_len(sg);
2107                         *mptr++ = sg_dma_address(sg);
2108                         /* Make this an end of list */
2109                         if (i == nseg - 1)
2110                                 mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
2111                 }
2112                 reqlen = mptr - msg;
2113                 *lenptr = len;
2114                 
2115                 if(cmd->underflow && len != cmd->underflow){
2116                         printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2117                                 len, cmd->underflow);
2118                 }
2119         } else {
2120                 *lenptr = len = 0;
2121                 reqlen = 12;
2122         }
2123         
2124         /* Stick the headers on */
2125         msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2126         
2127         // Send it on its way
2128         rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2129         if (rcode == 0) {
2130                 return 0;
2131         }
2132         return rcode;
2133 }
2134
2135
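/*
 * Allocate and initialize the Scsi_Host for this adapter.  There are no I/O
 * ports; the queueing limits (sg_tablesize, can_queue, max_channel) come
 * from the HBA, and the adpt_hba pointer is stashed in hostdata[0].
 */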
2136 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2137 {
2138         struct Scsi_Host *host;
2139
2140         host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2141         if (host == NULL) {
2142                 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2143                 return -1;
2144         }
2145         host->hostdata[0] = (unsigned long)pHba;
2146         pHba->host = host;
2147
2148         host->irq = pHba->pDev->irq;
2149         /* no IO ports, so don't have to set host->io_port and
2150          * host->n_io_port
2151          */
2152         host->io_port = 0;
2153         host->n_io_port = 0;
2154                                 /* see comments in scsi_host.h */
2155         host->max_id = 16;
2156         host->max_lun = 256;
2157         host->max_channel = pHba->top_scsi_channel + 1;
2158         host->cmd_per_lun = 1;
2159         host->unique_id = (uint) pHba;
2160         host->sg_tablesize = pHba->sg_tablesize;
2161         host->can_queue = pHba->post_fifo_size;
2162
2163         return 0;
2164 }
2165
2166
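/*
 * Translate an I2O reply frame into a SCSI midlayer result.  The detailed
 * status word supplies the device and HBA status, known detailed status
 * codes are mapped onto DID_* results, sense data is copied out of the
 * reply on CHECK CONDITION, and MSG_FAIL frames are reported as
 * DID_TIME_OUT so the command gets retried.
 */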
2167 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2168 {
2169         adpt_hba* pHba;
2170         u32 hba_status;
2171         u32 dev_status;
2172         u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits 
2173         // I know this would look cleaner if I just read bytes
2174         // but the model I have been using for all the rest of the
2175         // I/O is in 4 byte words - so I keep that model
2176         u16 detailed_status = readl(reply+16) &0xffff;
2177         dev_status = (detailed_status & 0xff);
2178         hba_status = detailed_status >> 8;
2179
2180         // calculate resid for sg 
2181         scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
2182
2183         pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2184
2185         cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2186
2187         if(!(reply_flags & MSG_FAIL)) {
2188                 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2189                 case I2O_SCSI_DSC_SUCCESS:
2190                         cmd->result = (DID_OK << 16);
2191                         // handle underflow
2192                         if(readl(reply+5) < cmd->underflow ) {
2193                                 cmd->result = (DID_ERROR <<16);
2194                                 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2195                         }
2196                         break;
2197                 case I2O_SCSI_DSC_REQUEST_ABORTED:
2198                         cmd->result = (DID_ABORT << 16);
2199                         break;
2200                 case I2O_SCSI_DSC_PATH_INVALID:
2201                 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2202                 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2203                 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2204                 case I2O_SCSI_DSC_NO_ADAPTER:
2205                 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2206                         printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2207                                 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2208                         cmd->result = (DID_TIME_OUT << 16);
2209                         break;
2210                 case I2O_SCSI_DSC_ADAPTER_BUSY:
2211                 case I2O_SCSI_DSC_BUS_BUSY:
2212                         cmd->result = (DID_BUS_BUSY << 16);
2213                         break;
2214                 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2215                 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2216                         cmd->result = (DID_RESET << 16);
2217                         break;
2218                 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2219                         printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2220                         cmd->result = (DID_PARITY << 16);
2221                         break;
2222                 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2223                 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2224                 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2225                 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2226                 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2227                 case I2O_SCSI_DSC_DATA_OVERRUN:
2228                 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2229                 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2230                 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2231                 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2232                 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2233                 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2234                 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2235                 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2236                 case I2O_SCSI_DSC_INVALID_CDB:
2237                 case I2O_SCSI_DSC_LUN_INVALID:
2238                 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2239                 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2240                 case I2O_SCSI_DSC_NO_NEXUS:
2241                 case I2O_SCSI_DSC_CDB_RECEIVED:
2242                 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2243                 case I2O_SCSI_DSC_QUEUE_FROZEN:
2244                 case I2O_SCSI_DSC_REQUEST_INVALID:
2245                 default:
2246                         printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2247                                 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2248                                hba_status, dev_status, cmd->cmnd[0]);
2249                         cmd->result = (DID_ERROR << 16);
2250                         break;
2251                 }
2252
2253                 // copy over the request sense data if it was a check
2254                 // condition status
2255                 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2256                         u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2257                         // Copy over the sense data
2258                         memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2259                         if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
2260                            cmd->sense_buffer[2] == DATA_PROTECT ){
2261                                 /* This is to handle an array failed */
2262                                 cmd->result = (DID_TIME_OUT << 16);
2263                                 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2264                                         pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, 
2265                                         hba_status, dev_status, cmd->cmnd[0]);
2266
2267                         }
2268                 }
2269         } else {
2270                 /* In this condition we could not talk to the tid -
2271                  * the card rejected it.  We should signal a retry
2272                  * for a limited number of retries.
2273                  */
2274                 cmd->result = (DID_TIME_OUT << 16);
2275                 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2276                         pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2277                         ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2278         }
2279
2280         cmd->result |= (dev_status);
2281
2282         if(cmd->scsi_done != NULL){
2283                 cmd->scsi_done(cmd);
2284         } 
2285         return cmd->result;
2286 }
2287
2288
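/*
 * Re-read the LCT and reconcile it with the existing device lists.  Runs
 * under the host lock so no new commands are queued while devices are
 * added, refreshed or marked offline.
 */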
2289 static s32 adpt_rescan(adpt_hba* pHba)
2290 {
2291         s32 rcode;
2292         ulong flags = 0;
2293
2294         if(pHba->host)
2295                 spin_lock_irqsave(pHba->host->host_lock, flags);
2296         if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2297                 goto out;
2298         if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2299                 goto out;
2300         rcode = 0;
2301 out:    if(pHba->host)
2302                 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2303         return rcode;
2304 }
2305
2306
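/*
 * Walk a freshly fetched LCT: devices we already know about are marked
 * online again (refreshing their TID if it changed), new devices get
 * i2o_device/adpt_device entries built for them, and devices that no
 * longer appear in the LCT are transitioned to DPTI_DEV_OFFLINE.
 */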
2307 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2308 {
2309         int i;
2310         int max;
2311         int tid;
2312         struct i2o_device *d;
2313         i2o_lct *lct = pHba->lct;
2314         u8 bus_no = 0;
2315         s16 scsi_id;
2316         s16 scsi_lun;
2317         u32 buf[10]; // at least 8 u32's
2318         struct adpt_device* pDev = NULL;
2319         struct i2o_device* pI2o_dev = NULL;
2320         
2321         if (lct == NULL) {
2322                 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2323                 return -1;
2324         }
2325         
2326         max = lct->table_size;  
2327         max -= 3;
2328         max /= 9;
2329
2330         // Mark each drive as unscanned
2331         for (d = pHba->devices; d; d = d->next) {
2332                 pDev =(struct adpt_device*) d->owner;
2333                 if(!pDev){
2334                         continue;
2335                 }
2336                 pDev->state |= DPTI_DEV_UNSCANNED;
2337         }
2338
2339         printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2340         
2341         for(i=0;i<max;i++) {
2342                 if( lct->lct_entry[i].user_tid != 0xfff){
2343                         continue;
2344                 }
2345
2346                 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2347                     lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2348                     lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2349                         tid = lct->lct_entry[i].tid;
2350                         if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2351                                 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2352                                 continue;
2353                         }
2354                         bus_no = buf[0]>>16;
2355                         scsi_id = buf[1];
2356                         scsi_lun = (buf[2]>>8 )&0xff;
2357                         pDev = pHba->channel[bus_no].device[scsi_id];
2358                         /* walk the chain to the matching LUN */
2359                         while(pDev) {
2360                                 if(pDev->scsi_lun == scsi_lun) {
2361                                         break;
2362                                 }
2363                                 pDev = pDev->next_lun;
2364                         }
2365                         if(!pDev ) { // Something new add it
2366                                 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2367                                 if(d==NULL)
2368                                 {
2369                                         printk(KERN_CRIT "Out of memory for I2O device data.\n");
2370                                         return -ENOMEM;
2371                                 }
2372                                 
2373                                 d->controller = pHba;
2374                                 d->next = NULL;
2375
2376                                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2377
2378                                 d->flags = 0;
2379                                 adpt_i2o_report_hba_unit(pHba, d);
2380                                 adpt_i2o_install_device(pHba, d);
2381         
2382                                 if(bus_no >= MAX_CHANNEL) {     // Something wrong skip it
2383                                         printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2384                                         continue;
2385                                 }
2386                                 pDev = pHba->channel[bus_no].device[scsi_id];   
2387                                 if( pDev == NULL){
2388                                         pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2389                                         if(pDev == NULL) {
2390                                                 return -ENOMEM;
2391                                         }
2392                                         pHba->channel[bus_no].device[scsi_id] = pDev;
2393                                 } else {
2394                                         while (pDev->next_lun) {
2395                                                 pDev = pDev->next_lun;
2396                                         }
2397                                         pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2398                                         if(pDev == NULL) {
2399                                                 return -ENOMEM;
2400                                         }
2401                                 }
2402                                 pDev->tid = d->lct_data.tid;
2403                                 pDev->scsi_channel = bus_no;
2404                                 pDev->scsi_id = scsi_id;
2405                                 pDev->scsi_lun = scsi_lun;
2406                                 pDev->pI2o_dev = d;
2407                                 d->owner = pDev;
2408                                 pDev->type = (buf[0])&0xff;
2409                                 pDev->flags = (buf[0]>>8)&0xff;
2410                                 // Too late, SCSI system has made up its mind, but what the hey ...
2411                                 if(scsi_id > pHba->top_scsi_id){
2412                                         pHba->top_scsi_id = scsi_id;
2413                                 }
2414                                 if(scsi_lun > pHba->top_scsi_lun){
2415                                         pHba->top_scsi_lun = scsi_lun;
2416                                 }
2417                                 continue;
2418                         } // end of new i2o device
2419
2420                         // We found an old device - check it
2421                         while(pDev) {
2422                                 if(pDev->scsi_lun == scsi_lun) {
2423                                         if(!scsi_device_online(pDev->pScsi_dev)) {
2424                                                 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2425                                                                 pHba->name,bus_no,scsi_id,scsi_lun);
2426                                                 if (pDev->pScsi_dev) {
2427                                                         scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2428                                                 }
2429                                         }
2430                                         d = pDev->pI2o_dev;
2431                                         if(d->lct_data.tid != tid) { // something changed
2432                                                 pDev->tid = tid;
2433                                                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2434                                                 if (pDev->pScsi_dev) {
2435                                                         pDev->pScsi_dev->changed = TRUE;
2436                                                         pDev->pScsi_dev->removable = TRUE;
2437                                                 }
2438                                         }
2439                                         // Found it - mark it scanned
2440                                         pDev->state = DPTI_DEV_ONLINE;
2441                                         break;
2442                                 }
2443                                 pDev = pDev->next_lun;
2444                         }
2445                 }
2446         }
2447         for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2448                 pDev =(struct adpt_device*) pI2o_dev->owner;
2449                 if(!pDev){
2450                         continue;
2451                 }
2452                 // Take offline any drives that previously existed but could not be found
2453                 // in the LCT table
2454                 if (pDev->state & DPTI_DEV_UNSCANNED){
2455                         pDev->state = DPTI_DEV_OFFLINE;
2456                         printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2457                         if (pDev->pScsi_dev) {
2458                                 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2459                         }
2460                 }
2461         }
2462         return 0;
2463 }
2464
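/*
 * Fail back every command still sitting on the per-device command lists
 * with DID_OK | QUEUE_FULL so the midlayer will reissue them.  Commands
 * whose serial number is already zero are skipped.
 */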
2465 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2466 {
2467         struct scsi_cmnd*       cmd = NULL;
2468         struct scsi_device*     d = NULL;
2469
2470         shost_for_each_device(d, pHba->host) {
2471                 unsigned long flags;
2472                 spin_lock_irqsave(&d->list_lock, flags);
2473                 list_for_each_entry(cmd, &d->cmd_list, list) {
2474                         if(cmd->serial_number == 0){
2475                                 continue;
2476                         }
2477                         cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2478                         cmd->scsi_done(cmd);
2479                 }
2480                 spin_unlock_irqrestore(&d->list_lock, flags);
2481         }
2482 }
2483
2484
2485 /*============================================================================
2486  *  Routines from i2o subsystem
2487  *============================================================================
2488  */
2489
2490
2491
2492 /*
2493  *      Bring an I2O controller into HOLD state. See the spec.
2494  */
2495 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2496 {
2497         int rcode;
2498
2499         if(pHba->initialized ) {
2500                 if (adpt_i2o_status_get(pHba) < 0) {
2501                         if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2502                                 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2503                                 return rcode;
2504                         }
2505                         if (adpt_i2o_status_get(pHba) < 0) {
2506                                 printk(KERN_INFO "HBA not responding.\n");
2507                                 return -1;
2508                         }
2509                 }
2510
2511                 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2512                         printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2513                         return -1;
2514                 }
2515
2516                 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2517                     pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2518                     pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2519                     pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2520                         adpt_i2o_reset_hba(pHba);                       
2521                         if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2522                                 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2523                                 return -1;
2524                         }
2525                 }
2526         } else {
2527                 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2528                         printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2529                         return rcode;
2530                 }
2531
2532         }
2533
2534         if (adpt_i2o_init_outbound_q(pHba) < 0) {
2535                 return -1;
2536         }
2537
2538         /* In HOLD state */
2539         
2540         if (adpt_i2o_hrt_get(pHba) < 0) {
2541                 return -1;
2542         }
2543
2544         return 0;
2545 }
2546
2547 /*
2548  *      Bring a controller online into OPERATIONAL state. 
2549  */
2550  
2551 static int adpt_i2o_online_hba(adpt_hba* pHba)
2552 {
2553         if (adpt_i2o_systab_send(pHba) < 0) {
2554                 adpt_i2o_delete_hba(pHba);
2555                 return -1;
2556         }
2557         /* In READY state */
2558
2559         if (adpt_i2o_enable_hba(pHba) < 0) {
2560                 adpt_i2o_delete_hba(pHba);
2561                 return -1;
2562         }
2563
2564         /* In OPERATIONAL state  */
2565         return 0;
2566 }
2567
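/*
 * Return a message frame to the IOP by filling it with a three-word
 * I2O_CMD_UTIL_NOP and posting it.  If the caller passes EMPTY_QUEUE we
 * first wait (up to five seconds) for a free inbound frame.
 */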
2568 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2569 {
2570         u32 __iomem *msg;
2571         ulong timeout = jiffies + 5*HZ;
2572
2573         while(m == EMPTY_QUEUE){
2574                 rmb();
2575                 m = readl(pHba->post_port);
2576                 if(m != EMPTY_QUEUE){
2577                         break;
2578                 }
2579                 if(time_after(jiffies,timeout)){
2580                         printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2581                         return 2;
2582                 }
2583                 schedule_timeout_uninterruptible(1);
2584         }
2585         msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2586         writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2587         writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2588         writel( 0,&msg[2]);
2589         wmb();
2590
2591         writel(m, pHba->post_port);
2592         wmb();
2593         return 0;
2594 }
2595
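/*
 * Initialize the outbound (reply) FIFO.  Sends I2O_CMD_OUTBOUND_INIT with
 * the host page frame size and reply frame size, polls the status byte
 * until the IOP reports completion, then allocates the reply pool and
 * primes the reply port with one MFA per outbound frame.
 */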
2596 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2597 {
2598         u8 *status;
2599         u32 __iomem *msg = NULL;
2600         int i;
2601         ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2602         u32* ptr;
2603         u32 outbound_frame;  // This had to be a 32 bit address
2604         u32 m;
2605
2606         do {
2607                 rmb();
2608                 m = readl(pHba->post_port);
2609                 if (m != EMPTY_QUEUE) {
2610                         break;
2611                 }
2612
2613                 if(time_after(jiffies,timeout)){
2614                         printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2615                         return -ETIMEDOUT;
2616                 }
2617                 schedule_timeout_uninterruptible(1);
2618         } while(m == EMPTY_QUEUE);
2619
2620         msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2621
2622         status = kzalloc(4, GFP_KERNEL|ADDR32);
2623         if (!status) {
2624                 adpt_send_nop(pHba, m);
2625                 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2626                         pHba->name);
2627                 return -ENOMEM;
2628         }
2629
2630         writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2631         writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2632         writel(0, &msg[2]);
2633         writel(0x0106, &msg[3]);        /* Transaction context */
2634         writel(4096, &msg[4]);          /* Host page frame size */
2635         writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);   /* Outbound msg frame size and Initcode */
2636         writel(0xD0000004, &msg[6]);            /* Simple SG LE, EOB */
2637         writel(virt_to_bus(status), &msg[7]);
2638
2639         writel(m, pHba->post_port);
2640         wmb();
2641
2642         // Wait for the reply status to come back
2643         do {
2644                 if (*status) {
2645                         if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2646                                 break;
2647                         }
2648                 }
2649                 rmb();
2650                 if(time_after(jiffies,timeout)){
2651                         printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2652                         return -ETIMEDOUT;
2653                 }
2654                 schedule_timeout_uninterruptible(1);
2655         } while (1);
2656
2657         // If the command was successful, fill the fifo with our reply
2658         // message packets
2659         if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2660                 kfree(status);
2661                 return -2;
2662         }
2663         kfree(status);
2664
2665         kfree(pHba->reply_pool);
2666
2667         pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2668         if (!pHba->reply_pool) {
2669                 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2670                 return -ENOMEM;
2671         }
2672
2673         ptr = pHba->reply_pool;
2674         for(i = 0; i < pHba->reply_fifo_size; i++) {
2675                 outbound_frame = (u32)virt_to_bus(ptr);
2676                 writel(outbound_frame, pHba->reply_port);
2677                 wmb();
2678                 ptr +=  REPLY_FRAME_SIZE;
2679         }
2680         adpt_i2o_status_get(pHba);
2681         return 0;
2682 }
2683
2684
2685 /*
2686  * I2O System Table.  Contains information about
2687  * all the IOPs in the system.  Used to inform IOPs
2688  * about each other's existence.
2689  *
2690  * sys_tbl_ver is the CurrentChangeIndicator that is
2691  * used by IOPs to track changes.
2692  */
2693
2694
2695
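/*
 * Issue I2O_CMD_STATUS_GET and poll the last byte of the 88-byte status
 * block until the IOP has filled it in, then derive the inbound/outbound
 * FIFO depths and the scatter/gather table size from the result.
 */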
2696 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2697 {
2698         ulong timeout;
2699         u32 m;
2700         u32 __iomem *msg;
2701         u8 *status_block=NULL;
2702         ulong status_block_bus;
2703
2704         if(pHba->status_block == NULL) {
2705                 pHba->status_block = (i2o_status_block*)
2706                         kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
2707                 if(pHba->status_block == NULL) {
2708                         printk(KERN_ERR
2709                         "dpti%d: Get Status Block failed; Out of memory. \n", 
2710                         pHba->unit);
2711                         return -ENOMEM;
2712                 }
2713         }
2714         memset(pHba->status_block, 0, sizeof(i2o_status_block));
2715         status_block = (u8*)(pHba->status_block);
2716         status_block_bus = virt_to_bus(pHba->status_block);
2717         timeout = jiffies+TMOUT_GETSTATUS*HZ;
2718         do {
2719                 rmb();
2720                 m = readl(pHba->post_port);
2721                 if (m != EMPTY_QUEUE) {
2722                         break;
2723                 }
2724                 if(time_after(jiffies,timeout)){
2725                         printk(KERN_ERR "%s: Timeout waiting for message !\n",
2726                                         pHba->name);
2727                         return -ETIMEDOUT;
2728                 }
2729                 schedule_timeout_uninterruptible(1);
2730         } while(m==EMPTY_QUEUE);
2731
2732         
2733         msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2734
2735         writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2736         writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2737         writel(1, &msg[2]);
2738         writel(0, &msg[3]);
2739         writel(0, &msg[4]);
2740         writel(0, &msg[5]);
2741         writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
2742         writel(0, &msg[7]);
2743         writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2744
2745         //post message
2746         writel(m, pHba->post_port);
2747         wmb();
2748
2749         while(status_block[87]!=0xff){
2750                 if(time_after(jiffies,timeout)){
2751                         printk(KERN_ERR"dpti%d: Get status timeout.\n",
2752                                 pHba->unit);
2753                         return -ETIMEDOUT;
2754                 }
2755                 rmb();
2756                 schedule_timeout_uninterruptible(1);
2757         }
2758
2759         // Set up our number of outbound and inbound messages
2760         pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2761         if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2762                 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2763         }
2764
2765         pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2766         if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2767                 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2768         }
2769
2770         // Calculate the Scatter Gather list size
2771         pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
2772         if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2773                 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2774         }
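        /*
         * Worked example (illustrative only, assuming an 8-byte
         * struct sg_simple_element): a 32-word (128-byte) inbound frame
         * leaves 128 - 40 = 88 bytes after the message header, i.e. room
         * for 11 simple scatter/gather elements per command.
         */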
2775
2776
2777 #ifdef DEBUG
2778         printk("dpti%d: State = ",pHba->unit);
2779         switch(pHba->status_block->iop_state) {
2780                 case 0x01:
2781                         printk("INIT\n");
2782                         break;
2783                 case 0x02:
2784                         printk("RESET\n");
2785                         break;
2786                 case 0x04:
2787                         printk("HOLD\n");
2788                         break;
2789                 case 0x05:
2790                         printk("READY\n");
2791                         break;
2792                 case 0x08:
2793                         printk("OPERATIONAL\n");
2794                         break;
2795                 case 0x10:
2796                         printk("FAILED\n");
2797                         break;
2798                 case 0x11:
2799                         printk("FAULTED\n");
2800                         break;
2801                 default:
2802                         printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2803         }
2804 #endif
2805         return 0;
2806 }
2807
2808 /*
2809  * Get the IOP's Logical Configuration Table
2810  */
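/*
 * The status block's expected_lct_size is only an initial estimate: if the
 * table_size the IOP returns (in 32-bit words, hence the << 2 below) is
 * larger than the buffer we posted, the buffer is reallocated and the
 * LCT_NOTIFY request is reissued until the whole table fits.
 */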
2811 static int adpt_i2o_lct_get(adpt_hba* pHba)
2812 {
2813         u32 msg[8];
2814         int ret;
2815         u32 buf[16];
2816
2817         if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2818                 pHba->lct_size = pHba->status_block->expected_lct_size;
2819         }
2820         do {
2821                 if (pHba->lct == NULL) {
2822                         pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2823                         if(pHba->lct == NULL) {
2824                                 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2825                                         pHba->name);
2826                                 return -ENOMEM;
2827                         }
2828                 }
2829                 memset(pHba->lct, 0, pHba->lct_size);
2830
2831                 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2832                 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2833                 msg[2] = 0;
2834                 msg[3] = 0;
2835                 msg[4] = 0xFFFFFFFF;    /* All devices */
2836                 msg[5] = 0x00000000;    /* Report now */
2837                 msg[6] = 0xD0000000|pHba->lct_size;
2838                 msg[7] = virt_to_bus(pHba->lct);
2839
2840                 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2841                         printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
2842                                 pHba->name, ret);
2843                         printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2844                         return ret;
2845                 }
2846
2847                 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2848                         pHba->lct_size = pHba->lct->table_size << 2;
2849                         kfree(pHba->lct);
2850                         pHba->lct = NULL;
2851                 }
2852         } while (pHba->lct == NULL);
2853
2854         PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2855
2856
2857         // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2858         if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2859                 pHba->FwDebugBufferSize = buf[1];
2860                 pHba->FwDebugBuffer_P    = pHba->base_addr_virt + buf[0];
2861                 pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2862                 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2863                 pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
2864                 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2865                 pHba->FwDebugBuffer_P += buf[2]; 
2866                 pHba->FwDebugFlags = 0;
2867         }
2868
2869         return 0;
2870 }
2871
2872 static int adpt_i2o_build_sys_table(void)
2873 {
2874         adpt_hba* pHba = NULL;
2875         int count = 0;
2876
2877         sys_tbl_len = sizeof(struct i2o_sys_tbl) +      // Header + IOPs
2878                                 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2879
2880         kfree(sys_tbl);
2881
2882         sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2883         if (!sys_tbl) {
2884                 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");     
2885                 return -ENOMEM;
2886         }
2887
2888         sys_tbl->num_entries = hba_count;
2889         sys_tbl->version = I2OVERSION;
2890         sys_tbl->change_ind = sys_tbl_ind++;
2891
2892         for(pHba = hba_chain; pHba; pHba = pHba->next) {
2893                 // Get updated Status Block so we have the latest information
2894                 if (adpt_i2o_status_get(pHba)) {
2895                         sys_tbl->num_entries--;
2896                         continue; // try next one       
2897                 }
2898
2899                 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2900                 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2901                 sys_tbl->iops[count].seg_num = 0;
2902                 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2903                 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2904                 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2905                 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2906                 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2907                 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
2908                 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2909                 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
2910
2911                 count++;
2912         }
2913
2914 #ifdef DEBUG
2915 {
2916         u32 *table = (u32*)sys_tbl;
2917         printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2918         for(count = 0; count < (sys_tbl_len >>2); count++) {
2919                 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
2920                         count, table[count]);
2921         }
2922 }
2923 #endif
2924
2925         return 0;
2926 }
2927
2928
2929 /*
2930  *       Dump the information block associated with a given unit (TID)
2931  */
2932  
2933 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2934 {
2935         char buf[64];
2936         int unit = d->lct_data.tid;
2937
2938         printk(KERN_INFO "TID %3.3d ", unit);
2939
2940         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
2941         {
2942                 buf[16]=0;
2943                 printk(" Vendor: %-12.12s", buf);
2944         }
2945         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
2946         {
2947                 buf[16]=0;
2948                 printk(" Device: %-12.12s", buf);
2949         }
2950         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
2951         {
2952                 buf[8]=0;
2953                 printk(" Rev: %-12.12s\n", buf);
2954         }
2955 #ifdef DEBUG
2956          printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
2957          printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
2958          printk(KERN_INFO "\tFlags: ");
2959
2960          if(d->lct_data.device_flags&(1<<0))
2961                   printk("C");       // ConfigDialog requested
2962          if(d->lct_data.device_flags&(1<<1))
2963                   printk("U");       // Multi-user capable
2964          if(!(d->lct_data.device_flags&(1<<4)))
2965                   printk("P");       // Peer service enabled!
2966          if(!(d->lct_data.device_flags&(1<<5)))
2967                   printk("M");       // Mgmt service enabled!
2968          printk("\n");
2969 #endif
2970 }
2971
2972 #ifdef DEBUG
2973 /*
2974  *      Do i2o class name lookup
2975  */
2976 static const char *adpt_i2o_get_class_name(int class)
2977 {
2978         int idx = 16;
2979         static char *i2o_class_name[] = {
2980                 "Executive",
2981                 "Device Driver Module",
2982                 "Block Device",
2983                 "Tape Device",
2984                 "LAN Interface",
2985                 "WAN Interface",
2986                 "Fibre Channel Port",
2987                 "Fibre Channel Device",
2988                 "SCSI Device",
2989                 "ATE Port",
2990                 "ATE Device",
2991                 "Floppy Controller",
2992                 "Floppy Device",
2993                 "Secondary Bus Port",
2994                 "Peer Transport Agent",
2995                 "Peer Transport",
2996                 "Unknown"
2997         };
2998         
2999         switch(class&0xFFF) {
3000         case I2O_CLASS_EXECUTIVE:
3001                 idx = 0; break;
3002         case I2O_CLASS_DDM:
3003                 idx = 1; break;
3004         case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3005                 idx = 2; break;
3006         case I2O_CLASS_SEQUENTIAL_STORAGE:
3007                 idx = 3; break;
3008         case I2O_CLASS_LAN:
3009                 idx = 4; break;
3010         case I2O_CLASS_WAN:
3011                 idx = 5; break;
3012         case I2O_CLASS_FIBRE_CHANNEL_PORT:
3013                 idx = 6; break;
3014         case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3015                 idx = 7; break;
3016         case I2O_CLASS_SCSI_PERIPHERAL:
3017                 idx = 8; break;
3018         case I2O_CLASS_ATE_PORT:
3019                 idx = 9; break;
3020         case I2O_CLASS_ATE_PERIPHERAL:
3021                 idx = 10; break;
3022         case I2O_CLASS_FLOPPY_CONTROLLER:
3023                 idx = 11; break;
3024         case I2O_CLASS_FLOPPY_DEVICE:
3025                 idx = 12; break;
3026         case I2O_CLASS_BUS_ADAPTER_PORT:
3027                 idx = 13; break;
3028         case I2O_CLASS_PEER_TRANSPORT_AGENT:
3029                 idx = 14; break;
3030         case I2O_CLASS_PEER_TRANSPORT:
3031                 idx = 15; break;
3032         }
3033         return i2o_class_name[idx];
3034 }
3035 #endif
3036
3037
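/*
 * Get the IOP's Hardware Resource Table.  As with the LCT above, the
 * buffer is grown and the request reissued if the IOP reports more
 * entries than the posted buffer can hold.
 */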
3038 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3039 {
3040         u32 msg[6];
3041         int ret, size = sizeof(i2o_hrt);
3042
3043         do {
3044                 if (pHba->hrt == NULL) {
3045                         pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3046                         if (pHba->hrt == NULL) {
3047                                 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3048                                 return -ENOMEM;
3049                         }
3050                 }
3051
3052                 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3053                 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3054                 msg[2]= 0;
3055                 msg[3]= 0;
3056                 msg[4]= (0xD0000000 | size);    /* Simple transaction */
3057                 msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */
3058
3059                 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3060                         printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3061                         return ret;
3062                 }
3063
3064                 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3065                         size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3066                         kfree(pHba->hrt);
3067                         pHba->hrt = NULL;
3068                 }
3069         } while(pHba->hrt == NULL);
3070         return 0;
3071 }
3072
3073 /*
3074  *       Query one scalar group value or a whole scalar group.
3075  */                     
3076 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 
3077                         int group, int field, void *buf, int buflen)
3078 {
3079         u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3080         u8 *resblk;
3081
3082         int size;
3083
3084         /* 8 bytes for header */
3085         resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3086         if (resblk == NULL) {
3087                 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3088                 return -ENOMEM;
3089         }
3090
3091         if (field == -1)                /* whole group */
3092                         opblk[4] = -1;
3093
3094         size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 
3095                 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3096         if (size == -ETIME) {
3097                 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
                kfree(resblk);  /* don't leak the result block on the error path */
3098                 return -ETIME;
3099         } else if (size == -EINTR) {
3100                 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
                kfree(resblk);
3101                 return -EINTR;
3102         }
3103                         
3104         memcpy(buf, resblk+8, buflen);  /* cut off header */
3105
3106         kfree(resblk);
3107         if (size < 0)
3108                 return size;    
3109
3110         return buflen;
3111 }
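/*
 * Usage sketch (illustrative only; mirrors the calls made from
 * adpt_i2o_report_hba_unit() above, "vendor" being a hypothetical local):
 *
 *      char vendor[17];
 *
 *      if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, vendor, 16) >= 0) {
 *              vendor[16] = '\0';      // returned data is not terminated
 *              printk(KERN_INFO "Vendor: %s\n", vendor);
 *      }
 *
 * Passing field == -1 requests the whole scalar group instead of a single
 * field.
 */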
3112
3113
3114 /*      Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3115  *
3116  *      This function can be used for all UtilParamsGet/Set operations.
3117  *      The OperationBlock is given in opblk-buffer, 
3118  *      and results are returned in resblk-buffer.
3119  *      Note that the minimum sized resblk is 8 bytes and contains
3120  *      ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3121  */
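/*
 * Result-header decode sketch (illustrative; matches the BlockStatus check
 * in the function body).  With a hypothetical res[1] of 0x00020005:
 *
 *      ErrorInfoSize = res[1] >> 24;           // 0x00
 *      BlockStatus   = (res[1] >> 16) & 0xFF;  // 0x02, i.e. not SUCCESS
 *      BlockSize     = res[1] & 0xFFFF;        // 0x0005, in 32-bit words
 *
 * and on success the function returns 4 + (BlockSize << 2), the number of
 * bytes used in resblk.
 */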
3122 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
3123                   void *opblk, int oplen, void *resblk, int reslen)
3124 {
3125         u32 msg[9]; 
3126         u32 *res = (u32 *)resblk;
3127         int wait_status;
3128
3129         msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3130         msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
3131         msg[2] = 0;
3132         msg[3] = 0;
3133         msg[4] = 0;
3134         msg[5] = 0x54000000 | oplen;    /* OperationBlock */
3135         msg[6] = virt_to_bus(opblk);
3136         msg[7] = 0xD0000000 | reslen;   /* ResultBlock */
3137         msg[8] = virt_to_bus(resblk);
3138
3139         if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3140                 printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
3141                 return wait_status;     /* -DetailedStatus */
3142         }
3143
3144         if (res[1]&0x00FF0000) {        /* BlockStatus != SUCCESS */
3145                 printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3146                         "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3147                         pHba->name,
3148                         (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3149                                                          : "PARAMS_GET",   
3150                         res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3151                 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3152         }
3153
3154          return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
3155 }
3156
3157
3158 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3159 {
3160         u32 msg[4];
3161         int ret;
3162
3163         adpt_i2o_status_get(pHba);
3164
3165         /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3166
3167         if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3168            (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3169                 return 0;
3170         }
3171
3172         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3173         msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3174         msg[2] = 0;
3175         msg[3] = 0;
3176
3177         if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3178                 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3179                                 pHba->unit, -ret);
3180         } else {
3181                 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3182         }
3183
3184         adpt_i2o_status_get(pHba);
3185         return ret;
3186 }
3187
3188
3189 /* 
3190  * Enable IOP. Allows the IOP to resume external operations.
3191  */
3192 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3193 {
3194         u32 msg[4];
3195         int ret;
3196         
3197         adpt_i2o_status_get(pHba);
3198         if(!pHba->status_block){
3199                 return -ENOMEM;
3200         }
3201         /* Enable only allowed on READY state */
3202         if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3203                 return 0;
3204
3205         if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3206                 return -EINVAL;
3207
3208         msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3209         msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3210         msg[2]= 0;
3211         msg[3]= 0;
3212
3213         if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3214                 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", 
3215                         pHba->name, ret);
3216         } else {
3217                 PDEBUG("%s: Enabled.\n", pHba->name);
3218         }
3219
3220         adpt_i2o_status_get(pHba);
3221         return ret;
3222 }
3223
3224
3225 static int adpt_i2o_systab_send(adpt_hba* pHba)
3226 {
3227          u32 msg[12];
3228          int ret;
3229
3230         msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3231         msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3232         msg[2] = 0;
3233         msg[3] = 0;
3234         msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3235         msg[5] = 0;                                /* Segment 0 */
3236
3237         /* 
3238          * Provide three SGL-elements:
3239          * System table (SysTab), Private memory space declaration and 
3240          * Private i/o space declaration  
3241          */
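        /*
         * Informal note: elsewhere in this driver 0x54000000|len is used for
         * simple SGL elements the IOP reads from host memory (op blocks, the
         * system table), while 0xD0000000|len marks buffers the IOP writes
         * back (LCT, HRT, result blocks) and the 0x80 bit in the top byte
         * appears to flag the final element of the list.  This is a reading
         * of the code, not a restatement of the I2O spec.
         */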
3242         msg[6] = 0x54000000 | sys_tbl_len;
3243         msg[7] = virt_to_phys(sys_tbl);
3244         msg[8] = 0x54000000 | 0;
3245         msg[9] = 0;
3246         msg[10] = 0xD4000000 | 0;
3247         msg[11] = 0;
3248
3249         if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3250                 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", 
3251                         pHba->name, ret);
3252         }
3253 #ifdef DEBUG
3254         else {
3255                 PINFO("%s: SysTab set.\n", pHba->name);
3256         }
3257 #endif
3258
3259         return ret;     
3260  }
3261
3262
3263 /*============================================================================
3264  *
3265  *============================================================================
3266  */
3267
3268
3269 #ifdef UARTDELAY 
3270
3271 static void adpt_delay(int millisec)
3272 {
3273         int i;
3274         for (i = 0; i < millisec; i++) {
3275                 udelay(1000);   /* delay for one millisecond */
3276         }
3277 }
3278
3279 #endif
3280
3281 static struct scsi_host_template driver_template = {
3282         .module                 = THIS_MODULE,
3283         .name                   = "dpt_i2o",
3284         .proc_name              = "dpt_i2o",
3285         .proc_info              = adpt_proc_info,
3286         .info                   = adpt_info,
3287         .queuecommand           = adpt_queue,
3288         .eh_abort_handler       = adpt_abort,
3289         .eh_device_reset_handler = adpt_device_reset,
3290         .eh_bus_reset_handler   = adpt_bus_reset,
3291         .eh_host_reset_handler  = adpt_reset,
3292         .bios_param             = adpt_bios_param,
3293         .slave_configure        = adpt_slave_configure,
3294         .can_queue              = MAX_TO_IOP_MESSAGES,
3295         .this_id                = 7,
3296         .cmd_per_lun            = 1,
3297         .use_clustering         = ENABLE_CLUSTERING,
3298 };
3299
3300 static int __init adpt_init(void)
3301 {
3302         int             error;
3303         adpt_hba        *pHba, *next;
3304
3305         printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3306
3307         error = adpt_detect(&driver_template);
3308         if (error < 0)
3309                 return error;
3310         if (hba_chain == NULL)
3311                 return -ENODEV;
3312
3313         for (pHba = hba_chain; pHba; pHba = pHba->next) {
3314                 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3315                 if (error)
3316                         goto fail;
3317                 scsi_scan_host(pHba->host);
3318         }
3319         return 0;
3320 fail:
3321         for (pHba = hba_chain; pHba; pHba = next) {
3322                 next = pHba->next;
3323                 scsi_remove_host(pHba->host);
3324         }
3325         return error;
3326 }
3327
3328 static void __exit adpt_exit(void)
3329 {
3330         adpt_hba        *pHba, *next;
3331
3332         for (pHba = hba_chain; pHba; pHba = pHba->next)
3333                 scsi_remove_host(pHba->host);
3334         for (pHba = hba_chain; pHba; pHba = next) {
3335                 next = pHba->next;
3336                 adpt_release(pHba->host);
3337         }
3338 }
3339
3340 module_init(adpt_init);
3341 module_exit(adpt_exit);
3342
3343 MODULE_LICENSE("GPL");