2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
49 #include <scsi/scsi_ioctl.h>
50 #include <linux/cdrom.h>
52 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
53 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
54 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
56 /* Embedded module documentation macros - see modules.h */
57 MODULE_AUTHOR("Hewlett-Packard Company");
58 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
59 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
60 " SA6i P600 P800 P400 P400i E200 E200i E500");
61 MODULE_VERSION("3.6.14");
62 MODULE_LICENSE("GPL");
64 #include "cciss_cmd.h"
66 #include <linux/cciss_ioctl.h>
68 /* define the PCI info for the cards we can control */
/* PCI IDs this driver binds to: legacy Compaq CISS boards, HP Smart
 * Array variants, and a final catch-all matching any HP device whose
 * PCI class code is PCI_CLASS_STORAGE_RAID.
 * NOTE(review): the zeroed terminator entry and closing "};" of this
 * table are elided in this excerpt. */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	/* catch-all: any HP RAID-class board not listed above */
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
96 /* board_id = Subsystem Device ID & Vendor ID
97 * product = Marketing Name for the board
98 * access = Address of the struct of function pointers
99 * nr_cmds = Number of commands supported by controller
/* Marketing name, register-access method table and supported command
 * count for each known board_id (see comment above for field layout).
 * The final 0xFFFF103C entry is the fallback used for the catch-all
 * PCI match. NOTE(review): closing "};" is elided in this excerpt. */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
	{0x3225103C, "Smart Array P600", &SA5_access, 512},
	{0x3223103C, "Smart Array P800", &SA5_access, 512},
	{0x3234103C, "Smart Array P400", &SA5_access, 512},
	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
	/* E200-family boards support fewer commands (120) than the rest */
	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
	{0x3212103C, "Smart Array E200", &SA5_access, 120},
	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
	{0x3237103C, "Smart Array E500", &SA5_access, 512},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
124 /* How long to wait (in milliseconds) for board to go into simple mode */
125 #define MAX_CONFIG_WAIT 30000
126 #define MAX_IOCTL_CONFIG_WAIT 1000
128 /*define how many times we will try a command because of bus resets */
129 #define MAX_CMD_RETRIES 3
131 #define READ_AHEAD 1024
134 /* Originally cciss driver only supports 8 major numbers */
135 #define MAX_CTLR_ORIG 8
137 static ctlr_info_t *hba[MAX_CTLR];
139 static void do_cciss_request(request_queue_t *q);
140 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
141 static int cciss_open(struct inode *inode, struct file *filep);
142 static int cciss_release(struct inode *inode, struct file *filep);
143 static int cciss_ioctl(struct inode *inode, struct file *filep,
144 unsigned int cmd, unsigned long arg);
145 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
147 static int cciss_revalidate(struct gendisk *disk);
148 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
149 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
152 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
153 sector_t *total_size, unsigned int *block_size);
154 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
155 sector_t *total_size, unsigned int *block_size);
156 static void cciss_geometry_inquiry(int ctlr, int logvol,
157 int withirq, sector_t total_size,
158 unsigned int block_size, InquiryData_struct *inq_buff,
159 drive_info_struct *drv);
160 static void cciss_getgeometry(int cntl_num);
161 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
163 static void start_io(ctlr_info_t *h);
164 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
165 unsigned int use_unit_num, unsigned int log_unit,
166 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
167 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
168 unsigned int use_unit_num, unsigned int log_unit,
169 __u8 page_code, int cmd_type);
171 static void fail_all_cmds(unsigned long ctlr);
173 #ifdef CONFIG_PROC_FS
174 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
175 int length, int *eof, void *data);
176 static void cciss_procinit(int i);
178 static void cciss_procinit(int i)
181 #endif /* CONFIG_PROC_FS */
184 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
/* Block-device entry points for cciss logical drives.
 * NOTE(review): the .open initializer and closing "};" are elided in
 * this excerpt. */
static struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.release = cciss_release,
	.ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
	.compat_ioctl = cciss_compat_ioctl,
	.revalidate_disk = cciss_revalidate,
200 * Enqueuing and dequeuing functions for cmdlists.
/* Insert command block 'c' at the tail of the circular doubly-linked
 * command list headed by *Qptr. Caller is expected to hold the
 * controller lock (lists are shared with the interrupt path).
 * NOTE(review): the empty-list branch and remaining link updates are
 * elided in this excerpt. */
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
	c->next = c->prev = c;	/* self-linked: first element case */
	c->prev = (*Qptr)->prev;	/* splice in before the head (tail insert) */
	(*Qptr)->prev->next = c;
/* Unlink command block 'c' from the circular list *Qptr and return it.
 * The visible branch handles the multi-element case by splicing c's
 * neighbours together; the singleton/head bookkeeping is elided in
 * this excerpt. */
static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
					  CommandList_struct *c)
	if (c && c->next != c) {	/* list has more than one element */
		c->prev->next = c->next;
		c->next->prev = c->prev;
229 #include "cciss_scsi.c" /* For SCSI tape support */
231 #define RAID_UNKNOWN 6
233 #ifdef CONFIG_PROC_FS
236 * Report information about this controller.
238 #define ENG_GIG 1000000000
239 #define ENG_GIG_FACTOR (ENG_GIG/512)
240 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
244 static struct proc_dir_entry *proc_cciss;
/* /proc read handler: formats controller status followed by one line
 * per logical volume (size in GB and RAID level). Sets and clears
 * h->busy_configuring around the report so volume (de)configuration
 * cannot race with it. */
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data)
	ctlr_info_t *h = (ctlr_info_t *) data;
	drive_info_struct *drv;
	sector_t vol_sz, vol_sz_frac;
	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		/* someone else is reconfiguring; bail out early */
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	/* controller-level summary */
	size = sprintf(buffer, "%s: HP %s Controller\n"
		       "Board ID: 0x%08lx\n"
		       "Firmware Version: %c%c%c%c\n"
		       "Logical drives: %d\n"
		       "Current Q depth: %d\n"
		       "Current # commands on controller: %d\n"
		       "Max Q depth since init: %d\n"
		       "Max # commands on controller since init: %d\n"
		       "Max SG entries since init: %d\n\n",
		       (unsigned long)h->board_id,
		       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		       h->cciss_max_sectors,
		       h->Qdepth, h->commands_outstanding,
		       h->maxQsinceinit, h->max_outstanding, h->maxSG);
	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
	/* one line per logical volume */
	for (i = 0; i <= h->highest_lun; i++) {
		/* whole GB and fractional part via 512-byte-sector math */
		vol_sz = drv->nr_blocks;
		vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
		sector_div(vol_sz_frac, ENG_GIG_FACTOR);
		if (drv->raid_level > 5)
			drv->raid_level = RAID_UNKNOWN;	/* clamp for raid_label[] */
		size = sprintf(buffer + len, "cciss/c%dd%d:"
			       "\t%4u.%02uGB\tRAID %s\n",
			       ctlr, i, (int)vol_sz, (int)vol_sz_frac,
			       raid_label[drv->raid_level]);
	*start = buffer + offset;
	h->busy_configuring = 0;	/* report done; allow reconfiguration again */
/* /proc write handler. When SCSI tape support is configured, the
 * string "engage scsi" (optionally newline-terminated) activates the
 * SCSI side of the controller; anything else is rejected. */
cciss_proc_write(struct file *file, const char __user *buffer,
		 unsigned long count, void *data)
	unsigned char cmd[80];
#ifdef CONFIG_CISS_SCSI_TAPE
	ctlr_info_t *h = (ctlr_info_t *) data;
	if (count > sizeof(cmd) - 1)	/* leave room for the NUL */
	if (copy_from_user(cmd, buffer, count))
	len = strlen(cmd); // above 3 lines ensure safety
	if (len && cmd[len - 1] == '\n')	/* strip trailing newline */
# ifdef CONFIG_CISS_SCSI_TAPE
	if (strcmp("engage scsi", cmd) == 0) {
		rc = cciss_engage_scsi(h->ctlr);
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */
357 * Get us a file in /proc/cciss that says something about each controller.
358 * Create /proc/cciss if it doesn't exist yet.
/* Create /proc/driver/cciss (once) and a read/write proc entry named
 * after controller i's devname, wired to the get_info/write handlers
 * above. */
static void __devinit cciss_procinit(int i)
	struct proc_dir_entry *pde;
	if (proc_cciss == NULL) {
		/* first controller: create the shared directory */
		proc_cciss = proc_mkdir("cciss", proc_root_driver);
	pde = create_proc_read_entry(hba[i]->devname,
				     S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
				     proc_cciss, cciss_proc_get_info, hba[i]);
	pde->write_proc = cciss_proc_write;
375 #endif /* CONFIG_PROC_FS */
378 * For operations that cannot sleep, a command block is allocated at init,
379 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
380 * which ones are free or in use. For operations that can wait for kmalloc
381 * to possible sleep, this routine can be called with get_from_pool set to 0.
382 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
/* Allocate a zeroed command block plus its error-info buffer and fill
 * in the DMA descriptors. get_from_pool != 0 takes a slot from the
 * preallocated controller pool (bitmap-tracked, usable in atomic
 * context); get_from_pool == 0 allocates fresh DMA-coherent memory.
 * Returns NULL on failure; pair with cmd_free() using the same flag. */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
	CommandList_struct *c;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	if (!get_from_pool) {
		/* standalone DMA-coherent allocation (may sleep) */
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		memset(c, 0, sizeof(CommandList_struct));
		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
		if (c->err_info == NULL) {
			/* undo the command-block allocation on failure */
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controllers pool */
		/* claim a free slot; retry if another CPU races us to it */
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
		memset(c, 0, sizeof(CommandList_struct));
		/* derive DMA addresses from the pool base + slot offset */
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
	/* record bus addresses: busaddr doubles as the command tag */
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
446 * Frees a command block that was previously allocated with cmd_alloc().
/* Release a command block from cmd_alloc(). got_from_pool must match
 * the flag used at allocation: 0 frees the DMA-coherent memory, non-zero
 * just clears the slot's bit in the pool bitmap. */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
	if (!got_from_pool) {
		/* reassemble the 64-bit error-info bus address and free both
		 * coherent allocations */
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
		/* pool case: just mark slot i free again */
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
/* Controller that owns this gendisk (stashed in queue->queuedata). */
static inline ctlr_info_t *get_host(struct gendisk *disk)
	return disk->queue->queuedata;
/* Per-drive info for this gendisk (stashed in disk->private_data). */
static inline drive_info_struct *get_drv(struct gendisk *disk)
	return disk->private_data;
479 * Open. Make sure the device is really there.
/* Block-device open. Rejects opens while the controller or drive is
 * being (re)configured; for unconfigured volumes (heads == 0) only
 * specific raw-node opens by a CAP_SYS_ADMIN caller are allowed, per
 * the policy comment below. */
static int cciss_open(struct inode *inode, struct file *filep)
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */
	if (host->busy_initializing || drv->busy_configuring)
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work. Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information. I don't think I really like this
	 * but I'm already using way to many device nodes to claim another one
	 * for "raw controller".
	if (drv->heads == 0) {	/* volume has no geometry => not configured */
		if (iminor(inode) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (iminor(inode) & 0x0f) {
				/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
		if (!capable(CAP_SYS_ADMIN))
/* Block-device release (close). Looks up the owning controller and
 * drive for the disk being closed; usage-count bookkeeping is elided
 * in this excerpt. */
static int cciss_release(struct inode *inode, struct file *filep)
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
	printk(KERN_DEBUG "cciss_release %s\n",
	       inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */
/* Helper for the compat path: re-enter the native ioctl handler using
 * the inode behind the file. */
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
	ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
547 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
549 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
/* 32-bit compat ioctl entry point. Commands whose layout is identical
 * in 32- and 64-bit ABIs are forwarded straight to the native handler;
 * the two passthru commands need structure translation and get their
 * own thunks. */
static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
	/* layout-compatible commands: forward unchanged */
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(f, cmd, arg);

	/* pointer-bearing structures: translate 32->64 first */
	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(f, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(f, cmd, arg);
/* Translate a 32-bit CCISS_PASSTHRU request into the native 64-bit
 * layout in compat_alloc_user_space(), run the native ioctl, then copy
 * the error info back into the caller's 32-bit structure. */
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	/* copy each member across; only 'buf' needs pointer widening */
	copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
		       sizeof(arg64.LUN_info));
	copy_from_user(&arg64.Request, &arg32->Request,
		       sizeof(arg64.Request));
	copy_from_user(&arg64.error_info, &arg32->error_info,
		       sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);	/* widen the 32-bit user pointer */
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
	/* propagate controller error info back to the 32-bit caller */
	copy_in_user(&arg32->error_info, &p->error_info,
		     sizeof(arg32->error_info));
/* Same translation as cciss_ioctl32_passthru but for the "big"
 * passthru variant, which carries an extra malloc_size field. */
static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	/* copy each member across; only 'buf' needs pointer widening */
	copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
		       sizeof(arg64.LUN_info));
	copy_from_user(&arg64.Request, &arg32->Request,
		       sizeof(arg64.Request));
	copy_from_user(&arg64.error_info, &arg32->error_info,
		       sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);	/* widen the 32-bit user pointer */
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
	/* propagate controller error info back to the 32-bit caller */
	copy_in_user(&arg32->error_info, &p->error_info,
		     sizeof(arg32->error_info));
/* HDIO_GETGEO: report the fake CHS geometry stored in the drive info. */
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	drive_info_struct *drv = get_drv(bdev->bd_disk);
	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
/* Main ioctl dispatcher for cciss device nodes: controller queries,
 * config-table updates (interrupt coalescing, node name), LUN table
 * maintenance, raw command passthru (single-buffer and scatter-gather
 * "big" variants), plus forwarding of generic SG/SCSI ioctls. */
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg)
	struct block_device *bdev = inode->i_bdev;
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *host = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int ctlr = host->ctlr;
	void __user *argp = (void __user *)arg;
	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
#endif				/* CCISS_DEBUG */
	/* report PCI location and board id of the controller */
	case CCISS_GETPCIINFO:
			cciss_pci_info_struct pciinfo;
			pciinfo.domain = pci_domain_nr(host->pdev->bus);
			pciinfo.bus = host->pdev->bus->number;
			pciinfo.dev_fn = host->pdev->devfn;
			pciinfo.board_id = host->board_id;
			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
	/* read current interrupt-coalescing settings from the cfgtable */
	case CCISS_GETINTINFO:
			cciss_coalint_struct intinfo;
			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
			    readl(&host->cfgtable->HostWrite.CoalIntCount);
			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
	/* write new coalescing settings and wait for the board to ack */
	case CCISS_SETINTINFO:
			cciss_coalint_struct intinfo;
			if (!capable(CAP_SYS_ADMIN))
			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
			if ((intinfo.delay == 0) && (intinfo.count == 0))
//                      printk("cciss_ioctl: delay and count cannot be 0\n");
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			/* Update the field, and then ring the doorbell */
			writel(intinfo.delay,
			       &(host->cfgtable->HostWrite.CoalIntDelay));
			writel(intinfo.count,
			       &(host->cfgtable->HostWrite.CoalIntCount));
			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
			/* poll until the board clears the change-request bit */
			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				/* delay and try again */
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)	/* board never acked */
	/* read the 16-byte server name from the cfgtable */
	case CCISS_GETNODENAME:
			NodeName_type NodeName;
			for (i = 0; i < 16; i++)
				    readb(&host->cfgtable->ServerName[i]);
			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
	/* write a new server name and wait for the board to ack */
	case CCISS_SETNODENAME:
			NodeName_type NodeName;
			if (!capable(CAP_SYS_ADMIN))
			    (NodeName, argp, sizeof(NodeName_type)))
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			/* Update the field, and then ring the doorbell */
			for (i = 0; i < 16; i++)
					&host->cfgtable->ServerName[i]);
			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				/* delay and try again */
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)	/* board never acked */
	case CCISS_GETHEARTBEAT:
			Heartbeat_type heartbeat;
			heartbeat = readl(&host->cfgtable->HeartBeat);
			    (argp, &heartbeat, sizeof(Heartbeat_type)))
	case CCISS_GETBUSTYPES:
			BusTypes_type BusTypes;
			BusTypes = readl(&host->cfgtable->BusTypes);
			    (argp, &BusTypes, sizeof(BusTypes_type)))
	case CCISS_GETFIRMVER:
			FirmwareVer_type firmware;
			memcpy(firmware, host->firm_ver, 4);
			    (argp, firmware, sizeof(FirmwareVer_type)))
	case CCISS_GETDRIVVER:
			DriverVer_type DriverVer = DRIVER_VERSION;
			    (argp, &DriverVer, sizeof(DriverVer_type)))
	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(host, NULL);
	case CCISS_GETLUNINFO:{
			LogvolInfo_struct luninfo;
			luninfo.LunID = drv->LunID;
			luninfo.num_opens = drv->usage_count;
			luninfo.num_parts = 0;
			if (copy_to_user(argp, &luninfo,
					 sizeof(LogvolInfo_struct)))
	case CCISS_DEREGDISK:
		return rebuild_lun_table(host, disk);
		return rebuild_lun_table(host, NULL);
	/* single-buffer raw passthru: one SG entry, bounce buffer in
	 * kernel memory, command queued and waited on synchronously */
			IOCTL_Command_struct iocommand;
			CommandList_struct *c;
			DECLARE_COMPLETION_ONSTACK(wait);
			if (!capable(CAP_SYS_RAWIO))
			    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
			if ((iocommand.buf_size < 1) &&
			    (iocommand.Request.Type.Direction != XFER_NONE)) {
#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
			/* Check kmalloc limits */
			if (iocommand.buf_size > 128000)
			if (iocommand.buf_size > 0) {
				buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
			if (iocommand.Request.Type.Direction == XFER_WRITE) {
				/* Copy the data into the buffer we created */
				    (buff, iocommand.buf, iocommand.buf_size)) {
				memset(buff, 0, iocommand.buf_size);
			if ((c = cmd_alloc(host, 0)) == NULL) {
			// Fill in the command type
			c->cmd_type = CMD_IOCTL_PEND;
			// Fill in Command Header
			c->Header.ReplyQueue = 0;	// unused in simple mode
			if (iocommand.buf_size > 0)	// buffer to fill
				c->Header.SGList = 1;
				c->Header.SGTotal = 1;
			} else		// no buffers to fill
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			c->Header.LUN = iocommand.LUN_info;
			c->Header.Tag.lower = c->busaddr;	// use the kernel address the cmd block for tag
			// Fill in Request block
			c->Request = iocommand.Request;
			// Fill in the scatter gather information
			if (iocommand.buf_size > 0) {
				temp64.val = pci_map_single(host->pdev, buff,
					PCI_DMA_BIDIRECTIONAL);
				c->SG[0].Addr.lower = temp64.val32.lower;
				c->SG[0].Addr.upper = temp64.val32.upper;
				c->SG[0].Len = iocommand.buf_size;
				c->SG[0].Ext = 0;	// we are not chaining
			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			wait_for_completion(&wait);
			/* unlock the buffers from DMA */
			temp64.val32.lower = c->SG[0].Addr.lower;
			temp64.val32.upper = c->SG[0].Addr.upper;
			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
					 PCI_DMA_BIDIRECTIONAL);
			/* Copy the error information out */
			iocommand.error_info = *(c->err_info);
			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
				cmd_free(host, c, 0);
			if (iocommand.Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				    (iocommand.buf, buff, iocommand.buf_size)) {
					cmd_free(host, c, 0);
			cmd_free(host, c, 0);
	/* scatter-gather raw passthru: user buffer split across up to
	 * MAXSGENTRIES kernel chunks of at most malloc_size each */
	case CCISS_BIG_PASSTHRU:{
			BIG_IOCTL_Command_struct *ioc;
			CommandList_struct *c;
			unsigned char **buff = NULL;
			int *buff_size = NULL;
			unsigned long flags;
			DECLARE_COMPLETION_ONSTACK(wait);
			BYTE __user *data_ptr;
			if (!capable(CAP_SYS_RAWIO))
			ioc = (BIG_IOCTL_Command_struct *)
			    kmalloc(sizeof(*ioc), GFP_KERNEL);
			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
			if ((ioc->buf_size < 1) &&
			    (ioc->Request.Type.Direction != XFER_NONE)) {
			/* Check kmalloc limits  using all SGs */
			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
			/* chunk the user buffer into kernel SG buffers */
			left = ioc->buf_size;
			data_ptr = ioc->buf;
				     ioc->malloc_size) ? ioc->
				buff_size[sg_used] = sz;
				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
				if (buff[sg_used] == NULL) {
				if (ioc->Request.Type.Direction == XFER_WRITE) {
					    (buff[sg_used], data_ptr, sz)) {
					memset(buff[sg_used], 0, sz);
			if ((c = cmd_alloc(host, 0)) == NULL) {
			c->cmd_type = CMD_IOCTL_PEND;
			c->Header.ReplyQueue = 0;
			if (ioc->buf_size > 0) {
				c->Header.SGList = sg_used;
				c->Header.SGTotal = sg_used;
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			c->Header.LUN = ioc->LUN_info;
			c->Header.Tag.lower = c->busaddr;
			c->Request = ioc->Request;
			if (ioc->buf_size > 0) {
				/* DMA-map each chunk into its SG descriptor */
				for (i = 0; i < sg_used; i++) {
					    pci_map_single(host->pdev, buff[i],
						PCI_DMA_BIDIRECTIONAL);
					c->SG[i].Addr.lower =
					c->SG[i].Addr.upper =
					c->SG[i].Len = buff_size[i];
					c->SG[i].Ext = 0;	/* we are not chaining */
			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			wait_for_completion(&wait);
			/* unlock the buffers from DMA */
			for (i = 0; i < sg_used; i++) {
				temp64.val32.lower = c->SG[i].Addr.lower;
				temp64.val32.upper = c->SG[i].Addr.upper;
				pci_unmap_single(host->pdev,
					(dma_addr_t) temp64.val, buff_size[i],
					PCI_DMA_BIDIRECTIONAL);
			/* Copy the error information out */
			ioc->error_info = *(c->err_info);
			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
				cmd_free(host, c, 0);
			if (ioc->Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				BYTE __user *ptr = ioc->buf;
				for (i = 0; i < sg_used; i++) {
					    (ptr, buff[i], buff_size[i])) {
						cmd_free(host, c, 0);
					ptr += buff_size[i];
			cmd_free(host, c, 0);
			for (i = 0; i < sg_used; i++)
	/* scsi_cmd_ioctl handles these, below, though some are not */
	/* very meaningful for cciss.  SG_IO is the main one people want. */
	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_ioctl(filep, disk, cmd, argp);
	/* scsi_cmd_ioctl would normally handle these, below, but */
	/* they aren't a good fit for cciss, as CD-ROMs are */
	/* not supported, and we don't have any bus/target/lun */
	/* which we present to the kernel. */
	case CDROM_SEND_PACKET:
	case CDROMCLOSETRAY:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
/* Walk a chain of bios and complete each one: success completes the
 * full byte count, failure completes with -EIO. The loop construct
 * around these statements is elided in this excerpt. */
static inline void complete_buffers(struct bio *bio, int status)
	struct bio *xbh = bio->bi_next;	/* save next before unlinking */
	int nr_sectors = bio_sectors(bio);
	bio->bi_next = NULL;
	bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
/* Restart per-drive request queues now that command slots may have
 * freed up. Round-robins starting from h->next_to_run so every queue
 * eventually gets first crack; stops early if the command pool fills
 * again. Called with the controller lock held (see callers). */
static void cciss_check_queues(ctlr_info_t *h)
	int start_queue = h->next_to_run;
	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
		blk_start_queue(h->gendisk[curr_queue]->queue);
		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				    (start_queue + 1) % (h->highest_lun + 1);
			h->next_to_run = curr_queue;	/* resume here next time */
		curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
/* Softirq completion handler for block requests: unmaps the SG DMA
 * mappings, completes the bios, updates disk stats, ends the request,
 * returns the command block to the pool, and kicks stalled queues. */
static void cciss_softirq_done(struct request *rq)
	CommandList_struct *cmd = rq->completion_data;
	ctlr_info_t *h = hba[cmd->ctlr];
	unsigned long flags;
	/* DMA direction mirrors the command's transfer direction */
	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
		ddir = PCI_DMA_TODEVICE;
	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < cmd->Header.SGList; i++) {
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
	complete_buffers(rq->bio, (rq->errors == 0));
	if (blk_fs_request(rq)) {	/* account filesystem I/O only */
		const int rw = rq_data_dir(rq);
		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
	printk("Done with %p\n", rq);
#endif				/* CCISS_DEBUG */
	add_disk_randomness(rq->rq_disk);
	spin_lock_irqsave(&h->lock, flags);
	end_that_request_last(rq, (rq->errors == 0));
	cmd_free(h, cmd, 1);	/* came from the pool (see do_cciss_request) */
	cciss_check_queues(h);	/* a slot just freed; restart stalled queues */
	spin_unlock_irqrestore(&h->lock, flags);
1284 /* This function will check the usage_count of the drive to be updated/added.
1285 * If the usage_count is zero then the drive information will be updated and
1286 * the disk will be re-registered with the kernel. If not then it will be
1287 * left alone for the next reboot. The exception to this is disk 0 which
1288 * will always be left registered with the kernel since it is also the
1289 * controller node. Any changes to disk 0 will show up on the next
/* (Re)build driver state for one logical drive: deregister any stale
 * entry, query capacity/geometry from the controller (switching to
 * 16-byte CDBs for >2TB volumes), then set up the gendisk and its
 * request queue. See the policy comment above the function. */
static void cciss_update_drive_info(int ctlr, int drv_index)
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	/* if the disk already exists then deregister it before proceeding */
	if (h->drv[drv_index].raid_level != -1) {
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index].busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		ret = deregister_disk(h->gendisk[drv_index],
				      &h->drv[drv_index], 0);
		h->drv[drv_index].busy_configuring = 0;
	/* If the disk is in use return */
	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL)
	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h->ctlr, drv_index, 1,
				       &total_size, &block_size);
		cciss_read_capacity(ctlr, drv_index, 1,
				    &total_size, &block_size);
		/* if read_capacity returns all F's this volume is >2TB in size */
		/* so we switch to 16-byte CDB's for all read/write ops */
		if (total_size == 0xFFFFFFFFULL) {
			cciss_read_capacity_16(ctlr, drv_index, 1,
					       &total_size, &block_size);
			h->cciss_read = CCISS_READ_16;
			h->cciss_write = CCISS_WRITE_16;
			h->cciss_read = CCISS_READ_10;
			h->cciss_write = CCISS_WRITE_10;
	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
			       inq_buff, &h->drv[drv_index]);
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index].nr_blocks);
	/* if it's the controller it's already added */
	disk->queue = blk_init_queue(do_cciss_request, &h->lock);
	sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
	disk->major = h->major;
	disk->first_minor = drv_index << NWD_SHIFT;
	disk->fops = &cciss_fops;
	disk->private_data = &h->drv[drv_index];
	/* Set up queue information */
	disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
	blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
	/* This is a limit in the driver and could be eliminated. */
	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
	disk->queue->queuedata = hba[ctlr];
	blk_queue_hardsect_size(disk->queue,
				hba[ctlr]->drv[drv_index].block_size);
	h->drv[drv_index].queue = disk->queue;
	/* error path: allocation failure */
	printk(KERN_ERR "cciss: out of memory\n");
1390 /* This function will find the first index of the controllers drive array
1391 * that has a -1 for the raid_level and will return that index. This is
1392 * where new drives will be added. If the index to be returned is greater
1393 * than the highest_lun index for the controller then highest_lun is set
1394 * to this new index. If there are no available indexes then -1 is returned.
/*
 * Return the first free slot (raid_level == -1) in the controller's drive
 * array, bumping highest_lun if the chosen index extends past it.
 * Returns -1 when no slot is free (per the header comment above).
 */
1396 static int cciss_find_free_drive_index(int ctlr)
1400 for (i = 0; i < CISS_MAX_LUN; i++) {
1401 if (hba[ctlr]->drv[i].raid_level == -1) {
1402 if (i > hba[ctlr]->highest_lun)
1403 hba[ctlr]->highest_lun = i;
1410 /* This function will add and remove logical drives from the Logical
1411 * drive array of the controller and maintain persistency of ordering
1412 * so that mount points are preserved until the next reboot. This allows
1413 * for the removal of logical drives in the middle of the drive array
1414 * without a re-ordering of those drives.
1416 * h = The controller to perform the operations on
1417 * del_disk = The disk to remove if specified. If the value given
1418 * is NULL then no disk is removed.
/*
 * Resync the driver's logical-drive array with the controller's
 * REPORT LOGICAL LUNS list, preserving existing slot positions so mount
 * points survive hot add/remove.  If del_disk is non-NULL this is a pure
 * removal and returns after deregistering that disk.
 */
1420 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1424 ReportLunData_struct *ld_buff = NULL;
1425 drive_info_struct *drv = NULL;
1432 unsigned long flags;
1434 /* Set busy_configuring flag for this operation */
1435 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1436 if (h->busy_configuring) {
/* Another reconfiguration is in flight; bail out rather than race. */
1437 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1440 h->busy_configuring = 1;
1442 /* if del_disk is NULL then we are being called to add a new disk
1443 * and update the logical drive table. If it is not NULL then
1444 * we will check if the disk is in use or not.
1446 if (del_disk != NULL) {
1447 drv = get_drv(del_disk);
1448 drv->busy_configuring = 1;
1449 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1450 return_code = deregister_disk(del_disk, drv, 1);
1451 drv->busy_configuring = 0;
1452 h->busy_configuring = 0;
1455 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1456 if (!capable(CAP_SYS_RAWIO))
1459 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1460 if (ld_buff == NULL)
/* Ask the firmware for the current logical-volume list. */
1463 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1464 sizeof(ReportLunData_struct), 0,
1467 if (return_code == IO_OK) {
1469 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1470 } else { /* reading number of logical volumes failed */
1471 printk(KERN_WARNING "cciss: report logical volume"
1472 " command failed\n");
1477 num_luns = listlength / 8; /* 8 bytes per entry */
1478 if (num_luns > CISS_MAX_LUN) {
1479 num_luns = CISS_MAX_LUN;
1480 printk(KERN_WARNING "cciss: more luns configured"
1481 " on controller than can be handled by"
1485 /* Compare controller drive array to drivers drive array.
1486 * Check for updates in the drive information and any new drives
1487 * on the controller.
1489 for (i = 0; i < num_luns; i++) {
/* Assemble the 32-bit LUN id from the 4 little-endian bytes the
 * controller reports. */
1495 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1497 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1499 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1500 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1502 /* Find if the LUN is already in the drive array
1503 * of the controller. If so then update its info
1504 * if not in use. If it does not exist then find
1505 * the first free index and add it.
1507 for (j = 0; j <= h->highest_lun; j++) {
1508 if (h->drv[j].LunID == lunid) {
1514 /* check if the drive was found already in the array */
1516 drv_index = cciss_find_free_drive_index(ctlr);
1517 if (drv_index == -1)
1520 /*Check if the gendisk needs to be allocated */
1521 if (!h->gendisk[drv_index]){
1522 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1523 if (!h->gendisk[drv_index]){
1524 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1529 h->drv[drv_index].LunID = lunid;
1530 cciss_update_drive_info(ctlr, drv_index);
1536 h->busy_configuring = 0;
1537 /* We return -1 here to tell the ACU that we have registered/updated
1538 * all of the drives that we can and to keep it from calling us
1543 printk(KERN_ERR "cciss: out of memory\n");
1547 /* This function will deregister the disk and it's queue from the
1548 * kernel. It must be called with the controller lock held and the
1549 * drv structures busy_configuring flag set. It's parameters are:
1551 * disk = This is the disk to be deregistered
1552 * drv = This is the drive_info_struct associated with the disk to be
1553 * deregistered. It contains information about the disk used
1555 * clear_all = This flag determines whether or not the disk information
1556 * is going to be completely cleared out and the highest_lun
1557 * reset. Sometimes we want to clear out information about
1558 * the disk in preparation for re-adding it. In this case
1559 * the highest_lun should be left unchanged and the LunID
1560 * should not be cleared.
/*
 * Remove a logical drive's gendisk and queue from the kernel.  Disk 0
 * (the controller node) is never deregistered, only zeroed.  See the
 * header comment above for the clear_all semantics.  Caller holds the
 * controller lock and has set drv->busy_configuring.
 */
1562 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1566 ctlr_info_t *h = get_host(disk);
1568 if (!capable(CAP_SYS_RAWIO))
1571 /* make sure logical volume is not in use */
1572 if (clear_all || (h->gendisk[0] == disk)) {
/* Disk 0 always has the controller node itself open, hence > 1. */
1573 if (drv->usage_count > 1)
1575 } else if (drv->usage_count > 0)
1578 /* invalidate the devices and deregister the disk. If it is disk
1579 * zero do not deregister it but just zero out it's values. This
1580 * allows us to delete disk zero but keep the controller registered.
1582 if (h->gendisk[0] != disk) {
1584 request_queue_t *q = disk->queue;
1585 if (disk->flags & GENHD_FL_UP)
1588 blk_cleanup_queue(q);
1589 /* Set drv->queue to NULL so that we do not try
1590 * to call blk_start_queue on this queue in the
1595 /* If clear_all is set then we are deleting the logical
1596 * drive, not just refreshing its info. For drives
1597 * other than disk 0 we will call put_disk. We do not
1598 * do this for disk 0 as we need it to be able to
1599 * configure the controller.
1602 /* This isn't pretty, but we need to find the
1603 * disk in our array and NULL our the pointer.
1604 * This is so that we will call alloc_disk if
1605 * this index is used again later.
1607 for (i=0; i < CISS_MAX_LUN; i++){
1608 if(h->gendisk[i] == disk){
1609 h->gendisk[i] = NULL;
1617 set_capacity(disk, 0);
1621 /* zero out the disk size info */
1623 drv->block_size = 0;
1627 drv->raid_level = -1; /* This can be used as a flag variable to
1628 * indicate that this element of the drive
1633 /* check to see if it was the last disk */
1634 if (drv == h->drv + h->highest_lun) {
1635 /* if so, find the new highest lun */
1636 int i, newhighest = -1;
1637 for (i = 0; i < h->highest_lun; i++) {
1638 /* if the disk has size > 0, it is available */
1639 if (h->drv[i].heads)
1642 h->highest_lun = newhighest;
/*
 * Populate a CommandList_struct for either a CISS command (TYPE_CMD,
 * dispatched on the CISS opcode in 'cmd') or a message (TYPE_MSG: abort,
 * reset, no-op), then map 'buff' for DMA into SG[0].  Addressing mode is
 * chosen by use_unit_num as described in the inline comment below.
 */
1650 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1651 1: address logical volume log_unit,
1652 2: periph device address is scsi3addr */
1653 unsigned int log_unit, __u8 page_code,
1654 unsigned char *scsi3addr, int cmd_type)
1656 ctlr_info_t *h = hba[ctlr];
1657 u64bit buff_dma_handle;
1660 c->cmd_type = CMD_IOCTL_PEND;
1661 c->Header.ReplyQueue = 0;
/* One SG entry iff a data buffer was supplied. */
1663 c->Header.SGList = 1;
1664 c->Header.SGTotal = 1;
1666 c->Header.SGList = 0;
1667 c->Header.SGTotal = 0;
1669 c->Header.Tag.lower = c->busaddr;
1671 c->Request.Type.Type = cmd_type;
1672 if (cmd_type == TYPE_CMD) {
1675 /* If the logical unit number is 0 then, this is going
1676 to controller so It's a physical command
1677 mode = 0 target = 0. So we have nothing to write.
1678 otherwise, if use_unit_num == 1,
1679 mode = 1(volume set addressing) target = LUNID
1680 otherwise, if use_unit_num == 2,
1681 mode = 0(periph dev addr) target = scsi3addr */
1682 if (use_unit_num == 1) {
1683 c->Header.LUN.LogDev.VolId =
1684 h->drv[log_unit].LunID;
1685 c->Header.LUN.LogDev.Mode = 1;
1686 } else if (use_unit_num == 2) {
1687 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1689 c->Header.LUN.LogDev.Mode = 0;
1691 /* are we trying to read a vital product page */
1692 if (page_code != 0) {
1693 c->Request.CDB[1] = 0x01;
1694 c->Request.CDB[2] = page_code;
1696 c->Request.CDBLen = 6;
1697 c->Request.Type.Attribute = ATTR_SIMPLE;
1698 c->Request.Type.Direction = XFER_READ;
1699 c->Request.Timeout = 0;
1700 c->Request.CDB[0] = CISS_INQUIRY;
1701 c->Request.CDB[4] = size & 0xFF;
1703 case CISS_REPORT_LOG:
1704 case CISS_REPORT_PHYS:
1705 /* Talking to controller so It's a physical command
1706 mode = 00 target = 0. Nothing to write.
1708 c->Request.CDBLen = 12;
1709 c->Request.Type.Attribute = ATTR_SIMPLE;
1710 c->Request.Type.Direction = XFER_READ;
1711 c->Request.Timeout = 0;
1712 c->Request.CDB[0] = cmd;
1713 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1714 c->Request.CDB[7] = (size >> 16) & 0xFF;
1715 c->Request.CDB[8] = (size >> 8) & 0xFF;
1716 c->Request.CDB[9] = size & 0xFF;
1719 case CCISS_READ_CAPACITY:
1720 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1721 c->Header.LUN.LogDev.Mode = 1;
1722 c->Request.CDBLen = 10;
1723 c->Request.Type.Attribute = ATTR_SIMPLE;
1724 c->Request.Type.Direction = XFER_READ;
1725 c->Request.Timeout = 0;
1726 c->Request.CDB[0] = cmd;
1728 case CCISS_READ_CAPACITY_16:
1729 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1730 c->Header.LUN.LogDev.Mode = 1;
1731 c->Request.CDBLen = 16;
1732 c->Request.Type.Attribute = ATTR_SIMPLE;
1733 c->Request.Type.Direction = XFER_READ;
1734 c->Request.Timeout = 0;
1735 c->Request.CDB[0] = cmd;
/* 0x10 = READ CAPACITY(16) service action for the 9E opcode. */
1736 c->Request.CDB[1] = 0x10;
1737 c->Request.CDB[10] = (size >> 24) & 0xFF;
1738 c->Request.CDB[11] = (size >> 16) & 0xFF;
1739 c->Request.CDB[12] = (size >> 8) & 0xFF;
/* NOTE(review): the next two assignments duplicate lines above
 * (Timeout and CDB[0] were already set in this case) — harmless
 * but redundant; candidates for removal. */
1740 c->Request.CDB[13] = size & 0xFF;
1741 c->Request.Timeout = 0;
1742 c->Request.CDB[0] = cmd;
1744 case CCISS_CACHE_FLUSH:
1745 c->Request.CDBLen = 12;
1746 c->Request.Type.Attribute = ATTR_SIMPLE;
1747 c->Request.Type.Direction = XFER_WRITE;
1748 c->Request.Timeout = 0;
1749 c->Request.CDB[0] = BMIC_WRITE;
1750 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
/* NOTE(review): "%c" prints the opcode as a character; "%x" would be
 * more useful, but the string is runtime behavior and left untouched. */
1754 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1757 } else if (cmd_type == TYPE_MSG) {
1759 case 0: /* ABORT message */
1760 c->Request.CDBLen = 12;
1761 c->Request.Type.Attribute = ATTR_SIMPLE;
1762 c->Request.Type.Direction = XFER_WRITE;
1763 c->Request.Timeout = 0;
1764 c->Request.CDB[0] = cmd; /* abort */
1765 c->Request.CDB[1] = 0; /* abort a command */
1766 /* buff contains the tag of the command to abort */
1767 memcpy(&c->Request.CDB[4], buff, 8);
1769 case 1: /* RESET message */
1770 c->Request.CDBLen = 12;
1771 c->Request.Type.Attribute = ATTR_SIMPLE;
1772 c->Request.Type.Direction = XFER_WRITE;
1773 c->Request.Timeout = 0;
1774 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1775 c->Request.CDB[0] = cmd; /* reset */
1776 c->Request.CDB[1] = 0x04; /* reset a LUN */
1778 case 3: /* No-Op message */
1779 c->Request.CDBLen = 1;
1780 c->Request.Type.Attribute = ATTR_SIMPLE;
1781 c->Request.Type.Direction = XFER_WRITE;
1782 c->Request.Timeout = 0;
1783 c->Request.CDB[0] = cmd;
1787 "cciss%d: unknown message type %d\n", ctlr, cmd);
1792 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1795 /* Fill in the scatter gather information */
/* Map the caller's buffer bidirectionally; the caller unmaps after
 * completion using the address saved in SG[0]. */
1797 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1799 PCI_DMA_BIDIRECTIONAL);
1800 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1801 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1802 c->SG[0].Len = size;
1803 c->SG[0].Ext = 0; /* we are not chaining */
/*
 * Build a command via fill_cmd, queue it to the controller, and sleep on
 * a completion until the interrupt handler finishes it.  Decodes the
 * error record into IO_OK/IO_ERROR, retrying unsolicited aborts up to
 * MAX_CMD_RETRIES.  Must be called from process context (sleeps).
 */
1808 static int sendcmd_withirq(__u8 cmd,
1812 unsigned int use_unit_num,
1813 unsigned int log_unit, __u8 page_code, int cmd_type)
1815 ctlr_info_t *h = hba[ctlr];
1816 CommandList_struct *c;
1817 u64bit buff_dma_handle;
1818 unsigned long flags;
1820 DECLARE_COMPLETION_ONSTACK(wait);
1822 if ((c = cmd_alloc(h, 0)) == NULL)
1824 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1825 log_unit, page_code, NULL, cmd_type);
1826 if (return_status != IO_OK) {
1828 return return_status;
1833 /* Put the request on the tail of the queue and send it */
1834 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1838 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1840 wait_for_completion(&wait);
1842 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1843 switch (c->err_info->CommandStatus) {
1844 case CMD_TARGET_STATUS:
1845 printk(KERN_WARNING "cciss: cmd %p has "
1846 " completed with errors\n", c);
1847 if (c->err_info->ScsiStatus) {
1848 printk(KERN_WARNING "cciss: cmd %p "
1849 "has SCSI Status = %x\n",
1850 c, c->err_info->ScsiStatus);
1854 case CMD_DATA_UNDERRUN:
1855 case CMD_DATA_OVERRUN:
1856 /* expected for inquire and report lun commands */
1859 printk(KERN_WARNING "cciss: Cmd %p is "
1860 "reported invalid\n", c);
1861 return_status = IO_ERROR;
1863 case CMD_PROTOCOL_ERR:
1864 printk(KERN_WARNING "cciss: cmd %p has "
1865 "protocol error \n", c);
1866 return_status = IO_ERROR;
1868 case CMD_HARDWARE_ERR:
1869 printk(KERN_WARNING "cciss: cmd %p had "
1870 " hardware error\n", c);
1871 return_status = IO_ERROR;
1873 case CMD_CONNECTION_LOST:
1874 printk(KERN_WARNING "cciss: cmd %p had "
1875 "connection lost\n", c);
1876 return_status = IO_ERROR;
1879 printk(KERN_WARNING "cciss: cmd %p was "
1881 return_status = IO_ERROR;
1883 case CMD_ABORT_FAILED:
1884 printk(KERN_WARNING "cciss: cmd %p reports "
1885 "abort failed\n", c);
1886 return_status = IO_ERROR;
1888 case CMD_UNSOLICITED_ABORT:
1890 "cciss%d: unsolicited abort %p\n", ctlr, c);
1891 if (c->retry_count < MAX_CMD_RETRIES) {
1893 "cciss%d: retrying %p\n", ctlr, c);
1895 /* erase the old error information */
1896 memset(c->err_info, 0,
1897 sizeof(ErrorInfo_struct));
1898 return_status = IO_OK;
/* Re-arm the on-stack completion before resubmitting. */
1899 INIT_COMPLETION(wait);
1902 return_status = IO_ERROR;
1905 printk(KERN_WARNING "cciss: cmd %p returned "
1906 "unknown status %x\n", c,
1907 c->err_info->CommandStatus);
1908 return_status = IO_ERROR;
1911 /* unlock the buffers from DMA */
/* SG[0] still holds the DMA address fill_cmd mapped; unmap it here. */
1912 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1913 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1914 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1915 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1917 return return_status;
/*
 * Issue a vendor-specific INQUIRY (VPD page 0xC1) to read a logical
 * volume's geometry and RAID level into *drv.  Falls back to a synthetic
 * 32-sectors-per-track geometry when the volume does not report one.
 * withirq selects the interrupt-driven vs. polled send path.
 */
1920 static void cciss_geometry_inquiry(int ctlr, int logvol,
1921 int withirq, sector_t total_size,
1922 unsigned int block_size,
1923 InquiryData_struct *inq_buff,
1924 drive_info_struct *drv)
1929 memset(inq_buff, 0, sizeof(InquiryData_struct));
1931 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1932 inq_buff, sizeof(*inq_buff), 1,
1933 logvol, 0xC1, TYPE_CMD);
1935 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1936 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1938 if (return_code == IO_OK) {
/* 0xFF in byte 8 means the firmware has no geometry for this volume. */
1939 if (inq_buff->data_byte[8] == 0xFF) {
1941 "cciss: reading geometry failed, volume "
1942 "does not support reading geometry\n");
1944 drv->sectors = 32; // Sectors per track
1945 drv->cylinders = total_size + 1;
1946 drv->raid_level = RAID_UNKNOWN;
1948 drv->heads = inq_buff->data_byte[6];
1949 drv->sectors = inq_buff->data_byte[7];
1950 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1951 drv->cylinders += inq_buff->data_byte[5];
1952 drv->raid_level = inq_buff->data_byte[8];
1954 drv->block_size = block_size;
/* total_size is the last LBA, so block count is total_size + 1. */
1955 drv->nr_blocks = total_size + 1;
1956 t = drv->heads * drv->sectors;
1958 sector_t real_size = total_size + 1;
/* sector_div is required for 64-bit division on 32-bit kernels. */
1959 unsigned long rem = sector_div(real_size, t);
1962 drv->cylinders = real_size;
1964 } else { /* Get geometry failed */
1965 printk(KERN_WARNING "cciss: reading geometry failed\n");
1967 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1968 drv->heads, drv->sectors, drv->cylinders);
/*
 * Issue a 10-byte READ CAPACITY to a logical volume and return the last
 * LBA in *total_size and the block size in *block_size.  On failure the
 * block size falls back to BLOCK_SIZE.  withirq selects interrupt-driven
 * vs. polled submission.
 */
1972 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1973 unsigned int *block_size)
1975 ReadCapdata_struct *buf;
1977 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1979 printk(KERN_WARNING "cciss: out of memory\n");
1982 memset(buf, 0, sizeof(ReadCapdata_struct));
1984 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1985 ctlr, buf, sizeof(ReadCapdata_struct),
1986 1, logvol, 0, TYPE_CMD);
1988 return_code = sendcmd(CCISS_READ_CAPACITY,
1989 ctlr, buf, sizeof(ReadCapdata_struct),
1990 1, logvol, 0, NULL, TYPE_CMD);
1991 if (return_code == IO_OK) {
/* READ CAPACITY data is big-endian on the wire. */
1992 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
1993 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
1994 } else { /* read capacity command failed */
1995 printk(KERN_WARNING "cciss: read capacity failed\n");
1997 *block_size = BLOCK_SIZE;
1999 if (*total_size != 0)
2000 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2001 (unsigned long long)*total_size+1, *block_size);
/*
 * 16-byte READ CAPACITY variant for volumes larger than 2TB: returns a
 * 64-bit last LBA in *total_size and the block size in *block_size.
 * Mirrors cciss_read_capacity, falling back to BLOCK_SIZE on failure.
 */
2007 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2009 ReadCapdata_struct_16 *buf;
2011 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2013 printk(KERN_WARNING "cciss: out of memory\n");
2016 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2018 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2019 ctlr, buf, sizeof(ReadCapdata_struct_16),
2020 1, logvol, 0, TYPE_CMD);
2023 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2024 ctlr, buf, sizeof(ReadCapdata_struct_16),
2025 1, logvol, 0, NULL, TYPE_CMD);
2027 if (return_code == IO_OK) {
/* 64-bit big-endian LBA, 32-bit big-endian block length. */
2028 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2029 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2030 } else { /* read capacity command failed */
2031 printk(KERN_WARNING "cciss: read capacity failed\n");
2033 *block_size = BLOCK_SIZE;
2035 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2036 (unsigned long long)*total_size+1, *block_size);
/*
 * block_device_operations revalidate hook: re-read a drive's capacity
 * and geometry from the controller and refresh the gendisk/queue sizing.
 * Locates the logical volume by matching LunID in the host's drive array.
 */
2041 static int cciss_revalidate(struct gendisk *disk)
2043 ctlr_info_t *h = get_host(disk);
2044 drive_info_struct *drv = get_drv(disk);
2047 unsigned int block_size;
2048 sector_t total_size;
2049 InquiryData_struct *inq_buff = NULL;
2051 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2052 if (h->drv[logvol].LunID == drv->LunID) {
2061 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2062 if (inq_buff == NULL) {
2063 printk(KERN_WARNING "cciss: out of memory\n");
/* Use whichever CDB size the controller is currently operating with. */
2066 if (h->cciss_read == CCISS_READ_10) {
2067 cciss_read_capacity(h->ctlr, logvol, 1,
2068 &total_size, &block_size);
2070 cciss_read_capacity_16(h->ctlr, logvol, 1,
2071 &total_size, &block_size);
2073 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2076 blk_queue_hardsect_size(drv->queue, drv->block_size);
2077 set_capacity(disk, drv->nr_blocks);
2084 * Wait polling for a command to complete.
2085 * The memory mapped FIFO is polled for the completion.
2086 * Used only at init time, interrupts from the HBA are disabled.
/*
 * Poll the controller's completion FIFO for up to ~20 seconds, sleeping
 * one tick between polls.  Returns the completed command's tag, or (per
 * the comment below) an invalid address on timeout.  Init-time only.
 */
2088 static unsigned long pollcomplete(int ctlr)
2093 /* Wait (up to 20 seconds) for a command to complete */
2095 for (i = 20 * HZ; i > 0; i--) {
2096 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2097 if (done == FIFO_EMPTY)
2098 schedule_timeout_uninterruptible(1);
2102 /* Invalid address to tell caller we ran out of time */
/*
 * Stash a completion that sendcmd() polled off the FIFO but did not
 * expect (only legitimate during SCSI tape abort/reset error handling)
 * onto the per-controller reject list, for later processing by the
 * interrupt handler.  Nonzero return indicates the completion was lost.
 */
2106 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2108 /* We get in here if sendcmd() is polling for completions
2109 and gets some command back that it wasn't expecting --
2110 something other than that which it just sent down.
2111 Ordinarily, that shouldn't happen, but it can happen when
2112 the scsi tape stuff gets into error handling mode, and
2113 starts using sendcmd() to try to abort commands and
2114 reset tape drives. In that case, sendcmd may pick up
2115 completions of commands that were sent to logical drives
2116 through the block i/o system, or cciss ioctls completing, etc.
2117 In that case, we need to save those completions for later
2118 processing by the interrupt handler.
2121 #ifdef CONFIG_CISS_SCSI_TAPE
2122 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2124 /* If it's not the scsi tape stuff doing error handling, (abort */
2125 /* or reset) then we don't expect anything weird. */
2126 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2128 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2129 "Invalid command list address returned! (%lx)\n",
2131 /* not much we can do. */
2132 #ifdef CONFIG_CISS_SCSI_TAPE
2136 /* We've sent down an abort or reset, but something else
2138 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2139 /* Uh oh. No room to save it for later... */
2140 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2141 "reject list overflow, command lost!\n", ctlr);
2144 /* Save it for later */
2145 srl->complete[srl->ncompletions] = complete;
2146 srl->ncompletions++;
2152 * Send a command to the controller, and wait for it to complete.
2153 * Only used at init time.
/*
 * Polled (interrupts-off) command submission, used only at init time and
 * for SCSI tape error recovery: build the command, wait for FIFO space,
 * submit, then poll for completion via pollcomplete().  Unexpected
 * completions are parked with add_sendcmd_reject() and replayed through
 * do_cciss_intr() at the end.  Unsolicited aborts are retried up to
 * MAX_CMD_RETRIES.
 */
2155 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2156 1: address logical volume log_unit,
2157 2: periph device address is scsi3addr */
2158 unsigned int log_unit,
2159 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2161 CommandList_struct *c;
2163 unsigned long complete;
2164 ctlr_info_t *info_p = hba[ctlr];
2165 u64bit buff_dma_handle;
2166 int status, done = 0;
2168 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2169 printk(KERN_WARNING "cciss: unable to get memory");
2172 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2173 log_unit, page_code, scsi3addr, cmd_type);
2174 if (status != IO_OK) {
2175 cmd_free(info_p, c, 1);
2183 printk(KERN_DEBUG "cciss: turning intr off\n");
2184 #endif /* CCISS_DEBUG */
/* Interrupts stay masked for the whole polled exchange. */
2185 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2187 /* Make sure there is room in the command FIFO */
2188 /* Actually it should be completely empty at this time */
2189 /* unless we are in here doing error handling for the scsi */
2190 /* tape side of the driver. */
2191 for (i = 200000; i > 0; i--) {
2192 /* if fifo isn't full go */
2193 if (!(info_p->access.fifo_full(info_p))) {
2198 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2199 " waiting!\n", ctlr);
2204 info_p->access.submit_command(info_p, c);
2207 complete = pollcomplete(ctlr);
2210 printk(KERN_DEBUG "cciss: command completed\n");
2211 #endif /* CCISS_DEBUG */
/* pollcomplete() returns 1 (an invalid tag) on timeout. */
2213 if (complete == 1) {
2215 "cciss cciss%d: SendCmd Timeout out, "
2216 "No command list address returned!\n", ctlr);
2222 /* This will need to change for direct lookup completions */
2223 if ((complete & CISS_ERROR_BIT)
2224 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2225 /* if data overrun or underun on Report command
2228 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2229 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2230 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2231 ((c->err_info->CommandStatus ==
2232 CMD_DATA_OVERRUN) ||
2233 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
/* Over/underruns on these inquiry-style commands are benign;
 * treat as a normal completion by clearing the error bit. */
2235 complete = c->busaddr;
2237 if (c->err_info->CommandStatus ==
2238 CMD_UNSOLICITED_ABORT) {
2239 printk(KERN_WARNING "cciss%d: "
2240 "unsolicited abort %p\n",
2242 if (c->retry_count < MAX_CMD_RETRIES) {
2244 "cciss%d: retrying %p\n",
2247 /* erase the old error */
2249 memset(c->err_info, 0,
2251 (ErrorInfo_struct));
2255 "cciss%d: retried %p too "
2256 "many times\n", ctlr, c);
2260 } else if (c->err_info->CommandStatus ==
2263 "cciss%d: command could not be aborted.\n",
/* NOTE(review): "ciss ciss%d" below looks like a typo for
 * "cciss cciss%d"; runtime string left untouched here. */
2268 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2269 " Error %x \n", ctlr,
2270 c->err_info->CommandStatus);
2271 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2273 " size %x\n num %x value %x\n",
2275 c->err_info->MoreErrInfo.Invalid_Cmd.
2277 c->err_info->MoreErrInfo.Invalid_Cmd.
2279 c->err_info->MoreErrInfo.Invalid_Cmd.
2285 /* This will need changing for direct lookup completions */
2286 if (complete != c->busaddr) {
2287 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2288 BUG(); /* we are pretty much hosed if we get here. */
2296 /* unlock the data buffer from DMA */
2297 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2298 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2299 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2300 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2301 #ifdef CONFIG_CISS_SCSI_TAPE
2302 /* if we saved some commands for later, process them now. */
2303 if (info_p->scsi_rejects.ncompletions > 0)
2304 do_cciss_intr(0, info_p);
2306 cmd_free(info_p, c, 1);
2311 * Map (physical) PCI mem into (virtual) kernel space
/*
 * ioremap an arbitrary (possibly unaligned) physical range: round the
 * base down to a page boundary, map page-aligned, and return a pointer
 * adjusted by the intra-page offset.  Returns NULL if ioremap fails.
 */
2313 static void __iomem *remap_pci_mem(ulong base, ulong size)
2315 ulong page_base = ((ulong) base) & PAGE_MASK;
2316 ulong page_offs = ((ulong) base) - page_base;
2317 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2319 return page_remapped ? (page_remapped + page_offs) : NULL;
2323 * Takes jobs of the Q and sends them to the hardware, then puts it on
2324 * the Q to wait for completion.
/*
 * Drain the software request queue (h->reqQ) into the controller FIFO,
 * moving each submitted command onto the completion queue (h->cmpQ).
 * Stops early if the hardware FIFO fills.  Per the header comment on
 * resend_cciss_cmd, callers hold CCISS_LOCK — TODO confirm for all call
 * sites (not visible in this excerpt).
 */
2326 static void start_io(ctlr_info_t *h)
2328 CommandList_struct *c;
2330 while ((c = h->reqQ) != NULL) {
2331 /* can't do anything if fifo is full */
2332 if ((h->access.fifo_full(h))) {
2333 printk(KERN_WARNING "cciss: fifo full\n");
2337 /* Get the first entry from the Request Q */
2338 removeQ(&(h->reqQ), c);
2341 /* Tell the controller execute command */
2342 h->access.submit_command(h, c);
2344 /* Put job onto the completed Q */
2345 addQ(&(h->cmpQ), c);
2349 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2350 /* Zeros out the error record and then resends the command back */
2351 /* to the controller */
/* Assumes CCISS_LOCK(h->ctlr) is held (see comment above): clear the
 * stale error record and requeue the command for resubmission, tracking
 * the high-water queue depth. */
2352 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2354 /* erase the old error information */
2355 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2357 /* add it to software queue and then send it to the controller */
2358 addQ(&(h->reqQ), c);
2360 if (h->Qdepth > h->maxQsinceinit)
2361 h->maxQsinceinit = h->Qdepth;
/*
 * Translate a CMD_TARGET_STATUS completion into a request error count:
 * 0 for benign outcomes (no sense / recovered error), nonzero otherwise.
 * For SG_IO-style (blk_pc) requests, copies the sense data back to the
 * request so userspace can inspect it.
 */
2366 static inline int evaluate_target_status(CommandList_struct *cmd)
2368 unsigned char sense_key;
2369 int error_count = 1;
/* 0x02 == SCSI CHECK CONDITION; anything else is logged (for fs
 * requests) and treated as an error. */
2371 if (cmd->err_info->ScsiStatus != 0x02) { /* not check condition? */
2372 if (!blk_pc_request(cmd->rq))
2373 printk(KERN_WARNING "cciss: cmd %p "
2374 "has SCSI Status 0x%x\n",
2375 cmd, cmd->err_info->ScsiStatus);
2379 /* check the sense key */
2380 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2381 /* no status or recovered error */
2382 if ((sense_key == 0x0) || (sense_key == 0x1))
2385 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2386 if (error_count != 0)
2387 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2388 " sense key = 0x%x\n", cmd, sense_key);
2392 /* SG_IO or similar, copy sense data back */
2393 if (cmd->rq->sense) {
/* Clamp to what the controller actually returned. */
2394 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2395 cmd->rq->sense_len = cmd->err_info->SenseLen;
2396 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2397 cmd->rq->sense_len);
2399 cmd->rq->sense_len = 0;
2404 /* checks the status of the job and calls complete buffers to mark all
2405 * buffers for the completed job. Note that this function does not need
2406 * to hold the hba/queue lock.
/*
 * Per-command completion processing: decode err_info->CommandStatus,
 * set rq->errors accordingly, retry unsolicited aborts, then hand the
 * request to the block layer's softirq completion path.  Per the header
 * comment above, does not require the hba/queue lock.
 */
2408 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2412 struct request *rq = cmd->rq;
2419 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2420 goto after_error_processing;
2422 switch (cmd->err_info->CommandStatus) {
2423 case CMD_TARGET_STATUS:
2424 rq->errors = evaluate_target_status(cmd);
2426 case CMD_DATA_UNDERRUN:
/* Underrun is only noteworthy for filesystem requests; the residual
 * count is propagated so the block layer sees the short transfer. */
2427 if (blk_fs_request(cmd->rq)) {
2428 printk(KERN_WARNING "cciss: cmd %p has"
2429 " completed with data underrun "
2431 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2434 case CMD_DATA_OVERRUN:
2435 if (blk_fs_request(cmd->rq))
2436 printk(KERN_WARNING "cciss: cmd %p has"
2437 " completed with data overrun "
2441 printk(KERN_WARNING "cciss: cmd %p is "
2442 "reported invalid\n", cmd);
2445 case CMD_PROTOCOL_ERR:
2446 printk(KERN_WARNING "cciss: cmd %p has "
2447 "protocol error \n", cmd);
2450 case CMD_HARDWARE_ERR:
2451 printk(KERN_WARNING "cciss: cmd %p had "
2452 " hardware error\n", cmd);
2455 case CMD_CONNECTION_LOST:
2456 printk(KERN_WARNING "cciss: cmd %p had "
2457 "connection lost\n", cmd);
2461 printk(KERN_WARNING "cciss: cmd %p was "
2465 case CMD_ABORT_FAILED:
2466 printk(KERN_WARNING "cciss: cmd %p reports "
2467 "abort failed\n", cmd);
2470 case CMD_UNSOLICITED_ABORT:
2471 printk(KERN_WARNING "cciss%d: unsolicited "
2472 "abort %p\n", h->ctlr, cmd);
2473 if (cmd->retry_count < MAX_CMD_RETRIES) {
2476 "cciss%d: retrying %p\n", h->ctlr, cmd);
2480 "cciss%d: %p retried too "
2481 "many times\n", h->ctlr, cmd);
2485 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2489 printk(KERN_WARNING "cciss: cmd %p returned "
2490 "unknown status %x\n", cmd,
2491 cmd->err_info->CommandStatus);
2495 after_error_processing:
2497 /* We need to return this command */
2499 resend_cciss_cmd(h, cmd);
/* completion_data lets cciss_softirq_done recover the command from
 * the request during the deferred completion. */
2502 cmd->rq->data_len = 0;
2503 cmd->rq->completion_data = cmd;
2504 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2505 blk_complete_request(cmd->rq);
2509 * Get a request and submit it to the controller.
/*
 * do_cciss_request: block-layer request function for this controller.
 * Pulls requests off the elevator, DMA-maps the data into the command's
 * scatter-gather list, builds a 10- or 16-byte READ/WRITE CDB, and queues
 * the command on the controller's request queue.
 * NOTE(review): this extract is missing intermediate source lines (see the
 * gaps in the embedded line numbers); braces/else-arms are not all visible.
 */
2511 static void do_cciss_request(request_queue_t *q)
2513 ctlr_info_t *h = q->queuedata;
2514 CommandList_struct *c;
2517 struct request *creq;
2519 struct scatterlist tmp_sg[MAXSGENTRIES];
2520 drive_info_struct *drv;
2523 /* We call start_io here in case there is a command waiting on the
2524 * queue that has not been sent.
2526 if (blk_queue_plugged(q))
2530 creq = elv_next_request(q);
/* The queue advertises at most MAXSGENTRIES segments; more is a driver bug. */
2534 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2536 if ((c = cmd_alloc(h, 1)) == NULL)
2539 blkdev_dequeue_request(creq);
/* Drop the queue lock while building/mapping the command (relocked below). */
2541 spin_unlock_irq(q->queue_lock);
2543 c->cmd_type = CMD_RWREQ;
2546 /* fill in the request */
2547 drv = creq->rq_disk->private_data;
2548 c->Header.ReplyQueue = 0; // unused in simple mode
2549 /* got command from pool, so use the command block index instead */
2550 /* for direct lookups. */
2551 /* The first 2 bits are reserved for controller error reporting. */
2552 c->Header.Tag.lower = (c->cmdindex << 3);
2553 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2554 c->Header.LUN.LogDev.VolId = drv->LunID;
2555 c->Header.LUN.LogDev.Mode = 1;
2556 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2557 c->Request.Type.Type = TYPE_CMD; // It is a command.
2558 c->Request.Type.Attribute = ATTR_SIMPLE;
2559 c->Request.Type.Direction =
2560 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2561 c->Request.Timeout = 0; // Don't time out
/* Opcode selected per-controller: 10-byte vs 16-byte read/write variants. */
2563 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2564 start_blk = creq->sector;
2566 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2567 (int)creq->nr_sectors);
2568 #endif /* CCISS_DEBUG */
/* Map the request's bios into tmp_sg; seg = number of SG entries used. */
2570 seg = blk_rq_map_sg(q, creq, tmp_sg);
2572 /* get the DMA records for the setup */
2573 if (c->Request.Type.Direction == XFER_READ)
2574 dir = PCI_DMA_FROMDEVICE;
2576 dir = PCI_DMA_TODEVICE;
2578 for (i = 0; i < seg; i++) {
2579 c->SG[i].Len = tmp_sg[i].length;
/* 64-bit DMA address split into lower/upper 32-bit halves for the HW. */
2580 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2582 tmp_sg[i].length, dir);
2583 c->SG[i].Addr.lower = temp64.val32.lower;
2584 c->SG[i].Addr.upper = temp64.val32.upper;
2585 c->SG[i].Ext = 0; // we are not chaining
2587 /* track how many SG entries we are using */
2592 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2593 creq->nr_sectors, seg);
2594 #endif /* CCISS_DEBUG */
2596 c->Header.SGList = c->Header.SGTotal = seg;
2597 if (likely(blk_fs_request(creq))) {
/* 10-byte CDB: 32-bit LBA in CDB[2..5], 16-bit count in CDB[7..8]. */
2598 if(h->cciss_read == CCISS_READ_10) {
2599 c->Request.CDB[1] = 0;
2600 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2601 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2602 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2603 c->Request.CDB[5] = start_blk & 0xff;
2604 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2605 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2606 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2607 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
/* Otherwise build a 16-byte CDB: 64-bit LBA, 32-bit sector count. */
2609 c->Request.CDBLen = 16;
2610 c->Request.CDB[1]= 0;
2611 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2612 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2613 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2614 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2615 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2616 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2617 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2618 c->Request.CDB[9]= start_blk & 0xff;
2619 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2620 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2621 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2622 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2623 c->Request.CDB[14] = c->Request.CDB[15] = 0;
/* SCSI passthrough (blk_pc) request: copy the caller-supplied CDB as-is. */
2625 } else if (blk_pc_request(creq)) {
2626 c->Request.CDBLen = creq->cmd_len;
2627 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2629 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
/* Re-take the queue lock before touching the shared request queue. */
2633 spin_lock_irq(q->queue_lock);
2635 addQ(&(h->reqQ), c);
2637 if (h->Qdepth > h->maxQsinceinit)
2638 h->maxQsinceinit = h->Qdepth;
2644 /* We will already have the driver lock here so not need
/*
 * get_next_completion: fetch the tag of the next completed command.
 * With CISS SCSI tape support compiled in, completions stashed by
 * sendcmd() rejects are drained first (LIFO, via --ncompletions) before
 * asking the hardware; otherwise the hardware FIFO is read directly.
 * Caller is expected to hold the driver lock (per the comment above).
 */
2650 static inline unsigned long get_next_completion(ctlr_info_t *h)
2652 #ifdef CONFIG_CISS_SCSI_TAPE
2653 /* Any rejects from sendcmd() lying around? Process them first */
2654 if (h->scsi_rejects.ncompletions == 0)
2655 return h->access.command_completed(h);
2657 struct sendcmd_reject_list *srl;
2659 srl = &h->scsi_rejects;
2660 n = --srl->ncompletions;
2661 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2663 return srl->complete[n];
2666 return h->access.command_completed(h);
/*
 * interrupt_pending: true if the board signals an interrupt or (with SCSI
 * tape support) there are saved sendcmd() reject completions to process.
 */
2670 static inline int interrupt_pending(ctlr_info_t *h)
2672 #ifdef CONFIG_CISS_SCSI_TAPE
2673 return (h->access.intr_pending(h)
2674 || (h->scsi_rejects.ncompletions > 0));
2676 return h->access.intr_pending(h);
/*
 * interrupt_not_for_us: true when this IRQ can be ignored — no interrupt
 * pending or our interrupts are disabled, and (with SCSI tape support)
 * no saved reject completions are waiting either.
 */
2680 static inline long interrupt_not_for_us(ctlr_info_t *h)
2682 #ifdef CONFIG_CISS_SCSI_TAPE
2683 return (((h->access.intr_pending(h) == 0) ||
2684 (h->interrupts_enabled == 0))
2685 && (h->scsi_rejects.ncompletions == 0));
2687 return (((h->access.intr_pending(h) == 0) ||
2688 (h->interrupts_enabled == 0)));
/*
 * do_cciss_intr: interrupt handler.  Drains the controller's completion
 * FIFO under the per-controller lock, looks each completed tag up either
 * by direct command-pool index (fast path) or by walking the completion
 * queue by bus address, then completes the command according to its type
 * (block request, ioctl waiter, or SCSI command).
 * NOTE(review): intermediate lines (tag decode into a/a1/a2, loop walk)
 * are missing from this extract.
 */
2692 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2694 ctlr_info_t *h = dev_id;
2695 CommandList_struct *c;
2696 unsigned long flags;
/* Shared IRQ line: bail out quickly if this interrupt isn't ours. */
2699 if (interrupt_not_for_us(h))
2702 * If there are completed commands in the completion queue,
2703 * we had better do something about it.
2705 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2706 while (interrupt_pending(h)) {
2707 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
/* Direct-lookup tag index out of range => controller is insane; fail all. */
2711 if (a2 >= h->nr_cmds) {
2713 "cciss: controller cciss%d failed, stopping.\n",
2715 fail_all_cmds(h->ctlr);
2719 c = h->cmd_pool + a2;
/* Fallback path: search the completion queue by bus address. */
2724 if ((c = h->cmpQ) == NULL) {
2726 "cciss: Completion of %08x ignored\n",
2730 while (c->busaddr != a) {
2737 * If we've found the command, take it off the
2738 * completion Q and free it
2740 if (c->busaddr == a) {
2741 removeQ(&h->cmpQ, c);
2742 if (c->cmd_type == CMD_RWREQ) {
2743 complete_command(h, c, 0);
2744 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2745 complete(c->waiting);
2747 # ifdef CONFIG_CISS_SCSI_TAPE
2748 else if (c->cmd_type == CMD_SCSI)
2749 complete_scsi_command(c, 0, a1);
2756 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2761 * We cannot read the structure directly, for portability we must use
2763 * This is for debug only.
/*
 * print_cfg_table: dump the controller config table to the kernel log.
 * All fields are read through readb/readl because the table lives in
 * memory-mapped I/O space (see the comment above).  Debug-only code,
 * compiled under CCISS_DEBUG (the #endif is visible at the end).
 */
2766 static void print_cfg_table(CfgTable_struct *tb)
2771 printk("Controller Configuration information\n");
2772 printk("------------------------------------\n");
2773 for (i = 0; i < 4; i++)
2774 temp_name[i] = readb(&(tb->Signature[i]));
2775 temp_name[4] = '\0';
2776 printk(" Signature = %s\n", temp_name);
2777 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2778 printk(" Transport methods supported = 0x%x\n",
2779 readl(&(tb->TransportSupport)));
2780 printk(" Transport methods active = 0x%x\n",
2781 readl(&(tb->TransportActive)));
2782 printk(" Requested transport Method = 0x%x\n",
2783 readl(&(tb->HostWrite.TransportRequest)));
2784 printk(" Coalesce Interrupt Delay = 0x%x\n",
2785 readl(&(tb->HostWrite.CoalIntDelay)));
2786 printk(" Coalesce Interrupt Count = 0x%x\n",
2787 readl(&(tb->HostWrite.CoalIntCount)));
/* NOTE(review): "0x%d" mixes a hex prefix with decimal formatting; the
 * value prints as decimal — harmless debug-output oddity. */
2788 printk(" Max outstanding commands = 0x%d\n",
2789 readl(&(tb->CmdsOutMax)));
2790 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2791 for (i = 0; i < 16; i++)
2792 temp_name[i] = readb(&(tb->ServerName[i]));
2793 temp_name[16] = '\0';
2794 printk(" Server Name = %s\n", temp_name);
2795 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2797 #endif /* CCISS_DEBUG */
/*
 * find_PCI_BAR_index: translate a config-space BAR register offset
 * (e.g. PCI_BASE_ADDRESS_2) into a BAR index usable with
 * pci_resource_start().  Walks the device's resources, accounting for
 * 64-bit memory BARs consuming two register slots.  Returns the index,
 * or an error for a reserved memory type (exact return on the invalid
 * path is outside this extract).
 */
2799 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2801 int i, offset, mem_type, bar_type;
2802 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2805 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2806 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
/* I/O BARs are always a single 32-bit register. */
2807 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2810 mem_type = pci_resource_flags(pdev, i) &
2811 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2813 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2814 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2815 offset += 4; /* 32 bit */
/* 64-bit memory BARs occupy two consecutive config registers. */
2817 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2820 default: /* reserved in PCI 2.2 */
2822 "Base address is invalid\n");
2827 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2833 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2834 * controllers that are capable. If not, we use IO-APIC mode.
/*
 * cciss_interrupt_mode: choose the interrupt delivery mechanism.
 * Prefers MSI-X (4 vectors), then MSI, then falls back to the legacy
 * pin-based IRQ assigned in pdev->irq.  Known-broken boards that
 * advertise MSI but don't support it are blacklisted by board_id.
 */
2837 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2838 struct pci_dev *pdev, __u32 board_id)
2840 #ifdef CONFIG_PCI_MSI
2842 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2846 /* Some boards advertise MSI but don't really support it */
2847 if ((board_id == 0x40700E11) ||
2848 (board_id == 0x40800E11) ||
2849 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2850 goto default_int_mode;
2852 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2853 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
/* Success: record all four assigned vectors. */
2855 c->intr[0] = cciss_msix_entries[0].vector;
2856 c->intr[1] = cciss_msix_entries[1].vector;
2857 c->intr[2] = cciss_msix_entries[2].vector;
2858 c->intr[3] = cciss_msix_entries[3].vector;
/* Positive err means fewer vectors available than requested. */
2863 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2864 "available\n", err);
2865 goto default_int_mode;
2867 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2869 goto default_int_mode;
2872 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2873 if (!pci_enable_msi(pdev)) {
2876 printk(KERN_WARNING "cciss: MSI init failed\n");
2880 #endif /* CONFIG_PCI_MSI */
2881 /* if we get here we're going to use the default interrupt mode */
2882 c->intr[SIMPLE_MODE_INT] = pdev->irq;
/*
 * cciss_pci_init: bring the controller's PCI side up.
 * Enables the device, claims its regions, derives the board_id from the
 * subsystem IDs, selects the interrupt mode, maps the register window
 * and config table, waits for firmware ready, identifies the board
 * against the products[] table, applies per-board prefetch quirks, and
 * finally switches the controller into Simple transport mode.
 * Error paths release the PCI regions (err_out_free_res) but deliberately
 * never call pci_disable_device() — see the comment near the end.
 * NOTE(review): this extract is missing intermediate source lines.
 */
2886 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2888 ushort subsystem_vendor_id, subsystem_device_id, command;
2889 __u32 board_id, scratchpad = 0;
2891 __u32 cfg_base_addr;
2892 __u64 cfg_base_addr_index;
2895 /* check to see if controller has been disabled */
2896 /* BEFORE trying to enable it */
2897 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
/* Bit 1 of PCI_COMMAND is memory-space enable; clear => disabled board. */
2898 if (!(command & 0x02)) {
2900 "cciss: controller appears to be disabled\n");
2904 err = pci_enable_device(pdev);
2906 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2910 err = pci_request_regions(pdev, "cciss");
2912 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2917 subsystem_vendor_id = pdev->subsystem_vendor;
2918 subsystem_device_id = pdev->subsystem_device;
/* board_id = subsystem device in the high 16 bits, vendor in the low. */
2919 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2920 subsystem_vendor_id);
2923 printk("command = %x\n", command);
2924 printk("irq = %x\n", pdev->irq);
2925 printk("board_id = %x\n", board_id);
2926 #endif /* CCISS_DEBUG */
2928 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2929 * else we use the IO-APIC interrupt assigned to us by system ROM.
2931 cciss_interrupt_mode(c, pdev, board_id);
2934 * Memory base addr is first addr , the second points to the config
2938 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2940 printk("address 0 = %x\n", c->paddr);
2941 #endif /* CCISS_DEBUG */
2942 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2944 /* Wait for the board to become ready. (PCI hotplug needs this.)
2945 * We poll for up to 120 secs, once per 100ms. */
2946 for (i = 0; i < 1200; i++) {
2947 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2948 if (scratchpad == CCISS_FIRMWARE_READY)
2950 set_current_state(TASK_INTERRUPTIBLE);
2951 schedule_timeout(HZ / 10); /* wait 100ms */
2953 if (scratchpad != CCISS_FIRMWARE_READY) {
2954 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2956 goto err_out_free_res;
2959 /* get the address index number */
/* The low 16 bits of the CTCFG register name the BAR holding the
 * config table; find_PCI_BAR_index() maps that to a resource index. */
2960 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2961 cfg_base_addr &= (__u32) 0x0000ffff;
2963 printk("cfg base address = %x\n", cfg_base_addr);
2964 #endif /* CCISS_DEBUG */
2965 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2967 printk("cfg base address index = %x\n", cfg_base_addr_index);
2968 #endif /* CCISS_DEBUG */
2969 if (cfg_base_addr_index == -1) {
2970 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2972 goto err_out_free_res;
2975 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2977 printk("cfg offset = %x\n", cfg_offset);
2978 #endif /* CCISS_DEBUG */
2979 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2980 cfg_base_addr_index) +
2981 cfg_offset, sizeof(CfgTable_struct));
2982 c->board_id = board_id;
2985 print_cfg_table(c->cfgtable);
2986 #endif /* CCISS_DEBUG */
/* Identify this board: copy name/access methods/command count from the
 * matching products[] entry. */
2988 for (i = 0; i < ARRAY_SIZE(products); i++) {
2989 if (board_id == products[i].board_id) {
2990 c->product_name = products[i].product_name;
2991 c->access = *(products[i].access);
2992 c->nr_cmds = products[i].nr_cmds;
/* Sanity-check the config table signature ("CISS") via MMIO reads. */
2996 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2997 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2998 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2999 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3000 printk("Does not appear to be a valid CISS config table\n");
3002 goto err_out_free_res;
3004 /* We didn't find the controller in our list. We know the
3005 * signature is valid. If it's an HP device let's try to
3006 * bind to the device and fire it up. Otherwise we bail.
3008 if (i == ARRAY_SIZE(products)) {
/* Unknown HP board: fall back to the last (newest) products[] entry. */
3009 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3010 c->product_name = products[i-1].product_name;
3011 c->access = *(products[i-1].access);
3012 c->nr_cmds = products[i-1].nr_cmds;
3013 printk(KERN_WARNING "cciss: This is an unknown "
3014 "Smart Array controller.\n"
3015 "cciss: Please update to the latest driver "
3016 "available from www.hp.com.\n");
3018 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3019 " to access the Smart Array controller %08lx\n"
3020 , (unsigned long)board_id);
3022 goto err_out_free_res;
3027 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3029 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3031 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3035 /* Disabling DMA prefetch for the P600
3036 * An ASIC bug may result in a prefetch beyond
3039 if(board_id == 0x3225103C) {
3041 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3042 dma_prefetch |= 0x8000;
3043 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3047 printk("Trying to put board into Simple mode\n");
3048 #endif /* CCISS_DEBUG */
3049 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3050 /* Update the field, and then ring the doorbell */
3051 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3052 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3054 /* under certain very rare conditions, this can take awhile.
3055 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3056 * as we enter this code.) */
3057 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3058 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3060 /* delay and try again */
3061 set_current_state(TASK_INTERRUPTIBLE);
3062 schedule_timeout(10);
3066 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3067 readl(c->vaddr + SA5_DOORBELL));
3068 #endif /* CCISS_DEBUG */
3070 print_cfg_table(c->cfgtable);
3071 #endif /* CCISS_DEBUG */
/* Verify the transport switch actually took effect. */
3073 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3074 printk(KERN_WARNING "cciss: unable to get board into"
3077 goto err_out_free_res;
3083 * Deliberately omit pci_disable_device(): it does something nasty to
3084 * Smart Array controllers that pci_enable_device does not undo
3086 pci_release_regions(pdev);
3091 * Gets information about the local volumes attached to the controller.
/*
 * cciss_getgeometry: discover this controller's logical drives.
 * Queries the firmware version (CISS_INQUIRY) and logical-volume list
 * (CISS_REPORT_LOG), then for each LUN reads its capacity (switching to
 * 16-byte CDBs for >2TB volumes) and fills in the per-drive geometry.
 * Both sendcmd() failures are tolerated with warnings; discovery simply
 * proceeds with whatever data was obtained.
 */
3093 static void cciss_getgeometry(int cntl_num)
3095 ReportLunData_struct *ld_buff;
3096 InquiryData_struct *inq_buff;
3102 sector_t total_size;
3104 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3105 if (ld_buff == NULL) {
3106 printk(KERN_ERR "cciss: out of memory\n");
3109 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3110 if (inq_buff == NULL) {
3111 printk(KERN_ERR "cciss: out of memory\n");
3115 /* Get the firmware version */
3116 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3117 sizeof(InquiryData_struct), 0, 0, 0, NULL,
/* Firmware revision lives at inquiry data bytes 32..35. */
3119 if (return_code == IO_OK) {
3120 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3121 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3122 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3123 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3124 } else { /* send command failed */
3126 printk(KERN_WARNING "cciss: unable to determine firmware"
3127 " version of controller\n");
3129 /* Get the number of logical volumes */
3130 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3131 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3134 if (return_code == IO_OK) {
3136 printk("LUN Data\n--------------------------\n");
3137 #endif /* CCISS_DEBUG */
/* LUNListLength is a big-endian 4-byte count; assemble it by hand. */
3140 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3142 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3144 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3145 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3146 } else { /* reading number of logical volumes failed */
3148 printk(KERN_WARNING "cciss: report logical volume"
3149 " command failed\n");
3152 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes pre entry
3153 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3155 "ciss: only %d number of logical volumes supported\n",
3157 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3160 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3161 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3162 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3163 hba[cntl_num]->num_luns);
3164 #endif /* CCISS_DEBUG */
3166 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3167 for (i = 0; i < CISS_MAX_LUN; i++) {
3168 if (i < hba[cntl_num]->num_luns) {
/* LUN ID is stored little-endian in bytes [3..0] of the entry. */
3169 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3171 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3173 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3175 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3177 hba[cntl_num]->drv[i].LunID = lunid;
3180 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3181 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3182 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3183 hba[cntl_num]->drv[i].LunID);
3184 #endif /* CCISS_DEBUG */
3186 /* testing to see if 16-byte CDBs are already being used */
3187 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3188 cciss_read_capacity_16(cntl_num, i, 0,
3189 &total_size, &block_size);
3192 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3194 /* If read_capacity returns all F's the logical is >2TB */
3195 /* so we switch to 16-byte CDBs for all read/write ops */
3196 if(total_size == 0xFFFFFFFFULL) {
3197 cciss_read_capacity_16(cntl_num, i, 0,
3198 &total_size, &block_size);
3199 hba[cntl_num]->cciss_read = CCISS_READ_16;
3200 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3202 hba[cntl_num]->cciss_read = CCISS_READ_10;
3203 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3206 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3207 block_size, inq_buff,
3208 &hba[cntl_num]->drv[i]);
3210 /* initialize raid_level to indicate a free space */
3211 hba[cntl_num]->drv[i].raid_level = -1;
3218 /* Function to find the first free pointer into our hba[] array */
3219 /* Returns -1 if no free entries are left. */
/*
 * alloc_cciss_hba: allocate a zeroed ctlr_info_t and its first gendisk
 * for the first free slot in hba[].  Returns the slot index on success,
 * -1 on exhaustion or allocation failure (per the comment above).
 */
3220 static int alloc_cciss_hba(void)
3224 for (i = 0; i < MAX_CTLR; i++) {
3227 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3230 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3237 printk(KERN_WARNING "cciss: This driver supports a maximum"
3238 " of %d controllers.\n", MAX_CTLR);
3241 printk(KERN_ERR "cciss: out of memory.\n");
/*
 * free_hba: release controller slot i — drop every gendisk reference
 * and (outside this extract) free the ctlr_info_t and clear hba[i].
 */
3245 static void free_hba(int i)
3247 ctlr_info_t *p = hba[i];
3251 for (n = 0; n < CISS_MAX_LUN; n++)
3252 put_disk(p->gendisk[n]);
3257 * This is it. Find all the controllers and register them. I really hate
3258 * stealing all these major device numbers.
3259 * returns the number of block devices registered.
/*
 * cciss_init_one: PCI probe routine.  Allocates an hba[] slot, performs
 * PCI init, sets the DMA mask (64-bit preferred, 32-bit fallback),
 * registers the block major, hooks the interrupt handler, allocates the
 * command/error-info DMA pools, scans for logical drives, and creates a
 * request queue plus gendisk per drive.  The unwind path frees resources
 * in reverse order of acquisition via goto labels (mostly outside this
 * extract) and deliberately skips pci_disable_device().
 * NOTE(review): intermediate source lines are missing from this extract.
 */
3261 static int __devinit cciss_init_one(struct pci_dev *pdev,
3262 const struct pci_device_id *ent)
3269 i = alloc_cciss_hba();
3273 hba[i]->busy_initializing = 1;
3275 if (cciss_pci_init(hba[i], pdev) != 0)
3278 sprintf(hba[i]->devname, "cciss%d", i);
3280 hba[i]->pdev = pdev;
3282 /* configure PCI DMA stuff */
3283 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3285 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3288 printk(KERN_ERR "cciss: no suitable DMA available\n");
3293 * register with the major number, or get a dynamic major number
3294 * by passing 0 as argument. This is done for greater than
3295 * 8 controller support.
3297 if (i < MAX_CTLR_ORIG)
3298 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3299 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3300 if (rc == -EBUSY || rc == -EINVAL) {
3302 "cciss: Unable to get major number %d for %s "
3303 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
/* Dynamic major: register_blkdev(0, ...) returned the assigned major. */
3306 if (i >= MAX_CTLR_ORIG)
3310 /* make sure the board interrupts are off */
3311 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3312 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3313 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3314 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3315 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3319 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3320 hba[i]->devname, pdev->device, pci_name(pdev),
3321 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
/* One bit per command slot; rounded up to whole unsigned longs. */
3323 hba[i]->cmd_pool_bits =
3324 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3325 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3326 hba[i]->cmd_pool = (CommandList_struct *)
3327 pci_alloc_consistent(hba[i]->pdev,
3328 hba[i]->nr_cmds * sizeof(CommandList_struct),
3329 &(hba[i]->cmd_pool_dhandle));
3330 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3331 pci_alloc_consistent(hba[i]->pdev,
3332 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3333 &(hba[i]->errinfo_pool_dhandle));
3334 if ((hba[i]->cmd_pool_bits == NULL)
3335 || (hba[i]->cmd_pool == NULL)
3336 || (hba[i]->errinfo_pool == NULL)) {
3337 printk(KERN_ERR "cciss: out of memory");
3340 #ifdef CONFIG_CISS_SCSI_TAPE
/* Room for every command plus a few extra saved-reject completions. */
3341 hba[i]->scsi_rejects.complete =
3342 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3343 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3344 if (hba[i]->scsi_rejects.complete == NULL) {
3345 printk(KERN_ERR "cciss: out of memory");
3349 spin_lock_init(&hba[i]->lock);
3351 /* Initialize the pdev driver private data.
3352 have it point to hba[i]. */
3353 pci_set_drvdata(pdev, hba[i]);
3354 /* command and error info recs zeroed out before
3356 memset(hba[i]->cmd_pool_bits, 0,
3357 ((hba[i]->nr_cmds + BITS_PER_LONG -
3358 1) / BITS_PER_LONG) * sizeof(unsigned long));
3361 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3362 #endif /* CCISS_DEBUG */
3364 cciss_getgeometry(i);
3366 cciss_scsi_setup(i);
3368 /* Turn the interrupts on so we can service requests */
3369 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3373 hba[i]->cciss_max_sectors = 2048;
3375 hba[i]->busy_initializing = 0;
/* Per-drive setup loop: one request queue + gendisk per logical drive. */
3378 drive_info_struct *drv = &(hba[i]->drv[j]);
3379 struct gendisk *disk = hba[i]->gendisk[j];
3382 /* Check if the disk was allocated already */
3384 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3385 disk = hba[i]->gendisk[j];
3388 /* Check that the disk was able to be allocated */
3390 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3394 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3397 "cciss: unable to allocate queue for disk %d\n",
3403 q->backing_dev_info.ra_pages = READ_AHEAD;
3404 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3406 /* This is a hardware imposed limit. */
3407 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3409 /* This is a limit in the driver and could be eliminated. */
3410 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3412 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3414 blk_queue_softirq_done(q, cciss_softirq_done);
3416 q->queuedata = hba[i];
3417 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3418 disk->major = hba[i]->major;
3419 disk->first_minor = j << NWD_SHIFT;
3420 disk->fops = &cciss_fops;
3422 disk->private_data = drv;
3423 disk->driverfs_dev = &pdev->dev;
3424 /* we must register the controller even if no disks exist */
3425 /* this is for the online array utilities */
3426 if (!drv->heads && j)
3428 blk_queue_hardsect_size(q, drv->block_size);
3429 set_capacity(disk, drv->nr_blocks);
3432 } while (j <= hba[i]->highest_lun);
/* ---- error unwind: free in reverse order of acquisition ---- */
3437 #ifdef CONFIG_CISS_SCSI_TAPE
3438 kfree(hba[i]->scsi_rejects.complete);
3440 kfree(hba[i]->cmd_pool_bits);
3441 if (hba[i]->cmd_pool)
3442 pci_free_consistent(hba[i]->pdev,
3443 hba[i]->nr_cmds * sizeof(CommandList_struct),
3444 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3445 if (hba[i]->errinfo_pool)
3446 pci_free_consistent(hba[i]->pdev,
3447 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3448 hba[i]->errinfo_pool,
3449 hba[i]->errinfo_pool_dhandle);
3450 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3452 unregister_blkdev(hba[i]->major, hba[i]->devname);
3454 hba[i]->busy_initializing = 0;
3455 /* cleanup any queues that may have been initialized */
3456 for (j=0; j <= hba[i]->highest_lun; j++){
3457 drive_info_struct *drv = &(hba[i]->drv[j]);
3459 blk_cleanup_queue(drv->queue);
3462 * Deliberately omit pci_disable_device(): it does something nasty to
3463 * Smart Array controllers that pci_enable_device does not undo
3465 pci_release_regions(pdev);
3466 pci_set_drvdata(pdev, NULL);
/*
 * cciss_remove_one: PCI remove/shutdown routine.  Unregisters the block
 * major and proc entry, tears down every gendisk and queue, unhooks the
 * SCSI tape subsystem, flushes the controller's battery-backed cache,
 * frees the IRQ, disables MSI/MSI-X, unmaps registers, and releases the
 * DMA pools.  pci_disable_device() is deliberately skipped (see below).
 */
3471 static void cciss_remove_one(struct pci_dev *pdev)
3473 ctlr_info_t *tmp_ptr;
3478 if (pci_get_drvdata(pdev) == NULL) {
3479 printk(KERN_ERR "cciss: Unable to remove device \n");
3482 tmp_ptr = pci_get_drvdata(pdev);
3484 if (hba[i] == NULL) {
3485 printk(KERN_ERR "cciss: device appears to "
3486 "already be removed \n");
3490 remove_proc_entry(hba[i]->devname, proc_cciss);
3491 unregister_blkdev(hba[i]->major, hba[i]->devname);
3493 /* remove it from the disk list */
3494 for (j = 0; j < CISS_MAX_LUN; j++) {
3495 struct gendisk *disk = hba[i]->gendisk[j];
3497 request_queue_t *q = disk->queue;
3499 if (disk->flags & GENHD_FL_UP)
3502 blk_cleanup_queue(q);
3506 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3508 /* Turn board interrupts off and send the flush cache command */
3509 /* sendcmd will turn off interrupt, and send the flush...
3510 * To write all data in the battery backed cache to disks */
3511 memset(flush_buf, 0, 4);
3512 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3514 if (return_code == IO_OK) {
3515 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3517 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
/* NOTE(review): frees intr[2] here, while init requested
 * intr[SIMPLE_MODE_INT] — presumably the same slot; confirm. */
3519 free_irq(hba[i]->intr[2], hba[i]);
3521 #ifdef CONFIG_PCI_MSI
3522 if (hba[i]->msix_vector)
3523 pci_disable_msix(hba[i]->pdev);
3524 else if (hba[i]->msi_vector)
3525 pci_disable_msi(hba[i]->pdev);
3526 #endif /* CONFIG_PCI_MSI */
3528 iounmap(hba[i]->vaddr);
3530 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3531 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3532 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3533 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3534 kfree(hba[i]->cmd_pool_bits);
3535 #ifdef CONFIG_CISS_SCSI_TAPE
3536 kfree(hba[i]->scsi_rejects.complete);
3539 * Deliberately omit pci_disable_device(): it does something nasty to
3540 * Smart Array controllers that pci_enable_device does not undo
3542 pci_release_regions(pdev);
3543 pci_set_drvdata(pdev, NULL);
/*
 * PCI driver glue.  cciss_remove_one doubles as the shutdown hook so the
 * controller cache is flushed on reboot/poweroff as well as on removal.
 */
3547 static struct pci_driver cciss_pci_driver = {
3549 .probe = cciss_init_one,
3550 .remove = __devexit_p(cciss_remove_one),
3551 .id_table = cciss_pci_device_id, /* id_table */
3552 .shutdown = cciss_remove_one,
3556 * This is it. Register the PCI driver information for the cards we control
3557 * the OS will call our registered routines when it finds one of our cards.
/* Module entry point: print the banner and register with the PCI core. */
3559 static int __init cciss_init(void)
3561 printk(KERN_INFO DRIVER_NAME "\n");
3563 /* Register for our PCI devices */
3564 return pci_register_driver(&cciss_pci_driver);
/*
 * cciss_cleanup: module exit.  Unregisters the PCI driver (which invokes
 * cciss_remove_one per device), then sweeps hba[] for any controller the
 * core somehow left behind and removes it by hand, before deleting the
 * top-level /proc/driver/cciss entry.
 */
3567 static void __exit cciss_cleanup(void)
3571 pci_unregister_driver(&cciss_pci_driver);
3572 /* double check that all controller entrys have been removed */
3573 for (i = 0; i < MAX_CTLR; i++) {
3574 if (hba[i] != NULL) {
3575 printk(KERN_WARNING "cciss: had to remove"
3576 " controller %d\n", i);
3577 cciss_remove_one(hba[i]->pdev);
3580 remove_proc_entry("cciss", proc_root_driver);
/*
 * fail_all_cmds: last-resort handler for a dead controller.  Marks the
 * board not alive, disables the PCI device, then moves every queued
 * command to the completion queue and fails each one with
 * CMD_HARDWARE_ERR, completing it per its type (block request, ioctl
 * waiter, or SCSI command).  Runs under the per-controller lock.
 */
3583 static void fail_all_cmds(unsigned long ctlr)
3585 /* If we get here, the board is apparently dead. */
3586 ctlr_info_t *h = hba[ctlr];
3587 CommandList_struct *c;
3588 unsigned long flags;
3590 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3591 h->alive = 0; /* the controller apparently died... */
3593 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3595 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3597 /* move everything off the request queue onto the completed queue */
3598 while ((c = h->reqQ) != NULL) {
3599 removeQ(&(h->reqQ), c);
3601 addQ(&(h->cmpQ), c);
3604 /* Now, fail everything on the completed queue with a HW error */
3605 while ((c = h->cmpQ) != NULL) {
3606 removeQ(&h->cmpQ, c);
3607 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3608 if (c->cmd_type == CMD_RWREQ) {
3609 complete_command(h, c, 0);
3610 } else if (c->cmd_type == CMD_IOCTL_PEND)
3611 complete(c->waiting);
3612 #ifdef CONFIG_CISS_SCSI_TAPE
3613 else if (c->cmd_type == CMD_SCSI)
3614 complete_scsi_command(c, 0, 0);
3617 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Register module entry/exit points with the kernel. */
3621 module_init(cciss_init);
3622 module_exit(cciss_cleanup);