2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58 MODULE_VERSION("3.6.14");
59 MODULE_LICENSE("GPL");
61 #include "cciss_cmd.h"
63 #include <linux/cciss_ioctl.h>
65 /* define the PCI info for the cards we can control */
65 /* define the PCI info for the cards we can control */
/* Columns: PCI vendor ID, PCI device ID, subsystem vendor ID, subsystem device ID.
 * The subsystem (vendor, device) pair is what distinguishes the individual boards;
 * the matching marketing names live in the products[] table below.
 * NOTE(review): this chunk is missing the terminating {0,} sentinel line —
 * confirm against the full file. */
66 static const struct pci_device_id cciss_pci_device_id[] = {
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
/* Export the table so module autoloading (modprobe via hotplug) can match boards. */
89 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
91 /* board_id = Subsystem Device ID & Vendor ID
92 * product = Marketing Name for the board
93 * access = Address of the struct of function pointers
/* board_id here is (subsystem device ID << 16) | subsystem vendor ID, matching
 * the third/fourth columns of cciss_pci_device_id[] above (e.g. 0x40700E11 is
 * subdevice 0x4070, vendor 0x0E11). Each entry maps a board to its marketing
 * name and register-access function table. Several subdevice IDs deliberately
 * share the "Smart Array E200i" name.
 * NOTE(review): the closing "};" of this array is outside this chunk. */
95 static struct board_type products[] = {
96 {0x40700E11, "Smart Array 5300", &SA5_access},
97 {0x40800E11, "Smart Array 5i", &SA5B_access},
98 {0x40820E11, "Smart Array 532", &SA5B_access},
99 {0x40830E11, "Smart Array 5312", &SA5B_access},
100 {0x409A0E11, "Smart Array 641", &SA5_access},
101 {0x409B0E11, "Smart Array 642", &SA5_access},
102 {0x409C0E11, "Smart Array 6400", &SA5_access},
103 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
104 {0x40910E11, "Smart Array 6i", &SA5_access},
105 {0x3225103C, "Smart Array P600", &SA5_access},
106 {0x3223103C, "Smart Array P800", &SA5_access},
107 {0x3234103C, "Smart Array P400", &SA5_access},
108 {0x3235103C, "Smart Array P400i", &SA5_access},
109 {0x3211103C, "Smart Array E200i", &SA5_access},
110 {0x3212103C, "Smart Array E200", &SA5_access},
111 {0x3213103C, "Smart Array E200i", &SA5_access},
112 {0x3214103C, "Smart Array E200i", &SA5_access},
113 {0x3215103C, "Smart Array E200i", &SA5_access},
114 {0x3233103C, "Smart Array E500", &SA5_access},
117 /* How long to wait (in milliseconds) for board to go into simple mode */
118 #define MAX_CONFIG_WAIT 30000
/* Shorter bound used when waiting for doorbell acks inside ioctl paths. */
119 #define MAX_IOCTL_CONFIG_WAIT 1000
121 /*define how many times we will try a command because of bus resets */
122 #define MAX_CMD_RETRIES 3
/* Read-ahead in pages for the request queues (see cciss_update_drive_info). */
124 #define READ_AHEAD 1024
125 #define NR_CMDS 384 /* #commands that can be outstanding */
128 /* Originally cciss driver only supports 8 major numbers */
129 #define MAX_CTLR_ORIG 8
/* One ctlr_info_t per controller, indexed by controller number. */
131 static ctlr_info_t *hba[MAX_CTLR];
133 static void do_cciss_request(request_queue_t *q);
134 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
135 static int cciss_open(struct inode *inode, struct file *filep);
136 static int cciss_release(struct inode *inode, struct file *filep);
137 static int cciss_ioctl(struct inode *inode, struct file *filep,
138 unsigned int cmd, unsigned long arg);
139 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
141 static int revalidate_allvol(ctlr_info_t *host);
142 static int cciss_revalidate(struct gendisk *disk);
143 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
144 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
147 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
148 sector_t *total_size, unsigned int *block_size);
149 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
150 sector_t *total_size, unsigned int *block_size);
151 static void cciss_geometry_inquiry(int ctlr, int logvol,
152 int withirq, sector_t total_size,
153 unsigned int block_size, InquiryData_struct *inq_buff,
154 drive_info_struct *drv);
155 static void cciss_getgeometry(int cntl_num);
156 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
158 static void start_io(ctlr_info_t *h);
159 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
160 unsigned int use_unit_num, unsigned int log_unit,
161 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
162 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
163 unsigned int use_unit_num, unsigned int log_unit,
164 __u8 page_code, int cmd_type);
166 static void fail_all_cmds(unsigned long ctlr);
168 #ifdef CONFIG_PROC_FS
169 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
170 int length, int *eof, void *data);
171 static void cciss_procinit(int i);
173 static void cciss_procinit(int i)
176 #endif /* CONFIG_PROC_FS */
179 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
/* Block-device entry points handed to the gendisk layer.
 * NOTE(review): chunk gaps — the .open initializer and the closing "};" are
 * not visible here; confirm against the full file. */
182 static struct block_device_operations cciss_fops = {
183 .owner = THIS_MODULE,
185 .release = cciss_release,
186 .ioctl = cciss_ioctl,
187 .getgeo = cciss_getgeo,
189 .compat_ioctl = cciss_compat_ioctl,
191 .revalidate_disk = cciss_revalidate,
195 * Enqueuing and dequeuing functions for cmdlists.
/* Append command c to the circular doubly-linked list headed by *Qptr.
 * c->next = c->prev = c makes a single-element ring (presumably the empty-queue
 * branch); otherwise c is spliced in before the head, i.e. at the tail.
 * Caller must hold the controller lock. NOTE(review): intermediate lines of
 * this function are missing from this chunk. */
197 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
201 c->next = c->prev = c;
203 c->prev = (*Qptr)->prev;
205 (*Qptr)->prev->next = c;
/* Unlink command c from the circular list headed by *Qptr and return it.
 * c->next != c distinguishes a multi-element ring from a single-element one;
 * the single-element / head-fixup handling falls in lines missing from this
 * chunk. Caller must hold the controller lock. */
210 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
211 CommandList_struct *c)
213 if (c && c->next != c) {
216 c->prev->next = c->next;
217 c->next->prev = c->prev;
224 #include "cciss_scsi.c" /* For SCSI tape support */
226 #ifdef CONFIG_PROC_FS
229 * Report information about this controller.
231 #define ENG_GIG 1000000000
/* Sectors per "engineering gigabyte" (10^9 bytes / 512-byte sector). */
232 #define ENG_GIG_FACTOR (ENG_GIG/512)
/* Index into raid_label[] for any raid_level the driver doesn't recognize. */
233 #define RAID_UNKNOWN 6
234 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
/* /proc/driver/cciss directory, created lazily by cciss_procinit(). */
238 static struct proc_dir_entry *proc_cciss;
/* read_proc handler: format controller status and a per-logical-volume size /
 * RAID-level table into 'buffer'. busy_configuring is used both as a guard
 * (bail out if someone else is reconfiguring) and as a marker while we read,
 * so the volume list can't change underneath us.
 * NOTE(review): several declarations/returns fall in lines missing from this
 * chunk (flags, size, pos, len, i, drv assignment in the loop). */
240 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
241 int length, int *eof, void *data)
246 ctlr_info_t *h = (ctlr_info_t *) data;
247 drive_info_struct *drv;
249 sector_t vol_sz, vol_sz_frac;
253 /* prevent displaying bogus info during configuration
254 * or deconfiguration of a logical volume
256 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
257 if (h->busy_configuring) {
258 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
261 h->busy_configuring = 1;
262 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
264 size = sprintf(buffer, "%s: HP %s Controller\n"
265 "Board ID: 0x%08lx\n"
266 "Firmware Version: %c%c%c%c\n"
268 "Logical drives: %d\n"
269 "Current Q depth: %d\n"
270 "Current # commands on controller: %d\n"
271 "Max Q depth since init: %d\n"
272 "Max # commands on controller since init: %d\n"
273 "Max SG entries since init: %d\n\n",
276 (unsigned long)h->board_id,
277 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
278 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
279 h->num_luns, h->Qdepth, h->commands_outstanding,
280 h->maxQsinceinit, h->max_outstanding, h->maxSG);
284 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
285 for (i = 0; i <= h->highest_lun; i++) {
/* sector_div returns the remainder and leaves the quotient in its first
 * argument, so vol_sz ends up in whole "GB" and vol_sz_frac in hundredths. */
291 vol_sz = drv->nr_blocks;
292 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
294 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
296 if (drv->raid_level > 5)
297 drv->raid_level = RAID_UNKNOWN;
298 size = sprintf(buffer + len, "cciss/c%dd%d:"
299 "\t%4u.%02uGB\tRAID %s\n",
300 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
301 raid_label[drv->raid_level]);
307 *start = buffer + offset;
/* Done reading: release the configuration guard. */
311 h->busy_configuring = 0;
/* write_proc handler for the controller's /proc entry. Copies at most
 * sizeof(cmd)-1 bytes from userspace, strips a trailing newline, and (when
 * SCSI tape support is built in) recognizes the single command "engage scsi".
 * NOTE(review): the return statements and cmd NUL-termination line are in
 * chunk gaps; the in-code comment at line 331 says the preceding (missing)
 * lines guarantee cmd is NUL-terminated before strlen(). */
316 cciss_proc_write(struct file *file, const char __user *buffer,
317 unsigned long count, void *data)
319 unsigned char cmd[80];
321 #ifdef CONFIG_CISS_SCSI_TAPE
322 ctlr_info_t *h = (ctlr_info_t *) data;
326 if (count > sizeof(cmd) - 1)
328 if (copy_from_user(cmd, buffer, count))
331 len = strlen(cmd); // above 3 lines ensure safety
332 if (len && cmd[len - 1] == '\n')
334 # ifdef CONFIG_CISS_SCSI_TAPE
335 if (strcmp("engage scsi", cmd) == 0) {
336 rc = cciss_engage_scsi(h->ctlr);
341 /* might be nice to have "disengage" too, but it's not
342 safely possible. (only 1 module use count, lock issues.) */
348 * Get us a file in /proc/cciss that says something about each controller.
349 * Create /proc/cciss if it doesn't exist yet.
/* Create /proc/driver/cciss (once) and a read/write entry for controller i.
 * NOTE(review): create_proc_read_entry() can return NULL; pde is dereferenced
 * unconditionally on the next line — the NULL check may be in a chunk gap,
 * confirm against the full file. */
351 static void __devinit cciss_procinit(int i)
353 struct proc_dir_entry *pde;
355 if (proc_cciss == NULL) {
356 proc_cciss = proc_mkdir("cciss", proc_root_driver);
361 pde = create_proc_read_entry(hba[i]->devname,
362 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
363 proc_cciss, cciss_proc_get_info, hba[i]);
364 pde->write_proc = cciss_proc_write;
366 #endif /* CONFIG_PROC_FS */
369 * For operations that cannot sleep, a command block is allocated at init,
370 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
371 * which ones are free or in use. For operations that can wait for kmalloc
372 * to possible sleep, this routine can be called with get_from_pool set to 0.
373 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
/* Allocate a command block plus its error-info buffer.
 * get_from_pool == 0: fresh DMA-coherent allocations (may sleep); the paired
 * cmd_free() must then be called with got_from_pool == 0.
 * get_from_pool != 0: grab a slot from the controller's preallocated pool
 * using a find/test_and_set_bit retry loop on cmd_pool_bits, and derive both
 * DMA handles by offsetting into the pool's base handles.
 * Returns NULL on allocation failure (NULL-check lines are in chunk gaps).
 * The final lines stash the 64-bit error-buffer DMA address, split into
 * lower/upper 32-bit halves, in the command's ErrDesc. */
375 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
377 CommandList_struct *c;
380 dma_addr_t cmd_dma_handle, err_dma_handle;
382 if (!get_from_pool) {
383 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
384 sizeof(CommandList_struct), &cmd_dma_handle);
387 memset(c, 0, sizeof(CommandList_struct));
391 c->err_info = (ErrorInfo_struct *)
392 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
395 if (c->err_info == NULL) {
/* err_info allocation failed: release the command block too. */
396 pci_free_consistent(h->pdev,
397 sizeof(CommandList_struct), c, cmd_dma_handle);
400 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
401 } else { /* get it out of the controllers pool */
404 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
/* Loop until we atomically win a free slot (another CPU may race us). */
407 } while (test_and_set_bit
408 (i & (BITS_PER_LONG - 1),
409 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
411 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
414 memset(c, 0, sizeof(CommandList_struct));
415 cmd_dma_handle = h->cmd_pool_dhandle
416 + i * sizeof(CommandList_struct);
417 c->err_info = h->errinfo_pool + i;
418 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
419 err_dma_handle = h->errinfo_pool_dhandle
420 + i * sizeof(ErrorInfo_struct);
426 c->busaddr = (__u32) cmd_dma_handle;
427 temp64.val = (__u64) err_dma_handle;
428 c->ErrDesc.Addr.lower = temp64.val32.lower;
429 c->ErrDesc.Addr.upper = temp64.val32.upper;
430 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
437 * Frees a command block that was previously allocated with cmd_alloc().
/* Release a command block from cmd_alloc(). got_from_pool must match the
 * get_from_pool flag used at allocation: standalone blocks are returned via
 * pci_free_consistent (error buffer first, reassembling its 64-bit DMA address
 * from the ErrDesc halves); pool blocks just clear their bit in cmd_pool_bits
 * (the computation of index i is in a chunk gap). */
439 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
444 if (!got_from_pool) {
445 temp64.val32.lower = c->ErrDesc.Addr.lower;
446 temp64.val32.upper = c->ErrDesc.Addr.upper;
447 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
448 c->err_info, (dma_addr_t) temp64.val);
449 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
450 c, (dma_addr_t) c->busaddr);
453 clear_bit(i & (BITS_PER_LONG - 1),
454 h->cmd_pool_bits + (i / BITS_PER_LONG));
/* Controller owning this disk; stored in the queue's queuedata at init. */
459 static inline ctlr_info_t *get_host(struct gendisk *disk)
461 return disk->queue->queuedata;
/* Per-logical-volume info; stored in the gendisk's private_data at init. */
464 static inline drive_info_struct *get_drv(struct gendisk *disk)
466 return disk->private_data;
470 * Open. Make sure the device is really there.
/* Open entry point. Rejects opens while the controller or this volume is
 * being (de)configured, and applies the special-case policy described in the
 * in-code comment: a zero-size (unconfigured) volume may only be opened by
 * CAP_SYS_ADMIN, and only as whole-device node 0 or a non-partition node with
 * a LUN ID — so array-config utilities can still reach the controller.
 * NOTE(review): the actual return statements (-EBUSY/-ENXIO/-EPERM, usage
 * counting) are in chunk gaps. */
472 static int cciss_open(struct inode *inode, struct file *filep)
474 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
475 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
478 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
479 #endif /* CCISS_DEBUG */
481 if (host->busy_initializing || drv->busy_configuring)
484 * Root is allowed to open raw volume zero even if it's not configured
485 * so array config can still work. Root is also allowed to open any
486 * volume that has a LUN ID, so it can issue IOCTL to reread the
487 * disk information. I don't think I really like this
488 * but I'm already using way to many device nodes to claim another one
489 * for "raw controller".
491 if (drv->nr_blocks == 0) {
492 if (iminor(inode) != 0) { /* not node 0? */
493 /* if not node 0 make sure it is a partition = 0 */
494 if (iminor(inode) & 0x0f) {
496 /* if it is, make sure we have a LUN ID */
497 } else if (drv->LunID == 0) {
501 if (!capable(CAP_SYS_ADMIN))
/* Release entry point. Looks up the controller and drive for the closing
 * device node; the usage-count bookkeeping lines are in chunk gaps. */
512 static int cciss_release(struct inode *inode, struct file *filep)
514 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
515 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
518 printk(KERN_DEBUG "cciss_release %s\n",
519 inode->i_bdev->bd_disk->disk_name);
520 #endif /* CCISS_DEBUG */
/* Helper for the compat path: forward to the native ioctl handler using the
 * file's inode. Surrounding lock/unlock lines (if any) are in chunk gaps. */
529 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
533 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
538 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
540 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
/* 32-bit compat ioctl dispatcher. Commands whose structures are layout-
 * compatible between 32- and 64-bit userspace are forwarded unchanged to
 * do_ioctl(); the two passthru commands carry pointers and need explicit
 * 32->64 translation. The default case (presumably -ENOIOCTLCMD) is in a
 * chunk gap. */
543 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
546 case CCISS_GETPCIINFO:
547 case CCISS_GETINTINFO:
548 case CCISS_SETINTINFO:
549 case CCISS_GETNODENAME:
550 case CCISS_SETNODENAME:
551 case CCISS_GETHEARTBEAT:
552 case CCISS_GETBUSTYPES:
553 case CCISS_GETFIRMVER:
554 case CCISS_GETDRIVVER:
555 case CCISS_REVALIDVOLS:
556 case CCISS_DEREGDISK:
557 case CCISS_REGNEWDISK:
559 case CCISS_RESCANDISK:
560 case CCISS_GETLUNINFO:
561 return do_ioctl(f, cmd, arg);
563 case CCISS_PASSTHRU32:
564 return cciss_ioctl32_passthru(f, cmd, arg);
565 case CCISS_BIG_PASSTHRU32:
566 return cciss_ioctl32_big_passthru(f, cmd, arg);
/* Translate a 32-bit CCISS_PASSTHRU into the native layout: build a 64-bit
 * IOCTL_Command_struct in compat_alloc_user_space()-allocated user memory,
 * widening the buf pointer via compat_ptr(), run the native ioctl, then copy
 * the error_info back into the caller's 32-bit struct.
 * NOTE(review): the copy_from_user()/copy_in_user() return values at lines
 * 585/588/591/605 appear unchecked (only the get_user/copy_to_user results
 * feed 'err') — possibly accumulated via "err |=" on lines missing from this
 * chunk; verify against the full file. */
573 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
576 IOCTL32_Command_struct __user *arg32 =
577 (IOCTL32_Command_struct __user *) arg;
578 IOCTL_Command_struct arg64;
579 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
585 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
586 sizeof(arg64.LUN_info));
588 copy_from_user(&arg64.Request, &arg32->Request,
589 sizeof(arg64.Request));
591 copy_from_user(&arg64.error_info, &arg32->error_info,
592 sizeof(arg64.error_info));
593 err |= get_user(arg64.buf_size, &arg32->buf_size);
594 err |= get_user(cp, &arg32->buf);
595 arg64.buf = compat_ptr(cp);
596 err |= copy_to_user(p, &arg64, sizeof(arg64));
601 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
605 copy_in_user(&arg32->error_info, &p->error_info,
606 sizeof(arg32->error_info));
/* Same translation as cciss_ioctl32_passthru but for CCISS_BIG_PASSTHRU:
 * additionally carries malloc_size. Shares the unchecked-copy concern noted
 * on the small-passthru converter (lines 625/628/631/646). */
612 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
615 BIG_IOCTL32_Command_struct __user *arg32 =
616 (BIG_IOCTL32_Command_struct __user *) arg;
617 BIG_IOCTL_Command_struct arg64;
618 BIG_IOCTL_Command_struct __user *p =
619 compat_alloc_user_space(sizeof(arg64));
625 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
626 sizeof(arg64.LUN_info));
628 copy_from_user(&arg64.Request, &arg32->Request,
629 sizeof(arg64.Request));
631 copy_from_user(&arg64.error_info, &arg32->error_info,
632 sizeof(arg64.error_info));
633 err |= get_user(arg64.buf_size, &arg32->buf_size);
634 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
635 err |= get_user(cp, &arg32->buf);
636 arg64.buf = compat_ptr(cp);
637 err |= copy_to_user(p, &arg64, sizeof(arg64));
642 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
646 copy_in_user(&arg32->error_info, &p->error_info,
647 sizeof(arg32->error_info));
/* HDIO_GETGEO support: report the fake CHS geometry computed at inquiry time
 * for this logical volume. (Any empty-geometry guard is in a chunk gap.) */
654 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
656 drive_info_struct *drv = get_drv(bdev->bd_disk);
661 geo->heads = drv->heads;
662 geo->sectors = drv->sectors;
663 geo->cylinders = drv->cylinders;
/* Main ioctl handler: one big switch over the CCISS_* commands. Info getters
 * copy controller/config-table state to userspace; setters (SETINTINFO,
 * SETNODENAME) require CAP_SYS_ADMIN, write the config table, ring the
 * SA5_DOORBELL with CFGTBL_ChangeReq and poll (bounded by
 * MAX_IOCTL_CONFIG_WAIT) for the controller to acknowledge; PASSTHRU and
 * BIG_PASSTHRU (CAP_SYS_RAWIO) build a CommandList, DMA-map the user data,
 * queue it, and sleep on a completion. NOTE(review): this chunk is heavily
 * gapped — many returns, error paths, declarations and closing braces fall in
 * missing lines; comments below describe only what is visible. */
670 static int cciss_ioctl(struct inode *inode, struct file *filep,
671 unsigned int cmd, unsigned long arg)
673 struct block_device *bdev = inode->i_bdev;
674 struct gendisk *disk = bdev->bd_disk;
675 ctlr_info_t *host = get_host(disk);
676 drive_info_struct *drv = get_drv(disk);
677 int ctlr = host->ctlr;
678 void __user *argp = (void __user *)arg;
681 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
682 #endif /* CCISS_DEBUG */
/* --- info getters: copy driver/PCI/config-table state to userspace --- */
685 case CCISS_GETPCIINFO:
687 cciss_pci_info_struct pciinfo;
691 pciinfo.domain = pci_domain_nr(host->pdev->bus);
692 pciinfo.bus = host->pdev->bus->number;
693 pciinfo.dev_fn = host->pdev->devfn;
694 pciinfo.board_id = host->board_id;
696 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
700 case CCISS_GETINTINFO:
702 cciss_coalint_struct intinfo;
706 readl(&host->cfgtable->HostWrite.CoalIntDelay);
708 readl(&host->cfgtable->HostWrite.CoalIntCount);
710 (argp, &intinfo, sizeof(cciss_coalint_struct)))
/* --- SETINTINFO: privileged update of interrupt coalescing parameters --- */
714 case CCISS_SETINTINFO:
716 cciss_coalint_struct intinfo;
722 if (!capable(CAP_SYS_ADMIN))
725 (&intinfo, argp, sizeof(cciss_coalint_struct)))
727 if ((intinfo.delay == 0) && (intinfo.count == 0))
729 // printk("cciss_ioctl: delay and count cannot be 0\n");
732 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
733 /* Update the field, and then ring the doorbell */
734 writel(intinfo.delay,
735 &(host->cfgtable->HostWrite.CoalIntDelay));
736 writel(intinfo.count,
737 &(host->cfgtable->HostWrite.CoalIntCount));
738 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
/* Poll until the controller clears the change-request bit, or time out. */
740 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
741 if (!(readl(host->vaddr + SA5_DOORBELL)
744 /* delay and try again */
747 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
748 if (i >= MAX_IOCTL_CONFIG_WAIT)
752 case CCISS_GETNODENAME:
754 NodeName_type NodeName;
759 for (i = 0; i < 16; i++)
761 readb(&host->cfgtable->ServerName[i]);
762 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
/* --- SETNODENAME: privileged, same doorbell/poll protocol as SETINTINFO --- */
766 case CCISS_SETNODENAME:
768 NodeName_type NodeName;
774 if (!capable(CAP_SYS_ADMIN))
778 (NodeName, argp, sizeof(NodeName_type)))
781 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
783 /* Update the field, and then ring the doorbell */
784 for (i = 0; i < 16; i++)
786 &host->cfgtable->ServerName[i]);
788 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
790 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
791 if (!(readl(host->vaddr + SA5_DOORBELL)
794 /* delay and try again */
797 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
798 if (i >= MAX_IOCTL_CONFIG_WAIT)
803 case CCISS_GETHEARTBEAT:
805 Heartbeat_type heartbeat;
809 heartbeat = readl(&host->cfgtable->HeartBeat);
811 (argp, &heartbeat, sizeof(Heartbeat_type)))
815 case CCISS_GETBUSTYPES:
817 BusTypes_type BusTypes;
821 BusTypes = readl(&host->cfgtable->BusTypes);
823 (argp, &BusTypes, sizeof(BusTypes_type)))
827 case CCISS_GETFIRMVER:
829 FirmwareVer_type firmware;
833 memcpy(firmware, host->firm_ver, 4);
836 (argp, firmware, sizeof(FirmwareVer_type)))
840 case CCISS_GETDRIVVER:
842 DriverVer_type DriverVer = DRIVER_VERSION;
848 (argp, &DriverVer, sizeof(DriverVer_type)))
/* REVALIDVOLS only allowed on the whole-device node of the controller disk. */
853 case CCISS_REVALIDVOLS:
854 if (bdev != bdev->bd_contains || drv != host->drv)
856 return revalidate_allvol(host);
858 case CCISS_GETLUNINFO:{
859 LogvolInfo_struct luninfo;
861 luninfo.LunID = drv->LunID;
862 luninfo.num_opens = drv->usage_count;
863 luninfo.num_parts = 0;
864 if (copy_to_user(argp, &luninfo,
865 sizeof(LogvolInfo_struct)))
869 case CCISS_DEREGDISK:
870 return rebuild_lun_table(host, disk);
873 return rebuild_lun_table(host, NULL);
/* --- CCISS_PASSTHRU: single-buffer raw command from userspace --- */
877 IOCTL_Command_struct iocommand;
878 CommandList_struct *c;
882 DECLARE_COMPLETION_ONSTACK(wait);
887 if (!capable(CAP_SYS_RAWIO))
891 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
893 if ((iocommand.buf_size < 1) &&
894 (iocommand.Request.Type.Direction != XFER_NONE)) {
897 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
898 /* Check kmalloc limits */
899 if (iocommand.buf_size > 128000)
902 if (iocommand.buf_size > 0) {
903 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
907 if (iocommand.Request.Type.Direction == XFER_WRITE) {
908 /* Copy the data into the buffer we created */
910 (buff, iocommand.buf, iocommand.buf_size)) {
915 memset(buff, 0, iocommand.buf_size);
917 if ((c = cmd_alloc(host, 0)) == NULL) {
921 // Fill in the command type
922 c->cmd_type = CMD_IOCTL_PEND;
923 // Fill in Command Header
924 c->Header.ReplyQueue = 0;	// unused in simple mode
925 if (iocommand.buf_size > 0)	// buffer to fill
927 c->Header.SGList = 1;
928 c->Header.SGTotal = 1;
929 } else // no buffers to fill
931 c->Header.SGList = 0;
932 c->Header.SGTotal = 0;
934 c->Header.LUN = iocommand.LUN_info;
935 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
937 // Fill in Request block
938 c->Request = iocommand.Request;
940 // Fill in the scatter gather information
941 if (iocommand.buf_size > 0) {
942 temp64.val = pci_map_single(host->pdev, buff,
944 PCI_DMA_BIDIRECTIONAL);
945 c->SG[0].Addr.lower = temp64.val32.lower;
946 c->SG[0].Addr.upper = temp64.val32.upper;
947 c->SG[0].Len = iocommand.buf_size;
948 c->SG[0].Ext = 0; // we are not chaining
952 /* Put the request on the tail of the request queue */
953 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
954 addQ(&host->reqQ, c);
957 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Sleep until the interrupt path completes the command. */
959 wait_for_completion(&wait);
961 /* unlock the buffers from DMA */
962 temp64.val32.lower = c->SG[0].Addr.lower;
963 temp64.val32.upper = c->SG[0].Addr.upper;
964 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
966 PCI_DMA_BIDIRECTIONAL);
968 /* Copy the error information out */
969 iocommand.error_info = *(c->err_info);
971 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
973 cmd_free(host, c, 0);
977 if (iocommand.Request.Type.Direction == XFER_READ) {
978 /* Copy the data out of the buffer we created */
980 (iocommand.buf, buff, iocommand.buf_size)) {
982 cmd_free(host, c, 0);
987 cmd_free(host, c, 0);
/* --- CCISS_BIG_PASSTHRU: same idea, but the user buffer is split across up
 * to MAXSGENTRIES kernel buffers of at most malloc_size bytes each --- */
990 case CCISS_BIG_PASSTHRU:{
991 BIG_IOCTL_Command_struct *ioc;
992 CommandList_struct *c;
993 unsigned char **buff = NULL;
994 int *buff_size = NULL;
1000 DECLARE_COMPLETION_ONSTACK(wait);
1003 BYTE __user *data_ptr;
1007 if (!capable(CAP_SYS_RAWIO))
1009 ioc = (BIG_IOCTL_Command_struct *)
1010 kmalloc(sizeof(*ioc), GFP_KERNEL);
1015 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1019 if ((ioc->buf_size < 1) &&
1020 (ioc->Request.Type.Direction != XFER_NONE)) {
1024 /* Check kmalloc limits using all SGs */
1025 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1029 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1034 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1039 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
/* Chop the user buffer into chunks of at most malloc_size bytes. */
1045 left = ioc->buf_size;
1046 data_ptr = ioc->buf;
1049 ioc->malloc_size) ? ioc->
1051 buff_size[sg_used] = sz;
1052 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1053 if (buff[sg_used] == NULL) {
1057 if (ioc->Request.Type.Direction == XFER_WRITE) {
1059 (buff[sg_used], data_ptr, sz)) {
1064 memset(buff[sg_used], 0, sz);
1070 if ((c = cmd_alloc(host, 0)) == NULL) {
1074 c->cmd_type = CMD_IOCTL_PEND;
1075 c->Header.ReplyQueue = 0;
1077 if (ioc->buf_size > 0) {
1078 c->Header.SGList = sg_used;
1079 c->Header.SGTotal = sg_used;
1081 c->Header.SGList = 0;
1082 c->Header.SGTotal = 0;
1084 c->Header.LUN = ioc->LUN_info;
1085 c->Header.Tag.lower = c->busaddr;
1087 c->Request = ioc->Request;
1088 if (ioc->buf_size > 0) {
1090 for (i = 0; i < sg_used; i++) {
1092 pci_map_single(host->pdev, buff[i],
1094 PCI_DMA_BIDIRECTIONAL);
1095 c->SG[i].Addr.lower =
1097 c->SG[i].Addr.upper =
1099 c->SG[i].Len = buff_size[i];
1100 c->SG[i].Ext = 0;	/* we are not chaining */
1104 /* Put the request on the tail of the request queue */
1105 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1106 addQ(&host->reqQ, c);
1109 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1110 wait_for_completion(&wait);
1111 /* unlock the buffers from DMA */
1112 for (i = 0; i < sg_used; i++) {
1113 temp64.val32.lower = c->SG[i].Addr.lower;
1114 temp64.val32.upper = c->SG[i].Addr.upper;
1115 pci_unmap_single(host->pdev,
1116 (dma_addr_t) temp64.val, buff_size[i],
1117 PCI_DMA_BIDIRECTIONAL);
1119 /* Copy the error information out */
1120 ioc->error_info = *(c->err_info);
1121 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1122 cmd_free(host, c, 0);
1126 if (ioc->Request.Type.Direction == XFER_READ) {
1127 /* Copy the data out of the buffer we created */
1128 BYTE __user *ptr = ioc->buf;
1129 for (i = 0; i < sg_used; i++) {
1131 (ptr, buff[i], buff_size[i])) {
1132 cmd_free(host, c, 0);
1136 ptr += buff_size[i];
1139 cmd_free(host, c, 0);
1143 for (i = 0; i < sg_used; i++)
1157 * revalidate_allvol is for online array config utilities. After a
1158 * utility reconfigures the drives in the array, it can use this function
1159 * (through an ioctl) to make the driver zap any previous disk structs for
1160 * that controller and get new ones.
1162 * Right now I'm using the getgeometry() function to do this, but this
1163 * function should probably be finer grained and allow you to revalidate one
1164 * particular logical volume (instead of all of them on a particular
/* Re-read the entire logical-volume configuration for a controller (used by
 * online array-config utilities via CCISS_REVALIDVOLS). Refuses with a
 * warning if any other opener holds the device (usage_count > 1), bumps
 * usage_count as a guard, tears down each gendisk's queue, zeroes all
 * drive_info, re-runs cciss_getgeometry() with controller interrupts masked,
 * then re-registers every volume (volume 0 always, as the controller node).
 * NOTE(review): the -EBUSY/-returns, free_disk handling, and several closing
 * braces are in chunk gaps. */
1167 static int revalidate_allvol(ctlr_info_t *host)
1169 int ctlr = host->ctlr, i;
1170 unsigned long flags;
1172 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1173 if (host->usage_count > 1) {
1174 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1175 printk(KERN_WARNING "cciss: Device busy for volume"
1176 " revalidation (usage=%d)\n", host->usage_count);
1179 host->usage_count++;
1180 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1182 for (i = 0; i < NWD; i++) {
1183 struct gendisk *disk = host->gendisk[i];
1185 request_queue_t *q = disk->queue;
1187 if (disk->flags & GENHD_FL_UP)
1190 blk_cleanup_queue(q);
1195 * Set the partition and block size structures for all volumes
1196 * on this controller to zero. We will reread all of this data
1198 memset(host->drv, 0, sizeof(drive_info_struct)
1201 * Tell the array controller not to give us any interrupts while
1202 * we check the new geometry. Then turn interrupts back on when
1205 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1206 cciss_getgeometry(ctlr);
1207 host->access.set_intr_mask(host, CCISS_INTR_ON);
1209 /* Loop through each real device */
1210 for (i = 0; i < NWD; i++) {
1211 struct gendisk *disk = host->gendisk[i];
1212 drive_info_struct *drv = &(host->drv[i]);
1213 /* we must register the controller even if no disks exist */
1214 /* this is for the online array utilities */
1215 if (!drv->heads && i)
1217 blk_queue_hardsect_size(drv->queue, drv->block_size);
1218 set_capacity(disk, drv->nr_blocks);
/* Drop the guard taken at the top. */
1221 host->usage_count--;
/* Walk a bio chain (presumably via xbh in a loop whose lines are missing
 * here), ending each bio with its full byte count; status != 0 means success
 * (0 passed to bio_endio), status == 0 maps to -EIO. bi_next is cleared
 * before bio_endio so the bio is not seen as chained. */
1225 static inline void complete_buffers(struct bio *bio, int status)
1228 struct bio *xbh = bio->bi_next;
1229 int nr_sectors = bio_sectors(bio);
1231 bio->bi_next = NULL;
1232 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
/* After commands are freed, restart per-volume request queues round-robin,
 * starting from h->next_to_run so every queue gets a fair turn. Bails out
 * immediately (and records where to resume) whenever the command pool is
 * exhausted again. Caller context: invoked from cciss_softirq_done() with
 * h->lock held. */
1237 static void cciss_check_queues(ctlr_info_t *h)
1239 int start_queue = h->next_to_run;
1242 /* check to see if we have maxed out the number of commands that can
1243 * be placed on the queue. If so then exit. We do this check here
1244 * in case the interrupt we serviced was from an ioctl and did not
1245 * free any new commands.
1247 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1250 /* We have room on the queue for more commands. Now we need to queue
1251 * them up. We will also keep track of the next queue to run so
1252 * that every queue gets a chance to be started first.
1254 for (i = 0; i < h->highest_lun + 1; i++) {
1255 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1256 /* make sure the disk has been added and the drive is real
1257 * because this can be called from the middle of init_one.
1259 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1261 blk_start_queue(h->gendisk[curr_queue]->queue);
1263 /* check to see if we have maxed out the number of commands
1264 * that can be placed on the queue.
1266 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
/* If we're back where we started, advance one slot so the same queue
 * isn't always serviced first next time. */
1267 if (curr_queue == start_queue) {
1269 (start_queue + 1) % (h->highest_lun + 1);
1272 h->next_to_run = curr_queue;
1276 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
/* Block-layer softirq completion: unmap every scatter-gather DMA page of the
 * finished command (direction chosen from the request), end the bios, update
 * disk statistics for filesystem requests, then under h->lock finish the
 * request, return the command to the controller pool (got_from_pool=1), and
 * restart any stalled per-volume queues. */
1281 static void cciss_softirq_done(struct request *rq)
1283 CommandList_struct *cmd = rq->completion_data;
1284 ctlr_info_t *h = hba[cmd->ctlr];
1285 unsigned long flags;
1289 if (cmd->Request.Type.Direction == XFER_READ)
1290 ddir = PCI_DMA_FROMDEVICE;
1292 ddir = PCI_DMA_TODEVICE;
1294 /* command did not need to be retried */
1295 /* unmap the DMA mapping for all the scatter gather elements */
1296 for (i = 0; i < cmd->Header.SGList; i++) {
1297 temp64.val32.lower = cmd->SG[i].Addr.lower;
1298 temp64.val32.upper = cmd->SG[i].Addr.upper;
1299 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1302 complete_buffers(rq->bio, rq->errors);
1304 if (blk_fs_request(rq)) {
1305 const int rw = rq_data_dir(rq);
1307 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1311 printk("Done with %p\n", rq);
1312 #endif /* CCISS_DEBUG */
1314 add_disk_randomness(rq->rq_disk);
1315 spin_lock_irqsave(&h->lock, flags);
1316 end_that_request_last(rq, rq->errors);
1317 cmd_free(h, cmd, 1);
1318 cciss_check_queues(h);
1319 spin_unlock_irqrestore(&h->lock, flags);
1322 /* This function will check the usage_count of the drive to be updated/added.
1323 * If the usage_count is zero then the drive information will be updated and
1324 * the disk will be re-registered with the kernel. If not then it will be
1325 * left alone for the next reboot. The exception to this is disk 0 which
1326 * will always be left registered with the kernel since it is also the
1327 * controller node. Any changes to disk 0 will show up on the next
1330 static void cciss_update_drive_info(int ctlr, int drv_index)
1332 ctlr_info_t *h = hba[ctlr];
1333 struct gendisk *disk;
1334 InquiryData_struct *inq_buff = NULL;
1335 unsigned int block_size;
1336 sector_t total_size;
1337 unsigned long flags = 0;
1340 /* if the disk already exists then deregister it before proceeding */
1341 if (h->drv[drv_index].raid_level != -1) {
/* busy_configuring is flipped under the controller lock so concurrent
 * paths leave this drive alone while it is being reconfigured. */
1342 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1343 h->drv[drv_index].busy_configuring = 1;
1344 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1345 ret = deregister_disk(h->gendisk[drv_index],
1346 &h->drv[drv_index], 0);
1347 h->drv[drv_index].busy_configuring = 0;
1350 /* If the disk is in use return */
1354 /* Get information about the disk and modify the driver structure */
1355 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1356 if (inq_buff == NULL)
1359 cciss_read_capacity(ctlr, drv_index, 1,
1360 &total_size, &block_size);
1362 /* total size = last LBA + 1 */
1363 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
1364 /* so we assume this volume this must be >2TB in size */
1365 if (total_size == (__u32) 0) {
/* Volume too large for READ CAPACITY(10): switch this controller to
 * 16-byte read/write CDBs and re-read the capacity. */
1366 cciss_read_capacity_16(ctlr, drv_index, 1,
1367 &total_size, &block_size);
1368 h->cciss_read = CCISS_READ_16;
1369 h->cciss_write = CCISS_WRITE_16;
1371 h->cciss_read = CCISS_READ_10;
1372 h->cciss_write = CCISS_WRITE_10;
1374 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1375 inq_buff, &h->drv[drv_index]);
1378 disk = h->gendisk[drv_index];
1379 set_capacity(disk, h->drv[drv_index].nr_blocks);
1381 /* if it's the controller it's already added */
1383 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1385 /* Set up queue information */
1386 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1387 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1389 /* This is a hardware imposed limit. */
1390 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1392 /* This is a limit in the driver and could be eliminated. */
1393 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1395 blk_queue_max_sectors(disk->queue, 512);
/* Completions are finished in softirq context via cciss_softirq_done(). */
1397 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1399 disk->queue->queuedata = hba[ctlr];
1401 blk_queue_hardsect_size(disk->queue,
1402 hba[ctlr]->drv[drv_index].block_size);
1404 h->drv[drv_index].queue = disk->queue;
1412 printk(KERN_ERR "cciss: out of memory\n");
1416 /* This function will find the first index of the controllers drive array
1417 * that has a -1 for the raid_level and will return that index. This is
1418 * where new drives will be added. If the index to be returned is greater
1419 * than the highest_lun index for the controller then highest_lun is set
1420 * to this new index. If there are no available indexes then -1 is returned.
1422 static int cciss_find_free_drive_index(int ctlr)
1426 for (i = 0; i < CISS_MAX_LUN; i++) {
/* raid_level == -1 marks an unused slot (set by deregister_disk()). */
1427 if (hba[ctlr]->drv[i].raid_level == -1) {
1428 if (i > hba[ctlr]->highest_lun)
1429 hba[ctlr]->highest_lun = i;
1436 /* This function will add and remove logical drives from the Logical
1437 * drive array of the controller and maintain persistence of ordering
1438 * so that mount points are preserved until the next reboot. This allows
1439 * for the removal of logical drives in the middle of the drive array
1440 * without a re-ordering of those drives.
1442 * h = The controller to perform the operations on
1443 * del_disk = The disk to remove if specified. If the value given
1444 * is NULL then no disk is removed.
1446 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1450 ReportLunData_struct *ld_buff = NULL;
1451 drive_info_struct *drv = NULL;
1458 unsigned long flags;
1460 /* Set busy_configuring flag for this operation */
1461 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1462 if (h->num_luns >= CISS_MAX_LUN) {
1463 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
/* Only one rebuild/deregister may run at a time per controller. */
1467 if (h->busy_configuring) {
1468 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1471 h->busy_configuring = 1;
1473 /* if del_disk is NULL then we are being called to add a new disk
1474 * and update the logical drive table. If it is not NULL then
1475 * we will check if the disk is in use or not.
1477 if (del_disk != NULL) {
1478 drv = get_drv(del_disk);
1479 drv->busy_configuring = 1;
1480 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1481 return_code = deregister_disk(del_disk, drv, 1);
1482 drv->busy_configuring = 0;
1483 h->busy_configuring = 0;
1486 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1487 if (!capable(CAP_SYS_RAWIO))
1490 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1491 if (ld_buff == NULL)
/* Ask the controller for its current list of logical volumes. */
1494 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1495 sizeof(ReportLunData_struct), 0,
1498 if (return_code == IO_OK) {
/* Assemble the 4-byte LUNListLength field into an integer byte count. */
1500 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1503 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1506 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1509 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1510 } else { /* reading number of logical volumes failed */
1511 printk(KERN_WARNING "cciss: report logical volume"
1512 " command failed\n");
1517 num_luns = listlength / 8; /* 8 bytes per entry */
1518 if (num_luns > CISS_MAX_LUN) {
1519 num_luns = CISS_MAX_LUN;
1520 printk(KERN_WARNING "cciss: more luns configured"
1521 " on controller than can be handled by"
1525 /* Compare controller drive array to drivers drive array.
1526 * Check for updates in the drive information and any new drives
1527 * on the controller.
1529 for (i = 0; i < num_luns; i++) {
/* Assemble the 32-bit LUN id; byte [0] is the least-significant byte. */
1535 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1537 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1539 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1540 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1542 /* Find if the LUN is already in the drive array
1543 * of the controller. If so then update its info
1544 * if not in use. If it does not exist then find
1545 * the first free index and add it.
1547 for (j = 0; j <= h->highest_lun; j++) {
1548 if (h->drv[j].LunID == lunid) {
1554 /* check if the drive was found already in the array */
1556 drv_index = cciss_find_free_drive_index(ctlr);
1557 if (drv_index == -1)
1561 h->drv[drv_index].LunID = lunid;
1562 cciss_update_drive_info(ctlr, drv_index);
1568 h->busy_configuring = 0;
1569 /* We return -1 here to tell the ACU that we have registered/updated
1570 * all of the drives that we can and to keep it from calling us
1575 printk(KERN_ERR "cciss: out of memory\n");
1579 /* This function will deregister the disk and its queue from the
1580 * kernel. It must be called with the controller lock held and the
1581 * drv structures busy_configuring flag set. Its parameters are:
1583 * disk = This is the disk to be deregistered
1584 * drv = This is the drive_info_struct associated with the disk to be
1585 * deregistered. It contains information about the disk used
1587 * clear_all = This flag determines whether or not the disk information
1588 * is going to be completely cleared out and the highest_lun
1589 * reset. Sometimes we want to clear out information about
1590 * the disk in preparation for re-adding it. In this case
1591 * the highest_lun should be left unchanged and the LunID
1592 * should not be cleared.
1594 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1597 ctlr_info_t *h = get_host(disk);
1599 if (!capable(CAP_SYS_RAWIO))
1602 /* make sure logical volume is NOT in use */
1603 if (clear_all || (h->gendisk[0] == disk)) {
1604 if (drv->usage_count > 1)
1606 } else if (drv->usage_count > 0)
1609 /* invalidate the devices and deregister the disk. If it is disk
1610 * zero do not deregister it but just zero out its values. This
1611 * allows us to delete disk zero but keep the controller registered.
1613 if (h->gendisk[0] != disk) {
1615 request_queue_t *q = disk->queue;
1616 if (disk->flags & GENHD_FL_UP)
1619 blk_cleanup_queue(q);
1626 /* zero out the disk size info */
1628 drv->block_size = 0;
/* raid_level == -1 marks this slot free for cciss_find_free_drive_index() */
1632 drv->raid_level = -1; /* This can be used as a flag variable to
1633 * indicate that this element of the drive
1638 /* check to see if it was the last disk */
1639 if (drv == h->drv + h->highest_lun) {
1640 /* if so, find the new highest lun */
1641 int i, newhighest = -1;
1642 for (i = 0; i < h->highest_lun; i++) {
1643 /* if the disk has size > 0, it is available */
1644 if (h->drv[i].heads)
1647 h->highest_lun = newhighest;
/*
 * fill_cmd() - populate a CommandList_struct for an internal command
 * (TYPE_CMD: inquiry, report luns, read capacity, cache flush) or a
 * message (TYPE_MSG: abort/reset/no-op), including the CDB, the LUN
 * addressing mode, and a single scatter-gather entry covering 'buff'.
 * Returns IO_OK on success (return paths are elided in this view).
 */
1655 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1656 1: address logical volume log_unit,
1657 2: periph device address is scsi3addr */
1658 unsigned int log_unit, __u8 page_code,
1659 unsigned char *scsi3addr, int cmd_type)
1661 ctlr_info_t *h = hba[ctlr];
1662 u64bit buff_dma_handle;
1665 c->cmd_type = CMD_IOCTL_PEND;
1666 c->Header.ReplyQueue = 0;
/* One SG entry when a data buffer is supplied, none otherwise. */
1668 c->Header.SGList = 1;
1669 c->Header.SGTotal = 1;
1671 c->Header.SGList = 0;
1672 c->Header.SGTotal = 0;
1674 c->Header.Tag.lower = c->busaddr;
1676 c->Request.Type.Type = cmd_type;
1677 if (cmd_type == TYPE_CMD) {
1680 /* If the logical unit number is 0 then, this is going
1681 to controller so It's a physical command
1682 mode = 0 target = 0. So we have nothing to write.
1683 otherwise, if use_unit_num == 1,
1684 mode = 1(volume set addressing) target = LUNID
1685 otherwise, if use_unit_num == 2,
1686 mode = 0(periph dev addr) target = scsi3addr */
1687 if (use_unit_num == 1) {
1688 c->Header.LUN.LogDev.VolId =
1689 h->drv[log_unit].LunID;
1690 c->Header.LUN.LogDev.Mode = 1;
1691 } else if (use_unit_num == 2) {
1692 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1694 c->Header.LUN.LogDev.Mode = 0;
1696 /* are we trying to read a vital product page */
1697 if (page_code != 0) {
/* EVPD bit + page code per the SCSI INQUIRY CDB layout. */
1698 c->Request.CDB[1] = 0x01;
1699 c->Request.CDB[2] = page_code;
1701 c->Request.CDBLen = 6;
1702 c->Request.Type.Attribute = ATTR_SIMPLE;
1703 c->Request.Type.Direction = XFER_READ;
1704 c->Request.Timeout = 0;
1705 c->Request.CDB[0] = CISS_INQUIRY;
1706 c->Request.CDB[4] = size & 0xFF;
1708 case CISS_REPORT_LOG:
1709 case CISS_REPORT_PHYS:
1710 /* Talking to controller so It's a physical command
1711 mode = 00 target = 0. Nothing to write.
1713 c->Request.CDBLen = 12;
1714 c->Request.Type.Attribute = ATTR_SIMPLE;
1715 c->Request.Type.Direction = XFER_READ;
1716 c->Request.Timeout = 0;
1717 c->Request.CDB[0] = cmd;
/* Allocation length, big-endian, in CDB bytes 6-9. */
1718 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1719 c->Request.CDB[7] = (size >> 16) & 0xFF;
1720 c->Request.CDB[8] = (size >> 8) & 0xFF;
1721 c->Request.CDB[9] = size & 0xFF;
1724 case CCISS_READ_CAPACITY:
1725 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1726 c->Header.LUN.LogDev.Mode = 1;
1727 c->Request.CDBLen = 10;
1728 c->Request.Type.Attribute = ATTR_SIMPLE;
1729 c->Request.Type.Direction = XFER_READ;
1730 c->Request.Timeout = 0;
1731 c->Request.CDB[0] = cmd;
1733 case CCISS_READ_CAPACITY_16:
1734 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1735 c->Header.LUN.LogDev.Mode = 1;
1736 c->Request.CDBLen = 16;
1737 c->Request.Type.Attribute = ATTR_SIMPLE;
1738 c->Request.Type.Direction = XFER_READ;
1739 c->Request.Timeout = 0;
1740 c->Request.CDB[0] = cmd;
/* SERVICE ACTION IN(16) with service action 0x10 = READ CAPACITY(16). */
1741 c->Request.CDB[1] = 0x10;
1742 c->Request.CDB[10] = (size >> 24) & 0xFF;
1743 c->Request.CDB[11] = (size >> 16) & 0xFF;
1744 c->Request.CDB[12] = (size >> 8) & 0xFF;
1745 c->Request.CDB[13] = size & 0xFF;
/* NOTE(review): Timeout and CDB[0] were already set above (lines 1739-
 * 1740); the next two assignments are redundant and could be removed. */
1746 c->Request.Timeout = 0;
1747 c->Request.CDB[0] = cmd;
1749 case CCISS_CACHE_FLUSH:
1750 c->Request.CDBLen = 12;
1751 c->Request.Type.Attribute = ATTR_SIMPLE;
1752 c->Request.Type.Direction = XFER_WRITE;
1753 c->Request.Timeout = 0;
1754 c->Request.CDB[0] = BMIC_WRITE;
1755 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
/* NOTE(review): "0x%c" prints the command byte as a character;
 * "0x%02x" would be more readable in the log. */
1759 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1762 } else if (cmd_type == TYPE_MSG) {
1764 case 0: /* ABORT message */
1765 c->Request.CDBLen = 12;
1766 c->Request.Type.Attribute = ATTR_SIMPLE;
1767 c->Request.Type.Direction = XFER_WRITE;
1768 c->Request.Timeout = 0;
1769 c->Request.CDB[0] = cmd; /* abort */
1770 c->Request.CDB[1] = 0; /* abort a command */
1771 /* buff contains the tag of the command to abort */
1772 memcpy(&c->Request.CDB[4], buff, 8);
1774 case 1: /* RESET message */
1775 c->Request.CDBLen = 12;
1776 c->Request.Type.Attribute = ATTR_SIMPLE;
1777 c->Request.Type.Direction = XFER_WRITE;
1778 c->Request.Timeout = 0;
1779 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1780 c->Request.CDB[0] = cmd; /* reset */
1781 c->Request.CDB[1] = 0x04; /* reset a LUN */
1783 case 3: /* No-Op message */
1784 c->Request.CDBLen = 1;
1785 c->Request.Type.Attribute = ATTR_SIMPLE;
1786 c->Request.Type.Direction = XFER_WRITE;
1787 c->Request.Timeout = 0;
1788 c->Request.CDB[0] = cmd;
1792 "cciss%d: unknown message type %d\n", ctlr, cmd);
1797 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1800 /* Fill in the scatter gather information */
/* Map the caller's buffer bidirectionally; the completion path
 * unmaps it using the handle stored in SG[0]. */
1802 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1804 PCI_DMA_BIDIRECTIONAL);
1805 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1806 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1807 c->SG[0].Len = size;
1808 c->SG[0].Ext = 0; /* we are not chaining */
/*
 * sendcmd_withirq() - build a command via fill_cmd(), queue it to the
 * controller, sleep until the interrupt handler completes it, then
 * classify the error status. Unsolicited aborts are retried up to
 * MAX_CMD_RETRIES; everything else maps to IO_OK or IO_ERROR.
 * Used when interrupts are enabled (normal operation), unlike sendcmd()
 * which polls and is only for init/error-recovery time.
 */
1813 static int sendcmd_withirq(__u8 cmd,
1817 unsigned int use_unit_num,
1818 unsigned int log_unit, __u8 page_code, int cmd_type)
1820 ctlr_info_t *h = hba[ctlr];
1821 CommandList_struct *c;
1822 u64bit buff_dma_handle;
1823 unsigned long flags;
1825 DECLARE_COMPLETION_ONSTACK(wait);
1827 if ((c = cmd_alloc(h, 0)) == NULL)
1829 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1830 log_unit, page_code, NULL, cmd_type);
1831 if (return_status != IO_OK) {
1833 return return_status;
1838 /* Put the request on the tail of the queue and send it */
1839 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1843 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Block until the interrupt handler signals completion of this command. */
1845 wait_for_completion(&wait);
1847 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1848 switch (c->err_info->CommandStatus) {
1849 case CMD_TARGET_STATUS:
1850 printk(KERN_WARNING "cciss: cmd %p has "
1851 " completed with errors\n", c);
1852 if (c->err_info->ScsiStatus) {
1853 printk(KERN_WARNING "cciss: cmd %p "
1854 "has SCSI Status = %x\n",
1855 c, c->err_info->ScsiStatus);
1859 case CMD_DATA_UNDERRUN:
1860 case CMD_DATA_OVERRUN:
1861 /* expected for inquire and report lun commands */
1864 printk(KERN_WARNING "cciss: Cmd %p is "
1865 "reported invalid\n", c);
1866 return_status = IO_ERROR;
1868 case CMD_PROTOCOL_ERR:
1869 printk(KERN_WARNING "cciss: cmd %p has "
1870 "protocol error \n", c);
1871 return_status = IO_ERROR;
1873 case CMD_HARDWARE_ERR:
1874 printk(KERN_WARNING "cciss: cmd %p had "
1875 " hardware error\n", c);
1876 return_status = IO_ERROR;
1878 case CMD_CONNECTION_LOST:
1879 printk(KERN_WARNING "cciss: cmd %p had "
1880 "connection lost\n", c);
1881 return_status = IO_ERROR;
1884 printk(KERN_WARNING "cciss: cmd %p was "
1886 return_status = IO_ERROR;
1888 case CMD_ABORT_FAILED:
1889 printk(KERN_WARNING "cciss: cmd %p reports "
1890 "abort failed\n", c);
1891 return_status = IO_ERROR;
1893 case CMD_UNSOLICITED_ABORT:
1895 "cciss%d: unsolicited abort %p\n", ctlr, c);
/* Retry unsolicited aborts: clear the stale error info, re-arm the
 * completion, and resubmit (resubmission path elided in this view). */
1896 if (c->retry_count < MAX_CMD_RETRIES) {
1898 "cciss%d: retrying %p\n", ctlr, c);
1900 /* erase the old error information */
1901 memset(c->err_info, 0,
1902 sizeof(ErrorInfo_struct));
1903 return_status = IO_OK;
1904 INIT_COMPLETION(wait);
1907 return_status = IO_ERROR;
1910 printk(KERN_WARNING "cciss: cmd %p returned "
1911 "unknown status %x\n", c,
1912 c->err_info->CommandStatus);
1913 return_status = IO_ERROR;
1916 /* unlock the buffers from DMA */
1917 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1918 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1919 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1920 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1922 return return_status;
/*
 * cciss_geometry_inquiry() - issue vendor-specific INQUIRY page 0xC1 to
 * read the volume's geometry (heads/sectors/cylinders) and RAID level
 * into 'drv'. Falls back to a computed fake geometry when the volume
 * does not support the page. 'withirq' selects the interrupt-driven
 * (sendcmd_withirq) vs. polled (sendcmd) transport.
 */
1925 static void cciss_geometry_inquiry(int ctlr, int logvol,
1926 int withirq, sector_t total_size,
1927 unsigned int block_size,
1928 InquiryData_struct *inq_buff,
1929 drive_info_struct *drv)
1934 memset(inq_buff, 0, sizeof(InquiryData_struct));
1936 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1937 inq_buff, sizeof(*inq_buff), 1,
1938 logvol, 0xC1, TYPE_CMD);
1940 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1941 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1943 if (return_code == IO_OK) {
/* 0xFF in the RAID-level byte means the page is unsupported. */
1944 if (inq_buff->data_byte[8] == 0xFF) {
1946 "cciss: reading geometry failed, volume "
1947 "does not support reading geometry\n");
1949 drv->sectors = 32; // Sectors per track
1951 drv->heads = inq_buff->data_byte[6];
1952 drv->sectors = inq_buff->data_byte[7];
1953 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1954 drv->cylinders += inq_buff->data_byte[5];
1955 drv->raid_level = inq_buff->data_byte[8];
1957 drv->block_size = block_size;
1958 drv->nr_blocks = total_size;
/* Derive a cylinder count from total_size / (heads * sectors);
 * sector_div is used because total_size is a 64-bit sector_t. */
1959 t = drv->heads * drv->sectors;
1961 unsigned rem = sector_div(total_size, t);
1964 drv->cylinders = total_size;
1966 } else { /* Get geometry failed */
1967 printk(KERN_WARNING "cciss: reading geometry failed\n");
1969 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1970 drv->heads, drv->sectors, drv->cylinders);
/*
 * cciss_read_capacity() - READ CAPACITY(10): fetch the volume's size
 * in blocks and its block size. The returned last-LBA is big-endian,
 * hence the be32_to_cpu(); +1 converts last LBA to a block count.
 * A result of 0 is how callers detect a >2TB volume (wraps 32 bits)
 * and switch to cciss_read_capacity_16().
 */
1974 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1975 unsigned int *block_size)
1977 ReadCapdata_struct *buf;
1979 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1981 printk(KERN_WARNING "cciss: out of memory\n");
1984 memset(buf, 0, sizeof(ReadCapdata_struct));
1986 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1987 ctlr, buf, sizeof(ReadCapdata_struct),
1988 1, logvol, 0, TYPE_CMD);
1990 return_code = sendcmd(CCISS_READ_CAPACITY,
1991 ctlr, buf, sizeof(ReadCapdata_struct),
1992 1, logvol, 0, NULL, TYPE_CMD);
1993 if (return_code == IO_OK) {
1994 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1995 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1996 } else { /* read capacity command failed */
1997 printk(KERN_WARNING "cciss: read capacity failed\n");
/* On failure fall back to the kernel default block size. */
1999 *block_size = BLOCK_SIZE;
2001 if (*total_size != (__u32) 0)
2002 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2003 (unsigned long long)*total_size, *block_size);
/*
 * cciss_read_capacity_16() - READ CAPACITY(16) variant for volumes
 * larger than 2TB: same contract as cciss_read_capacity() but the
 * last-LBA field is a big-endian 64-bit value.
 */
2009 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2011 ReadCapdata_struct_16 *buf;
2013 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2015 printk(KERN_WARNING "cciss: out of memory\n");
2018 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2020 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2021 ctlr, buf, sizeof(ReadCapdata_struct_16),
2022 1, logvol, 0, TYPE_CMD);
2025 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2026 ctlr, buf, sizeof(ReadCapdata_struct_16),
2027 1, logvol, 0, NULL, TYPE_CMD);
2029 if (return_code == IO_OK) {
/* +1 converts the returned last LBA into a total block count. */
2030 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2031 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2032 } else { /* read capacity command failed */
2033 printk(KERN_WARNING "cciss: read capacity failed\n");
2035 *block_size = BLOCK_SIZE;
2037 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2038 (unsigned long long)*total_size, *block_size);
/*
 * cciss_revalidate() - block-device revalidate hook: locate the logical
 * volume for this gendisk by LunID, re-read its capacity (10- or
 * 16-byte CDB depending on the controller's current mode) and geometry,
 * then push the new block size and capacity to the block layer.
 */
2043 static int cciss_revalidate(struct gendisk *disk)
2045 ctlr_info_t *h = get_host(disk);
2046 drive_info_struct *drv = get_drv(disk);
2049 unsigned int block_size;
2050 sector_t total_size;
2051 InquiryData_struct *inq_buff = NULL;
/* Find this drive's index in the controller's drive array by LunID. */
2053 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2054 if (h->drv[logvol].LunID == drv->LunID) {
2063 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2064 if (inq_buff == NULL) {
2065 printk(KERN_WARNING "cciss: out of memory\n");
2068 if (h->cciss_read == CCISS_READ_10) {
2069 cciss_read_capacity(h->ctlr, logvol, 1,
2070 &total_size, &block_size);
2072 cciss_read_capacity_16(h->ctlr, logvol, 1,
2073 &total_size, &block_size);
2075 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2078 blk_queue_hardsect_size(drv->queue, drv->block_size);
2079 set_capacity(disk, drv->nr_blocks);
2086 * Wait polling for a command to complete.
2087 * The memory mapped FIFO is polled for the completion.
2088 * Used only at init time, interrupts from the HBA are disabled.
2090 static unsigned long pollcomplete(int ctlr)
2095 /* Wait (up to 20 seconds) for a command to complete */
/* Busy-wait with 1-jiffy sleeps; FIFO_EMPTY means nothing completed yet. */
2097 for (i = 20 * HZ; i > 0; i--) {
2098 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2099 if (done == FIFO_EMPTY)
2100 schedule_timeout_uninterruptible(1);
2104 /* Invalid address to tell caller we ran out of time */
/*
 * add_sendcmd_reject() - stash an unexpected completion seen while
 * sendcmd() was polling, so the interrupt handler can process it later
 * (via get_next_completion()). Only legitimate during SCSI tape
 * abort/reset error handling; otherwise the address is logged as bogus.
 */
2108 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2110 /* We get in here if sendcmd() is polling for completions
2111 and gets some command back that it wasn't expecting --
2112 something other than that which it just sent down.
2113 Ordinarily, that shouldn't happen, but it can happen when
2114 the scsi tape stuff gets into error handling mode, and
2115 starts using sendcmd() to try to abort commands and
2116 reset tape drives. In that case, sendcmd may pick up
2117 completions of commands that were sent to logical drives
2118 through the block i/o system, or cciss ioctls completing, etc.
2119 In that case, we need to save those completions for later
2120 processing by the interrupt handler.
2123 #ifdef CONFIG_CISS_SCSI_TAPE
2124 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2126 /* If it's not the scsi tape stuff doing error handling, (abort */
2127 /* or reset) then we don't expect anything weird. */
2128 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2130 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2131 "Invalid command list address returned! (%lx)\n",
2133 /* not much we can do. */
2134 #ifdef CONFIG_CISS_SCSI_TAPE
2138 /* We've sent down an abort or reset, but something else
/* Reject list holds at most NR_CMDS + 2 saved completions. */
2140 if (srl->ncompletions >= (NR_CMDS + 2)) {
2141 /* Uh oh. No room to save it for later... */
2142 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2143 "reject list overflow, command lost!\n", ctlr);
2146 /* Save it for later */
2147 srl->complete[srl->ncompletions] = complete;
2148 srl->ncompletions++;
2154 * Send a command to the controller, and wait for it to complete.
2155 * Only used at init time.
2157 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2158 1: address logical volume log_unit,
2159 2: periph device address is scsi3addr */
2160 unsigned int log_unit,
2161 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2163 CommandList_struct *c;
2165 unsigned long complete;
2166 ctlr_info_t *info_p = hba[ctlr];
2167 u64bit buff_dma_handle;
2168 int status, done = 0;
2170 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2171 printk(KERN_WARNING "cciss: unable to get memory");
2174 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2175 log_unit, page_code, scsi3addr, cmd_type);
2176 if (status != IO_OK) {
2177 cmd_free(info_p, c, 1);
2185 printk(KERN_DEBUG "cciss: turning intr off\n");
2186 #endif /* CCISS_DEBUG */
/* Polling mode: completions are fetched by pollcomplete(), not the ISR. */
2187 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2189 /* Make sure there is room in the command FIFO */
2190 /* Actually it should be completely empty at this time */
2191 /* unless we are in here doing error handling for the scsi */
2192 /* tape side of the driver. */
2193 for (i = 200000; i > 0; i--) {
2194 /* if fifo isn't full go */
2195 if (!(info_p->access.fifo_full(info_p))) {
2200 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2201 " waiting!\n", ctlr);
2206 info_p->access.submit_command(info_p, c);
/* Poll (up to 20s) for a completion tag from the hardware FIFO. */
2209 complete = pollcomplete(ctlr);
2212 printk(KERN_DEBUG "cciss: command completed\n");
2213 #endif /* CCISS_DEBUG */
2215 if (complete == 1) {
2217 "cciss cciss%d: SendCmd Timeout out, "
2218 "No command list address returned!\n", ctlr);
2224 /* This will need to change for direct lookup completions */
/* Error bit set and the tag matches our command: inspect err_info. */
2225 if ((complete & CISS_ERROR_BIT)
2226 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2227 /* if data overrun or underun on Report command
2230 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2231 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2232 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2233 ((c->err_info->CommandStatus ==
2234 CMD_DATA_OVERRUN) ||
2235 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2237 complete = c->busaddr;
2239 if (c->err_info->CommandStatus ==
2240 CMD_UNSOLICITED_ABORT) {
2241 printk(KERN_WARNING "cciss%d: "
2242 "unsolicited abort %p\n",
2244 if (c->retry_count < MAX_CMD_RETRIES) {
2246 "cciss%d: retrying %p\n",
2249 /* erase the old error */
2251 memset(c->err_info, 0,
2253 (ErrorInfo_struct));
2257 "cciss%d: retried %p too "
2258 "many times\n", ctlr, c);
2262 } else if (c->err_info->CommandStatus ==
2265 "cciss%d: command could not be aborted.\n",
2270 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2271 " Error %x \n", ctlr,
2272 c->err_info->CommandStatus)2;
2273 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2275 " size %x\n num %x value %x\n",
2277 c->err_info->MoreErrInfo.Invalid_Cmd.
2279 c->err_info->MoreErrInfo.Invalid_Cmd.
2281 c->err_info->MoreErrInfo.Invalid_Cmd.
2287 /* This will need changing for direct lookup completions */
/* Completion for some other command: save it for the ISR to process. */
2288 if (complete != c->busaddr) {
2289 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2290 BUG(); /* we are pretty much hosed if we get here. */
2298 /* unlock the data buffer from DMA */
2299 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2300 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2301 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2302 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2303 #ifdef CONFIG_CISS_SCSI_TAPE
2304 /* if we saved some commands for later, process them now. */
2305 if (info_p->scsi_rejects.ncompletions > 0)
2306 do_cciss_intr(0, info_p);
2308 cmd_free(info_p, c, 1);
2313 * Map (physical) PCI mem into (virtual) kernel space
2315 static void __iomem *remap_pci_mem(ulong base, ulong size)
2317 ulong page_base = ((ulong) base) & PAGE_MASK;
2318 ulong page_offs = ((ulong) base) - page_base;
/* ioremap() wants a page-aligned base; re-apply the intra-page offset
 * to the returned cookie, or return NULL if the mapping failed. */
2319 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2321 return page_remapped ? (page_remapped + page_offs) : NULL;
2325 * Takes jobs off the Q and sends them to the hardware, then puts them on
2326 * the Q to wait for completion.
2328 static void start_io(ctlr_info_t *h)
2330 CommandList_struct *c;
/* Drain the software request queue into the hardware FIFO. */
2332 while ((c = h->reqQ) != NULL) {
2333 /* can't do anything if fifo is full */
2334 if ((h->access.fifo_full(h))) {
2335 printk(KERN_WARNING "cciss: fifo full\n");
2339 /* Get the first entry from the Request Q */
2340 removeQ(&(h->reqQ), c);
2343 /* Tell the controller execute command */
2344 h->access.submit_command(h, c);
2346 /* Put job onto the completed Q */
2347 addQ(&(h->cmpQ), c);
2351 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2352 /* Zeros out the error record and then resends the command back */
2353 /* to the controller */
2354 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2356 /* erase the old error information */
2357 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2359 /* add it to software queue and then send it to the controller */
2360 addQ(&(h->reqQ), c);
/* Track the high-water mark of the software queue depth. */
2362 if (h->Qdepth > h->maxQsinceinit)
2363 h->maxQsinceinit = h->Qdepth;
2368 /* checks the status of the job and calls complete buffers to mark all
2369 * buffers for the completed job. Note that this function does not need
2370 * to hold the hba/queue lock.
2372 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2381 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2382 switch (cmd->err_info->CommandStatus) {
2383 unsigned char sense_key;
2384 case CMD_TARGET_STATUS:
/* SCSI status 0x02 = CHECK CONDITION: inspect the sense key. */
2387 if (cmd->err_info->ScsiStatus == 0x02) {
2388 printk(KERN_WARNING "cciss: cmd %p "
2389 "has CHECK CONDITION "
2390 " byte 2 = 0x%x\n", cmd,
2391 cmd->err_info->SenseInfo[2]
2393 /* check the sense key */
2394 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2395 /* no status or recovered error */
2396 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2400 printk(KERN_WARNING "cciss: cmd %p "
2401 "has SCSI Status 0x%x\n",
2402 cmd, cmd->err_info->ScsiStatus);
2405 case CMD_DATA_UNDERRUN:
2406 printk(KERN_WARNING "cciss: cmd %p has"
2407 " completed with data underrun "
2410 case CMD_DATA_OVERRUN:
2411 printk(KERN_WARNING "cciss: cmd %p has"
2412 " completed with data overrun "
2416 printk(KERN_WARNING "cciss: cmd %p is "
2417 "reported invalid\n", cmd);
2420 case CMD_PROTOCOL_ERR:
2421 printk(KERN_WARNING "cciss: cmd %p has "
2422 "protocol error \n", cmd);
2425 case CMD_HARDWARE_ERR:
2426 printk(KERN_WARNING "cciss: cmd %p had "
2427 " hardware error\n", cmd);
2430 case CMD_CONNECTION_LOST:
2431 printk(KERN_WARNING "cciss: cmd %p had "
2432 "connection lost\n", cmd);
2436 printk(KERN_WARNING "cciss: cmd %p was "
2440 case CMD_ABORT_FAILED:
2441 printk(KERN_WARNING "cciss: cmd %p reports "
2442 "abort failed\n", cmd);
2445 case CMD_UNSOLICITED_ABORT:
2446 printk(KERN_WARNING "cciss%d: unsolicited "
2447 "abort %p\n", h->ctlr, cmd);
/* Retry unsolicited aborts up to MAX_CMD_RETRIES before failing. */
2448 if (cmd->retry_count < MAX_CMD_RETRIES) {
2451 "cciss%d: retrying %p\n", h->ctlr, cmd);
2455 "cciss%d: %p retried too "
2456 "many times\n", h->ctlr, cmd);
2460 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2464 printk(KERN_WARNING "cciss: cmd %p returned "
2465 "unknown status %x\n", cmd,
2466 cmd->err_info->CommandStatus);
2470 /* We need to return this command */
2472 resend_cciss_cmd(h, cmd);
/* Hand the request to the block layer; the actual cleanup (DMA unmap,
 * bio completion, cmd_free) happens in cciss_softirq_done(). */
2476 cmd->rq->completion_data = cmd;
2477 cmd->rq->errors = status;
2478 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2479 blk_complete_request(cmd->rq);
2483 * Get a request and submit it to the controller.
2485 static void do_cciss_request(request_queue_t *q)
2487 ctlr_info_t *h = q->queuedata;
2488 CommandList_struct *c;
2491 struct request *creq;
2493 struct scatterlist tmp_sg[MAXSGENTRIES];
2494 drive_info_struct *drv;
2497 /* We call start_io here in case there is a command waiting on the
2498 * queue that has not been sent.
2500 if (blk_queue_plugged(q))
2504 creq = elv_next_request(q);
2508 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2510 if ((c = cmd_alloc(h, 1)) == NULL)
2513 blkdev_dequeue_request(creq);
/* Drop the queue lock while building the command and mapping DMA. */
2515 spin_unlock_irq(q->queue_lock);
2517 c->cmd_type = CMD_RWREQ;
2520 /* fill in the request */
2521 drv = creq->rq_disk->private_data;
2522 c->Header.ReplyQueue = 0; // unused in simple mode
2523 /* got command from pool, so use the command block index instead */
2524 /* for direct lookups. */
2525 /* The first 2 bits are reserved for controller error reporting. */
2526 c->Header.Tag.lower = (c->cmdindex << 3);
2527 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2528 c->Header.LUN.LogDev.VolId = drv->LunID;
2529 c->Header.LUN.LogDev.Mode = 1;
2530 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2531 c->Request.Type.Type = TYPE_CMD; // It is a command.
2532 c->Request.Type.Attribute = ATTR_SIMPLE;
2533 c->Request.Type.Direction =
2534 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2535 c->Request.Timeout = 0; // Don't time out
2537 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2538 start_blk = creq->sector;
2540 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2541 (int)creq->nr_sectors);
2542 #endif /* CCISS_DEBUG */
/* Build the scatterlist for this request and DMA-map each segment. */
2544 seg = blk_rq_map_sg(q, creq, tmp_sg);
2546 /* get the DMA records for the setup */
2547 if (c->Request.Type.Direction == XFER_READ)
2548 dir = PCI_DMA_FROMDEVICE;
2550 dir = PCI_DMA_TODEVICE;
2552 for (i = 0; i < seg; i++) {
2553 c->SG[i].Len = tmp_sg[i].length;
2554 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2556 tmp_sg[i].length, dir);
2557 c->SG[i].Addr.lower = temp64.val32.lower;
2558 c->SG[i].Addr.upper = temp64.val32.upper;
2559 c->SG[i].Ext = 0; // we are not chaining
2561 /* track how many SG entries we are using */
2566 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2567 creq->nr_sectors, seg);
2568 #endif /* CCISS_DEBUG */
2570 c->Header.SGList = c->Header.SGTotal = seg;
/* Fill in the LBA and sector count: 10-byte CDB for <2TB mode,
 * 16-byte CDB when the controller is in READ/WRITE(16) mode. */
2571 if(h->cciss_read == CCISS_READ_10) {
2572 c->Request.CDB[1] = 0;
2573 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2574 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2575 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2576 c->Request.CDB[5] = start_blk & 0xff;
2577 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2578 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2579 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2580 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2582 c->Request.CDBLen = 16;
2583 c->Request.CDB[1]= 0;
2584 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2585 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2586 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2587 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2588 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2589 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2590 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2591 c->Request.CDB[9]= start_blk & 0xff;
2592 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2593 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2594 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2595 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2596 c->Request.CDB[14] = c->Request.CDB[15] = 0;
/* Re-take the queue lock to put the command on the software queue. */
2599 spin_lock_irq(q->queue_lock);
2601 addQ(&(h->reqQ), c);
2603 if (h->Qdepth > h->maxQsinceinit)
2604 h->maxQsinceinit = h->Qdepth;
2610 /* We will already have the driver lock here, so no need to lock it. */
2616 static inline unsigned long get_next_completion(ctlr_info_t *h)
2618 #ifdef CONFIG_CISS_SCSI_TAPE
2619 /* Any rejects from sendcmd() lying around? Process them first */
2620 if (h->scsi_rejects.ncompletions == 0)
2621 return h->access.command_completed(h);
2623 struct sendcmd_reject_list *srl;
2625 srl = &h->scsi_rejects;
2626 n = --srl->ncompletions;
2627 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2629 return srl->complete[n];
2632 return h->access.command_completed(h);
2636 static inline int interrupt_pending(ctlr_info_t *h)
2638 #ifdef CONFIG_CISS_SCSI_TAPE
2639 return (h->access.intr_pending(h)
2640 || (h->scsi_rejects.ncompletions > 0));
2642 return h->access.intr_pending(h);
2646 static inline long interrupt_not_for_us(ctlr_info_t *h)
2648 #ifdef CONFIG_CISS_SCSI_TAPE
2649 return (((h->access.intr_pending(h) == 0) ||
2650 (h->interrupts_enabled == 0))
2651 && (h->scsi_rejects.ncompletions == 0));
2653 return (((h->access.intr_pending(h) == 0) ||
2654 (h->interrupts_enabled == 0)));
2658 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2660 ctlr_info_t *h = dev_id;
2661 CommandList_struct *c;
2662 unsigned long flags;
2665 if (interrupt_not_for_us(h))
2668 * If there are completed commands in the completion queue,
2669 * we had better do something about it.
2671 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2672 while (interrupt_pending(h)) {
2673 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2677 if (a2 >= NR_CMDS) {
2679 "cciss: controller cciss%d failed, stopping.\n",
2681 fail_all_cmds(h->ctlr);
2685 c = h->cmd_pool + a2;
2690 if ((c = h->cmpQ) == NULL) {
2692 "cciss: Completion of %08x ignored\n",
2696 while (c->busaddr != a) {
2703 * If we've found the command, take it off the
2704 * completion Q and free it
2706 if (c->busaddr == a) {
2707 removeQ(&h->cmpQ, c);
2708 if (c->cmd_type == CMD_RWREQ) {
2709 complete_command(h, c, 0);
2710 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2711 complete(c->waiting);
2713 # ifdef CONFIG_CISS_SCSI_TAPE
2714 else if (c->cmd_type == CMD_SCSI)
2715 complete_scsi_command(c, 0, a1);
2722 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2727 * We cannot read the structure directly; for portability we must use
2728 * the readl()/readb() accessors on the memory-mapped config table.
2729 * This is for debug only.
2732 static void print_cfg_table(CfgTable_struct *tb)
2737 printk("Controller Configuration information\n");
2738 printk("------------------------------------\n");
2739 for (i = 0; i < 4; i++)
2740 temp_name[i] = readb(&(tb->Signature[i]));
2741 temp_name[4] = '\0';
2742 printk(" Signature = %s\n", temp_name);
2743 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2744 printk(" Transport methods supported = 0x%x\n",
2745 readl(&(tb->TransportSupport)));
2746 printk(" Transport methods active = 0x%x\n",
2747 readl(&(tb->TransportActive)));
2748 printk(" Requested transport Method = 0x%x\n",
2749 readl(&(tb->HostWrite.TransportRequest)));
2750 printk(" Coalesce Interrupt Delay = 0x%x\n",
2751 readl(&(tb->HostWrite.CoalIntDelay)));
2752 printk(" Coalesce Interrupt Count = 0x%x\n",
2753 readl(&(tb->HostWrite.CoalIntCount)));
2754 printk(" Max outstanding commands = 0x%d\n",
2755 readl(&(tb->CmdsOutMax)));
2756 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2757 for (i = 0; i < 16; i++)
2758 temp_name[i] = readb(&(tb->ServerName[i]));
2759 temp_name[16] = '\0';
2760 printk(" Server Name = %s\n", temp_name);
2761 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2763 #endif /* CCISS_DEBUG */
2765 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2767 int i, offset, mem_type, bar_type;
2768 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2771 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2772 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2773 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2776 mem_type = pci_resource_flags(pdev, i) &
2777 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2779 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2780 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2781 offset += 4; /* 32 bit */
2783 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2786 default: /* reserved in PCI 2.2 */
2788 "Base address is invalid\n");
2793 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2799 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2800 * controllers that are capable. If not, we use IO-APIC mode.
2803 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2804 struct pci_dev *pdev, __u32 board_id)
2806 #ifdef CONFIG_PCI_MSI
2808 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2812 /* Some boards advertise MSI but don't really support it */
2813 if ((board_id == 0x40700E11) ||
2814 (board_id == 0x40800E11) ||
2815 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2816 goto default_int_mode;
2818 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2819 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2821 c->intr[0] = cciss_msix_entries[0].vector;
2822 c->intr[1] = cciss_msix_entries[1].vector;
2823 c->intr[2] = cciss_msix_entries[2].vector;
2824 c->intr[3] = cciss_msix_entries[3].vector;
2829 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2830 "available\n", err);
2832 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2836 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2837 if (!pci_enable_msi(pdev)) {
2838 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2842 printk(KERN_WARNING "cciss: MSI init failed\n");
2843 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2848 #endif /* CONFIG_PCI_MSI */
2849 /* if we get here we're going to use the default interrupt mode */
2850 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2854 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2856 ushort subsystem_vendor_id, subsystem_device_id, command;
2857 __u32 board_id, scratchpad = 0;
2859 __u32 cfg_base_addr;
2860 __u64 cfg_base_addr_index;
2863 /* check to see if controller has been disabled */
2864 /* BEFORE trying to enable it */
2865 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2866 if (!(command & 0x02)) {
2868 "cciss: controller appears to be disabled\n");
2872 err = pci_enable_device(pdev);
2874 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2878 err = pci_request_regions(pdev, "cciss");
2880 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2882 goto err_out_disable_pdev;
2885 subsystem_vendor_id = pdev->subsystem_vendor;
2886 subsystem_device_id = pdev->subsystem_device;
2887 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2888 subsystem_vendor_id);
2891 printk("command = %x\n", command);
2892 printk("irq = %x\n", pdev->irq);
2893 printk("board_id = %x\n", board_id);
2894 #endif /* CCISS_DEBUG */
2896 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2897 * else we use the IO-APIC interrupt assigned to us by system ROM.
2899 cciss_interrupt_mode(c, pdev, board_id);
2902 * Memory base addr is first addr , the second points to the config
2906 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2908 printk("address 0 = %x\n", c->paddr);
2909 #endif /* CCISS_DEBUG */
2910 c->vaddr = remap_pci_mem(c->paddr, 200);
2912 /* Wait for the board to become ready. (PCI hotplug needs this.)
2913 * We poll for up to 120 secs, once per 100ms. */
2914 for (i = 0; i < 1200; i++) {
2915 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2916 if (scratchpad == CCISS_FIRMWARE_READY)
2918 set_current_state(TASK_INTERRUPTIBLE);
2919 schedule_timeout(HZ / 10); /* wait 100ms */
2921 if (scratchpad != CCISS_FIRMWARE_READY) {
2922 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2924 goto err_out_free_res;
2927 /* get the address index number */
2928 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2929 cfg_base_addr &= (__u32) 0x0000ffff;
2931 printk("cfg base address = %x\n", cfg_base_addr);
2932 #endif /* CCISS_DEBUG */
2933 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2935 printk("cfg base address index = %x\n", cfg_base_addr_index);
2936 #endif /* CCISS_DEBUG */
2937 if (cfg_base_addr_index == -1) {
2938 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2940 goto err_out_free_res;
2943 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2945 printk("cfg offset = %x\n", cfg_offset);
2946 #endif /* CCISS_DEBUG */
2947 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2948 cfg_base_addr_index) +
2949 cfg_offset, sizeof(CfgTable_struct));
2950 c->board_id = board_id;
2953 print_cfg_table(c->cfgtable);
2954 #endif /* CCISS_DEBUG */
2956 for (i = 0; i < ARRAY_SIZE(products); i++) {
2957 if (board_id == products[i].board_id) {
2958 c->product_name = products[i].product_name;
2959 c->access = *(products[i].access);
2963 if (i == ARRAY_SIZE(products)) {
2964 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2965 " to access the Smart Array controller %08lx\n",
2966 (unsigned long)board_id);
2968 goto err_out_free_res;
2970 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2971 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2972 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2973 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2974 printk("Does not appear to be a valid CISS config table\n");
2976 goto err_out_free_res;
2980 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2982 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2984 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2989 printk("Trying to put board into Simple mode\n");
2990 #endif /* CCISS_DEBUG */
2991 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2992 /* Update the field, and then ring the doorbell */
2993 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2994 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2996 /* under certain very rare conditions, this can take awhile.
2997 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2998 * as we enter this code.) */
2999 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3000 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3002 /* delay and try again */
3003 set_current_state(TASK_INTERRUPTIBLE);
3004 schedule_timeout(10);
3008 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3009 readl(c->vaddr + SA5_DOORBELL));
3010 #endif /* CCISS_DEBUG */
3012 print_cfg_table(c->cfgtable);
3013 #endif /* CCISS_DEBUG */
3015 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3016 printk(KERN_WARNING "cciss: unable to get board into"
3019 goto err_out_free_res;
3024 pci_release_regions(pdev);
3026 err_out_disable_pdev:
3027 pci_disable_device(pdev);
3032 * Gets information about the local volumes attached to the controller.
3034 static void cciss_getgeometry(int cntl_num)
3036 ReportLunData_struct *ld_buff;
3037 InquiryData_struct *inq_buff;
3043 sector_t total_size;
3045 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3046 if (ld_buff == NULL) {
3047 printk(KERN_ERR "cciss: out of memory\n");
3050 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3051 if (inq_buff == NULL) {
3052 printk(KERN_ERR "cciss: out of memory\n");
3056 /* Get the firmware version */
3057 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3058 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3060 if (return_code == IO_OK) {
3061 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3062 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3063 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3064 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3065 } else { /* send command failed */
3067 printk(KERN_WARNING "cciss: unable to determine firmware"
3068 " version of controller\n");
3070 /* Get the number of logical volumes */
3071 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3072 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3075 if (return_code == IO_OK) {
3077 printk("LUN Data\n--------------------------\n");
3078 #endif /* CCISS_DEBUG */
3081 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3083 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3085 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3086 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3087 } else { /* reading number of logical volumes failed */
3089 printk(KERN_WARNING "cciss: report logical volume"
3090 " command failed\n");
3093 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes pre entry
3094 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3096 "ciss: only %d number of logical volumes supported\n",
3098 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3101 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3102 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3103 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3104 hba[cntl_num]->num_luns);
3105 #endif /* CCISS_DEBUG */
3107 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3108 for (i = 0; i < CISS_MAX_LUN; i++) {
3109 if (i < hba[cntl_num]->num_luns) {
3110 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3112 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3114 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3116 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3118 hba[cntl_num]->drv[i].LunID = lunid;
3121 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3122 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3123 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3124 hba[cntl_num]->drv[i].LunID);
3125 #endif /* CCISS_DEBUG */
3127 /* testing to see if 16-byte CDBs are already being used */
3128 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3129 cciss_read_capacity_16(cntl_num, i, 0,
3130 &total_size, &block_size);
3133 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3135 /* total_size = last LBA + 1 */
3136 if(total_size == (__u32) 0) {
3137 cciss_read_capacity_16(cntl_num, i, 0,
3138 &total_size, &block_size);
3139 hba[cntl_num]->cciss_read = CCISS_READ_16;
3140 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3142 hba[cntl_num]->cciss_read = CCISS_READ_10;
3143 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3146 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3147 block_size, inq_buff,
3148 &hba[cntl_num]->drv[i]);
3150 /* initialize raid_level to indicate a free space */
3151 hba[cntl_num]->drv[i].raid_level = -1;
3158 /* Function to find the first free pointer into our hba[] array */
3159 /* Returns -1 if no free entries are left. */
3160 static int alloc_cciss_hba(void)
3162 struct gendisk *disk[NWD];
3164 for (n = 0; n < NWD; n++) {
3165 disk[n] = alloc_disk(1 << NWD_SHIFT);
3170 for (i = 0; i < MAX_CTLR; i++) {
3173 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3176 for (n = 0; n < NWD; n++)
3177 p->gendisk[n] = disk[n];
3182 printk(KERN_WARNING "cciss: This driver supports a maximum"
3183 " of %d controllers.\n", MAX_CTLR);
3186 printk(KERN_ERR "cciss: out of memory.\n");
3193 static void free_hba(int i)
3195 ctlr_info_t *p = hba[i];
3199 for (n = 0; n < NWD; n++)
3200 put_disk(p->gendisk[n]);
3205 * This is it. Find all the controllers and register them. I really hate
3206 * stealing all these major device numbers.
3207 * returns the number of block devices registered.
3209 static int __devinit cciss_init_one(struct pci_dev *pdev,
3210 const struct pci_device_id *ent)
3218 i = alloc_cciss_hba();
3222 hba[i]->busy_initializing = 1;
3224 if (cciss_pci_init(hba[i], pdev) != 0)
3227 sprintf(hba[i]->devname, "cciss%d", i);
3229 hba[i]->pdev = pdev;
3231 /* configure PCI DMA stuff */
3232 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3234 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3237 printk(KERN_ERR "cciss: no suitable DMA available\n");
3242 * register with the major number, or get a dynamic major number
3243 * by passing 0 as argument. This is done for greater than
3244 * 8 controller support.
3246 if (i < MAX_CTLR_ORIG)
3247 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3248 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3249 if (rc == -EBUSY || rc == -EINVAL) {
3251 "cciss: Unable to get major number %d for %s "
3252 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3255 if (i >= MAX_CTLR_ORIG)
3259 /* make sure the board interrupts are off */
3260 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3261 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3262 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3263 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3264 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3268 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3269 hba[i]->devname, pdev->device, pci_name(pdev),
3270 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3272 hba[i]->cmd_pool_bits =
3273 kmalloc(((NR_CMDS + BITS_PER_LONG -
3274 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3275 hba[i]->cmd_pool = (CommandList_struct *)
3276 pci_alloc_consistent(hba[i]->pdev,
3277 NR_CMDS * sizeof(CommandList_struct),
3278 &(hba[i]->cmd_pool_dhandle));
3279 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3280 pci_alloc_consistent(hba[i]->pdev,
3281 NR_CMDS * sizeof(ErrorInfo_struct),
3282 &(hba[i]->errinfo_pool_dhandle));
3283 if ((hba[i]->cmd_pool_bits == NULL)
3284 || (hba[i]->cmd_pool == NULL)
3285 || (hba[i]->errinfo_pool == NULL)) {
3286 printk(KERN_ERR "cciss: out of memory");
3289 #ifdef CONFIG_CISS_SCSI_TAPE
3290 hba[i]->scsi_rejects.complete =
3291 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3292 (NR_CMDS + 5), GFP_KERNEL);
3293 if (hba[i]->scsi_rejects.complete == NULL) {
3294 printk(KERN_ERR "cciss: out of memory");
3298 spin_lock_init(&hba[i]->lock);
3300 /* Initialize the pdev driver private data.
3301 have it point to hba[i]. */
3302 pci_set_drvdata(pdev, hba[i]);
3303 /* command and error info recs zeroed out before
3305 memset(hba[i]->cmd_pool_bits, 0,
3306 ((NR_CMDS + BITS_PER_LONG -
3307 1) / BITS_PER_LONG) * sizeof(unsigned long));
3310 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3311 #endif /* CCISS_DEBUG */
3313 cciss_getgeometry(i);
3315 cciss_scsi_setup(i);
3317 /* Turn the interrupts on so we can service requests */
3318 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3321 hba[i]->busy_initializing = 0;
3323 for (j = 0; j < NWD; j++) { /* mfm */
3324 drive_info_struct *drv = &(hba[i]->drv[j]);
3325 struct gendisk *disk = hba[i]->gendisk[j];
3327 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3330 "cciss: unable to allocate queue for disk %d\n",
3336 q->backing_dev_info.ra_pages = READ_AHEAD;
3337 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3339 /* This is a hardware imposed limit. */
3340 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3342 /* This is a limit in the driver and could be eliminated. */
3343 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3345 blk_queue_max_sectors(q, 512);
3347 blk_queue_softirq_done(q, cciss_softirq_done);
3349 q->queuedata = hba[i];
3350 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3351 disk->major = hba[i]->major;
3352 disk->first_minor = j << NWD_SHIFT;
3353 disk->fops = &cciss_fops;
3355 disk->private_data = drv;
3356 disk->driverfs_dev = &pdev->dev;
3357 /* we must register the controller even if no disks exist */
3358 /* this is for the online array utilities */
3359 if (!drv->heads && j)
3361 blk_queue_hardsect_size(q, drv->block_size);
3362 set_capacity(disk, drv->nr_blocks);
3369 #ifdef CONFIG_CISS_SCSI_TAPE
3370 kfree(hba[i]->scsi_rejects.complete);
3372 kfree(hba[i]->cmd_pool_bits);
3373 if (hba[i]->cmd_pool)
3374 pci_free_consistent(hba[i]->pdev,
3375 NR_CMDS * sizeof(CommandList_struct),
3376 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3377 if (hba[i]->errinfo_pool)
3378 pci_free_consistent(hba[i]->pdev,
3379 NR_CMDS * sizeof(ErrorInfo_struct),
3380 hba[i]->errinfo_pool,
3381 hba[i]->errinfo_pool_dhandle);
3382 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3384 unregister_blkdev(hba[i]->major, hba[i]->devname);
3386 hba[i]->busy_initializing = 0;
3391 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3393 ctlr_info_t *tmp_ptr;
3398 if (pci_get_drvdata(pdev) == NULL) {
3399 printk(KERN_ERR "cciss: Unable to remove device \n");
3402 tmp_ptr = pci_get_drvdata(pdev);
3404 if (hba[i] == NULL) {
3405 printk(KERN_ERR "cciss: device appears to "
3406 "already be removed \n");
3409 /* Turn board interrupts off and send the flush cache command */
3410 /* sendcmd will turn off interrupt, and send the flush...
3411 * To write all data in the battery backed cache to disks */
3412 memset(flush_buf, 0, 4);
3413 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3415 if (return_code != IO_OK) {
3416 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3419 free_irq(hba[i]->intr[2], hba[i]);
3421 #ifdef CONFIG_PCI_MSI
3422 if (hba[i]->msix_vector)
3423 pci_disable_msix(hba[i]->pdev);
3424 else if (hba[i]->msi_vector)
3425 pci_disable_msi(hba[i]->pdev);
3426 #endif /* CONFIG_PCI_MSI */
3428 iounmap(hba[i]->vaddr);
3429 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3430 unregister_blkdev(hba[i]->major, hba[i]->devname);
3431 remove_proc_entry(hba[i]->devname, proc_cciss);
3433 /* remove it from the disk list */
3434 for (j = 0; j < NWD; j++) {
3435 struct gendisk *disk = hba[i]->gendisk[j];
3437 request_queue_t *q = disk->queue;
3439 if (disk->flags & GENHD_FL_UP)
3442 blk_cleanup_queue(q);
3446 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3447 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3448 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3449 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3450 kfree(hba[i]->cmd_pool_bits);
3451 #ifdef CONFIG_CISS_SCSI_TAPE
3452 kfree(hba[i]->scsi_rejects.complete);
3454 pci_release_regions(pdev);
3455 pci_disable_device(pdev);
3456 pci_set_drvdata(pdev, NULL);
3460 static struct pci_driver cciss_pci_driver = {
3462 .probe = cciss_init_one,
3463 .remove = __devexit_p(cciss_remove_one),
3464 .id_table = cciss_pci_device_id, /* id_table */
3468 * This is it. Register the PCI driver information for the cards we control
3469 * the OS will call our registered routines when it finds one of our cards.
3471 static int __init cciss_init(void)
3473 printk(KERN_INFO DRIVER_NAME "\n");
3475 /* Register for our PCI devices */
3476 return pci_register_driver(&cciss_pci_driver);
3479 static void __exit cciss_cleanup(void)
3483 pci_unregister_driver(&cciss_pci_driver);
3484 /* double check that all controller entrys have been removed */
3485 for (i = 0; i < MAX_CTLR; i++) {
3486 if (hba[i] != NULL) {
3487 printk(KERN_WARNING "cciss: had to remove"
3488 " controller %d\n", i);
3489 cciss_remove_one(hba[i]->pdev);
3492 remove_proc_entry("cciss", proc_root_driver);
3495 static void fail_all_cmds(unsigned long ctlr)
3497 /* If we get here, the board is apparently dead. */
3498 ctlr_info_t *h = hba[ctlr];
3499 CommandList_struct *c;
3500 unsigned long flags;
3502 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3503 h->alive = 0; /* the controller apparently died... */
3505 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3507 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3509 /* move everything off the request queue onto the completed queue */
3510 while ((c = h->reqQ) != NULL) {
3511 removeQ(&(h->reqQ), c);
3513 addQ(&(h->cmpQ), c);
3516 /* Now, fail everything on the completed queue with a HW error */
3517 while ((c = h->cmpQ) != NULL) {
3518 removeQ(&h->cmpQ, c);
3519 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3520 if (c->cmd_type == CMD_RWREQ) {
3521 complete_command(h, c, 0);
3522 } else if (c->cmd_type == CMD_IOCTL_PEND)
3523 complete(c->waiting);
3524 #ifdef CONFIG_CISS_SCSI_TAPE
3525 else if (c->cmd_type == CMD_SCSI)
3526 complete_scsi_command(c, 0, 0);
3529 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3533 module_init(cciss_init);
3534 module_exit(cciss_cleanup);