]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/md/md.c
md: build failure due to missing delay.h
[linux-2.6-omap-h63xx.git] / drivers / md / md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/raid/md.h>
37 #include <linux/raid/bitmap.h>
38 #include <linux/sysctl.h>
39 #include <linux/buffer_head.h> /* for invalidate_bdev */
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/hdreg.h>
43 #include <linux/proc_fs.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/file.h>
47 #include <linux/delay.h>
48
49 #define MAJOR_NR MD_MAJOR
50
51 /* 63 partitions with the alternate major number (mdp) */
52 #define MdpMinorShift 6
53
54 #define DEBUG 0
55 #define dprintk(x...) ((void)(DEBUG && printk(x)))
56
57
58 #ifndef MODULE
59 static void autostart_arrays(int part);
60 #endif
61
62 static LIST_HEAD(pers_list);
63 static DEFINE_SPINLOCK(pers_lock);
64
65 static void md_print_devices(void);
66
67 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
68
69 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
70
71 /*
72  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
73  * is 1000 KB/sec, so the extra system load does not show up that much.
74  * Increase it if you want to have more _guaranteed_ speed. Note that
75  * the RAID driver will use the maximum available bandwidth if the IO
76  * subsystem is idle. There is also an 'absolute maximum' reconstruction
77  * speed limit - in case reconstruction slows down your system despite
78  * idle IO detection.
79  *
80  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
81  * or /sys/block/mdX/md/sync_speed_{min,max}
82  */
83
84 static int sysctl_speed_limit_min = 1000;
85 static int sysctl_speed_limit_max = 200000;
86 static inline int speed_min(mddev_t *mddev)
87 {
88         return mddev->sync_speed_min ?
89                 mddev->sync_speed_min : sysctl_speed_limit_min;
90 }
91
92 static inline int speed_max(mddev_t *mddev)
93 {
94         return mddev->sync_speed_max ?
95                 mddev->sync_speed_max : sysctl_speed_limit_max;
96 }
97
98 static struct ctl_table_header *raid_table_header;
99
100 static ctl_table raid_table[] = {
101         {
102                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
103                 .procname       = "speed_limit_min",
104                 .data           = &sysctl_speed_limit_min,
105                 .maxlen         = sizeof(int),
106                 .mode           = S_IRUGO|S_IWUSR,
107                 .proc_handler   = &proc_dointvec,
108         },
109         {
110                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
111                 .procname       = "speed_limit_max",
112                 .data           = &sysctl_speed_limit_max,
113                 .maxlen         = sizeof(int),
114                 .mode           = S_IRUGO|S_IWUSR,
115                 .proc_handler   = &proc_dointvec,
116         },
117         { .ctl_name = 0 }
118 };
119
120 static ctl_table raid_dir_table[] = {
121         {
122                 .ctl_name       = DEV_RAID,
123                 .procname       = "raid",
124                 .maxlen         = 0,
125                 .mode           = S_IRUGO|S_IXUGO,
126                 .child          = raid_table,
127         },
128         { .ctl_name = 0 }
129 };
130
131 static ctl_table raid_root_table[] = {
132         {
133                 .ctl_name       = CTL_DEV,
134                 .procname       = "dev",
135                 .maxlen         = 0,
136                 .mode           = 0555,
137                 .child          = raid_dir_table,
138         },
139         { .ctl_name = 0 }
140 };
141
142 static struct block_device_operations md_fops;
143
144 static int start_readonly;
145
146 /*
147  * We have a system wide 'event count' that is incremented
148  * on any 'interesting' event, and readers of /proc/mdstat
149  * can use 'poll' or 'select' to find out when the event
150  * count increases.
151  *
152  * Events are:
153  *  start array, stop array, error, add device, remove device,
154  *  start build, activate spare
155  */
156 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
157 static atomic_t md_event_count;
158 void md_new_event(mddev_t *mddev)
159 {
160         atomic_inc(&md_event_count);
161         wake_up(&md_event_waiters);
162 }
163 EXPORT_SYMBOL_GPL(md_new_event);
164
165 /* Alternate version that can be called from interrupts
166  * when calling sysfs_notify isn't needed.
167  */
168 static void md_new_event_inintr(mddev_t *mddev)
169 {
170         atomic_inc(&md_event_count);
171         wake_up(&md_event_waiters);
172 }
173
174 /*
175  * Enables to iterate over all existing md arrays
176  * all_mddevs_lock protects this list.
177  */
178 static LIST_HEAD(all_mddevs);
179 static DEFINE_SPINLOCK(all_mddevs_lock);
180
181
182 /*
183  * iterates through all used mddevs in the system.
184  * We take care to grab the all_mddevs_lock whenever navigating
185  * the list, and to always hold a refcount when unlocked.
186  * Any code which breaks out of this loop while own
187  * a reference to the current mddev and must mddev_put it.
188  */
189 #define for_each_mddev(mddev,tmp)                                       \
190                                                                         \
191         for (({ spin_lock(&all_mddevs_lock);                            \
192                 tmp = all_mddevs.next;                                  \
193                 mddev = NULL;});                                        \
194              ({ if (tmp != &all_mddevs)                                 \
195                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
196                 spin_unlock(&all_mddevs_lock);                          \
197                 if (mddev) mddev_put(mddev);                            \
198                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
199                 tmp != &all_mddevs;});                                  \
200              ({ spin_lock(&all_mddevs_lock);                            \
201                 tmp = tmp->next;})                                      \
202                 )
203
204
205 static int md_fail_request(struct request_queue *q, struct bio *bio)
206 {
207         bio_io_error(bio);
208         return 0;
209 }
210
211 static inline mddev_t *mddev_get(mddev_t *mddev)
212 {
213         atomic_inc(&mddev->active);
214         return mddev;
215 }
216
217 static void mddev_put(mddev_t *mddev)
218 {
219         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
220                 return;
221         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
222                 list_del(&mddev->all_mddevs);
223                 spin_unlock(&all_mddevs_lock);
224                 blk_cleanup_queue(mddev->queue);
225                 kobject_put(&mddev->kobj);
226         } else
227                 spin_unlock(&all_mddevs_lock);
228 }
229
230 static mddev_t * mddev_find(dev_t unit)
231 {
232         mddev_t *mddev, *new = NULL;
233
234  retry:
235         spin_lock(&all_mddevs_lock);
236         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
237                 if (mddev->unit == unit) {
238                         mddev_get(mddev);
239                         spin_unlock(&all_mddevs_lock);
240                         kfree(new);
241                         return mddev;
242                 }
243
244         if (new) {
245                 list_add(&new->all_mddevs, &all_mddevs);
246                 spin_unlock(&all_mddevs_lock);
247                 return new;
248         }
249         spin_unlock(&all_mddevs_lock);
250
251         new = kzalloc(sizeof(*new), GFP_KERNEL);
252         if (!new)
253                 return NULL;
254
255         new->unit = unit;
256         if (MAJOR(unit) == MD_MAJOR)
257                 new->md_minor = MINOR(unit);
258         else
259                 new->md_minor = MINOR(unit) >> MdpMinorShift;
260
261         mutex_init(&new->reconfig_mutex);
262         INIT_LIST_HEAD(&new->disks);
263         INIT_LIST_HEAD(&new->all_mddevs);
264         init_timer(&new->safemode_timer);
265         atomic_set(&new->active, 1);
266         atomic_set(&new->openers, 0);
267         spin_lock_init(&new->write_lock);
268         init_waitqueue_head(&new->sb_wait);
269         init_waitqueue_head(&new->recovery_wait);
270         new->reshape_position = MaxSector;
271         new->resync_min = 0;
272         new->resync_max = MaxSector;
273         new->level = LEVEL_NONE;
274
275         new->queue = blk_alloc_queue(GFP_KERNEL);
276         if (!new->queue) {
277                 kfree(new);
278                 return NULL;
279         }
280         /* Can be unlocked because the queue is new: no concurrency */
281         queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
282
283         blk_queue_make_request(new->queue, md_fail_request);
284
285         goto retry;
286 }
287
288 static inline int mddev_lock(mddev_t * mddev)
289 {
290         return mutex_lock_interruptible(&mddev->reconfig_mutex);
291 }
292
293 static inline int mddev_trylock(mddev_t * mddev)
294 {
295         return mutex_trylock(&mddev->reconfig_mutex);
296 }
297
298 static inline void mddev_unlock(mddev_t * mddev)
299 {
300         mutex_unlock(&mddev->reconfig_mutex);
301
302         md_wakeup_thread(mddev->thread);
303 }
304
305 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
306 {
307         mdk_rdev_t * rdev;
308         struct list_head *tmp;
309
310         rdev_for_each(rdev, tmp, mddev) {
311                 if (rdev->desc_nr == nr)
312                         return rdev;
313         }
314         return NULL;
315 }
316
317 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
318 {
319         struct list_head *tmp;
320         mdk_rdev_t *rdev;
321
322         rdev_for_each(rdev, tmp, mddev) {
323                 if (rdev->bdev->bd_dev == dev)
324                         return rdev;
325         }
326         return NULL;
327 }
328
329 static struct mdk_personality *find_pers(int level, char *clevel)
330 {
331         struct mdk_personality *pers;
332         list_for_each_entry(pers, &pers_list, list) {
333                 if (level != LEVEL_NONE && pers->level == level)
334                         return pers;
335                 if (strcmp(pers->name, clevel)==0)
336                         return pers;
337         }
338         return NULL;
339 }
340
341 /* return the offset of the super block in 512byte sectors */
342 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
343 {
344         sector_t num_sectors = bdev->bd_inode->i_size / 512;
345         return MD_NEW_SIZE_SECTORS(num_sectors);
346 }
347
348 static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
349 {
350         sector_t num_sectors = rdev->sb_start;
351
352         if (chunk_size)
353                 num_sectors &= ~((sector_t)chunk_size/512 - 1);
354         return num_sectors;
355 }
356
357 static int alloc_disk_sb(mdk_rdev_t * rdev)
358 {
359         if (rdev->sb_page)
360                 MD_BUG();
361
362         rdev->sb_page = alloc_page(GFP_KERNEL);
363         if (!rdev->sb_page) {
364                 printk(KERN_ALERT "md: out of memory.\n");
365                 return -ENOMEM;
366         }
367
368         return 0;
369 }
370
371 static void free_disk_sb(mdk_rdev_t * rdev)
372 {
373         if (rdev->sb_page) {
374                 put_page(rdev->sb_page);
375                 rdev->sb_loaded = 0;
376                 rdev->sb_page = NULL;
377                 rdev->sb_start = 0;
378                 rdev->size = 0;
379         }
380 }
381
382
383 static void super_written(struct bio *bio, int error)
384 {
385         mdk_rdev_t *rdev = bio->bi_private;
386         mddev_t *mddev = rdev->mddev;
387
388         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
389                 printk("md: super_written gets error=%d, uptodate=%d\n",
390                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
391                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
392                 md_error(mddev, rdev);
393         }
394
395         if (atomic_dec_and_test(&mddev->pending_writes))
396                 wake_up(&mddev->sb_wait);
397         bio_put(bio);
398 }
399
400 static void super_written_barrier(struct bio *bio, int error)
401 {
402         struct bio *bio2 = bio->bi_private;
403         mdk_rdev_t *rdev = bio2->bi_private;
404         mddev_t *mddev = rdev->mddev;
405
406         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
407             error == -EOPNOTSUPP) {
408                 unsigned long flags;
409                 /* barriers don't appear to be supported :-( */
410                 set_bit(BarriersNotsupp, &rdev->flags);
411                 mddev->barriers_work = 0;
412                 spin_lock_irqsave(&mddev->write_lock, flags);
413                 bio2->bi_next = mddev->biolist;
414                 mddev->biolist = bio2;
415                 spin_unlock_irqrestore(&mddev->write_lock, flags);
416                 wake_up(&mddev->sb_wait);
417                 bio_put(bio);
418         } else {
419                 bio_put(bio2);
420                 bio->bi_private = rdev;
421                 super_written(bio, error);
422         }
423 }
424
425 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
426                    sector_t sector, int size, struct page *page)
427 {
428         /* write first size bytes of page to sector of rdev
429          * Increment mddev->pending_writes before returning
430          * and decrement it on completion, waking up sb_wait
431          * if zero is reached.
432          * If an error occurred, call md_error
433          *
434          * As we might need to resubmit the request if BIO_RW_BARRIER
435          * causes ENOTSUPP, we allocate a spare bio...
436          */
437         struct bio *bio = bio_alloc(GFP_NOIO, 1);
438         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
439
440         bio->bi_bdev = rdev->bdev;
441         bio->bi_sector = sector;
442         bio_add_page(bio, page, size, 0);
443         bio->bi_private = rdev;
444         bio->bi_end_io = super_written;
445         bio->bi_rw = rw;
446
447         atomic_inc(&mddev->pending_writes);
448         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
449                 struct bio *rbio;
450                 rw |= (1<<BIO_RW_BARRIER);
451                 rbio = bio_clone(bio, GFP_NOIO);
452                 rbio->bi_private = bio;
453                 rbio->bi_end_io = super_written_barrier;
454                 submit_bio(rw, rbio);
455         } else
456                 submit_bio(rw, bio);
457 }
458
459 void md_super_wait(mddev_t *mddev)
460 {
461         /* wait for all superblock writes that were scheduled to complete.
462          * if any had to be retried (due to BARRIER problems), retry them
463          */
464         DEFINE_WAIT(wq);
465         for(;;) {
466                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
467                 if (atomic_read(&mddev->pending_writes)==0)
468                         break;
469                 while (mddev->biolist) {
470                         struct bio *bio;
471                         spin_lock_irq(&mddev->write_lock);
472                         bio = mddev->biolist;
473                         mddev->biolist = bio->bi_next ;
474                         bio->bi_next = NULL;
475                         spin_unlock_irq(&mddev->write_lock);
476                         submit_bio(bio->bi_rw, bio);
477                 }
478                 schedule();
479         }
480         finish_wait(&mddev->sb_wait, &wq);
481 }
482
483 static void bi_complete(struct bio *bio, int error)
484 {
485         complete((struct completion*)bio->bi_private);
486 }
487
488 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
489                    struct page *page, int rw)
490 {
491         struct bio *bio = bio_alloc(GFP_NOIO, 1);
492         struct completion event;
493         int ret;
494
495         rw |= (1 << BIO_RW_SYNC);
496
497         bio->bi_bdev = bdev;
498         bio->bi_sector = sector;
499         bio_add_page(bio, page, size, 0);
500         init_completion(&event);
501         bio->bi_private = &event;
502         bio->bi_end_io = bi_complete;
503         submit_bio(rw, bio);
504         wait_for_completion(&event);
505
506         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
507         bio_put(bio);
508         return ret;
509 }
510 EXPORT_SYMBOL_GPL(sync_page_io);
511
512 static int read_disk_sb(mdk_rdev_t * rdev, int size)
513 {
514         char b[BDEVNAME_SIZE];
515         if (!rdev->sb_page) {
516                 MD_BUG();
517                 return -EINVAL;
518         }
519         if (rdev->sb_loaded)
520                 return 0;
521
522
523         if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
524                 goto fail;
525         rdev->sb_loaded = 1;
526         return 0;
527
528 fail:
529         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
530                 bdevname(rdev->bdev,b));
531         return -EINVAL;
532 }
533
534 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
535 {
536         return  sb1->set_uuid0 == sb2->set_uuid0 &&
537                 sb1->set_uuid1 == sb2->set_uuid1 &&
538                 sb1->set_uuid2 == sb2->set_uuid2 &&
539                 sb1->set_uuid3 == sb2->set_uuid3;
540 }
541
542 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
543 {
544         int ret;
545         mdp_super_t *tmp1, *tmp2;
546
547         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
548         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
549
550         if (!tmp1 || !tmp2) {
551                 ret = 0;
552                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
553                 goto abort;
554         }
555
556         *tmp1 = *sb1;
557         *tmp2 = *sb2;
558
559         /*
560          * nr_disks is not constant
561          */
562         tmp1->nr_disks = 0;
563         tmp2->nr_disks = 0;
564
565         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
566 abort:
567         kfree(tmp1);
568         kfree(tmp2);
569         return ret;
570 }
571
572
573 static u32 md_csum_fold(u32 csum)
574 {
575         csum = (csum & 0xffff) + (csum >> 16);
576         return (csum & 0xffff) + (csum >> 16);
577 }
578
579 static unsigned int calc_sb_csum(mdp_super_t * sb)
580 {
581         u64 newcsum = 0;
582         u32 *sb32 = (u32*)sb;
583         int i;
584         unsigned int disk_csum, csum;
585
586         disk_csum = sb->sb_csum;
587         sb->sb_csum = 0;
588
589         for (i = 0; i < MD_SB_BYTES/4 ; i++)
590                 newcsum += sb32[i];
591         csum = (newcsum & 0xffffffff) + (newcsum>>32);
592
593
594 #ifdef CONFIG_ALPHA
595         /* This used to use csum_partial, which was wrong for several
596          * reasons including that different results are returned on
597          * different architectures.  It isn't critical that we get exactly
598          * the same return value as before (we always csum_fold before
599          * testing, and that removes any differences).  However as we
600          * know that csum_partial always returned a 16bit value on
601          * alphas, do a fold to maximise conformity to previous behaviour.
602          */
603         sb->sb_csum = md_csum_fold(disk_csum);
604 #else
605         sb->sb_csum = disk_csum;
606 #endif
607         return csum;
608 }
609
610
611 /*
612  * Handle superblock details.
613  * We want to be able to handle multiple superblock formats
614  * so we have a common interface to them all, and an array of
615  * different handlers.
616  * We rely on user-space to write the initial superblock, and support
617  * reading and updating of superblocks.
618  * Interface methods are:
619  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
620  *      loads and validates a superblock on dev.
621  *      if refdev != NULL, compare superblocks on both devices
622  *    Return:
623  *      0 - dev has a superblock that is compatible with refdev
624  *      1 - dev has a superblock that is compatible and newer than refdev
625  *          so dev should be used as the refdev in future
626  *     -EINVAL superblock incompatible or invalid
627  *     -othererror e.g. -EIO
628  *
629  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
630  *      Verify that dev is acceptable into mddev.
631  *       The first time, mddev->raid_disks will be 0, and data from
632  *       dev should be merged in.  Subsequent calls check that dev
633  *       is new enough.  Return 0 or -EINVAL
634  *
635  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
636  *     Update the superblock for rdev with data in mddev
637  *     This does not write to disc.
638  *
639  */
640
641 struct super_type  {
642         char                *name;
643         struct module       *owner;
644         int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
645                                           int minor_version);
646         int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
647         void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
648         unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
649                                                 sector_t num_sectors);
650 };
651
652 /*
653  * load_super for 0.90.0 
654  */
655 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
656 {
657         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
658         mdp_super_t *sb;
659         int ret;
660
661         /*
662          * Calculate the position of the superblock (512byte sectors),
663          * it's at the end of the disk.
664          *
665          * It also happens to be a multiple of 4Kb.
666          */
667         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
668
669         ret = read_disk_sb(rdev, MD_SB_BYTES);
670         if (ret) return ret;
671
672         ret = -EINVAL;
673
674         bdevname(rdev->bdev, b);
675         sb = (mdp_super_t*)page_address(rdev->sb_page);
676
677         if (sb->md_magic != MD_SB_MAGIC) {
678                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
679                        b);
680                 goto abort;
681         }
682
683         if (sb->major_version != 0 ||
684             sb->minor_version < 90 ||
685             sb->minor_version > 91) {
686                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
687                         sb->major_version, sb->minor_version,
688                         b);
689                 goto abort;
690         }
691
692         if (sb->raid_disks <= 0)
693                 goto abort;
694
695         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
696                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
697                         b);
698                 goto abort;
699         }
700
701         rdev->preferred_minor = sb->md_minor;
702         rdev->data_offset = 0;
703         rdev->sb_size = MD_SB_BYTES;
704
705         if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
706                 if (sb->level != 1 && sb->level != 4
707                     && sb->level != 5 && sb->level != 6
708                     && sb->level != 10) {
709                         /* FIXME use a better test */
710                         printk(KERN_WARNING
711                                "md: bitmaps not supported for this level.\n");
712                         goto abort;
713                 }
714         }
715
716         if (sb->level == LEVEL_MULTIPATH)
717                 rdev->desc_nr = -1;
718         else
719                 rdev->desc_nr = sb->this_disk.number;
720
721         if (!refdev) {
722                 ret = 1;
723         } else {
724                 __u64 ev1, ev2;
725                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
726                 if (!uuid_equal(refsb, sb)) {
727                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
728                                 b, bdevname(refdev->bdev,b2));
729                         goto abort;
730                 }
731                 if (!sb_equal(refsb, sb)) {
732                         printk(KERN_WARNING "md: %s has same UUID"
733                                " but different superblock to %s\n",
734                                b, bdevname(refdev->bdev, b2));
735                         goto abort;
736                 }
737                 ev1 = md_event(sb);
738                 ev2 = md_event(refsb);
739                 if (ev1 > ev2)
740                         ret = 1;
741                 else 
742                         ret = 0;
743         }
744         rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2;
745
746         if (rdev->size < sb->size && sb->level > 1)
747                 /* "this cannot possibly happen" ... */
748                 ret = -EINVAL;
749
750  abort:
751         return ret;
752 }
753
754 /*
755  * validate_super for 0.90.0
756  */
757 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
758 {
759         mdp_disk_t *desc;
760         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
761         __u64 ev1 = md_event(sb);
762
763         rdev->raid_disk = -1;
764         clear_bit(Faulty, &rdev->flags);
765         clear_bit(In_sync, &rdev->flags);
766         clear_bit(WriteMostly, &rdev->flags);
767         clear_bit(BarriersNotsupp, &rdev->flags);
768
769         if (mddev->raid_disks == 0) {
770                 mddev->major_version = 0;
771                 mddev->minor_version = sb->minor_version;
772                 mddev->patch_version = sb->patch_version;
773                 mddev->external = 0;
774                 mddev->chunk_size = sb->chunk_size;
775                 mddev->ctime = sb->ctime;
776                 mddev->utime = sb->utime;
777                 mddev->level = sb->level;
778                 mddev->clevel[0] = 0;
779                 mddev->layout = sb->layout;
780                 mddev->raid_disks = sb->raid_disks;
781                 mddev->size = sb->size;
782                 mddev->events = ev1;
783                 mddev->bitmap_offset = 0;
784                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
785
786                 if (mddev->minor_version >= 91) {
787                         mddev->reshape_position = sb->reshape_position;
788                         mddev->delta_disks = sb->delta_disks;
789                         mddev->new_level = sb->new_level;
790                         mddev->new_layout = sb->new_layout;
791                         mddev->new_chunk = sb->new_chunk;
792                 } else {
793                         mddev->reshape_position = MaxSector;
794                         mddev->delta_disks = 0;
795                         mddev->new_level = mddev->level;
796                         mddev->new_layout = mddev->layout;
797                         mddev->new_chunk = mddev->chunk_size;
798                 }
799
800                 if (sb->state & (1<<MD_SB_CLEAN))
801                         mddev->recovery_cp = MaxSector;
802                 else {
803                         if (sb->events_hi == sb->cp_events_hi && 
804                                 sb->events_lo == sb->cp_events_lo) {
805                                 mddev->recovery_cp = sb->recovery_cp;
806                         } else
807                                 mddev->recovery_cp = 0;
808                 }
809
810                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
811                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
812                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
813                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
814
815                 mddev->max_disks = MD_SB_DISKS;
816
817                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
818                     mddev->bitmap_file == NULL)
819                         mddev->bitmap_offset = mddev->default_bitmap_offset;
820
821         } else if (mddev->pers == NULL) {
822                 /* Insist on good event counter while assembling */
823                 ++ev1;
824                 if (ev1 < mddev->events) 
825                         return -EINVAL;
826         } else if (mddev->bitmap) {
827                 /* if adding to array with a bitmap, then we can accept an
828                  * older device ... but not too old.
829                  */
830                 if (ev1 < mddev->bitmap->events_cleared)
831                         return 0;
832         } else {
833                 if (ev1 < mddev->events)
834                         /* just a hot-add of a new device, leave raid_disk at -1 */
835                         return 0;
836         }
837
838         if (mddev->level != LEVEL_MULTIPATH) {
839                 desc = sb->disks + rdev->desc_nr;
840
841                 if (desc->state & (1<<MD_DISK_FAULTY))
842                         set_bit(Faulty, &rdev->flags);
843                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
844                             desc->raid_disk < mddev->raid_disks */) {
845                         set_bit(In_sync, &rdev->flags);
846                         rdev->raid_disk = desc->raid_disk;
847                 }
848                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
849                         set_bit(WriteMostly, &rdev->flags);
850         } else /* MULTIPATH are always insync */
851                 set_bit(In_sync, &rdev->flags);
852         return 0;
853 }
854
855 /*
856  * sync_super for 0.90.0
857  */
858 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
859 {
860         mdp_super_t *sb;
861         struct list_head *tmp;
862         mdk_rdev_t *rdev2;
863         int next_spare = mddev->raid_disks;
864
865
866         /* make rdev->sb match mddev data..
867          *
868          * 1/ zero out disks
869          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
870          * 3/ any empty disks < next_spare become removed
871          *
872          * disks[0] gets initialised to REMOVED because
873          * we cannot be sure from other fields if it has
874          * been initialised or not.
875          */
876         int i;
877         int active=0, working=0,failed=0,spare=0,nr_disks=0;
878
879         rdev->sb_size = MD_SB_BYTES;
880
881         sb = (mdp_super_t*)page_address(rdev->sb_page);
882
883         memset(sb, 0, sizeof(*sb));
884
885         sb->md_magic = MD_SB_MAGIC;
886         sb->major_version = mddev->major_version;
887         sb->patch_version = mddev->patch_version;
888         sb->gvalid_words  = 0; /* ignored */
889         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
890         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
891         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
892         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
893
894         sb->ctime = mddev->ctime;
895         sb->level = mddev->level;
896         sb->size  = mddev->size;
897         sb->raid_disks = mddev->raid_disks;
898         sb->md_minor = mddev->md_minor;
899         sb->not_persistent = 0;
900         sb->utime = mddev->utime;
901         sb->state = 0;
902         sb->events_hi = (mddev->events>>32);
903         sb->events_lo = (u32)mddev->events;
904
905         if (mddev->reshape_position == MaxSector)
906                 sb->minor_version = 90;
907         else {
908                 sb->minor_version = 91;
909                 sb->reshape_position = mddev->reshape_position;
910                 sb->new_level = mddev->new_level;
911                 sb->delta_disks = mddev->delta_disks;
912                 sb->new_layout = mddev->new_layout;
913                 sb->new_chunk = mddev->new_chunk;
914         }
915         mddev->minor_version = sb->minor_version;
916         if (mddev->in_sync)
917         {
918                 sb->recovery_cp = mddev->recovery_cp;
919                 sb->cp_events_hi = (mddev->events>>32);
920                 sb->cp_events_lo = (u32)mddev->events;
921                 if (mddev->recovery_cp == MaxSector)
922                         sb->state = (1<< MD_SB_CLEAN);
923         } else
924                 sb->recovery_cp = 0;
925
926         sb->layout = mddev->layout;
927         sb->chunk_size = mddev->chunk_size;
928
929         if (mddev->bitmap && mddev->bitmap_file == NULL)
930                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
931
932         sb->disks[0].state = (1<<MD_DISK_REMOVED);
933         rdev_for_each(rdev2, tmp, mddev) {
934                 mdp_disk_t *d;
935                 int desc_nr;
936                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
937                     && !test_bit(Faulty, &rdev2->flags))
938                         desc_nr = rdev2->raid_disk;
939                 else
940                         desc_nr = next_spare++;
941                 rdev2->desc_nr = desc_nr;
942                 d = &sb->disks[rdev2->desc_nr];
943                 nr_disks++;
944                 d->number = rdev2->desc_nr;
945                 d->major = MAJOR(rdev2->bdev->bd_dev);
946                 d->minor = MINOR(rdev2->bdev->bd_dev);
947                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
948                     && !test_bit(Faulty, &rdev2->flags))
949                         d->raid_disk = rdev2->raid_disk;
950                 else
951                         d->raid_disk = rdev2->desc_nr; /* compatibility */
952                 if (test_bit(Faulty, &rdev2->flags))
953                         d->state = (1<<MD_DISK_FAULTY);
954                 else if (test_bit(In_sync, &rdev2->flags)) {
955                         d->state = (1<<MD_DISK_ACTIVE);
956                         d->state |= (1<<MD_DISK_SYNC);
957                         active++;
958                         working++;
959                 } else {
960                         d->state = 0;
961                         spare++;
962                         working++;
963                 }
964                 if (test_bit(WriteMostly, &rdev2->flags))
965                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
966         }
967         /* now set the "removed" and "faulty" bits on any missing devices */
968         for (i=0 ; i < mddev->raid_disks ; i++) {
969                 mdp_disk_t *d = &sb->disks[i];
970                 if (d->state == 0 && d->number == 0) {
971                         d->number = i;
972                         d->raid_disk = i;
973                         d->state = (1<<MD_DISK_REMOVED);
974                         d->state |= (1<<MD_DISK_FAULTY);
975                         failed++;
976                 }
977         }
978         sb->nr_disks = nr_disks;
979         sb->active_disks = active;
980         sb->working_disks = working;
981         sb->failed_disks = failed;
982         sb->spare_disks = spare;
983
984         sb->this_disk = sb->disks[rdev->desc_nr];
985         sb->sb_csum = calc_sb_csum(sb);
986 }
987
988 /*
989  * rdev_size_change for 0.90.0
990  */
991 static unsigned long long
992 super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
993 {
994         if (num_sectors && num_sectors < rdev->mddev->size * 2)
995                 return 0; /* component must fit device */
996         if (rdev->mddev->bitmap_offset)
997                 return 0; /* can't move bitmap */
998         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
999         if (!num_sectors || num_sectors > rdev->sb_start)
1000                 num_sectors = rdev->sb_start;
1001         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1002                        rdev->sb_page);
1003         md_super_wait(rdev->mddev);
1004         return num_sectors / 2; /* kB for sysfs */
1005 }
1006
1007
1008 /*
1009  * version 1 superblock
1010  */
1011
1012 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1013 {
1014         __le32 disk_csum;
1015         u32 csum;
1016         unsigned long long newcsum;
1017         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1018         __le32 *isuper = (__le32*)sb;
1019         int i;
1020
1021         disk_csum = sb->sb_csum;
1022         sb->sb_csum = 0;
1023         newcsum = 0;
1024         for (i=0; size>=4; size -= 4 )
1025                 newcsum += le32_to_cpu(*isuper++);
1026
1027         if (size == 2)
1028                 newcsum += le16_to_cpu(*(__le16*) isuper);
1029
1030         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1031         sb->sb_csum = disk_csum;
1032         return cpu_to_le32(csum);
1033 }
1034
1035 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1036 {
1037         struct mdp_superblock_1 *sb;
1038         int ret;
1039         sector_t sb_start;
1040         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1041         int bmask;
1042
1043         /*
1044          * Calculate the position of the superblock in 512byte sectors.
1045          * It is always aligned to a 4K boundary and
1046          * depeding on minor_version, it can be:
1047          * 0: At least 8K, but less than 12K, from end of device
1048          * 1: At start of device
1049          * 2: 4K from start of device.
1050          */
1051         switch(minor_version) {
1052         case 0:
1053                 sb_start = rdev->bdev->bd_inode->i_size >> 9;
1054                 sb_start -= 8*2;
1055                 sb_start &= ~(sector_t)(4*2-1);
1056                 break;
1057         case 1:
1058                 sb_start = 0;
1059                 break;
1060         case 2:
1061                 sb_start = 8;
1062                 break;
1063         default:
1064                 return -EINVAL;
1065         }
1066         rdev->sb_start = sb_start;
1067
1068         /* superblock is rarely larger than 1K, but it can be larger,
1069          * and it is safe to read 4k, so we do that
1070          */
1071         ret = read_disk_sb(rdev, 4096);
1072         if (ret) return ret;
1073
1074
1075         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1076
1077         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1078             sb->major_version != cpu_to_le32(1) ||
1079             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1080             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1081             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1082                 return -EINVAL;
1083
1084         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1085                 printk("md: invalid superblock checksum on %s\n",
1086                         bdevname(rdev->bdev,b));
1087                 return -EINVAL;
1088         }
1089         if (le64_to_cpu(sb->data_size) < 10) {
1090                 printk("md: data_size too small on %s\n",
1091                        bdevname(rdev->bdev,b));
1092                 return -EINVAL;
1093         }
1094         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1095                 if (sb->level != cpu_to_le32(1) &&
1096                     sb->level != cpu_to_le32(4) &&
1097                     sb->level != cpu_to_le32(5) &&
1098                     sb->level != cpu_to_le32(6) &&
1099                     sb->level != cpu_to_le32(10)) {
1100                         printk(KERN_WARNING
1101                                "md: bitmaps not supported for this level.\n");
1102                         return -EINVAL;
1103                 }
1104         }
1105
1106         rdev->preferred_minor = 0xffff;
1107         rdev->data_offset = le64_to_cpu(sb->data_offset);
1108         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1109
1110         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1111         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1112         if (rdev->sb_size & bmask)
1113                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1114
1115         if (minor_version
1116             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1117                 return -EINVAL;
1118
1119         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1120                 rdev->desc_nr = -1;
1121         else
1122                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1123
1124         if (!refdev) {
1125                 ret = 1;
1126         } else {
1127                 __u64 ev1, ev2;
1128                 struct mdp_superblock_1 *refsb = 
1129                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1130
1131                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1132                     sb->level != refsb->level ||
1133                     sb->layout != refsb->layout ||
1134                     sb->chunksize != refsb->chunksize) {
1135                         printk(KERN_WARNING "md: %s has strangely different"
1136                                 " superblock to %s\n",
1137                                 bdevname(rdev->bdev,b),
1138                                 bdevname(refdev->bdev,b2));
1139                         return -EINVAL;
1140                 }
1141                 ev1 = le64_to_cpu(sb->events);
1142                 ev2 = le64_to_cpu(refsb->events);
1143
1144                 if (ev1 > ev2)
1145                         ret = 1;
1146                 else
1147                         ret = 0;
1148         }
1149         if (minor_version)
1150                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1151         else
1152                 rdev->size = rdev->sb_start / 2;
1153         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1154                 return -EINVAL;
1155         rdev->size = le64_to_cpu(sb->data_size)/2;
1156         if (le32_to_cpu(sb->chunksize))
1157                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1158
1159         if (le64_to_cpu(sb->size) > rdev->size*2)
1160                 return -EINVAL;
1161         return ret;
1162 }
1163
1164 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1165 {
1166         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1167         __u64 ev1 = le64_to_cpu(sb->events);
1168
1169         rdev->raid_disk = -1;
1170         clear_bit(Faulty, &rdev->flags);
1171         clear_bit(In_sync, &rdev->flags);
1172         clear_bit(WriteMostly, &rdev->flags);
1173         clear_bit(BarriersNotsupp, &rdev->flags);
1174
1175         if (mddev->raid_disks == 0) {
1176                 mddev->major_version = 1;
1177                 mddev->patch_version = 0;
1178                 mddev->external = 0;
1179                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1180                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1181                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1182                 mddev->level = le32_to_cpu(sb->level);
1183                 mddev->clevel[0] = 0;
1184                 mddev->layout = le32_to_cpu(sb->layout);
1185                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1186                 mddev->size = le64_to_cpu(sb->size)/2;
1187                 mddev->events = ev1;
1188                 mddev->bitmap_offset = 0;
1189                 mddev->default_bitmap_offset = 1024 >> 9;
1190                 
1191                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1192                 memcpy(mddev->uuid, sb->set_uuid, 16);
1193
1194                 mddev->max_disks =  (4096-256)/2;
1195
1196                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1197                     mddev->bitmap_file == NULL )
1198                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1199
1200                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1201                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1202                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1203                         mddev->new_level = le32_to_cpu(sb->new_level);
1204                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1205                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1206                 } else {
1207                         mddev->reshape_position = MaxSector;
1208                         mddev->delta_disks = 0;
1209                         mddev->new_level = mddev->level;
1210                         mddev->new_layout = mddev->layout;
1211                         mddev->new_chunk = mddev->chunk_size;
1212                 }
1213
1214         } else if (mddev->pers == NULL) {
1215                 /* Insist of good event counter while assembling */
1216                 ++ev1;
1217                 if (ev1 < mddev->events)
1218                         return -EINVAL;
1219         } else if (mddev->bitmap) {
1220                 /* If adding to array with a bitmap, then we can accept an
1221                  * older device, but not too old.
1222                  */
1223                 if (ev1 < mddev->bitmap->events_cleared)
1224                         return 0;
1225         } else {
1226                 if (ev1 < mddev->events)
1227                         /* just a hot-add of a new device, leave raid_disk at -1 */
1228                         return 0;
1229         }
1230         if (mddev->level != LEVEL_MULTIPATH) {
1231                 int role;
1232                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1233                 switch(role) {
1234                 case 0xffff: /* spare */
1235                         break;
1236                 case 0xfffe: /* faulty */
1237                         set_bit(Faulty, &rdev->flags);
1238                         break;
1239                 default:
1240                         if ((le32_to_cpu(sb->feature_map) &
1241                              MD_FEATURE_RECOVERY_OFFSET))
1242                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1243                         else
1244                                 set_bit(In_sync, &rdev->flags);
1245                         rdev->raid_disk = role;
1246                         break;
1247                 }
1248                 if (sb->devflags & WriteMostly1)
1249                         set_bit(WriteMostly, &rdev->flags);
1250         } else /* MULTIPATH are always insync */
1251                 set_bit(In_sync, &rdev->flags);
1252
1253         return 0;
1254 }
1255
1256 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1257 {
1258         struct mdp_superblock_1 *sb;
1259         struct list_head *tmp;
1260         mdk_rdev_t *rdev2;
1261         int max_dev, i;
1262         /* make rdev->sb match mddev and rdev data. */
1263
1264         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1265
1266         sb->feature_map = 0;
1267         sb->pad0 = 0;
1268         sb->recovery_offset = cpu_to_le64(0);
1269         memset(sb->pad1, 0, sizeof(sb->pad1));
1270         memset(sb->pad2, 0, sizeof(sb->pad2));
1271         memset(sb->pad3, 0, sizeof(sb->pad3));
1272
1273         sb->utime = cpu_to_le64((__u64)mddev->utime);
1274         sb->events = cpu_to_le64(mddev->events);
1275         if (mddev->in_sync)
1276                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1277         else
1278                 sb->resync_offset = cpu_to_le64(0);
1279
1280         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1281
1282         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1283         sb->size = cpu_to_le64(mddev->size<<1);
1284
1285         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1286                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1287                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1288         }
1289
1290         if (rdev->raid_disk >= 0 &&
1291             !test_bit(In_sync, &rdev->flags) &&
1292             rdev->recovery_offset > 0) {
1293                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1294                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1295         }
1296
1297         if (mddev->reshape_position != MaxSector) {
1298                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1299                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1300                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1301                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1302                 sb->new_level = cpu_to_le32(mddev->new_level);
1303                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1304         }
1305
1306         max_dev = 0;
1307         rdev_for_each(rdev2, tmp, mddev)
1308                 if (rdev2->desc_nr+1 > max_dev)
1309                         max_dev = rdev2->desc_nr+1;
1310
1311         if (max_dev > le32_to_cpu(sb->max_dev))
1312                 sb->max_dev = cpu_to_le32(max_dev);
1313         for (i=0; i<max_dev;i++)
1314                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1315         
1316         rdev_for_each(rdev2, tmp, mddev) {
1317                 i = rdev2->desc_nr;
1318                 if (test_bit(Faulty, &rdev2->flags))
1319                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1320                 else if (test_bit(In_sync, &rdev2->flags))
1321                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1322                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1323                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1324                 else
1325                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1326         }
1327
1328         sb->sb_csum = calc_sb_1_csum(sb);
1329 }
1330
1331 static unsigned long long
1332 super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1333 {
1334         struct mdp_superblock_1 *sb;
1335         sector_t max_sectors;
1336         if (num_sectors && num_sectors < rdev->mddev->size * 2)
1337                 return 0; /* component must fit device */
1338         if (rdev->sb_start < rdev->data_offset) {
1339                 /* minor versions 1 and 2; superblock before data */
1340                 max_sectors = rdev->bdev->bd_inode->i_size >> 9;
1341                 max_sectors -= rdev->data_offset;
1342                 if (!num_sectors || num_sectors > max_sectors)
1343                         num_sectors = max_sectors;
1344         } else if (rdev->mddev->bitmap_offset) {
1345                 /* minor version 0 with bitmap we can't move */
1346                 return 0;
1347         } else {
1348                 /* minor version 0; superblock after data */
1349                 sector_t sb_start;
1350                 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
1351                 sb_start &= ~(sector_t)(4*2 - 1);
1352                 max_sectors = rdev->size * 2 + sb_start - rdev->sb_start;
1353                 if (!num_sectors || num_sectors > max_sectors)
1354                         num_sectors = max_sectors;
1355                 rdev->sb_start = sb_start;
1356         }
1357         sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
1358         sb->data_size = cpu_to_le64(num_sectors);
1359         sb->super_offset = rdev->sb_start;
1360         sb->sb_csum = calc_sb_1_csum(sb);
1361         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1362                        rdev->sb_page);
1363         md_super_wait(rdev->mddev);
1364         return num_sectors / 2; /* kB for sysfs */
1365 }
1366
1367 static struct super_type super_types[] = {
1368         [0] = {
1369                 .name   = "0.90.0",
1370                 .owner  = THIS_MODULE,
1371                 .load_super         = super_90_load,
1372                 .validate_super     = super_90_validate,
1373                 .sync_super         = super_90_sync,
1374                 .rdev_size_change   = super_90_rdev_size_change,
1375         },
1376         [1] = {
1377                 .name   = "md-1",
1378                 .owner  = THIS_MODULE,
1379                 .load_super         = super_1_load,
1380                 .validate_super     = super_1_validate,
1381                 .sync_super         = super_1_sync,
1382                 .rdev_size_change   = super_1_rdev_size_change,
1383         },
1384 };
1385
1386 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1387 {
1388         mdk_rdev_t *rdev, *rdev2;
1389
1390         rcu_read_lock();
1391         rdev_for_each_rcu(rdev, mddev1)
1392                 rdev_for_each_rcu(rdev2, mddev2)
1393                         if (rdev->bdev->bd_contains ==
1394                             rdev2->bdev->bd_contains) {
1395                                 rcu_read_unlock();
1396                                 return 1;
1397                         }
1398         rcu_read_unlock();
1399         return 0;
1400 }
1401
1402 static LIST_HEAD(pending_raid_disks);
1403
1404 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1405 {
1406         char b[BDEVNAME_SIZE];
1407         struct kobject *ko;
1408         char *s;
1409         int err;
1410
1411         if (rdev->mddev) {
1412                 MD_BUG();
1413                 return -EINVAL;
1414         }
1415
1416         /* prevent duplicates */
1417         if (find_rdev(mddev, rdev->bdev->bd_dev))
1418                 return -EEXIST;
1419
1420         /* make sure rdev->size exceeds mddev->size */
1421         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1422                 if (mddev->pers) {
1423                         /* Cannot change size, so fail
1424                          * If mddev->level <= 0, then we don't care
1425                          * about aligning sizes (e.g. linear)
1426                          */
1427                         if (mddev->level > 0)
1428                                 return -ENOSPC;
1429                 } else
1430                         mddev->size = rdev->size;
1431         }
1432
1433         /* Verify rdev->desc_nr is unique.
1434          * If it is -1, assign a free number, else
1435          * check number is not in use
1436          */
1437         if (rdev->desc_nr < 0) {
1438                 int choice = 0;
1439                 if (mddev->pers) choice = mddev->raid_disks;
1440                 while (find_rdev_nr(mddev, choice))
1441                         choice++;
1442                 rdev->desc_nr = choice;
1443         } else {
1444                 if (find_rdev_nr(mddev, rdev->desc_nr))
1445                         return -EBUSY;
1446         }
1447         bdevname(rdev->bdev,b);
1448         while ( (s=strchr(b, '/')) != NULL)
1449                 *s = '!';
1450
1451         rdev->mddev = mddev;
1452         printk(KERN_INFO "md: bind<%s>\n", b);
1453
1454         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1455                 goto fail;
1456
1457         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1458         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1459                 kobject_del(&rdev->kobj);
1460                 goto fail;
1461         }
1462         list_add_rcu(&rdev->same_set, &mddev->disks);
1463         bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1464         return 0;
1465
1466  fail:
1467         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1468                b, mdname(mddev));
1469         return err;
1470 }
1471
1472 static void md_delayed_delete(struct work_struct *ws)
1473 {
1474         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1475         kobject_del(&rdev->kobj);
1476         kobject_put(&rdev->kobj);
1477 }
1478
1479 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1480 {
1481         char b[BDEVNAME_SIZE];
1482         if (!rdev->mddev) {
1483                 MD_BUG();
1484                 return;
1485         }
1486         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1487         list_del_rcu(&rdev->same_set);
1488         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1489         rdev->mddev = NULL;
1490         sysfs_remove_link(&rdev->kobj, "block");
1491
1492         /* We need to delay this, otherwise we can deadlock when
1493          * writing to 'remove' to "dev/state".  We also need
1494          * to delay it due to rcu usage.
1495          */
1496         synchronize_rcu();
1497         INIT_WORK(&rdev->del_work, md_delayed_delete);
1498         kobject_get(&rdev->kobj);
1499         schedule_work(&rdev->del_work);
1500 }
1501
1502 /*
1503  * prevent the device from being mounted, repartitioned or
1504  * otherwise reused by a RAID array (or any other kernel
1505  * subsystem), by bd_claiming the device.
1506  */
1507 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1508 {
1509         int err = 0;
1510         struct block_device *bdev;
1511         char b[BDEVNAME_SIZE];
1512
1513         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1514         if (IS_ERR(bdev)) {
1515                 printk(KERN_ERR "md: could not open %s.\n",
1516                         __bdevname(dev, b));
1517                 return PTR_ERR(bdev);
1518         }
1519         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1520         if (err) {
1521                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1522                         bdevname(bdev, b));
1523                 blkdev_put(bdev);
1524                 return err;
1525         }
1526         if (!shared)
1527                 set_bit(AllReserved, &rdev->flags);
1528         rdev->bdev = bdev;
1529         return err;
1530 }
1531
1532 static void unlock_rdev(mdk_rdev_t *rdev)
1533 {
1534         struct block_device *bdev = rdev->bdev;
1535         rdev->bdev = NULL;
1536         if (!bdev)
1537                 MD_BUG();
1538         bd_release(bdev);
1539         blkdev_put(bdev);
1540 }
1541
1542 void md_autodetect_dev(dev_t dev);
1543
1544 static void export_rdev(mdk_rdev_t * rdev)
1545 {
1546         char b[BDEVNAME_SIZE];
1547         printk(KERN_INFO "md: export_rdev(%s)\n",
1548                 bdevname(rdev->bdev,b));
1549         if (rdev->mddev)
1550                 MD_BUG();
1551         free_disk_sb(rdev);
1552 #ifndef MODULE
1553         if (test_bit(AutoDetected, &rdev->flags))
1554                 md_autodetect_dev(rdev->bdev->bd_dev);
1555 #endif
1556         unlock_rdev(rdev);
1557         kobject_put(&rdev->kobj);
1558 }
1559
1560 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1561 {
1562         unbind_rdev_from_array(rdev);
1563         export_rdev(rdev);
1564 }
1565
1566 static void export_array(mddev_t *mddev)
1567 {
1568         struct list_head *tmp;
1569         mdk_rdev_t *rdev;
1570
1571         rdev_for_each(rdev, tmp, mddev) {
1572                 if (!rdev->mddev) {
1573                         MD_BUG();
1574                         continue;
1575                 }
1576                 kick_rdev_from_array(rdev);
1577         }
1578         if (!list_empty(&mddev->disks))
1579                 MD_BUG();
1580         mddev->raid_disks = 0;
1581         mddev->major_version = 0;
1582 }
1583
1584 static void print_desc(mdp_disk_t *desc)
1585 {
1586         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1587                 desc->major,desc->minor,desc->raid_disk,desc->state);
1588 }
1589
1590 static void print_sb(mdp_super_t *sb)
1591 {
1592         int i;
1593
1594         printk(KERN_INFO 
1595                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1596                 sb->major_version, sb->minor_version, sb->patch_version,
1597                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1598                 sb->ctime);
1599         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1600                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1601                 sb->md_minor, sb->layout, sb->chunk_size);
1602         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1603                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1604                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1605                 sb->failed_disks, sb->spare_disks,
1606                 sb->sb_csum, (unsigned long)sb->events_lo);
1607
1608         printk(KERN_INFO);
1609         for (i = 0; i < MD_SB_DISKS; i++) {
1610                 mdp_disk_t *desc;
1611
1612                 desc = sb->disks + i;
1613                 if (desc->number || desc->major || desc->minor ||
1614                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1615                         printk("     D %2d: ", i);
1616                         print_desc(desc);
1617                 }
1618         }
1619         printk(KERN_INFO "md:     THIS: ");
1620         print_desc(&sb->this_disk);
1621
1622 }
1623
1624 static void print_rdev(mdk_rdev_t *rdev)
1625 {
1626         char b[BDEVNAME_SIZE];
1627         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1628                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1629                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1630                 rdev->desc_nr);
1631         if (rdev->sb_loaded) {
1632                 printk(KERN_INFO "md: rdev superblock:\n");
1633                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1634         } else
1635                 printk(KERN_INFO "md: no rdev superblock!\n");
1636 }
1637
1638 static void md_print_devices(void)
1639 {
1640         struct list_head *tmp, *tmp2;
1641         mdk_rdev_t *rdev;
1642         mddev_t *mddev;
1643         char b[BDEVNAME_SIZE];
1644
1645         printk("\n");
1646         printk("md:     **********************************\n");
1647         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1648         printk("md:     **********************************\n");
1649         for_each_mddev(mddev, tmp) {
1650
1651                 if (mddev->bitmap)
1652                         bitmap_print_sb(mddev->bitmap);
1653                 else
1654                         printk("%s: ", mdname(mddev));
1655                 rdev_for_each(rdev, tmp2, mddev)
1656                         printk("<%s>", bdevname(rdev->bdev,b));
1657                 printk("\n");
1658
1659                 rdev_for_each(rdev, tmp2, mddev)
1660                         print_rdev(rdev);
1661         }
1662         printk("md:     **********************************\n");
1663         printk("\n");
1664 }
1665
1666
1667 static void sync_sbs(mddev_t * mddev, int nospares)
1668 {
1669         /* Update each superblock (in-memory image), but
1670          * if we are allowed to, skip spares which already
1671          * have the right event counter, or have one earlier
1672          * (which would mean they aren't being marked as dirty
1673          * with the rest of the array)
1674          */
1675         mdk_rdev_t *rdev;
1676         struct list_head *tmp;
1677
1678         rdev_for_each(rdev, tmp, mddev) {
1679                 if (rdev->sb_events == mddev->events ||
1680                     (nospares &&
1681                      rdev->raid_disk < 0 &&
1682                      (rdev->sb_events&1)==0 &&
1683                      rdev->sb_events+1 == mddev->events)) {
1684                         /* Don't update this superblock */
1685                         rdev->sb_loaded = 2;
1686                 } else {
1687                         super_types[mddev->major_version].
1688                                 sync_super(mddev, rdev);
1689                         rdev->sb_loaded = 1;
1690                 }
1691         }
1692 }
1693
1694 static void md_update_sb(mddev_t * mddev, int force_change)
1695 {
1696         struct list_head *tmp;
1697         mdk_rdev_t *rdev;
1698         int sync_req;
1699         int nospares = 0;
1700
1701         if (mddev->external)
1702                 return;
1703 repeat:
1704         spin_lock_irq(&mddev->write_lock);
1705
1706         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1707         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1708                 force_change = 1;
1709         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1710                 /* just a clean<-> dirty transition, possibly leave spares alone,
1711                  * though if events isn't the right even/odd, we will have to do
1712                  * spares after all
1713                  */
1714                 nospares = 1;
1715         if (force_change)
1716                 nospares = 0;
1717         if (mddev->degraded)
1718                 /* If the array is degraded, then skipping spares is both
1719                  * dangerous and fairly pointless.
1720                  * Dangerous because a device that was removed from the array
1721                  * might have a event_count that still looks up-to-date,
1722                  * so it can be re-added without a resync.
1723                  * Pointless because if there are any spares to skip,
1724                  * then a recovery will happen and soon that array won't
1725                  * be degraded any more and the spare can go back to sleep then.
1726                  */
1727                 nospares = 0;
1728
1729         sync_req = mddev->in_sync;
1730         mddev->utime = get_seconds();
1731
1732         /* If this is just a dirty<->clean transition, and the array is clean
1733          * and 'events' is odd, we can roll back to the previous clean state */
1734         if (nospares
1735             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1736             && (mddev->events & 1)
1737             && mddev->events != 1)
1738                 mddev->events--;
1739         else {
1740                 /* otherwise we have to go forward and ... */
1741                 mddev->events ++;
1742                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1743                         /* .. if the array isn't clean, insist on an odd 'events' */
1744                         if ((mddev->events&1)==0) {
1745                                 mddev->events++;
1746                                 nospares = 0;
1747                         }
1748                 } else {
1749                         /* otherwise insist on an even 'events' (for clean states) */
1750                         if ((mddev->events&1)) {
1751                                 mddev->events++;
1752                                 nospares = 0;
1753                         }
1754                 }
1755         }
1756
1757         if (!mddev->events) {
1758                 /*
1759                  * oops, this 64-bit counter should never wrap.
1760                  * Either we are in around ~1 trillion A.C., assuming
1761                  * 1 reboot per second, or we have a bug:
1762                  */
1763                 MD_BUG();
1764                 mddev->events --;
1765         }
1766
1767         /*
1768          * do not write anything to disk if using
1769          * nonpersistent superblocks
1770          */
1771         if (!mddev->persistent) {
1772                 if (!mddev->external)
1773                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1774
1775                 spin_unlock_irq(&mddev->write_lock);
1776                 wake_up(&mddev->sb_wait);
1777                 return;
1778         }
1779         sync_sbs(mddev, nospares);
1780         spin_unlock_irq(&mddev->write_lock);
1781
1782         dprintk(KERN_INFO 
1783                 "md: updating %s RAID superblock on device (in sync %d)\n",
1784                 mdname(mddev),mddev->in_sync);
1785
1786         bitmap_update_sb(mddev->bitmap);
1787         rdev_for_each(rdev, tmp, mddev) {
1788                 char b[BDEVNAME_SIZE];
1789                 dprintk(KERN_INFO "md: ");
1790                 if (rdev->sb_loaded != 1)
1791                         continue; /* no noise on spare devices */
1792                 if (test_bit(Faulty, &rdev->flags))
1793                         dprintk("(skipping faulty ");
1794
1795                 dprintk("%s ", bdevname(rdev->bdev,b));
1796                 if (!test_bit(Faulty, &rdev->flags)) {
1797                         md_super_write(mddev,rdev,
1798                                        rdev->sb_start, rdev->sb_size,
1799                                        rdev->sb_page);
1800                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1801                                 bdevname(rdev->bdev,b),
1802                                 (unsigned long long)rdev->sb_start);
1803                         rdev->sb_events = mddev->events;
1804
1805                 } else
1806                         dprintk(")\n");
1807                 if (mddev->level == LEVEL_MULTIPATH)
1808                         /* only need to write one superblock... */
1809                         break;
1810         }
1811         md_super_wait(mddev);
1812         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1813
1814         spin_lock_irq(&mddev->write_lock);
1815         if (mddev->in_sync != sync_req ||
1816             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1817                 /* have to write it out again */
1818                 spin_unlock_irq(&mddev->write_lock);
1819                 goto repeat;
1820         }
1821         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1822         spin_unlock_irq(&mddev->write_lock);
1823         wake_up(&mddev->sb_wait);
1824
1825 }
1826
1827 /* words written to sysfs files may, or may not, be \n terminated.
1828  * We want to accept with case. For this we use cmd_match.
1829  */
1830 static int cmd_match(const char *cmd, const char *str)
1831 {
1832         /* See if cmd, written into a sysfs file, matches
1833          * str.  They must either be the same, or cmd can
1834          * have a trailing newline
1835          */
1836         while (*cmd && *str && *cmd == *str) {
1837                 cmd++;
1838                 str++;
1839         }
1840         if (*cmd == '\n')
1841                 cmd++;
1842         if (*str || *cmd)
1843                 return 0;
1844         return 1;
1845 }
1846
1847 struct rdev_sysfs_entry {
1848         struct attribute attr;
1849         ssize_t (*show)(mdk_rdev_t *, char *);
1850         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1851 };
1852
1853 static ssize_t
1854 state_show(mdk_rdev_t *rdev, char *page)
1855 {
1856         char *sep = "";
1857         size_t len = 0;
1858
1859         if (test_bit(Faulty, &rdev->flags)) {
1860                 len+= sprintf(page+len, "%sfaulty",sep);
1861                 sep = ",";
1862         }
1863         if (test_bit(In_sync, &rdev->flags)) {
1864                 len += sprintf(page+len, "%sin_sync",sep);
1865                 sep = ",";
1866         }
1867         if (test_bit(WriteMostly, &rdev->flags)) {
1868                 len += sprintf(page+len, "%swrite_mostly",sep);
1869                 sep = ",";
1870         }
1871         if (test_bit(Blocked, &rdev->flags)) {
1872                 len += sprintf(page+len, "%sblocked", sep);
1873                 sep = ",";
1874         }
1875         if (!test_bit(Faulty, &rdev->flags) &&
1876             !test_bit(In_sync, &rdev->flags)) {
1877                 len += sprintf(page+len, "%sspare", sep);
1878                 sep = ",";
1879         }
1880         return len+sprintf(page+len, "\n");
1881 }
1882
1883 static ssize_t
1884 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1885 {
1886         /* can write
1887          *  faulty  - simulates and error
1888          *  remove  - disconnects the device
1889          *  writemostly - sets write_mostly
1890          *  -writemostly - clears write_mostly
1891          *  blocked - sets the Blocked flag
1892          *  -blocked - clears the Blocked flag
1893          */
1894         int err = -EINVAL;
1895         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1896                 md_error(rdev->mddev, rdev);
1897                 err = 0;
1898         } else if (cmd_match(buf, "remove")) {
1899                 if (rdev->raid_disk >= 0)
1900                         err = -EBUSY;
1901                 else {
1902                         mddev_t *mddev = rdev->mddev;
1903                         kick_rdev_from_array(rdev);
1904                         if (mddev->pers)
1905                                 md_update_sb(mddev, 1);
1906                         md_new_event(mddev);
1907                         err = 0;
1908                 }
1909         } else if (cmd_match(buf, "writemostly")) {
1910                 set_bit(WriteMostly, &rdev->flags);
1911                 err = 0;
1912         } else if (cmd_match(buf, "-writemostly")) {
1913                 clear_bit(WriteMostly, &rdev->flags);
1914                 err = 0;
1915         } else if (cmd_match(buf, "blocked")) {
1916                 set_bit(Blocked, &rdev->flags);
1917                 err = 0;
1918         } else if (cmd_match(buf, "-blocked")) {
1919                 clear_bit(Blocked, &rdev->flags);
1920                 wake_up(&rdev->blocked_wait);
1921                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1922                 md_wakeup_thread(rdev->mddev->thread);
1923
1924                 err = 0;
1925         }
1926         if (!err)
1927                 sysfs_notify(&rdev->kobj, NULL, "state");
1928         return err ? err : len;
1929 }
1930 static struct rdev_sysfs_entry rdev_state =
1931 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
1932
1933 static ssize_t
1934 errors_show(mdk_rdev_t *rdev, char *page)
1935 {
1936         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1937 }
1938
1939 static ssize_t
1940 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1941 {
1942         char *e;
1943         unsigned long n = simple_strtoul(buf, &e, 10);
1944         if (*buf && (*e == 0 || *e == '\n')) {
1945                 atomic_set(&rdev->corrected_errors, n);
1946                 return len;
1947         }
1948         return -EINVAL;
1949 }
1950 static struct rdev_sysfs_entry rdev_errors =
1951 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1952
1953 static ssize_t
1954 slot_show(mdk_rdev_t *rdev, char *page)
1955 {
1956         if (rdev->raid_disk < 0)
1957                 return sprintf(page, "none\n");
1958         else
1959                 return sprintf(page, "%d\n", rdev->raid_disk);
1960 }
1961
1962 static ssize_t
1963 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1964 {
1965         char *e;
1966         int err;
1967         char nm[20];
1968         int slot = simple_strtoul(buf, &e, 10);
1969         if (strncmp(buf, "none", 4)==0)
1970                 slot = -1;
1971         else if (e==buf || (*e && *e!= '\n'))
1972                 return -EINVAL;
1973         if (rdev->mddev->pers && slot == -1) {
1974                 /* Setting 'slot' on an active array requires also
1975                  * updating the 'rd%d' link, and communicating
1976                  * with the personality with ->hot_*_disk.
1977                  * For now we only support removing
1978                  * failed/spare devices.  This normally happens automatically,
1979                  * but not when the metadata is externally managed.
1980                  */
1981                 if (rdev->raid_disk == -1)
1982                         return -EEXIST;
1983                 /* personality does all needed checks */
1984                 if (rdev->mddev->pers->hot_add_disk == NULL)
1985                         return -EINVAL;
1986                 err = rdev->mddev->pers->
1987                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
1988                 if (err)
1989                         return err;
1990                 sprintf(nm, "rd%d", rdev->raid_disk);
1991                 sysfs_remove_link(&rdev->mddev->kobj, nm);
1992                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1993                 md_wakeup_thread(rdev->mddev->thread);
1994         } else if (rdev->mddev->pers) {
1995                 mdk_rdev_t *rdev2;
1996                 struct list_head *tmp;
1997                 /* Activating a spare .. or possibly reactivating
1998                  * if we every get bitmaps working here.
1999                  */
2000
2001                 if (rdev->raid_disk != -1)
2002                         return -EBUSY;
2003
2004                 if (rdev->mddev->pers->hot_add_disk == NULL)
2005                         return -EINVAL;
2006
2007                 rdev_for_each(rdev2, tmp, rdev->mddev)
2008                         if (rdev2->raid_disk == slot)
2009                                 return -EEXIST;
2010
2011                 rdev->raid_disk = slot;
2012                 if (test_bit(In_sync, &rdev->flags))
2013                         rdev->saved_raid_disk = slot;
2014                 else
2015                         rdev->saved_raid_disk = -1;
2016                 err = rdev->mddev->pers->
2017                         hot_add_disk(rdev->mddev, rdev);
2018                 if (err) {
2019                         rdev->raid_disk = -1;
2020                         return err;
2021                 } else
2022                         sysfs_notify(&rdev->kobj, NULL, "state");
2023                 sprintf(nm, "rd%d", rdev->raid_disk);
2024                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2025                         printk(KERN_WARNING
2026                                "md: cannot register "
2027                                "%s for %s\n",
2028                                nm, mdname(rdev->mddev));
2029
2030                 /* don't wakeup anyone, leave that to userspace. */
2031         } else {
2032                 if (slot >= rdev->mddev->raid_disks)
2033                         return -ENOSPC;
2034                 rdev->raid_disk = slot;
2035                 /* assume it is working */
2036                 clear_bit(Faulty, &rdev->flags);
2037                 clear_bit(WriteMostly, &rdev->flags);
2038                 set_bit(In_sync, &rdev->flags);
2039                 sysfs_notify(&rdev->kobj, NULL, "state");
2040         }
2041         return len;
2042 }
2043
2044
2045 static struct rdev_sysfs_entry rdev_slot =
2046 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2047
2048 static ssize_t
2049 offset_show(mdk_rdev_t *rdev, char *page)
2050 {
2051         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2052 }
2053
2054 static ssize_t
2055 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2056 {
2057         char *e;
2058         unsigned long long offset = simple_strtoull(buf, &e, 10);
2059         if (e==buf || (*e && *e != '\n'))
2060                 return -EINVAL;
2061         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2062                 return -EBUSY;
2063         if (rdev->size && rdev->mddev->external)
2064                 /* Must set offset before size, so overlap checks
2065                  * can be sane */
2066                 return -EBUSY;
2067         rdev->data_offset = offset;
2068         return len;
2069 }
2070
2071 static struct rdev_sysfs_entry rdev_offset =
2072 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2073
2074 static ssize_t
2075 rdev_size_show(mdk_rdev_t *rdev, char *page)
2076 {
2077         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
2078 }
2079
2080 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2081 {
2082         /* check if two start/length pairs overlap */
2083         if (s1+l1 <= s2)
2084                 return 0;
2085         if (s2+l2 <= s1)
2086                 return 0;
2087         return 1;
2088 }
2089
2090 static ssize_t
2091 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2092 {
2093         unsigned long long size;
2094         unsigned long long oldsize = rdev->size;
2095         mddev_t *my_mddev = rdev->mddev;
2096
2097         if (strict_strtoull(buf, 10, &size) < 0)
2098                 return -EINVAL;
2099         if (my_mddev->pers && rdev->raid_disk >= 0) {
2100                 if (my_mddev->persistent) {
2101                         size = super_types[my_mddev->major_version].
2102                                 rdev_size_change(rdev, size * 2);
2103                         if (!size)
2104                                 return -EBUSY;
2105                 } else if (!size) {
2106                         size = (rdev->bdev->bd_inode->i_size >> 10);
2107                         size -= rdev->data_offset/2;
2108                 }
2109         }
2110         if (size < my_mddev->size)
2111                 return -EINVAL; /* component must fit device */
2112
2113         rdev->size = size;
2114         if (size > oldsize && my_mddev->external) {
2115                 /* need to check that all other rdevs with the same ->bdev
2116                  * do not overlap.  We need to unlock the mddev to avoid
2117                  * a deadlock.  We have already changed rdev->size, and if
2118                  * we have to change it back, we will have the lock again.
2119                  */
2120                 mddev_t *mddev;
2121                 int overlap = 0;
2122                 struct list_head *tmp, *tmp2;
2123
2124                 mddev_unlock(my_mddev);
2125                 for_each_mddev(mddev, tmp) {
2126                         mdk_rdev_t *rdev2;
2127
2128                         mddev_lock(mddev);
2129                         rdev_for_each(rdev2, tmp2, mddev)
2130                                 if (test_bit(AllReserved, &rdev2->flags) ||
2131                                     (rdev->bdev == rdev2->bdev &&
2132                                      rdev != rdev2 &&
2133                                      overlaps(rdev->data_offset, rdev->size * 2,
2134                                               rdev2->data_offset,
2135                                               rdev2->size * 2))) {
2136                                         overlap = 1;
2137                                         break;
2138                                 }
2139                         mddev_unlock(mddev);
2140                         if (overlap) {
2141                                 mddev_put(mddev);
2142                                 break;
2143                         }
2144                 }
2145                 mddev_lock(my_mddev);
2146                 if (overlap) {
2147                         /* Someone else could have slipped in a size
2148                          * change here, but doing so is just silly.
2149                          * We put oldsize back because we *know* it is
2150                          * safe, and trust userspace not to race with
2151                          * itself
2152                          */
2153                         rdev->size = oldsize;
2154                         return -EBUSY;
2155                 }
2156         }
2157         return len;
2158 }
2159
2160 static struct rdev_sysfs_entry rdev_size =
2161 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2162
2163 static struct attribute *rdev_default_attrs[] = {
2164         &rdev_state.attr,
2165         &rdev_errors.attr,
2166         &rdev_slot.attr,
2167         &rdev_offset.attr,
2168         &rdev_size.attr,
2169         NULL,
2170 };
2171 static ssize_t
2172 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2173 {
2174         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2175         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2176         mddev_t *mddev = rdev->mddev;
2177         ssize_t rv;
2178
2179         if (!entry->show)
2180                 return -EIO;
2181
2182         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2183         if (!rv) {
2184                 if (rdev->mddev == NULL)
2185                         rv = -EBUSY;
2186                 else
2187                         rv = entry->show(rdev, page);
2188                 mddev_unlock(mddev);
2189         }
2190         return rv;
2191 }
2192
2193 static ssize_t
2194 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2195               const char *page, size_t length)
2196 {
2197         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2198         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2199         ssize_t rv;
2200         mddev_t *mddev = rdev->mddev;
2201
2202         if (!entry->store)
2203                 return -EIO;
2204         if (!capable(CAP_SYS_ADMIN))
2205                 return -EACCES;
2206         rv = mddev ? mddev_lock(mddev): -EBUSY;
2207         if (!rv) {
2208                 if (rdev->mddev == NULL)
2209                         rv = -EBUSY;
2210                 else
2211                         rv = entry->store(rdev, page, length);
2212                 mddev_unlock(mddev);
2213         }
2214         return rv;
2215 }
2216
2217 static void rdev_free(struct kobject *ko)
2218 {
2219         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2220         kfree(rdev);
2221 }
2222 static struct sysfs_ops rdev_sysfs_ops = {
2223         .show           = rdev_attr_show,
2224         .store          = rdev_attr_store,
2225 };
2226 static struct kobj_type rdev_ktype = {
2227         .release        = rdev_free,
2228         .sysfs_ops      = &rdev_sysfs_ops,
2229         .default_attrs  = rdev_default_attrs,
2230 };
2231
2232 /*
2233  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2234  *
2235  * mark the device faulty if:
2236  *
2237  *   - the device is nonexistent (zero size)
2238  *   - the device has no valid superblock
2239  *
2240  * a faulty rdev _never_ has rdev->sb set.
2241  */
2242 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2243 {
2244         char b[BDEVNAME_SIZE];
2245         int err;
2246         mdk_rdev_t *rdev;
2247         sector_t size;
2248
2249         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2250         if (!rdev) {
2251                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2252                 return ERR_PTR(-ENOMEM);
2253         }
2254
2255         if ((err = alloc_disk_sb(rdev)))
2256                 goto abort_free;
2257
2258         err = lock_rdev(rdev, newdev, super_format == -2);
2259         if (err)
2260                 goto abort_free;
2261
2262         kobject_init(&rdev->kobj, &rdev_ktype);
2263
2264         rdev->desc_nr = -1;
2265         rdev->saved_raid_disk = -1;
2266         rdev->raid_disk = -1;
2267         rdev->flags = 0;
2268         rdev->data_offset = 0;
2269         rdev->sb_events = 0;
2270         atomic_set(&rdev->nr_pending, 0);
2271         atomic_set(&rdev->read_errors, 0);
2272         atomic_set(&rdev->corrected_errors, 0);
2273
2274         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2275         if (!size) {
2276                 printk(KERN_WARNING 
2277                         "md: %s has zero or unknown size, marking faulty!\n",
2278                         bdevname(rdev->bdev,b));
2279                 err = -EINVAL;
2280                 goto abort_free;
2281         }
2282
2283         if (super_format >= 0) {
2284                 err = super_types[super_format].
2285                         load_super(rdev, NULL, super_minor);
2286                 if (err == -EINVAL) {
2287                         printk(KERN_WARNING
2288                                 "md: %s does not have a valid v%d.%d "
2289                                "superblock, not importing!\n",
2290                                 bdevname(rdev->bdev,b),
2291                                super_format, super_minor);
2292                         goto abort_free;
2293                 }
2294                 if (err < 0) {
2295                         printk(KERN_WARNING 
2296                                 "md: could not read %s's sb, not importing!\n",
2297                                 bdevname(rdev->bdev,b));
2298                         goto abort_free;
2299                 }
2300         }
2301
2302         INIT_LIST_HEAD(&rdev->same_set);
2303         init_waitqueue_head(&rdev->blocked_wait);
2304
2305         return rdev;
2306
2307 abort_free:
2308         if (rdev->sb_page) {
2309                 if (rdev->bdev)
2310                         unlock_rdev(rdev);
2311                 free_disk_sb(rdev);
2312         }
2313         kfree(rdev);
2314         return ERR_PTR(err);
2315 }
2316
2317 /*
2318  * Check a full RAID array for plausibility
2319  */
2320
2321
2322 static void analyze_sbs(mddev_t * mddev)
2323 {
2324         int i;
2325         struct list_head *tmp;
2326         mdk_rdev_t *rdev, *freshest;
2327         char b[BDEVNAME_SIZE];
2328
2329         freshest = NULL;
2330         rdev_for_each(rdev, tmp, mddev)
2331                 switch (super_types[mddev->major_version].
2332                         load_super(rdev, freshest, mddev->minor_version)) {
2333                 case 1:
2334                         freshest = rdev;
2335                         break;
2336                 case 0:
2337                         break;
2338                 default:
2339                         printk( KERN_ERR \
2340                                 "md: fatal superblock inconsistency in %s"
2341                                 " -- removing from array\n", 
2342                                 bdevname(rdev->bdev,b));
2343                         kick_rdev_from_array(rdev);
2344                 }
2345
2346
2347         super_types[mddev->major_version].
2348                 validate_super(mddev, freshest);
2349
2350         i = 0;
2351         rdev_for_each(rdev, tmp, mddev) {
2352                 if (rdev != freshest)
2353                         if (super_types[mddev->major_version].
2354                             validate_super(mddev, rdev)) {
2355                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2356                                         " from array!\n",
2357                                         bdevname(rdev->bdev,b));
2358                                 kick_rdev_from_array(rdev);
2359                                 continue;
2360                         }
2361                 if (mddev->level == LEVEL_MULTIPATH) {
2362                         rdev->desc_nr = i++;
2363                         rdev->raid_disk = rdev->desc_nr;
2364                         set_bit(In_sync, &rdev->flags);
2365                 } else if (rdev->raid_disk >= mddev->raid_disks) {
2366                         rdev->raid_disk = -1;
2367                         clear_bit(In_sync, &rdev->flags);
2368                 }
2369         }
2370
2371
2372
2373         if (mddev->recovery_cp != MaxSector &&
2374             mddev->level >= 1)
2375                 printk(KERN_ERR "md: %s: raid array is not clean"
2376                        " -- starting background reconstruction\n",
2377                        mdname(mddev));
2378
2379 }
2380
2381 static void md_safemode_timeout(unsigned long data);
2382
2383 static ssize_t
2384 safe_delay_show(mddev_t *mddev, char *page)
2385 {
2386         int msec = (mddev->safemode_delay*1000)/HZ;
2387         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2388 }
2389 static ssize_t
2390 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2391 {
2392         int scale=1;
2393         int dot=0;
2394         int i;
2395         unsigned long msec;
2396         char buf[30];
2397         char *e;
2398         /* remove a period, and count digits after it */
2399         if (len >= sizeof(buf))
2400                 return -EINVAL;
2401         strlcpy(buf, cbuf, len);
2402         buf[len] = 0;
2403         for (i=0; i<len; i++) {
2404                 if (dot) {
2405                         if (isdigit(buf[i])) {
2406                                 buf[i-1] = buf[i];
2407                                 scale *= 10;
2408                         }
2409                         buf[i] = 0;
2410                 } else if (buf[i] == '.') {
2411                         dot=1;
2412                         buf[i] = 0;
2413                 }
2414         }
2415         msec = simple_strtoul(buf, &e, 10);
2416         if (e == buf || (*e && *e != '\n'))
2417                 return -EINVAL;
2418         msec = (msec * 1000) / scale;
2419         if (msec == 0)
2420                 mddev->safemode_delay = 0;
2421         else {
2422                 unsigned long old_delay = mddev->safemode_delay;
2423                 mddev->safemode_delay = (msec*HZ)/1000;
2424                 if (mddev->safemode_delay == 0)
2425                         mddev->safemode_delay = 1;
2426                 if (mddev->safemode_delay < old_delay)
2427                         md_safemode_timeout((unsigned long)mddev);
2428         }
2429         return len;
2430 }
2431 static struct md_sysfs_entry md_safe_delay =
2432 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
2433
2434 static ssize_t
2435 level_show(mddev_t *mddev, char *page)
2436 {
2437         struct mdk_personality *p = mddev->pers;
2438         if (p)
2439                 return sprintf(page, "%s\n", p->name);
2440         else if (mddev->clevel[0])
2441                 return sprintf(page, "%s\n", mddev->clevel);
2442         else if (mddev->level != LEVEL_NONE)
2443                 return sprintf(page, "%d\n", mddev->level);
2444         else
2445                 return 0;
2446 }
2447
2448 static ssize_t
2449 level_store(mddev_t *mddev, const char *buf, size_t len)
2450 {
2451         ssize_t rv = len;
2452         if (mddev->pers)
2453                 return -EBUSY;
2454         if (len == 0)
2455                 return 0;
2456         if (len >= sizeof(mddev->clevel))
2457                 return -ENOSPC;
2458         strncpy(mddev->clevel, buf, len);
2459         if (mddev->clevel[len-1] == '\n')
2460                 len--;
2461         mddev->clevel[len] = 0;
2462         mddev->level = LEVEL_NONE;
2463         return rv;
2464 }
2465
2466 static struct md_sysfs_entry md_level =
2467 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2468
2469
2470 static ssize_t
2471 layout_show(mddev_t *mddev, char *page)
2472 {
2473         /* just a number, not meaningful for all levels */
2474         if (mddev->reshape_position != MaxSector &&
2475             mddev->layout != mddev->new_layout)
2476                 return sprintf(page, "%d (%d)\n",
2477                                mddev->new_layout, mddev->layout);
2478         return sprintf(page, "%d\n", mddev->layout);
2479 }
2480
2481 static ssize_t
2482 layout_store(mddev_t *mddev, const char *buf, size_t len)
2483 {
2484         char *e;
2485         unsigned long n = simple_strtoul(buf, &e, 10);
2486
2487         if (!*buf || (*e && *e != '\n'))
2488                 return -EINVAL;
2489
2490         if (mddev->pers)
2491                 return -EBUSY;
2492         if (mddev->reshape_position != MaxSector)
2493                 mddev->new_layout = n;
2494         else
2495                 mddev->layout = n;
2496         return len;
2497 }
2498 static struct md_sysfs_entry md_layout =
2499 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2500
2501
2502 static ssize_t
2503 raid_disks_show(mddev_t *mddev, char *page)
2504 {
2505         if (mddev->raid_disks == 0)
2506                 return 0;
2507         if (mddev->reshape_position != MaxSector &&
2508             mddev->delta_disks != 0)
2509                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2510                                mddev->raid_disks - mddev->delta_disks);
2511         return sprintf(page, "%d\n", mddev->raid_disks);
2512 }
2513
2514 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2515
2516 static ssize_t
2517 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2518 {
2519         char *e;
2520         int rv = 0;
2521         unsigned long n = simple_strtoul(buf, &e, 10);
2522
2523         if (!*buf || (*e && *e != '\n'))
2524                 return -EINVAL;
2525
2526         if (mddev->pers)
2527                 rv = update_raid_disks(mddev, n);
2528         else if (mddev->reshape_position != MaxSector) {
2529                 int olddisks = mddev->raid_disks - mddev->delta_disks;
2530                 mddev->delta_disks = n - olddisks;
2531                 mddev->raid_disks = n;
2532         } else
2533                 mddev->raid_disks = n;
2534         return rv ? rv : len;
2535 }
2536 static struct md_sysfs_entry md_raid_disks =
2537 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2538
2539 static ssize_t
2540 chunk_size_show(mddev_t *mddev, char *page)
2541 {
2542         if (mddev->reshape_position != MaxSector &&
2543             mddev->chunk_size != mddev->new_chunk)
2544                 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2545                                mddev->chunk_size);
2546         return sprintf(page, "%d\n", mddev->chunk_size);
2547 }
2548
2549 static ssize_t
2550 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2551 {
2552         /* can only set chunk_size if array is not yet active */
2553         char *e;
2554         unsigned long n = simple_strtoul(buf, &e, 10);
2555
2556         if (!*buf || (*e && *e != '\n'))
2557                 return -EINVAL;
2558
2559         if (mddev->pers)
2560                 return -EBUSY;
2561         else if (mddev->reshape_position != MaxSector)
2562                 mddev->new_chunk = n;
2563         else
2564                 mddev->chunk_size = n;
2565         return len;
2566 }
2567 static struct md_sysfs_entry md_chunk_size =
2568 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2569
2570 static ssize_t
2571 resync_start_show(mddev_t *mddev, char *page)
2572 {
2573         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2574 }
2575
2576 static ssize_t
2577 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2578 {
2579         char *e;
2580         unsigned long long n = simple_strtoull(buf, &e, 10);
2581
2582         if (mddev->pers)
2583                 return -EBUSY;
2584         if (!*buf || (*e && *e != '\n'))
2585                 return -EINVAL;
2586
2587         mddev->recovery_cp = n;
2588         return len;
2589 }
2590 static struct md_sysfs_entry md_resync_start =
2591 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2592
2593 /*
2594  * The array state can be:
2595  *
2596  * clear
2597  *     No devices, no size, no level
2598  *     Equivalent to STOP_ARRAY ioctl
2599  * inactive
2600  *     May have some settings, but array is not active
2601  *        all IO results in error
2602  *     When written, doesn't tear down array, but just stops it
2603  * suspended (not supported yet)
2604  *     All IO requests will block. The array can be reconfigured.
2605  *     Writing this, if accepted, will block until array is quiescent
2606  * readonly
2607  *     no resync can happen.  no superblocks get written.
2608  *     write requests fail
2609  * read-auto
2610  *     like readonly, but behaves like 'clean' on a write request.
2611  *
2612  * clean - no pending writes, but otherwise active.
2613  *     When written to inactive array, starts without resync
2614  *     If a write request arrives then
2615  *       if metadata is known, mark 'dirty' and switch to 'active'.
2616  *       if not known, block and switch to write-pending
2617  *     If written to an active array that has pending writes, then fails.
2618  * active
2619  *     fully active: IO and resync can be happening.
2620  *     When written to inactive array, starts with resync
2621  *
2622  * write-pending
2623  *     clean, but writes are blocked waiting for 'active' to be written.
2624  *
2625  * active-idle
2626  *     like active, but no writes have been seen for a while (100msec).
2627  *
2628  */
2629 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2630                    write_pending, active_idle, bad_word};
2631 static char *array_states[] = {
2632         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2633         "write-pending", "active-idle", NULL };
2634
2635 static int match_word(const char *word, char **list)
2636 {
2637         int n;
2638         for (n=0; list[n]; n++)
2639                 if (cmd_match(word, list[n]))
2640                         break;
2641         return n;
2642 }
2643
2644 static ssize_t
2645 array_state_show(mddev_t *mddev, char *page)
2646 {
2647         enum array_state st = inactive;
2648
2649         if (mddev->pers)
2650                 switch(mddev->ro) {
2651                 case 1:
2652                         st = readonly;
2653                         break;
2654                 case 2:
2655                         st = read_auto;
2656                         break;
2657                 case 0:
2658                         if (mddev->in_sync)
2659                                 st = clean;
2660                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
2661                                 st = write_pending;
2662                         else if (mddev->safemode)
2663                                 st = active_idle;
2664                         else
2665                                 st = active;
2666                 }
2667         else {
2668                 if (list_empty(&mddev->disks) &&
2669                     mddev->raid_disks == 0 &&
2670                     mddev->size == 0)
2671                         st = clear;
2672                 else
2673                         st = inactive;
2674         }
2675         return sprintf(page, "%s\n", array_states[st]);
2676 }
2677
2678 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
2679 static int do_md_run(mddev_t * mddev);
2680 static int restart_array(mddev_t *mddev);
2681
2682 static ssize_t
2683 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2684 {
2685         int err = -EINVAL;
2686         enum array_state st = match_word(buf, array_states);
2687         switch(st) {
2688         case bad_word:
2689                 break;
2690         case clear:
2691                 /* stopping an active array */
2692                 if (atomic_read(&mddev->openers) > 0)
2693                         return -EBUSY;
2694                 err = do_md_stop(mddev, 0, 0);
2695                 break;
2696         case inactive:
2697                 /* stopping an active array */
2698                 if (mddev->pers) {
2699                         if (atomic_read(&mddev->openers) > 0)
2700                                 return -EBUSY;
2701                         err = do_md_stop(mddev, 2, 0);
2702                 } else
2703                         err = 0; /* already inactive */
2704                 break;
2705         case suspended:
2706                 break; /* not supported yet */
2707         case readonly:
2708                 if (mddev->pers)
2709                         err = do_md_stop(mddev, 1, 0);
2710                 else {
2711                         mddev->ro = 1;
2712                         set_disk_ro(mddev->gendisk, 1);
2713                         err = do_md_run(mddev);
2714                 }
2715                 break;
2716         case read_auto:
2717                 if (mddev->pers) {
2718                         if (mddev->ro == 0)
2719                                 err = do_md_stop(mddev, 1, 0);
2720                         else if (mddev->ro == 1)
2721                                 err = restart_array(mddev);
2722                         if (err == 0) {
2723                                 mddev->ro = 2;
2724                                 set_disk_ro(mddev->gendisk, 0);
2725                         }
2726                 } else {
2727                         mddev->ro = 2;
2728                         err = do_md_run(mddev);
2729                 }
2730                 break;
2731         case clean:
2732                 if (mddev->pers) {
2733                         restart_array(mddev);
2734                         spin_lock_irq(&mddev->write_lock);
2735                         if (atomic_read(&mddev->writes_pending) == 0) {
2736                                 if (mddev->in_sync == 0) {
2737                                         mddev->in_sync = 1;
2738                                         if (mddev->safemode == 1)
2739                                                 mddev->safemode = 0;
2740                                         if (mddev->persistent)
2741                                                 set_bit(MD_CHANGE_CLEAN,
2742                                                         &mddev->flags);
2743                                 }
2744                                 err = 0;
2745                         } else
2746                                 err = -EBUSY;
2747                         spin_unlock_irq(&mddev->write_lock);
2748                 } else {
2749                         mddev->ro = 0;
2750                         mddev->recovery_cp = MaxSector;
2751                         err = do_md_run(mddev);
2752                 }
2753                 break;
2754         case active:
2755                 if (mddev->pers) {
2756                         restart_array(mddev);
2757                         if (mddev->external)
2758                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2759                         wake_up(&mddev->sb_wait);
2760                         err = 0;
2761                 } else {
2762                         mddev->ro = 0;
2763                         set_disk_ro(mddev->gendisk, 0);
2764                         err = do_md_run(mddev);
2765                 }
2766                 break;
2767         case write_pending:
2768         case active_idle:
2769                 /* these cannot be set */
2770                 break;
2771         }
2772         if (err)
2773                 return err;
2774         else {
2775                 sysfs_notify(&mddev->kobj, NULL, "array_state");
2776                 return len;
2777         }
2778 }
2779 static struct md_sysfs_entry md_array_state =
2780 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2781
2782 static ssize_t
2783 null_show(mddev_t *mddev, char *page)
2784 {
2785         return -EINVAL;
2786 }
2787
2788 static ssize_t
2789 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2790 {
2791         /* buf must be %d:%d\n? giving major and minor numbers */
2792         /* The new device is added to the array.
2793          * If the array has a persistent superblock, we read the
2794          * superblock to initialise info and check validity.
2795          * Otherwise, only checking done is that in bind_rdev_to_array,
2796          * which mainly checks size.
2797          */
2798         char *e;
2799         int major = simple_strtoul(buf, &e, 10);
2800         int minor;
2801         dev_t dev;
2802         mdk_rdev_t *rdev;
2803         int err;
2804
2805         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2806                 return -EINVAL;
2807         minor = simple_strtoul(e+1, &e, 10);
2808         if (*e && *e != '\n')
2809                 return -EINVAL;
2810         dev = MKDEV(major, minor);
2811         if (major != MAJOR(dev) ||
2812             minor != MINOR(dev))
2813                 return -EOVERFLOW;
2814
2815
2816         if (mddev->persistent) {
2817                 rdev = md_import_device(dev, mddev->major_version,
2818                                         mddev->minor_version);
2819                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2820                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2821                                                        mdk_rdev_t, same_set);
2822                         err = super_types[mddev->major_version]
2823                                 .load_super(rdev, rdev0, mddev->minor_version);
2824                         if (err < 0)
2825                                 goto out;
2826                 }
2827         } else if (mddev->external)
2828                 rdev = md_import_device(dev, -2, -1);
2829         else
2830                 rdev = md_import_device(dev, -1, -1);
2831
2832         if (IS_ERR(rdev))
2833                 return PTR_ERR(rdev);
2834         err = bind_rdev_to_array(rdev, mddev);
2835  out:
2836         if (err)
2837                 export_rdev(rdev);
2838         return err ? err : len;
2839 }
2840
2841 static struct md_sysfs_entry md_new_device =
2842 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
2843
2844 static ssize_t
2845 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2846 {
2847         char *end;
2848         unsigned long chunk, end_chunk;
2849
2850         if (!mddev->bitmap)
2851                 goto out;
2852         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2853         while (*buf) {
2854                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2855                 if (buf == end) break;
2856                 if (*end == '-') { /* range */
2857                         buf = end + 1;
2858                         end_chunk = simple_strtoul(buf, &end, 0);
2859                         if (buf == end) break;
2860                 }
2861                 if (*end && !isspace(*end)) break;
2862                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2863                 buf = end;
2864                 while (isspace(*buf)) buf++;
2865         }
2866         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2867 out:
2868         return len;
2869 }
2870
2871 static struct md_sysfs_entry md_bitmap =
2872 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
2873
2874 static ssize_t
2875 size_show(mddev_t *mddev, char *page)
2876 {
2877         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2878 }
2879
2880 static int update_size(mddev_t *mddev, sector_t num_sectors);
2881
2882 static ssize_t
2883 size_store(mddev_t *mddev, const char *buf, size_t len)
2884 {
2885         /* If array is inactive, we can reduce the component size, but
2886          * not increase it (except from 0).
2887          * If array is active, we can try an on-line resize
2888          */
2889         char *e;
2890         int err = 0;
2891         unsigned long long size = simple_strtoull(buf, &e, 10);
2892         if (!*buf || *buf == '\n' ||
2893             (*e && *e != '\n'))
2894                 return -EINVAL;
2895
2896         if (mddev->pers) {
2897                 err = update_size(mddev, size * 2);
2898                 md_update_sb(mddev, 1);
2899         } else {
2900                 if (mddev->size == 0 ||
2901                     mddev->size > size)
2902                         mddev->size = size;
2903                 else
2904                         err = -ENOSPC;
2905         }
2906         return err ? err : len;
2907 }
2908
2909 static struct md_sysfs_entry md_size =
2910 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2911
2912
2913 /* Metdata version.
2914  * This is one of
2915  *   'none' for arrays with no metadata (good luck...)
2916  *   'external' for arrays with externally managed metadata,
2917  * or N.M for internally known formats
2918  */
2919 static ssize_t
2920 metadata_show(mddev_t *mddev, char *page)
2921 {
2922         if (mddev->persistent)
2923                 return sprintf(page, "%d.%d\n",
2924                                mddev->major_version, mddev->minor_version);
2925         else if (mddev->external)
2926                 return sprintf(page, "external:%s\n", mddev->metadata_type);
2927         else
2928                 return sprintf(page, "none\n");
2929 }
2930
2931 static ssize_t
2932 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2933 {
2934         int major, minor;
2935         char *e;
2936         /* Changing the details of 'external' metadata is
2937          * always permitted.  Otherwise there must be
2938          * no devices attached to the array.
2939          */
2940         if (mddev->external && strncmp(buf, "external:", 9) == 0)
2941                 ;
2942         else if (!list_empty(&mddev->disks))
2943                 return -EBUSY;
2944
2945         if (cmd_match(buf, "none")) {
2946                 mddev->persistent = 0;
2947                 mddev->external = 0;
2948                 mddev->major_version = 0;
2949                 mddev->minor_version = 90;
2950                 return len;
2951         }
2952         if (strncmp(buf, "external:", 9) == 0) {
2953                 size_t namelen = len-9;
2954                 if (namelen >= sizeof(mddev->metadata_type))
2955                         namelen = sizeof(mddev->metadata_type)-1;
2956                 strncpy(mddev->metadata_type, buf+9, namelen);
2957                 mddev->metadata_type[namelen] = 0;
2958                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
2959                         mddev->metadata_type[--namelen] = 0;
2960                 mddev->persistent = 0;
2961                 mddev->external = 1;
2962                 mddev->major_version = 0;
2963                 mddev->minor_version = 90;
2964                 return len;
2965         }
2966         major = simple_strtoul(buf, &e, 10);
2967         if (e==buf || *e != '.')
2968                 return -EINVAL;
2969         buf = e+1;
2970         minor = simple_strtoul(buf, &e, 10);
2971         if (e==buf || (*e && *e != '\n') )
2972                 return -EINVAL;
2973         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2974                 return -ENOENT;
2975         mddev->major_version = major;
2976         mddev->minor_version = minor;
2977         mddev->persistent = 1;
2978         mddev->external = 0;
2979         return len;
2980 }
2981
2982 static struct md_sysfs_entry md_metadata =
2983 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2984
2985 static ssize_t
2986 action_show(mddev_t *mddev, char *page)
2987 {
2988         char *type = "idle";
2989         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2990             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
2991                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2992                         type = "reshape";
2993                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2994                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2995                                 type = "resync";
2996                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2997                                 type = "check";
2998                         else
2999                                 type = "repair";
3000                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3001                         type = "recover";
3002         }
3003         return sprintf(page, "%s\n", type);
3004 }
3005
3006 static ssize_t
3007 action_store(mddev_t *mddev, const char *page, size_t len)
3008 {
3009         if (!mddev->pers || !mddev->pers->sync_request)
3010                 return -EINVAL;
3011
3012         if (cmd_match(page, "idle")) {
3013                 if (mddev->sync_thread) {
3014                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3015                         md_unregister_thread(mddev->sync_thread);
3016                         mddev->sync_thread = NULL;
3017                         mddev->recovery = 0;
3018                 }
3019         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3020                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3021                 return -EBUSY;
3022         else if (cmd_match(page, "resync"))
3023                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3024         else if (cmd_match(page, "recover")) {
3025                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3026                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3027         } else if (cmd_match(page, "reshape")) {
3028                 int err;
3029                 if (mddev->pers->start_reshape == NULL)
3030                         return -EINVAL;
3031                 err = mddev->pers->start_reshape(mddev);
3032                 if (err)
3033                         return err;
3034                 sysfs_notify(&mddev->kobj, NULL, "degraded");
3035         } else {
3036                 if (cmd_match(page, "check"))
3037                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3038                 else if (!cmd_match(page, "repair"))
3039                         return -EINVAL;
3040                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3041                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3042         }
3043         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3044         md_wakeup_thread(mddev->thread);
3045         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3046         return len;
3047 }
3048
3049 static ssize_t
3050 mismatch_cnt_show(mddev_t *mddev, char *page)
3051 {
3052         return sprintf(page, "%llu\n",
3053                        (unsigned long long) mddev->resync_mismatches);
3054 }
3055
3056 static struct md_sysfs_entry md_scan_mode =
3057 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3058
3059
3060 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3061
3062 static ssize_t
3063 sync_min_show(mddev_t *mddev, char *page)
3064 {
3065         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3066                        mddev->sync_speed_min ? "local": "system");
3067 }
3068
3069 static ssize_t
3070 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3071 {
3072         int min;
3073         char *e;
3074         if (strncmp(buf, "system", 6)==0) {
3075                 mddev->sync_speed_min = 0;
3076                 return len;
3077         }
3078         min = simple_strtoul(buf, &e, 10);
3079         if (buf == e || (*e && *e != '\n') || min <= 0)
3080                 return -EINVAL;
3081         mddev->sync_speed_min = min;
3082         return len;
3083 }
3084
3085 static struct md_sysfs_entry md_sync_min =
3086 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3087
3088 static ssize_t
3089 sync_max_show(mddev_t *mddev, char *page)
3090 {
3091         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3092                        mddev->sync_speed_max ? "local": "system");
3093 }
3094
3095 static ssize_t
3096 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3097 {
3098         int max;
3099         char *e;
3100         if (strncmp(buf, "system", 6)==0) {
3101                 mddev->sync_speed_max = 0;
3102                 return len;
3103         }
3104         max = simple_strtoul(buf, &e, 10);
3105         if (buf == e || (*e && *e != '\n') || max <= 0)
3106                 return -EINVAL;
3107         mddev->sync_speed_max = max;
3108         return len;
3109 }
3110
3111 static struct md_sysfs_entry md_sync_max =
3112 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3113
3114 static ssize_t
3115 degraded_show(mddev_t *mddev, char *page)
3116 {
3117         return sprintf(page, "%d\n", mddev->degraded);
3118 }
3119 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3120
3121 static ssize_t
3122 sync_force_parallel_show(mddev_t *mddev, char *page)
3123 {
3124         return sprintf(page, "%d\n", mddev->parallel_resync);
3125 }
3126
3127 static ssize_t
3128 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3129 {
3130         long n;
3131
3132         if (strict_strtol(buf, 10, &n))
3133                 return -EINVAL;
3134
3135         if (n != 0 && n != 1)
3136                 return -EINVAL;
3137
3138         mddev->parallel_resync = n;
3139
3140         if (mddev->sync_thread)
3141                 wake_up(&resync_wait);
3142
3143         return len;
3144 }
3145
3146 /* force parallel resync, even with shared block devices */
3147 static struct md_sysfs_entry md_sync_force_parallel =
3148 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3149        sync_force_parallel_show, sync_force_parallel_store);
3150
3151 static ssize_t
3152 sync_speed_show(mddev_t *mddev, char *page)
3153 {
3154         unsigned long resync, dt, db;
3155         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3156         dt = (jiffies - mddev->resync_mark) / HZ;
3157         if (!dt) dt++;
3158         db = resync - mddev->resync_mark_cnt;
3159         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3160 }
3161
3162 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3163
3164 static ssize_t
3165 sync_completed_show(mddev_t *mddev, char *page)
3166 {
3167         unsigned long max_blocks, resync;
3168
3169         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3170                 max_blocks = mddev->resync_max_sectors;
3171         else
3172                 max_blocks = mddev->size << 1;
3173
3174         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
3175         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
3176 }
3177
3178 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3179
3180 static ssize_t
3181 min_sync_show(mddev_t *mddev, char *page)
3182 {
3183         return sprintf(page, "%llu\n",
3184                        (unsigned long long)mddev->resync_min);
3185 }
3186 static ssize_t
3187 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3188 {
3189         unsigned long long min;
3190         if (strict_strtoull(buf, 10, &min))
3191                 return -EINVAL;
3192         if (min > mddev->resync_max)
3193                 return -EINVAL;
3194         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3195                 return -EBUSY;
3196
3197         /* Must be a multiple of chunk_size */
3198         if (mddev->chunk_size) {
3199                 if (min & (sector_t)((mddev->chunk_size>>9)-1))
3200                         return -EINVAL;
3201         }
3202         mddev->resync_min = min;
3203
3204         return len;
3205 }
3206
3207 static struct md_sysfs_entry md_min_sync =
3208 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3209
3210 static ssize_t
3211 max_sync_show(mddev_t *mddev, char *page)
3212 {
3213         if (mddev->resync_max == MaxSector)
3214                 return sprintf(page, "max\n");
3215         else
3216                 return sprintf(page, "%llu\n",
3217                                (unsigned long long)mddev->resync_max);
3218 }
3219 static ssize_t
3220 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3221 {
3222         if (strncmp(buf, "max", 3) == 0)
3223                 mddev->resync_max = MaxSector;
3224         else {
3225                 unsigned long long max;
3226                 if (strict_strtoull(buf, 10, &max))
3227                         return -EINVAL;
3228                 if (max < mddev->resync_min)
3229                         return -EINVAL;
3230                 if (max < mddev->resync_max &&
3231                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3232                         return -EBUSY;
3233
3234                 /* Must be a multiple of chunk_size */
3235                 if (mddev->chunk_size) {
3236                         if (max & (sector_t)((mddev->chunk_size>>9)-1))
3237                                 return -EINVAL;
3238                 }
3239                 mddev->resync_max = max;
3240         }
3241         wake_up(&mddev->recovery_wait);
3242         return len;
3243 }
3244
3245 static struct md_sysfs_entry md_max_sync =
3246 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3247
3248 static ssize_t
3249 suspend_lo_show(mddev_t *mddev, char *page)
3250 {
3251         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3252 }
3253
3254 static ssize_t
3255 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3256 {
3257         char *e;
3258         unsigned long long new = simple_strtoull(buf, &e, 10);
3259
3260         if (mddev->pers->quiesce == NULL)
3261                 return -EINVAL;
3262         if (buf == e || (*e && *e != '\n'))
3263                 return -EINVAL;
3264         if (new >= mddev->suspend_hi ||
3265             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3266                 mddev->suspend_lo = new;
3267                 mddev->pers->quiesce(mddev, 2);
3268                 return len;
3269         } else
3270                 return -EINVAL;
3271 }
3272 static struct md_sysfs_entry md_suspend_lo =
3273 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3274
3275
3276 static ssize_t
3277 suspend_hi_show(mddev_t *mddev, char *page)
3278 {
3279         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3280 }
3281
3282 static ssize_t
3283 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3284 {
3285         char *e;
3286         unsigned long long new = simple_strtoull(buf, &e, 10);
3287
3288         if (mddev->pers->quiesce == NULL)
3289                 return -EINVAL;
3290         if (buf == e || (*e && *e != '\n'))
3291                 return -EINVAL;
3292         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3293             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3294                 mddev->suspend_hi = new;
3295                 mddev->pers->quiesce(mddev, 1);
3296                 mddev->pers->quiesce(mddev, 0);
3297                 return len;
3298         } else
3299                 return -EINVAL;
3300 }
3301 static struct md_sysfs_entry md_suspend_hi =
3302 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3303
3304 static ssize_t
3305 reshape_position_show(mddev_t *mddev, char *page)
3306 {
3307         if (mddev->reshape_position != MaxSector)
3308                 return sprintf(page, "%llu\n",
3309                                (unsigned long long)mddev->reshape_position);
3310         strcpy(page, "none\n");
3311         return 5;
3312 }
3313
3314 static ssize_t
3315 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3316 {
3317         char *e;
3318         unsigned long long new = simple_strtoull(buf, &e, 10);
3319         if (mddev->pers)
3320                 return -EBUSY;
3321         if (buf == e || (*e && *e != '\n'))
3322                 return -EINVAL;
3323         mddev->reshape_position = new;
3324         mddev->delta_disks = 0;
3325         mddev->new_level = mddev->level;
3326         mddev->new_layout = mddev->layout;
3327         mddev->new_chunk = mddev->chunk_size;
3328         return len;
3329 }
3330
3331 static struct md_sysfs_entry md_reshape_position =
3332 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3333        reshape_position_store);
3334
3335
3336 static struct attribute *md_default_attrs[] = {
3337         &md_level.attr,
3338         &md_layout.attr,
3339         &md_raid_disks.attr,
3340         &md_chunk_size.attr,
3341         &md_size.attr,
3342         &md_resync_start.attr,
3343         &md_metadata.attr,
3344         &md_new_device.attr,
3345         &md_safe_delay.attr,
3346         &md_array_state.attr,
3347         &md_reshape_position.attr,
3348         NULL,
3349 };
3350
3351 static struct attribute *md_redundancy_attrs[] = {
3352         &md_scan_mode.attr,
3353         &md_mismatches.attr,
3354         &md_sync_min.attr,
3355         &md_sync_max.attr,
3356         &md_sync_speed.attr,
3357         &md_sync_force_parallel.attr,
3358         &md_sync_completed.attr,
3359         &md_min_sync.attr,
3360         &md_max_sync.attr,
3361         &md_suspend_lo.attr,
3362         &md_suspend_hi.attr,
3363         &md_bitmap.attr,
3364         &md_degraded.attr,
3365         NULL,
3366 };
3367 static struct attribute_group md_redundancy_group = {
3368         .name = NULL,
3369         .attrs = md_redundancy_attrs,
3370 };
3371
3372
3373 static ssize_t
3374 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3375 {
3376         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3377         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3378         ssize_t rv;
3379
3380         if (!entry->show)
3381                 return -EIO;
3382         rv = mddev_lock(mddev);
3383         if (!rv) {
3384                 rv = entry->show(mddev, page);
3385                 mddev_unlock(mddev);
3386         }
3387         return rv;
3388 }
3389
3390 static ssize_t
3391 md_attr_store(struct kobject *kobj, struct attribute *attr,
3392               const char *page, size_t length)
3393 {
3394         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3395         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3396         ssize_t rv;
3397
3398         if (!entry->store)
3399                 return -EIO;
3400         if (!capable(CAP_SYS_ADMIN))
3401                 return -EACCES;
3402         rv = mddev_lock(mddev);
3403         if (!rv) {
3404                 rv = entry->store(mddev, page, length);
3405                 mddev_unlock(mddev);
3406         }
3407         return rv;
3408 }
3409
3410 static void md_free(struct kobject *ko)
3411 {
3412         mddev_t *mddev = container_of(ko, mddev_t, kobj);
3413         kfree(mddev);
3414 }
3415
3416 static struct sysfs_ops md_sysfs_ops = {
3417         .show   = md_attr_show,
3418         .store  = md_attr_store,
3419 };
3420 static struct kobj_type md_ktype = {
3421         .release        = md_free,
3422         .sysfs_ops      = &md_sysfs_ops,
3423         .default_attrs  = md_default_attrs,
3424 };
3425
3426 int mdp_major = 0;
3427
3428 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3429 {
3430         static DEFINE_MUTEX(disks_mutex);
3431         mddev_t *mddev = mddev_find(dev);
3432         struct gendisk *disk;
3433         int partitioned = (MAJOR(dev) != MD_MAJOR);
3434         int shift = partitioned ? MdpMinorShift : 0;
3435         int unit = MINOR(dev) >> shift;
3436         int error;
3437
3438         if (!mddev)
3439                 return NULL;
3440
3441         mutex_lock(&disks_mutex);
3442         if (mddev->gendisk) {
3443                 mutex_unlock(&disks_mutex);
3444                 mddev_put(mddev);
3445                 return NULL;
3446         }
3447         disk = alloc_disk(1 << shift);
3448         if (!disk) {
3449                 mutex_unlock(&disks_mutex);
3450                 mddev_put(mddev);
3451                 return NULL;
3452         }
3453         disk->major = MAJOR(dev);
3454         disk->first_minor = unit << shift;
3455         if (partitioned)
3456                 sprintf(disk->disk_name, "md_d%d", unit);
3457         else
3458                 sprintf(disk->disk_name, "md%d", unit);
3459         disk->fops = &md_fops;
3460         disk->private_data = mddev;
3461         disk->queue = mddev->queue;
3462         add_disk(disk);
3463         mddev->gendisk = disk;
3464         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3465                                      &disk_to_dev(disk)->kobj, "%s", "md");
3466         mutex_unlock(&disks_mutex);
3467         if (error)
3468                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3469                        disk->disk_name);
3470         else
3471                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3472         return NULL;
3473 }
3474
3475 static void md_safemode_timeout(unsigned long data)
3476 {
3477         mddev_t *mddev = (mddev_t *) data;
3478
3479         if (!atomic_read(&mddev->writes_pending)) {
3480                 mddev->safemode = 1;
3481                 if (mddev->external)
3482                         set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags);
3483         }
3484         md_wakeup_thread(mddev->thread);
3485 }
3486
3487 static int start_dirty_degraded;
3488
3489 static int do_md_run(mddev_t * mddev)
3490 {
3491         int err;
3492         int chunk_size;
3493         struct list_head *tmp;
3494         mdk_rdev_t *rdev;
3495         struct gendisk *disk;
3496         struct mdk_personality *pers;
3497         char b[BDEVNAME_SIZE];
3498
3499         if (list_empty(&mddev->disks))
3500                 /* cannot run an array with no devices.. */
3501                 return -EINVAL;
3502
3503         if (mddev->pers)
3504                 return -EBUSY;
3505
3506         /*
3507          * Analyze all RAID superblock(s)
3508          */
3509         if (!mddev->raid_disks) {
3510                 if (!mddev->persistent)
3511                         return -EINVAL;
3512                 analyze_sbs(mddev);
3513         }
3514
3515         chunk_size = mddev->chunk_size;
3516
3517         if (chunk_size) {
3518                 if (chunk_size > MAX_CHUNK_SIZE) {
3519                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3520                                 chunk_size, MAX_CHUNK_SIZE);
3521                         return -EINVAL;
3522                 }
3523                 /*
3524                  * chunk-size has to be a power of 2
3525                  */
3526                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3527                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3528                         return -EINVAL;
3529                 }
3530
3531                 /* devices must have minimum size of one chunk */
3532                 rdev_for_each(rdev, tmp, mddev) {
3533                         if (test_bit(Faulty, &rdev->flags))
3534                                 continue;
3535                         if (rdev->size < chunk_size / 1024) {
3536                                 printk(KERN_WARNING
3537                                         "md: Dev %s smaller than chunk_size:"
3538                                         " %lluk < %dk\n",
3539                                         bdevname(rdev->bdev,b),
3540                                         (unsigned long long)rdev->size,
3541                                         chunk_size / 1024);
3542                                 return -EINVAL;
3543                         }
3544                 }
3545         }
3546
3547         if (mddev->level != LEVEL_NONE)
3548                 request_module("md-level-%d", mddev->level);
3549         else if (mddev->clevel[0])
3550                 request_module("md-%s", mddev->clevel);
3551
3552         /*
3553          * Drop all container device buffers, from now on
3554          * the only valid external interface is through the md
3555          * device.
3556          */
3557         rdev_for_each(rdev, tmp, mddev) {
3558                 if (test_bit(Faulty, &rdev->flags))
3559                         continue;
3560                 sync_blockdev(rdev->bdev);
3561                 invalidate_bdev(rdev->bdev);
3562
3563                 /* perform some consistency tests on the device.
3564                  * We don't want the data to overlap the metadata,
3565                  * Internal Bitmap issues has handled elsewhere.
3566                  */
3567                 if (rdev->data_offset < rdev->sb_start) {
3568                         if (mddev->size &&
3569                             rdev->data_offset + mddev->size*2
3570                             > rdev->sb_start) {
3571                                 printk("md: %s: data overlaps metadata\n",
3572                                        mdname(mddev));
3573                                 return -EINVAL;
3574                         }
3575                 } else {
3576                         if (rdev->sb_start + rdev->sb_size/512
3577                             > rdev->data_offset) {
3578                                 printk("md: %s: metadata overlaps data\n",
3579                                        mdname(mddev));
3580                                 return -EINVAL;
3581                         }
3582                 }
3583                 sysfs_notify(&rdev->kobj, NULL, "state");
3584         }
3585
3586         md_probe(mddev->unit, NULL, NULL);
3587         disk = mddev->gendisk;
3588         if (!disk)
3589                 return -ENOMEM;
3590
3591         spin_lock(&pers_lock);
3592         pers = find_pers(mddev->level, mddev->clevel);
3593         if (!pers || !try_module_get(pers->owner)) {
3594                 spin_unlock(&pers_lock);
3595                 if (mddev->level != LEVEL_NONE)
3596                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3597                                mddev->level);
3598                 else
3599                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3600                                mddev->clevel);
3601                 return -EINVAL;
3602         }
3603         mddev->pers = pers;
3604         spin_unlock(&pers_lock);
3605         mddev->level = pers->level;
3606         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3607
3608         if (mddev->reshape_position != MaxSector &&
3609             pers->start_reshape == NULL) {
3610                 /* This personality cannot handle reshaping... */
3611                 mddev->pers = NULL;
3612                 module_put(pers->owner);
3613                 return -EINVAL;
3614         }
3615
3616         if (pers->sync_request) {
3617                 /* Warn if this is a potentially silly
3618                  * configuration.
3619                  */
3620                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3621                 mdk_rdev_t *rdev2;
3622                 struct list_head *tmp2;
3623                 int warned = 0;
3624                 rdev_for_each(rdev, tmp, mddev) {
3625                         rdev_for_each(rdev2, tmp2, mddev) {
3626                                 if (rdev < rdev2 &&
3627                                     rdev->bdev->bd_contains ==
3628                                     rdev2->bdev->bd_contains) {
3629                                         printk(KERN_WARNING
3630                                                "%s: WARNING: %s appears to be"
3631                                                " on the same physical disk as"
3632                                                " %s.\n",
3633                                                mdname(mddev),
3634                                                bdevname(rdev->bdev,b),
3635                                                bdevname(rdev2->bdev,b2));
3636                                         warned = 1;
3637                                 }
3638                         }
3639                 }
3640                 if (warned)
3641                         printk(KERN_WARNING
3642                                "True protection against single-disk"
3643                                " failure might be compromised.\n");
3644         }
3645
3646         mddev->recovery = 0;
3647         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3648         mddev->barriers_work = 1;
3649         mddev->ok_start_degraded = start_dirty_degraded;
3650
3651         if (start_readonly)
3652                 mddev->ro = 2; /* read-only, but switch on first write */
3653
3654         err = mddev->pers->run(mddev);
3655         if (err)
3656                 printk(KERN_ERR "md: pers->run() failed ...\n");
3657         else if (mddev->pers->sync_request) {
3658                 err = bitmap_create(mddev);
3659                 if (err) {
3660                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3661                                mdname(mddev), err);
3662                         mddev->pers->stop(mddev);
3663                 }
3664         }
3665         if (err) {
3666                 module_put(mddev->pers->owner);
3667                 mddev->pers = NULL;
3668                 bitmap_destroy(mddev);
3669                 return err;
3670         }
3671         if (mddev->pers->sync_request) {
3672                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3673                         printk(KERN_WARNING
3674                                "md: cannot register extra attributes for %s\n",
3675                                mdname(mddev));
3676         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3677                 mddev->ro = 0;
3678
3679         atomic_set(&mddev->writes_pending,0);
3680         mddev->safemode = 0;
3681         mddev->safemode_timer.function = md_safemode_timeout;
3682         mddev->safemode_timer.data = (unsigned long) mddev;
3683         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3684         mddev->in_sync = 1;
3685
3686         rdev_for_each(rdev, tmp, mddev)
3687                 if (rdev->raid_disk >= 0) {
3688                         char nm[20];
3689                         sprintf(nm, "rd%d", rdev->raid_disk);
3690                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3691                                 printk("md: cannot register %s for %s\n",
3692                                        nm, mdname(mddev));
3693                 }
3694         
3695         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3696         
3697         if (mddev->flags)
3698                 md_update_sb(mddev, 0);
3699
3700         set_capacity(disk, mddev->array_sectors);
3701
3702         /* If we call blk_queue_make_request here, it will
3703          * re-initialise max_sectors etc which may have been
3704          * refined inside -> run.  So just set the bits we need to set.
3705          * Most initialisation happended when we called
3706          * blk_queue_make_request(..., md_fail_request)
3707          * earlier.
3708          */
3709         mddev->queue->queuedata = mddev;
3710         mddev->queue->make_request_fn = mddev->pers->make_request;
3711
3712         /* If there is a partially-recovered drive we need to
3713          * start recovery here.  If we leave it to md_check_recovery,
3714          * it will remove the drives and not do the right thing
3715          */
3716         if (mddev->degraded && !mddev->sync_thread) {
3717                 struct list_head *rtmp;
3718                 int spares = 0;
3719                 rdev_for_each(rdev, rtmp, mddev)
3720                         if (rdev->raid_disk >= 0 &&
3721                             !test_bit(In_sync, &rdev->flags) &&
3722                             !test_bit(Faulty, &rdev->flags))
3723                                 /* complete an interrupted recovery */
3724                                 spares++;
3725                 if (spares && mddev->pers->sync_request) {
3726                         mddev->recovery = 0;
3727                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3728                         mddev->sync_thread = md_register_thread(md_do_sync,
3729                                                                 mddev,
3730                                                                 "%s_resync");
3731                         if (!mddev->sync_thread) {
3732                                 printk(KERN_ERR "%s: could not start resync"
3733                                        " thread...\n",
3734                                        mdname(mddev));
3735                                 /* leave the spares where they are, it shouldn't hurt */
3736                                 mddev->recovery = 0;
3737                         }
3738                 }
3739         }
3740         md_wakeup_thread(mddev->thread);
3741         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3742
3743         mddev->changed = 1;
3744         md_new_event(mddev);
3745         sysfs_notify(&mddev->kobj, NULL, "array_state");
3746         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3747         sysfs_notify(&mddev->kobj, NULL, "degraded");
3748         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
3749         return 0;
3750 }
3751
3752 static int restart_array(mddev_t *mddev)
3753 {
3754         struct gendisk *disk = mddev->gendisk;
3755
3756         /* Complain if it has no devices */
3757         if (list_empty(&mddev->disks))
3758                 return -ENXIO;
3759         if (!mddev->pers)
3760                 return -EINVAL;
3761         if (!mddev->ro)
3762                 return -EBUSY;
3763         mddev->safemode = 0;
3764         mddev->ro = 0;
3765         set_disk_ro(disk, 0);
3766         printk(KERN_INFO "md: %s switched to read-write mode.\n",
3767                 mdname(mddev));
3768         /* Kick recovery or resync if necessary */
3769         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3770         md_wakeup_thread(mddev->thread);
3771         md_wakeup_thread(mddev->sync_thread);
3772         sysfs_notify(&mddev->kobj, NULL, "array_state");
3773         return 0;
3774 }
3775
3776 /* similar to deny_write_access, but accounts for our holding a reference
3777  * to the file ourselves */
3778 static int deny_bitmap_write_access(struct file * file)
3779 {
3780         struct inode *inode = file->f_mapping->host;
3781
3782         spin_lock(&inode->i_lock);
3783         if (atomic_read(&inode->i_writecount) > 1) {
3784                 spin_unlock(&inode->i_lock);
3785                 return -ETXTBSY;
3786         }
3787         atomic_set(&inode->i_writecount, -1);
3788         spin_unlock(&inode->i_lock);
3789
3790         return 0;
3791 }
3792
3793 static void restore_bitmap_write_access(struct file *file)
3794 {
3795         struct inode *inode = file->f_mapping->host;
3796
3797         spin_lock(&inode->i_lock);
3798         atomic_set(&inode->i_writecount, 1);
3799         spin_unlock(&inode->i_lock);
3800 }
3801
3802 /* mode:
3803  *   0 - completely stop and dis-assemble array
3804  *   1 - switch to readonly
3805  *   2 - stop but do not disassemble array
3806  */
3807 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
3808 {
3809         int err = 0;
3810         struct gendisk *disk = mddev->gendisk;
3811
3812         if (atomic_read(&mddev->openers) > is_open) {
3813                 printk("md: %s still in use.\n",mdname(mddev));
3814                 return -EBUSY;
3815         }
3816
3817         if (mddev->pers) {
3818
3819                 if (mddev->sync_thread) {
3820                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3821                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3822                         md_unregister_thread(mddev->sync_thread);
3823                         mddev->sync_thread = NULL;
3824                 }
3825
3826                 del_timer_sync(&mddev->safemode_timer);
3827
3828                 switch(mode) {
3829                 case 1: /* readonly */
3830                         err  = -ENXIO;
3831                         if (mddev->ro==1)
3832                                 goto out;
3833                         mddev->ro = 1;
3834                         break;
3835                 case 0: /* disassemble */
3836                 case 2: /* stop */
3837                         bitmap_flush(mddev);
3838                         md_super_wait(mddev);
3839                         if (mddev->ro)
3840                                 set_disk_ro(disk, 0);
3841                         blk_queue_make_request(mddev->queue, md_fail_request);
3842                         mddev->pers->stop(mddev);
3843                         mddev->queue->merge_bvec_fn = NULL;
3844                         mddev->queue->unplug_fn = NULL;
3845                         mddev->queue->backing_dev_info.congested_fn = NULL;
3846                         if (mddev->pers->sync_request)
3847                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3848
3849                         module_put(mddev->pers->owner);
3850                         mddev->pers = NULL;
3851                         /* tell userspace to handle 'inactive' */
3852                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3853
3854                         set_capacity(disk, 0);
3855                         mddev->changed = 1;
3856
3857                         if (mddev->ro)
3858                                 mddev->ro = 0;
3859                 }
3860                 if (!mddev->in_sync || mddev->flags) {
3861                         /* mark array as shutdown cleanly */
3862                         mddev->in_sync = 1;
3863                         md_update_sb(mddev, 1);
3864                 }
3865                 if (mode == 1)
3866                         set_disk_ro(disk, 1);
3867                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3868         }
3869
3870         /*
3871          * Free resources if final stop
3872          */
3873         if (mode == 0) {
3874                 mdk_rdev_t *rdev;
3875                 struct list_head *tmp;
3876
3877                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3878
3879                 bitmap_destroy(mddev);
3880                 if (mddev->bitmap_file) {
3881                         restore_bitmap_write_access(mddev->bitmap_file);
3882                         fput(mddev->bitmap_file);
3883                         mddev->bitmap_file = NULL;
3884                 }
3885                 mddev->bitmap_offset = 0;
3886
3887                 rdev_for_each(rdev, tmp, mddev)
3888                         if (rdev->raid_disk >= 0) {
3889                                 char nm[20];
3890                                 sprintf(nm, "rd%d", rdev->raid_disk);
3891                                 sysfs_remove_link(&mddev->kobj, nm);
3892                         }
3893
3894                 /* make sure all md_delayed_delete calls have finished */
3895                 flush_scheduled_work();
3896
3897                 export_array(mddev);
3898
3899                 mddev->array_sectors = 0;
3900                 mddev->size = 0;
3901                 mddev->raid_disks = 0;
3902                 mddev->recovery_cp = 0;
3903                 mddev->resync_min = 0;
3904                 mddev->resync_max = MaxSector;
3905                 mddev->reshape_position = MaxSector;
3906                 mddev->external = 0;
3907                 mddev->persistent = 0;
3908                 mddev->level = LEVEL_NONE;
3909                 mddev->clevel[0] = 0;
3910                 mddev->flags = 0;
3911                 mddev->ro = 0;
3912                 mddev->metadata_type[0] = 0;
3913                 mddev->chunk_size = 0;
3914                 mddev->ctime = mddev->utime = 0;
3915                 mddev->layout = 0;
3916                 mddev->max_disks = 0;
3917                 mddev->events = 0;
3918                 mddev->delta_disks = 0;
3919                 mddev->new_level = LEVEL_NONE;
3920                 mddev->new_layout = 0;
3921                 mddev->new_chunk = 0;
3922                 mddev->curr_resync = 0;
3923                 mddev->resync_mismatches = 0;
3924                 mddev->suspend_lo = mddev->suspend_hi = 0;
3925                 mddev->sync_speed_min = mddev->sync_speed_max = 0;
3926                 mddev->recovery = 0;
3927                 mddev->in_sync = 0;
3928                 mddev->changed = 0;
3929                 mddev->degraded = 0;
3930                 mddev->barriers_work = 0;
3931                 mddev->safemode = 0;
3932
3933         } else if (mddev->pers)
3934                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3935                         mdname(mddev));
3936         err = 0;
3937         md_new_event(mddev);
3938         sysfs_notify(&mddev->kobj, NULL, "array_state");
3939 out:
3940         return err;
3941 }
3942
3943 #ifndef MODULE
3944 static void autorun_array(mddev_t *mddev)
3945 {
3946         mdk_rdev_t *rdev;
3947         struct list_head *tmp;
3948         int err;
3949
3950         if (list_empty(&mddev->disks))
3951                 return;
3952
3953         printk(KERN_INFO "md: running: ");
3954
3955         rdev_for_each(rdev, tmp, mddev) {
3956                 char b[BDEVNAME_SIZE];
3957                 printk("<%s>", bdevname(rdev->bdev,b));
3958         }
3959         printk("\n");
3960
3961         err = do_md_run(mddev);
3962         if (err) {
3963                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3964                 do_md_stop(mddev, 0, 0);
3965         }
3966 }
3967
3968 /*
3969  * lets try to run arrays based on all disks that have arrived
3970  * until now. (those are in pending_raid_disks)
3971  *
3972  * the method: pick the first pending disk, collect all disks with
3973  * the same UUID, remove all from the pending list and put them into
3974  * the 'same_array' list. Then order this list based on superblock
3975  * update time (freshest comes first), kick out 'old' disks and
3976  * compare superblocks. If everything's fine then run it.
3977  *
3978  * If "unit" is allocated, then bump its reference count
3979  */
3980 static void autorun_devices(int part)
3981 {
3982         struct list_head *tmp;
3983         mdk_rdev_t *rdev0, *rdev;
3984         mddev_t *mddev;
3985         char b[BDEVNAME_SIZE];
3986
3987         printk(KERN_INFO "md: autorun ...\n");
3988         while (!list_empty(&pending_raid_disks)) {
3989                 int unit;
3990                 dev_t dev;
3991                 LIST_HEAD(candidates);
3992                 rdev0 = list_entry(pending_raid_disks.next,
3993                                          mdk_rdev_t, same_set);
3994
3995                 printk(KERN_INFO "md: considering %s ...\n",
3996                         bdevname(rdev0->bdev,b));
3997                 INIT_LIST_HEAD(&candidates);
3998                 rdev_for_each_list(rdev, tmp, pending_raid_disks)
3999                         if (super_90_load(rdev, rdev0, 0) >= 0) {
4000                                 printk(KERN_INFO "md:  adding %s ...\n",
4001                                         bdevname(rdev->bdev,b));
4002                                 list_move(&rdev->same_set, &candidates);
4003                         }
4004                 /*
4005                  * now we have a set of devices, with all of them having
4006                  * mostly sane superblocks. It's time to allocate the
4007                  * mddev.
4008                  */
4009                 if (part) {
4010                         dev = MKDEV(mdp_major,
4011                                     rdev0->preferred_minor << MdpMinorShift);
4012                         unit = MINOR(dev) >> MdpMinorShift;
4013                 } else {
4014                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4015                         unit = MINOR(dev);
4016                 }
4017                 if (rdev0->preferred_minor != unit) {
4018                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4019                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4020                         break;
4021                 }
4022
4023                 md_probe(dev, NULL, NULL);
4024                 mddev = mddev_find(dev);
4025                 if (!mddev || !mddev->gendisk) {
4026                         if (mddev)
4027                                 mddev_put(mddev);
4028                         printk(KERN_ERR
4029                                 "md: cannot allocate memory for md drive.\n");
4030                         break;
4031                 }
4032                 if (mddev_lock(mddev)) 
4033                         printk(KERN_WARNING "md: %s locked, cannot run\n",
4034                                mdname(mddev));
4035                 else if (mddev->raid_disks || mddev->major_version
4036                          || !list_empty(&mddev->disks)) {
4037                         printk(KERN_WARNING 
4038                                 "md: %s already running, cannot run %s\n",
4039                                 mdname(mddev), bdevname(rdev0->bdev,b));
4040                         mddev_unlock(mddev);
4041                 } else {
4042                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4043                         mddev->persistent = 1;
4044                         rdev_for_each_list(rdev, tmp, candidates) {
4045                                 list_del_init(&rdev->same_set);
4046                                 if (bind_rdev_to_array(rdev, mddev))
4047                                         export_rdev(rdev);
4048                         }
4049                         autorun_array(mddev);
4050                         mddev_unlock(mddev);
4051                 }
4052                 /* on success, candidates will be empty, on error
4053                  * it won't...
4054                  */
4055                 rdev_for_each_list(rdev, tmp, candidates) {
4056                         list_del_init(&rdev->same_set);
4057                         export_rdev(rdev);
4058                 }
4059                 mddev_put(mddev);
4060         }
4061         printk(KERN_INFO "md: ... autorun DONE.\n");
4062 }
4063 #endif /* !MODULE */
4064
4065 static int get_version(void __user * arg)
4066 {
4067         mdu_version_t ver;
4068
4069         ver.major = MD_MAJOR_VERSION;
4070         ver.minor = MD_MINOR_VERSION;
4071         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4072
4073         if (copy_to_user(arg, &ver, sizeof(ver)))
4074                 return -EFAULT;
4075
4076         return 0;
4077 }
4078
4079 static int get_array_info(mddev_t * mddev, void __user * arg)
4080 {
4081         mdu_array_info_t info;
4082         int nr,working,active,failed,spare;
4083         mdk_rdev_t *rdev;
4084         struct list_head *tmp;
4085
4086         nr=working=active=failed=spare=0;
4087         rdev_for_each(rdev, tmp, mddev) {
4088                 nr++;
4089                 if (test_bit(Faulty, &rdev->flags))
4090                         failed++;
4091                 else {
4092                         working++;
4093                         if (test_bit(In_sync, &rdev->flags))
4094                                 active++;       
4095                         else
4096                                 spare++;
4097                 }
4098         }
4099
4100         info.major_version = mddev->major_version;
4101         info.minor_version = mddev->minor_version;
4102         info.patch_version = MD_PATCHLEVEL_VERSION;
4103         info.ctime         = mddev->ctime;
4104         info.level         = mddev->level;
4105         info.size          = mddev->size;
4106         if (info.size != mddev->size) /* overflow */
4107                 info.size = -1;
4108         info.nr_disks      = nr;
4109         info.raid_disks    = mddev->raid_disks;
4110         info.md_minor      = mddev->md_minor;
4111         info.not_persistent= !mddev->persistent;
4112
4113         info.utime         = mddev->utime;
4114         info.state         = 0;
4115         if (mddev->in_sync)
4116                 info.state = (1<<MD_SB_CLEAN);
4117         if (mddev->bitmap && mddev->bitmap_offset)
4118                 info.state = (1<<MD_SB_BITMAP_PRESENT);
4119         info.active_disks  = active;
4120         info.working_disks = working;
4121         info.failed_disks  = failed;
4122         info.spare_disks   = spare;
4123
4124         info.layout        = mddev->layout;
4125         info.chunk_size    = mddev->chunk_size;
4126
4127         if (copy_to_user(arg, &info, sizeof(info)))
4128                 return -EFAULT;
4129
4130         return 0;
4131 }
4132
4133 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4134 {
4135         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4136         char *ptr, *buf = NULL;
4137         int err = -ENOMEM;
4138
4139         if (md_allow_write(mddev))
4140                 file = kmalloc(sizeof(*file), GFP_NOIO);
4141         else
4142                 file = kmalloc(sizeof(*file), GFP_KERNEL);
4143
4144         if (!file)
4145                 goto out;
4146
4147         /* bitmap disabled, zero the first byte and copy out */
4148         if (!mddev->bitmap || !mddev->bitmap->file) {
4149                 file->pathname[0] = '\0';
4150                 goto copy_out;
4151         }
4152
4153         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4154         if (!buf)
4155                 goto out;
4156
4157         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4158         if (IS_ERR(ptr))
4159                 goto out;
4160
4161         strcpy(file->pathname, ptr);
4162
4163 copy_out:
4164         err = 0;
4165         if (copy_to_user(arg, file, sizeof(*file)))
4166                 err = -EFAULT;
4167 out:
4168         kfree(buf);
4169         kfree(file);
4170         return err;
4171 }
4172
4173 static int get_disk_info(mddev_t * mddev, void __user * arg)
4174 {
4175         mdu_disk_info_t info;
4176         mdk_rdev_t *rdev;
4177
4178         if (copy_from_user(&info, arg, sizeof(info)))
4179                 return -EFAULT;
4180
4181         rdev = find_rdev_nr(mddev, info.number);
4182         if (rdev) {
4183                 info.major = MAJOR(rdev->bdev->bd_dev);
4184                 info.minor = MINOR(rdev->bdev->bd_dev);
4185                 info.raid_disk = rdev->raid_disk;
4186                 info.state = 0;
4187                 if (test_bit(Faulty, &rdev->flags))
4188                         info.state |= (1<<MD_DISK_FAULTY);
4189                 else if (test_bit(In_sync, &rdev->flags)) {
4190                         info.state |= (1<<MD_DISK_ACTIVE);
4191                         info.state |= (1<<MD_DISK_SYNC);
4192                 }
4193                 if (test_bit(WriteMostly, &rdev->flags))
4194                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
4195         } else {
4196                 info.major = info.minor = 0;
4197                 info.raid_disk = -1;
4198                 info.state = (1<<MD_DISK_REMOVED);
4199         }
4200
4201         if (copy_to_user(arg, &info, sizeof(info)))
4202                 return -EFAULT;
4203
4204         return 0;
4205 }
4206
4207 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4208 {
4209         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4210         mdk_rdev_t *rdev;
4211         dev_t dev = MKDEV(info->major,info->minor);
4212
4213         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4214                 return -EOVERFLOW;
4215
4216         if (!mddev->raid_disks) {
4217                 int err;
4218                 /* expecting a device which has a superblock */
4219                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4220                 if (IS_ERR(rdev)) {
4221                         printk(KERN_WARNING 
4222                                 "md: md_import_device returned %ld\n",
4223                                 PTR_ERR(rdev));
4224                         return PTR_ERR(rdev);
4225                 }
4226                 if (!list_empty(&mddev->disks)) {
4227                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4228                                                         mdk_rdev_t, same_set);
4229                         int err = super_types[mddev->major_version]
4230                                 .load_super(rdev, rdev0, mddev->minor_version);
4231                         if (err < 0) {
4232                                 printk(KERN_WARNING 
4233                                         "md: %s has different UUID to %s\n",
4234                                         bdevname(rdev->bdev,b), 
4235                                         bdevname(rdev0->bdev,b2));
4236                                 export_rdev(rdev);
4237                                 return -EINVAL;
4238                         }
4239                 }
4240                 err = bind_rdev_to_array(rdev, mddev);
4241                 if (err)
4242                         export_rdev(rdev);
4243                 return err;
4244         }
4245
4246         /*
4247          * add_new_disk can be used once the array is assembled
4248          * to add "hot spares".  They must already have a superblock
4249          * written
4250          */
4251         if (mddev->pers) {
4252                 int err;
4253                 if (!mddev->pers->hot_add_disk) {
4254                         printk(KERN_WARNING 
4255                                 "%s: personality does not support diskops!\n",
4256                                mdname(mddev));
4257                         return -EINVAL;
4258                 }
4259                 if (mddev->persistent)
4260                         rdev = md_import_device(dev, mddev->major_version,
4261                                                 mddev->minor_version);
4262                 else
4263                         rdev = md_import_device(dev, -1, -1);
4264                 if (IS_ERR(rdev)) {
4265                         printk(KERN_WARNING 
4266                                 "md: md_import_device returned %ld\n",
4267                                 PTR_ERR(rdev));
4268                         return PTR_ERR(rdev);
4269                 }
4270                 /* set save_raid_disk if appropriate */
4271                 if (!mddev->persistent) {
4272                         if (info->state & (1<<MD_DISK_SYNC)  &&
4273                             info->raid_disk < mddev->raid_disks)
4274                                 rdev->raid_disk = info->raid_disk;
4275                         else
4276                                 rdev->raid_disk = -1;
4277                 } else
4278                         super_types[mddev->major_version].
4279                                 validate_super(mddev, rdev);
4280                 rdev->saved_raid_disk = rdev->raid_disk;
4281
4282                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4283                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4284                         set_bit(WriteMostly, &rdev->flags);
4285
4286                 rdev->raid_disk = -1;
4287                 err = bind_rdev_to_array(rdev, mddev);
4288                 if (!err && !mddev->pers->hot_remove_disk) {
4289                         /* If there is hot_add_disk but no hot_remove_disk
4290                          * then added disks for geometry changes,
4291                          * and should be added immediately.
4292                          */
4293                         super_types[mddev->major_version].
4294                                 validate_super(mddev, rdev);
4295                         err = mddev->pers->hot_add_disk(mddev, rdev);
4296                         if (err)
4297                                 unbind_rdev_from_array(rdev);
4298                 }
4299                 if (err)
4300                         export_rdev(rdev);
4301                 else
4302                         sysfs_notify(&rdev->kobj, NULL, "state");
4303
4304                 md_update_sb(mddev, 1);
4305                 if (mddev->degraded)
4306                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4307                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4308                 md_wakeup_thread(mddev->thread);
4309                 return err;
4310         }
4311
4312         /* otherwise, add_new_disk is only allowed
4313          * for major_version==0 superblocks
4314          */
4315         if (mddev->major_version != 0) {
4316                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4317                        mdname(mddev));
4318                 return -EINVAL;
4319         }
4320
4321         if (!(info->state & (1<<MD_DISK_FAULTY))) {
4322                 int err;
4323                 rdev = md_import_device(dev, -1, 0);
4324                 if (IS_ERR(rdev)) {
4325                         printk(KERN_WARNING 
4326                                 "md: error, md_import_device() returned %ld\n",
4327                                 PTR_ERR(rdev));
4328                         return PTR_ERR(rdev);
4329                 }
4330                 rdev->desc_nr = info->number;
4331                 if (info->raid_disk < mddev->raid_disks)
4332                         rdev->raid_disk = info->raid_disk;
4333                 else
4334                         rdev->raid_disk = -1;
4335
4336                 if (rdev->raid_disk < mddev->raid_disks)
4337                         if (info->state & (1<<MD_DISK_SYNC))
4338                                 set_bit(In_sync, &rdev->flags);
4339
4340                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4341                         set_bit(WriteMostly, &rdev->flags);
4342
4343                 if (!mddev->persistent) {
4344                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
4345                         rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4346                 } else 
4347                         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4348                 rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
4349
4350                 err = bind_rdev_to_array(rdev, mddev);
4351                 if (err) {
4352                         export_rdev(rdev);
4353                         return err;
4354                 }
4355         }
4356
4357         return 0;
4358 }
4359
4360 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4361 {
4362         char b[BDEVNAME_SIZE];
4363         mdk_rdev_t *rdev;
4364
4365         rdev = find_rdev(mddev, dev);
4366         if (!rdev)
4367                 return -ENXIO;
4368
4369         if (rdev->raid_disk >= 0)
4370                 goto busy;
4371
4372         kick_rdev_from_array(rdev);
4373         md_update_sb(mddev, 1);
4374         md_new_event(mddev);
4375
4376         return 0;
4377 busy:
4378         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4379                 bdevname(rdev->bdev,b), mdname(mddev));
4380         return -EBUSY;
4381 }
4382
4383 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4384 {
4385         char b[BDEVNAME_SIZE];
4386         int err;
4387         mdk_rdev_t *rdev;
4388
4389         if (!mddev->pers)
4390                 return -ENODEV;
4391
4392         if (mddev->major_version != 0) {
4393                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4394                         " version-0 superblocks.\n",
4395                         mdname(mddev));
4396                 return -EINVAL;
4397         }
4398         if (!mddev->pers->hot_add_disk) {
4399                 printk(KERN_WARNING 
4400                         "%s: personality does not support diskops!\n",
4401                         mdname(mddev));
4402                 return -EINVAL;
4403         }
4404
4405         rdev = md_import_device(dev, -1, 0);
4406         if (IS_ERR(rdev)) {
4407                 printk(KERN_WARNING 
4408                         "md: error, md_import_device() returned %ld\n",
4409                         PTR_ERR(rdev));
4410                 return -EINVAL;
4411         }
4412
4413         if (mddev->persistent)
4414                 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4415         else
4416                 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4417
4418         rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
4419
4420         if (test_bit(Faulty, &rdev->flags)) {
4421                 printk(KERN_WARNING 
4422                         "md: can not hot-add faulty %s disk to %s!\n",
4423                         bdevname(rdev->bdev,b), mdname(mddev));
4424                 err = -EINVAL;
4425                 goto abort_export;
4426         }
4427         clear_bit(In_sync, &rdev->flags);
4428         rdev->desc_nr = -1;
4429         rdev->saved_raid_disk = -1;
4430         err = bind_rdev_to_array(rdev, mddev);
4431         if (err)
4432                 goto abort_export;
4433
4434         /*
4435          * The rest should better be atomic, we can have disk failures
4436          * noticed in interrupt contexts ...
4437          */
4438
4439         if (rdev->desc_nr == mddev->max_disks) {
4440                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
4441                         mdname(mddev));
4442                 err = -EBUSY;
4443                 goto abort_unbind_export;
4444         }
4445
4446         rdev->raid_disk = -1;
4447
4448         md_update_sb(mddev, 1);
4449
4450         /*
4451          * Kick recovery, maybe this spare has to be added to the
4452          * array immediately.
4453          */
4454         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4455         md_wakeup_thread(mddev->thread);
4456         md_new_event(mddev);
4457         return 0;
4458
4459 abort_unbind_export:
4460         unbind_rdev_from_array(rdev);
4461
4462 abort_export:
4463         export_rdev(rdev);
4464         return err;
4465 }
4466
4467 static int set_bitmap_file(mddev_t *mddev, int fd)
4468 {
4469         int err;
4470
4471         if (mddev->pers) {
4472                 if (!mddev->pers->quiesce)
4473                         return -EBUSY;
4474                 if (mddev->recovery || mddev->sync_thread)
4475                         return -EBUSY;
4476                 /* we should be able to change the bitmap.. */
4477         }
4478
4479
4480         if (fd >= 0) {
4481                 if (mddev->bitmap)
4482                         return -EEXIST; /* cannot add when bitmap is present */
4483                 mddev->bitmap_file = fget(fd);
4484
4485                 if (mddev->bitmap_file == NULL) {
4486                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4487                                mdname(mddev));
4488                         return -EBADF;
4489                 }
4490
4491                 err = deny_bitmap_write_access(mddev->bitmap_file);
4492                 if (err) {
4493                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4494                                mdname(mddev));
4495                         fput(mddev->bitmap_file);
4496                         mddev->bitmap_file = NULL;
4497                         return err;
4498                 }
4499                 mddev->bitmap_offset = 0; /* file overrides offset */
4500         } else if (mddev->bitmap == NULL)
4501                 return -ENOENT; /* cannot remove what isn't there */
4502         err = 0;
4503         if (mddev->pers) {
4504                 mddev->pers->quiesce(mddev, 1);
4505                 if (fd >= 0)
4506                         err = bitmap_create(mddev);
4507                 if (fd < 0 || err) {
4508                         bitmap_destroy(mddev);
4509                         fd = -1; /* make sure to put the file */
4510                 }
4511                 mddev->pers->quiesce(mddev, 0);
4512         }
4513         if (fd < 0) {
4514                 if (mddev->bitmap_file) {
4515                         restore_bitmap_write_access(mddev->bitmap_file);
4516                         fput(mddev->bitmap_file);
4517                 }
4518                 mddev->bitmap_file = NULL;
4519         }
4520
4521         return err;
4522 }
4523
4524 /*
4525  * set_array_info is used two different ways
4526  * The original usage is when creating a new array.
4527  * In this usage, raid_disks is > 0 and it together with
4528  *  level, size, not_persistent,layout,chunksize determine the
4529  *  shape of the array.
4530  *  This will always create an array with a type-0.90.0 superblock.
4531  * The newer usage is when assembling an array.
4532  *  In this case raid_disks will be 0, and the major_version field is
4533  *  use to determine which style super-blocks are to be found on the devices.
4534  *  The minor and patch _version numbers are also kept incase the
4535  *  super_block handler wishes to interpret them.
4536  */
4537 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4538 {
4539
4540         if (info->raid_disks == 0) {
4541                 /* just setting version number for superblock loading */
4542                 if (info->major_version < 0 ||
4543                     info->major_version >= ARRAY_SIZE(super_types) ||
4544                     super_types[info->major_version].name == NULL) {
4545                         /* maybe try to auto-load a module? */
4546                         printk(KERN_INFO 
4547                                 "md: superblock version %d not known\n",
4548                                 info->major_version);
4549                         return -EINVAL;
4550                 }
4551                 mddev->major_version = info->major_version;
4552                 mddev->minor_version = info->minor_version;
4553                 mddev->patch_version = info->patch_version;
4554                 mddev->persistent = !info->not_persistent;
4555                 return 0;
4556         }
4557         mddev->major_version = MD_MAJOR_VERSION;
4558         mddev->minor_version = MD_MINOR_VERSION;
4559         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4560         mddev->ctime         = get_seconds();
4561
4562         mddev->level         = info->level;
4563         mddev->clevel[0]     = 0;
4564         mddev->size          = info->size;
4565         mddev->raid_disks    = info->raid_disks;
4566         /* don't set md_minor, it is determined by which /dev/md* was
4567          * openned
4568          */
4569         if (info->state & (1<<MD_SB_CLEAN))
4570                 mddev->recovery_cp = MaxSector;
4571         else
4572                 mddev->recovery_cp = 0;
4573         mddev->persistent    = ! info->not_persistent;
4574         mddev->external      = 0;
4575
4576         mddev->layout        = info->layout;
4577         mddev->chunk_size    = info->chunk_size;
4578
4579         mddev->max_disks     = MD_SB_DISKS;
4580
4581         if (mddev->persistent)
4582                 mddev->flags         = 0;
4583         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4584
4585         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4586         mddev->bitmap_offset = 0;
4587
4588         mddev->reshape_position = MaxSector;
4589
4590         /*
4591          * Generate a 128 bit UUID
4592          */
4593         get_random_bytes(mddev->uuid, 16);
4594
4595         mddev->new_level = mddev->level;
4596         mddev->new_chunk = mddev->chunk_size;
4597         mddev->new_layout = mddev->layout;
4598         mddev->delta_disks = 0;
4599
4600         return 0;
4601 }
4602
4603 static int update_size(mddev_t *mddev, sector_t num_sectors)
4604 {
4605         mdk_rdev_t * rdev;
4606         int rv;
4607         struct list_head *tmp;
4608         int fit = (num_sectors == 0);
4609
4610         if (mddev->pers->resize == NULL)
4611                 return -EINVAL;
4612         /* The "num_sectors" is the number of sectors of each device that
4613          * is used.  This can only make sense for arrays with redundancy.
4614          * linear and raid0 always use whatever space is available. We can only
4615          * consider changing this number if no resync or reconstruction is
4616          * happening, and if the new size is acceptable. It must fit before the
4617          * sb_start or, if that is <data_offset, it must fit before the size
4618          * of each device.  If num_sectors is zero, we find the largest size
4619          * that fits.
4620
4621          */
4622         if (mddev->sync_thread)
4623                 return -EBUSY;
4624         if (mddev->bitmap)
4625                 /* Sorry, cannot grow a bitmap yet, just remove it,
4626                  * grow, and re-add.
4627                  */
4628                 return -EBUSY;
4629         rdev_for_each(rdev, tmp, mddev) {
4630                 sector_t avail;
4631                 avail = rdev->size * 2;
4632
4633                 if (fit && (num_sectors == 0 || num_sectors > avail))
4634                         num_sectors = avail;
4635                 if (avail < num_sectors)
4636                         return -ENOSPC;
4637         }
4638         rv = mddev->pers->resize(mddev, num_sectors);
4639         if (!rv) {
4640                 struct block_device *bdev;
4641
4642                 bdev = bdget_disk(mddev->gendisk, 0);
4643                 if (bdev) {
4644                         mutex_lock(&bdev->bd_inode->i_mutex);
4645                         i_size_write(bdev->bd_inode,
4646                                      (loff_t)mddev->array_sectors << 9);
4647                         mutex_unlock(&bdev->bd_inode->i_mutex);
4648                         bdput(bdev);
4649                 }
4650         }
4651         return rv;
4652 }
4653
4654 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4655 {
4656         int rv;
4657         /* change the number of raid disks */
4658         if (mddev->pers->check_reshape == NULL)
4659                 return -EINVAL;
4660         if (raid_disks <= 0 ||
4661             raid_disks >= mddev->max_disks)
4662                 return -EINVAL;
4663         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4664                 return -EBUSY;
4665         mddev->delta_disks = raid_disks - mddev->raid_disks;
4666
4667         rv = mddev->pers->check_reshape(mddev);
4668         return rv;
4669 }
4670
4671
4672 /*
4673  * update_array_info is used to change the configuration of an
4674  * on-line array.
4675  * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
4676  * fields in the info are checked against the array.
4677  * Any differences that cannot be handled will cause an error.
4678  * Normally, only one change can be managed at a time.
4679  */
4680 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4681 {
4682         int rv = 0;
4683         int cnt = 0;
4684         int state = 0;
4685
4686         /* calculate expected state,ignoring low bits */
4687         if (mddev->bitmap && mddev->bitmap_offset)
4688                 state |= (1 << MD_SB_BITMAP_PRESENT);
4689
4690         if (mddev->major_version != info->major_version ||
4691             mddev->minor_version != info->minor_version ||
4692 /*          mddev->patch_version != info->patch_version || */
4693             mddev->ctime         != info->ctime         ||
4694             mddev->level         != info->level         ||
4695 /*          mddev->layout        != info->layout        || */
4696             !mddev->persistent   != info->not_persistent||
4697             mddev->chunk_size    != info->chunk_size    ||
4698             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4699             ((state^info->state) & 0xfffffe00)
4700                 )
4701                 return -EINVAL;
4702         /* Check there is only one change */
4703         if (info->size >= 0 && mddev->size != info->size) cnt++;
4704         if (mddev->raid_disks != info->raid_disks) cnt++;
4705         if (mddev->layout != info->layout) cnt++;
4706         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4707         if (cnt == 0) return 0;
4708         if (cnt > 1) return -EINVAL;
4709
4710         if (mddev->layout != info->layout) {
4711                 /* Change layout
4712                  * we don't need to do anything at the md level, the
4713                  * personality will take care of it all.
4714                  */
4715                 if (mddev->pers->reconfig == NULL)
4716                         return -EINVAL;
4717                 else
4718                         return mddev->pers->reconfig(mddev, info->layout, -1);
4719         }
4720         if (info->size >= 0 && mddev->size != info->size)
4721                 rv = update_size(mddev, (sector_t)info->size * 2);
4722
4723         if (mddev->raid_disks    != info->raid_disks)
4724                 rv = update_raid_disks(mddev, info->raid_disks);
4725
4726         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4727                 if (mddev->pers->quiesce == NULL)
4728                         return -EINVAL;
4729                 if (mddev->recovery || mddev->sync_thread)
4730                         return -EBUSY;
4731                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4732                         /* add the bitmap */
4733                         if (mddev->bitmap)
4734                                 return -EEXIST;
4735                         if (mddev->default_bitmap_offset == 0)
4736                                 return -EINVAL;
4737                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4738                         mddev->pers->quiesce(mddev, 1);
4739                         rv = bitmap_create(mddev);
4740                         if (rv)
4741                                 bitmap_destroy(mddev);
4742                         mddev->pers->quiesce(mddev, 0);
4743                 } else {
4744                         /* remove the bitmap */
4745                         if (!mddev->bitmap)
4746                                 return -ENOENT;
4747                         if (mddev->bitmap->file)
4748                                 return -EINVAL;
4749                         mddev->pers->quiesce(mddev, 1);
4750                         bitmap_destroy(mddev);
4751                         mddev->pers->quiesce(mddev, 0);
4752                         mddev->bitmap_offset = 0;
4753                 }
4754         }
4755         md_update_sb(mddev, 1);
4756         return rv;
4757 }
4758
4759 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4760 {
4761         mdk_rdev_t *rdev;
4762
4763         if (mddev->pers == NULL)
4764                 return -ENODEV;
4765
4766         rdev = find_rdev(mddev, dev);
4767         if (!rdev)
4768                 return -ENODEV;
4769
4770         md_error(mddev, rdev);
4771         return 0;
4772 }
4773
4774 /*
4775  * We have a problem here : there is no easy way to give a CHS
4776  * virtual geometry. We currently pretend that we have a 2 heads
4777  * 4 sectors (with a BIG number of cylinders...). This drives
4778  * dosfs just mad... ;-)
4779  */
4780 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4781 {
4782         mddev_t *mddev = bdev->bd_disk->private_data;
4783
4784         geo->heads = 2;
4785         geo->sectors = 4;
4786         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4787         return 0;
4788 }
4789
4790 static int md_ioctl(struct inode *inode, struct file *file,
4791                         unsigned int cmd, unsigned long arg)
4792 {
4793         int err = 0;
4794         void __user *argp = (void __user *)arg;
4795         mddev_t *mddev = NULL;
4796
4797         if (!capable(CAP_SYS_ADMIN))
4798                 return -EACCES;
4799
4800         /*
4801          * Commands dealing with the RAID driver but not any
4802          * particular array:
4803          */
4804         switch (cmd)
4805         {
4806                 case RAID_VERSION:
4807                         err = get_version(argp);
4808                         goto done;
4809
4810                 case PRINT_RAID_DEBUG:
4811                         err = 0;
4812                         md_print_devices();
4813                         goto done;
4814
4815 #ifndef MODULE
4816                 case RAID_AUTORUN:
4817                         err = 0;
4818                         autostart_arrays(arg);
4819                         goto done;
4820 #endif
4821                 default:;
4822         }
4823
4824         /*
4825          * Commands creating/starting a new array:
4826          */
4827
4828         mddev = inode->i_bdev->bd_disk->private_data;
4829
4830         if (!mddev) {
4831                 BUG();
4832                 goto abort;
4833         }
4834
4835         err = mddev_lock(mddev);
4836         if (err) {
4837                 printk(KERN_INFO 
4838                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4839                         err, cmd);
4840                 goto abort;
4841         }
4842
4843         switch (cmd)
4844         {
4845                 case SET_ARRAY_INFO:
4846                         {
4847                                 mdu_array_info_t info;
4848                                 if (!arg)
4849                                         memset(&info, 0, sizeof(info));
4850                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4851                                         err = -EFAULT;
4852                                         goto abort_unlock;
4853                                 }
4854                                 if (mddev->pers) {
4855                                         err = update_array_info(mddev, &info);
4856                                         if (err) {
4857                                                 printk(KERN_WARNING "md: couldn't update"
4858                                                        " array info. %d\n", err);
4859                                                 goto abort_unlock;
4860                                         }
4861                                         goto done_unlock;
4862                                 }
4863                                 if (!list_empty(&mddev->disks)) {
4864                                         printk(KERN_WARNING
4865                                                "md: array %s already has disks!\n",
4866                                                mdname(mddev));
4867                                         err = -EBUSY;
4868                                         goto abort_unlock;
4869                                 }
4870                                 if (mddev->raid_disks) {
4871                                         printk(KERN_WARNING
4872                                                "md: array %s already initialised!\n",
4873                                                mdname(mddev));
4874                                         err = -EBUSY;
4875                                         goto abort_unlock;
4876                                 }
4877                                 err = set_array_info(mddev, &info);
4878                                 if (err) {
4879                                         printk(KERN_WARNING "md: couldn't set"
4880                                                " array info. %d\n", err);
4881                                         goto abort_unlock;
4882                                 }
4883                         }
4884                         goto done_unlock;
4885
4886                 default:;
4887         }
4888
4889         /*
4890          * Commands querying/configuring an existing array:
4891          */
4892         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4893          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4894         if ((!mddev->raid_disks && !mddev->external)
4895             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4896             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4897             && cmd != GET_BITMAP_FILE) {
4898                 err = -ENODEV;
4899                 goto abort_unlock;
4900         }
4901
4902         /*
4903          * Commands even a read-only array can execute:
4904          */
4905         switch (cmd)
4906         {
4907                 case GET_ARRAY_INFO:
4908                         err = get_array_info(mddev, argp);
4909                         goto done_unlock;
4910
4911                 case GET_BITMAP_FILE:
4912                         err = get_bitmap_file(mddev, argp);
4913                         goto done_unlock;
4914
4915                 case GET_DISK_INFO:
4916                         err = get_disk_info(mddev, argp);
4917                         goto done_unlock;
4918
4919                 case RESTART_ARRAY_RW:
4920                         err = restart_array(mddev);
4921                         goto done_unlock;
4922
4923                 case STOP_ARRAY:
4924                         err = do_md_stop(mddev, 0, 1);
4925                         goto done_unlock;
4926
4927                 case STOP_ARRAY_RO:
4928                         err = do_md_stop(mddev, 1, 1);
4929                         goto done_unlock;
4930
4931         }
4932
4933         /*
4934          * The remaining ioctls are changing the state of the
4935          * superblock, so we do not allow them on read-only arrays.
4936          * However non-MD ioctls (e.g. get-size) will still come through
4937          * here and hit the 'default' below, so only disallow
4938          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4939          */
4940         if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
4941                 if (mddev->ro == 2) {
4942                         mddev->ro = 0;
4943                         sysfs_notify(&mddev->kobj, NULL, "array_state");
4944                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4945                         md_wakeup_thread(mddev->thread);
4946                 } else {
4947                         err = -EROFS;
4948                         goto abort_unlock;
4949                 }
4950         }
4951
4952         switch (cmd)
4953         {
4954                 case ADD_NEW_DISK:
4955                 {
4956                         mdu_disk_info_t info;
4957                         if (copy_from_user(&info, argp, sizeof(info)))
4958                                 err = -EFAULT;
4959                         else
4960                                 err = add_new_disk(mddev, &info);
4961                         goto done_unlock;
4962                 }
4963
4964                 case HOT_REMOVE_DISK:
4965                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4966                         goto done_unlock;
4967
4968                 case HOT_ADD_DISK:
4969                         err = hot_add_disk(mddev, new_decode_dev(arg));
4970                         goto done_unlock;
4971
4972                 case SET_DISK_FAULTY:
4973                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4974                         goto done_unlock;
4975
4976                 case RUN_ARRAY:
4977                         err = do_md_run(mddev);
4978                         goto done_unlock;
4979
4980                 case SET_BITMAP_FILE:
4981                         err = set_bitmap_file(mddev, (int)arg);
4982                         goto done_unlock;
4983
4984                 default:
4985                         err = -EINVAL;
4986                         goto abort_unlock;
4987         }
4988
4989 done_unlock:
4990 abort_unlock:
4991         mddev_unlock(mddev);
4992
4993         return err;
4994 done:
4995         if (err)
4996                 MD_BUG();
4997 abort:
4998         return err;
4999 }
5000
5001 static int md_open(struct inode *inode, struct file *file)
5002 {
5003         /*
5004          * Succeed if we can lock the mddev, which confirms that
5005          * it isn't being stopped right now.
5006          */
5007         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
5008         int err;
5009
5010         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
5011                 goto out;
5012
5013         err = 0;
5014         mddev_get(mddev);
5015         atomic_inc(&mddev->openers);
5016         mddev_unlock(mddev);
5017
5018         check_disk_change(inode->i_bdev);
5019  out:
5020         return err;
5021 }
5022
5023 static int md_release(struct inode *inode, struct file * file)
5024 {
5025         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
5026
5027         BUG_ON(!mddev);
5028         atomic_dec(&mddev->openers);
5029         mddev_put(mddev);
5030
5031         return 0;
5032 }
5033
5034 static int md_media_changed(struct gendisk *disk)
5035 {
5036         mddev_t *mddev = disk->private_data;
5037
5038         return mddev->changed;
5039 }
5040
5041 static int md_revalidate(struct gendisk *disk)
5042 {
5043         mddev_t *mddev = disk->private_data;
5044
5045         mddev->changed = 0;
5046         return 0;
5047 }
5048 static struct block_device_operations md_fops =
5049 {
5050         .owner          = THIS_MODULE,
5051         .open           = md_open,
5052         .release        = md_release,
5053         .ioctl          = md_ioctl,
5054         .getgeo         = md_getgeo,
5055         .media_changed  = md_media_changed,
5056         .revalidate_disk= md_revalidate,
5057 };
5058
5059 static int md_thread(void * arg)
5060 {
5061         mdk_thread_t *thread = arg;
5062
5063         /*
5064          * md_thread is a 'system-thread', it's priority should be very
5065          * high. We avoid resource deadlocks individually in each
5066          * raid personality. (RAID5 does preallocation) We also use RR and
5067          * the very same RT priority as kswapd, thus we will never get
5068          * into a priority inversion deadlock.
5069          *
5070          * we definitely have to have equal or higher priority than
5071          * bdflush, otherwise bdflush will deadlock if there are too
5072          * many dirty RAID5 blocks.
5073          */
5074
5075         allow_signal(SIGKILL);
5076         while (!kthread_should_stop()) {
5077
5078                 /* We need to wait INTERRUPTIBLE so that
5079                  * we don't add to the load-average.
5080                  * That means we need to be sure no signals are
5081                  * pending
5082                  */
5083                 if (signal_pending(current))
5084                         flush_signals(current);
5085
5086                 wait_event_interruptible_timeout
5087                         (thread->wqueue,
5088                          test_bit(THREAD_WAKEUP, &thread->flags)
5089                          || kthread_should_stop(),
5090                          thread->timeout);
5091
5092                 clear_bit(THREAD_WAKEUP, &thread->flags);
5093
5094                 thread->run(thread->mddev);
5095         }
5096
5097         return 0;
5098 }
5099
5100 void md_wakeup_thread(mdk_thread_t *thread)
5101 {
5102         if (thread) {
5103                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5104                 set_bit(THREAD_WAKEUP, &thread->flags);
5105                 wake_up(&thread->wqueue);
5106         }
5107 }
5108
5109 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5110                                  const char *name)
5111 {
5112         mdk_thread_t *thread;
5113
5114         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5115         if (!thread)
5116                 return NULL;
5117
5118         init_waitqueue_head(&thread->wqueue);
5119
5120         thread->run = run;
5121         thread->mddev = mddev;
5122         thread->timeout = MAX_SCHEDULE_TIMEOUT;
5123         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
5124         if (IS_ERR(thread->tsk)) {
5125                 kfree(thread);
5126                 return NULL;
5127         }
5128         return thread;
5129 }
5130
5131 void md_unregister_thread(mdk_thread_t *thread)
5132 {
5133         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5134
5135         kthread_stop(thread->tsk);
5136         kfree(thread);
5137 }
5138
5139 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5140 {
5141         if (!mddev) {
5142                 MD_BUG();
5143                 return;
5144         }
5145
5146         if (!rdev || test_bit(Faulty, &rdev->flags))
5147                 return;
5148
5149         if (mddev->external)
5150                 set_bit(Blocked, &rdev->flags);
5151 /*
5152         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5153                 mdname(mddev),
5154                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5155                 __builtin_return_address(0),__builtin_return_address(1),
5156                 __builtin_return_address(2),__builtin_return_address(3));
5157 */
5158         if (!mddev->pers)
5159                 return;
5160         if (!mddev->pers->error_handler)
5161                 return;
5162         mddev->pers->error_handler(mddev,rdev);
5163         if (mddev->degraded)
5164                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5165         set_bit(StateChanged, &rdev->flags);
5166         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5167         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5168         md_wakeup_thread(mddev->thread);
5169         md_new_event_inintr(mddev);
5170 }
5171
5172 /* seq_file implementation /proc/mdstat */
5173
5174 static void status_unused(struct seq_file *seq)
5175 {
5176         int i = 0;
5177         mdk_rdev_t *rdev;
5178         struct list_head *tmp;
5179
5180         seq_printf(seq, "unused devices: ");
5181
5182         rdev_for_each_list(rdev, tmp, pending_raid_disks) {
5183                 char b[BDEVNAME_SIZE];
5184                 i++;
5185                 seq_printf(seq, "%s ",
5186                               bdevname(rdev->bdev,b));
5187         }
5188         if (!i)
5189                 seq_printf(seq, "<none>");
5190
5191         seq_printf(seq, "\n");
5192 }
5193
5194
5195 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5196 {
5197         sector_t max_blocks, resync, res;
5198         unsigned long dt, db, rt;
5199         int scale;
5200         unsigned int per_milli;
5201
5202         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
5203
5204         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5205                 max_blocks = mddev->resync_max_sectors >> 1;
5206         else
5207                 max_blocks = mddev->size;
5208
5209         /*
5210          * Should not happen.
5211          */
5212         if (!max_blocks) {
5213                 MD_BUG();
5214                 return;
5215         }
5216         /* Pick 'scale' such that (resync>>scale)*1000 will fit
5217          * in a sector_t, and (max_blocks>>scale) will fit in a
5218          * u32, as those are the requirements for sector_div.
5219          * Thus 'scale' must be at least 10
5220          */
5221         scale = 10;
5222         if (sizeof(sector_t) > sizeof(unsigned long)) {
5223                 while ( max_blocks/2 > (1ULL<<(scale+32)))
5224                         scale++;
5225         }
5226         res = (resync>>scale)*1000;
5227         sector_div(res, (u32)((max_blocks>>scale)+1));
5228
5229         per_milli = res;
5230         {
5231                 int i, x = per_milli/50, y = 20-x;
5232                 seq_printf(seq, "[");
5233                 for (i = 0; i < x; i++)
5234                         seq_printf(seq, "=");
5235                 seq_printf(seq, ">");
5236                 for (i = 0; i < y; i++)
5237                         seq_printf(seq, ".");
5238                 seq_printf(seq, "] ");
5239         }
5240         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5241                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5242                     "reshape" :
5243                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5244                      "check" :
5245                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5246                       "resync" : "recovery"))),
5247                    per_milli/10, per_milli % 10,
5248                    (unsigned long long) resync,
5249                    (unsigned long long) max_blocks);
5250
5251         /*
5252          * We do not want to overflow, so the order of operands and
5253          * the * 100 / 100 trick are important. We do a +1 to be
5254          * safe against division by zero. We only estimate anyway.
5255          *
5256          * dt: time from mark until now
5257          * db: blocks written from mark until now
5258          * rt: remaining time
5259          */
5260         dt = ((jiffies - mddev->resync_mark) / HZ);
5261         if (!dt) dt++;
5262         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5263                 - mddev->resync_mark_cnt;
5264         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
5265
5266         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
5267
5268         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5269 }
5270
5271 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5272 {
5273         struct list_head *tmp;
5274         loff_t l = *pos;
5275         mddev_t *mddev;
5276
5277         if (l >= 0x10000)
5278                 return NULL;
5279         if (!l--)
5280                 /* header */
5281                 return (void*)1;
5282
5283         spin_lock(&all_mddevs_lock);
5284         list_for_each(tmp,&all_mddevs)
5285                 if (!l--) {
5286                         mddev = list_entry(tmp, mddev_t, all_mddevs);
5287                         mddev_get(mddev);
5288                         spin_unlock(&all_mddevs_lock);
5289                         return mddev;
5290                 }
5291         spin_unlock(&all_mddevs_lock);
5292         if (!l--)
5293                 return (void*)2;/* tail */
5294         return NULL;
5295 }
5296
5297 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5298 {
5299         struct list_head *tmp;
5300         mddev_t *next_mddev, *mddev = v;
5301         
5302         ++*pos;
5303         if (v == (void*)2)
5304                 return NULL;
5305
5306         spin_lock(&all_mddevs_lock);
5307         if (v == (void*)1)
5308                 tmp = all_mddevs.next;
5309         else
5310                 tmp = mddev->all_mddevs.next;
5311         if (tmp != &all_mddevs)
5312                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5313         else {
5314                 next_mddev = (void*)2;
5315                 *pos = 0x10000;
5316         }               
5317         spin_unlock(&all_mddevs_lock);
5318
5319         if (v != (void*)1)
5320                 mddev_put(mddev);
5321         return next_mddev;
5322
5323 }
5324
5325 static void md_seq_stop(struct seq_file *seq, void *v)
5326 {
5327         mddev_t *mddev = v;
5328
5329         if (mddev && v != (void*)1 && v != (void*)2)
5330                 mddev_put(mddev);
5331 }
5332
5333 struct mdstat_info {
5334         int event;
5335 };
5336
5337 static int md_seq_show(struct seq_file *seq, void *v)
5338 {
5339         mddev_t *mddev = v;
5340         sector_t size;
5341         struct list_head *tmp2;
5342         mdk_rdev_t *rdev;
5343         struct mdstat_info *mi = seq->private;
5344         struct bitmap *bitmap;
5345
5346         if (v == (void*)1) {
5347                 struct mdk_personality *pers;
5348                 seq_printf(seq, "Personalities : ");
5349                 spin_lock(&pers_lock);
5350                 list_for_each_entry(pers, &pers_list, list)
5351                         seq_printf(seq, "[%s] ", pers->name);
5352
5353                 spin_unlock(&pers_lock);
5354                 seq_printf(seq, "\n");
5355                 mi->event = atomic_read(&md_event_count);
5356                 return 0;
5357         }
5358         if (v == (void*)2) {
5359                 status_unused(seq);
5360                 return 0;
5361         }
5362
5363         if (mddev_lock(mddev) < 0)
5364                 return -EINTR;
5365
5366         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5367                 seq_printf(seq, "%s : %sactive", mdname(mddev),
5368                                                 mddev->pers ? "" : "in");
5369                 if (mddev->pers) {
5370                         if (mddev->ro==1)
5371                                 seq_printf(seq, " (read-only)");
5372                         if (mddev->ro==2)
5373                                 seq_printf(seq, " (auto-read-only)");
5374                         seq_printf(seq, " %s", mddev->pers->name);
5375                 }
5376
5377                 size = 0;
5378                 rdev_for_each(rdev, tmp2, mddev) {
5379                         char b[BDEVNAME_SIZE];
5380                         seq_printf(seq, " %s[%d]",
5381                                 bdevname(rdev->bdev,b), rdev->desc_nr);
5382                         if (test_bit(WriteMostly, &rdev->flags))
5383                                 seq_printf(seq, "(W)");
5384                         if (test_bit(Faulty, &rdev->flags)) {
5385                                 seq_printf(seq, "(F)");
5386                                 continue;
5387                         } else if (rdev->raid_disk < 0)
5388                                 seq_printf(seq, "(S)"); /* spare */
5389                         size += rdev->size;
5390                 }
5391
5392                 if (!list_empty(&mddev->disks)) {
5393                         if (mddev->pers)
5394                                 seq_printf(seq, "\n      %llu blocks",
5395                                            (unsigned long long)
5396                                            mddev->array_sectors / 2);
5397                         else
5398                                 seq_printf(seq, "\n      %llu blocks",
5399                                            (unsigned long long)size);
5400                 }
5401                 if (mddev->persistent) {
5402                         if (mddev->major_version != 0 ||
5403                             mddev->minor_version != 90) {
5404                                 seq_printf(seq," super %d.%d",
5405                                            mddev->major_version,
5406                                            mddev->minor_version);
5407                         }
5408                 } else if (mddev->external)
5409                         seq_printf(seq, " super external:%s",
5410                                    mddev->metadata_type);
5411                 else
5412                         seq_printf(seq, " super non-persistent");
5413
5414                 if (mddev->pers) {
5415                         mddev->pers->status(seq, mddev);
5416                         seq_printf(seq, "\n      ");
5417                         if (mddev->pers->sync_request) {
5418                                 if (mddev->curr_resync > 2) {
5419                                         status_resync(seq, mddev);
5420                                         seq_printf(seq, "\n      ");
5421                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5422                                         seq_printf(seq, "\tresync=DELAYED\n      ");
5423                                 else if (mddev->recovery_cp < MaxSector)
5424                                         seq_printf(seq, "\tresync=PENDING\n      ");
5425                         }
5426                 } else
5427                         seq_printf(seq, "\n       ");
5428
5429                 if ((bitmap = mddev->bitmap)) {
5430                         unsigned long chunk_kb;
5431                         unsigned long flags;
5432                         spin_lock_irqsave(&bitmap->lock, flags);
5433                         chunk_kb = bitmap->chunksize >> 10;
5434                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5435                                 "%lu%s chunk",
5436                                 bitmap->pages - bitmap->missing_pages,
5437                                 bitmap->pages,
5438                                 (bitmap->pages - bitmap->missing_pages)
5439                                         << (PAGE_SHIFT - 10),
5440                                 chunk_kb ? chunk_kb : bitmap->chunksize,
5441                                 chunk_kb ? "KB" : "B");
5442                         if (bitmap->file) {
5443                                 seq_printf(seq, ", file: ");
5444                                 seq_path(seq, &bitmap->file->f_path, " \t\n");
5445                         }
5446
5447                         seq_printf(seq, "\n");
5448                         spin_unlock_irqrestore(&bitmap->lock, flags);
5449                 }
5450
5451                 seq_printf(seq, "\n");
5452         }
5453         mddev_unlock(mddev);
5454         
5455         return 0;
5456 }
5457
5458 static struct seq_operations md_seq_ops = {
5459         .start  = md_seq_start,
5460         .next   = md_seq_next,
5461         .stop   = md_seq_stop,
5462         .show   = md_seq_show,
5463 };
5464
5465 static int md_seq_open(struct inode *inode, struct file *file)
5466 {
5467         int error;
5468         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
5469         if (mi == NULL)
5470                 return -ENOMEM;
5471
5472         error = seq_open(file, &md_seq_ops);
5473         if (error)
5474                 kfree(mi);
5475         else {
5476                 struct seq_file *p = file->private_data;
5477                 p->private = mi;
5478                 mi->event = atomic_read(&md_event_count);
5479         }
5480         return error;
5481 }
5482
5483 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
5484 {
5485         struct seq_file *m = filp->private_data;
5486         struct mdstat_info *mi = m->private;
5487         int mask;
5488
5489         poll_wait(filp, &md_event_waiters, wait);
5490
5491         /* always allow read */
5492         mask = POLLIN | POLLRDNORM;
5493
5494         if (mi->event != atomic_read(&md_event_count))
5495                 mask |= POLLERR | POLLPRI;
5496         return mask;
5497 }
5498
5499 static const struct file_operations md_seq_fops = {
5500         .owner          = THIS_MODULE,
5501         .open           = md_seq_open,
5502         .read           = seq_read,
5503         .llseek         = seq_lseek,
5504         .release        = seq_release_private,
5505         .poll           = mdstat_poll,
5506 };
5507
5508 int register_md_personality(struct mdk_personality *p)
5509 {
5510         spin_lock(&pers_lock);
5511         list_add_tail(&p->list, &pers_list);
5512         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
5513         spin_unlock(&pers_lock);
5514         return 0;
5515 }
5516
5517 int unregister_md_personality(struct mdk_personality *p)
5518 {
5519         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
5520         spin_lock(&pers_lock);
5521         list_del_init(&p->list);
5522         spin_unlock(&pers_lock);
5523         return 0;
5524 }
5525
5526 static int is_mddev_idle(mddev_t *mddev)
5527 {
5528         mdk_rdev_t * rdev;
5529         int idle;
5530         long curr_events;
5531
5532         idle = 1;
5533         rcu_read_lock();
5534         rdev_for_each_rcu(rdev, mddev) {
5535                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5536                 curr_events = part_stat_read(&disk->part0, sectors[0]) +
5537                                 part_stat_read(&disk->part0, sectors[1]) -
5538                                 atomic_read(&disk->sync_io);
5539                 /* sync IO will cause sync_io to increase before the disk_stats
5540                  * as sync_io is counted when a request starts, and
5541                  * disk_stats is counted when it completes.
5542                  * So resync activity will cause curr_events to be smaller than
5543                  * when there was no such activity.
5544                  * non-sync IO will cause disk_stat to increase without
5545                  * increasing sync_io so curr_events will (eventually)
5546                  * be larger than it was before.  Once it becomes
5547                  * substantially larger, the test below will cause
5548                  * the array to appear non-idle, and resync will slow
5549                  * down.
5550                  * If there is a lot of outstanding resync activity when
5551                  * we set last_event to curr_events, then all that activity
5552                  * completing might cause the array to appear non-idle
5553                  * and resync will be slowed down even though there might
5554                  * not have been non-resync activity.  This will only
5555                  * happen once though.  'last_events' will soon reflect
5556                  * the state where there is little or no outstanding
5557                  * resync requests, and further resync activity will
5558                  * always make curr_events less than last_events.
5559                  *
5560                  */
5561                 if (curr_events - rdev->last_events > 4096) {
5562                         rdev->last_events = curr_events;
5563                         idle = 0;
5564                 }
5565         }
5566         rcu_read_unlock();
5567         return idle;
5568 }
5569
5570 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5571 {
5572         /* another "blocks" (512byte) blocks have been synced */
5573         atomic_sub(blocks, &mddev->recovery_active);
5574         wake_up(&mddev->recovery_wait);
5575         if (!ok) {
5576                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5577                 md_wakeup_thread(mddev->thread);
5578                 // stop recovery, signal do_sync ....
5579         }
5580 }
5581
5582
5583 /* md_write_start(mddev, bi)
5584  * If we need to update some array metadata (e.g. 'active' flag
5585  * in superblock) before writing, schedule a superblock update
5586  * and wait for it to complete.
5587  */
5588 void md_write_start(mddev_t *mddev, struct bio *bi)
5589 {
5590         int did_change = 0;
5591         if (bio_data_dir(bi) != WRITE)
5592                 return;
5593
5594         BUG_ON(mddev->ro == 1);
5595         if (mddev->ro == 2) {
5596                 /* need to switch to read/write */
5597                 mddev->ro = 0;
5598                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5599                 md_wakeup_thread(mddev->thread);
5600                 md_wakeup_thread(mddev->sync_thread);
5601                 did_change = 1;
5602         }
5603         atomic_inc(&mddev->writes_pending);
5604         if (mddev->safemode == 1)
5605                 mddev->safemode = 0;
5606         if (mddev->in_sync) {
5607                 spin_lock_irq(&mddev->write_lock);
5608                 if (mddev->in_sync) {
5609                         mddev->in_sync = 0;
5610                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5611                         md_wakeup_thread(mddev->thread);
5612                         did_change = 1;
5613                 }
5614                 spin_unlock_irq(&mddev->write_lock);
5615         }
5616         if (did_change)
5617                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5618         wait_event(mddev->sb_wait,
5619                    !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
5620                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5621 }
5622
5623 void md_write_end(mddev_t *mddev)
5624 {
5625         if (atomic_dec_and_test(&mddev->writes_pending)) {
5626                 if (mddev->safemode == 2)
5627                         md_wakeup_thread(mddev->thread);
5628                 else if (mddev->safemode_delay)
5629                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5630         }
5631 }
5632
5633 /* md_allow_write(mddev)
5634  * Calling this ensures that the array is marked 'active' so that writes
5635  * may proceed without blocking.  It is important to call this before
5636  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5637  * Must be called with mddev_lock held.
5638  *
5639  * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
5640  * is dropped, so return -EAGAIN after notifying userspace.
5641  */
5642 int md_allow_write(mddev_t *mddev)
5643 {
5644         if (!mddev->pers)
5645                 return 0;
5646         if (mddev->ro)
5647                 return 0;
5648         if (!mddev->pers->sync_request)
5649                 return 0;
5650
5651         spin_lock_irq(&mddev->write_lock);
5652         if (mddev->in_sync) {
5653                 mddev->in_sync = 0;
5654                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5655                 if (mddev->safemode_delay &&
5656                     mddev->safemode == 0)
5657                         mddev->safemode = 1;
5658                 spin_unlock_irq(&mddev->write_lock);
5659                 md_update_sb(mddev, 0);
5660                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5661         } else
5662                 spin_unlock_irq(&mddev->write_lock);
5663
5664         if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
5665                 return -EAGAIN;
5666         else
5667                 return 0;
5668 }
5669 EXPORT_SYMBOL_GPL(md_allow_write);
5670
5671 #define SYNC_MARKS      10
5672 #define SYNC_MARK_STEP  (3*HZ)
5673 void md_do_sync(mddev_t *mddev)
5674 {
5675         mddev_t *mddev2;
5676         unsigned int currspeed = 0,
5677                  window;
5678         sector_t max_sectors,j, io_sectors;
5679         unsigned long mark[SYNC_MARKS];
5680         sector_t mark_cnt[SYNC_MARKS];
5681         int last_mark,m;
5682         struct list_head *tmp;
5683         sector_t last_check;
5684         int skipped = 0;
5685         struct list_head *rtmp;
5686         mdk_rdev_t *rdev;
5687         char *desc;
5688
5689         /* just incase thread restarts... */
5690         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5691                 return;
5692         if (mddev->ro) /* never try to sync a read-only array */
5693                 return;
5694
5695         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5696                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5697                         desc = "data-check";
5698                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5699                         desc = "requested-resync";
5700                 else
5701                         desc = "resync";
5702         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5703                 desc = "reshape";
5704         else
5705                 desc = "recovery";
5706
5707         /* we overload curr_resync somewhat here.
5708          * 0 == not engaged in resync at all
5709          * 2 == checking that there is no conflict with another sync
5710          * 1 == like 2, but have yielded to allow conflicting resync to
5711          *              commense
5712          * other == active in resync - this many blocks
5713          *
5714          * Before starting a resync we must have set curr_resync to
5715          * 2, and then checked that every "conflicting" array has curr_resync
5716          * less than ours.  When we find one that is the same or higher
5717          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5718          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5719          * This will mean we have to start checking from the beginning again.
5720          *
5721          */
5722
5723         do {
5724                 mddev->curr_resync = 2;
5725
5726         try_again:
5727                 if (kthread_should_stop()) {
5728                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5729                         goto skip;
5730                 }
5731                 for_each_mddev(mddev2, tmp) {
5732                         if (mddev2 == mddev)
5733                                 continue;
5734                         if (!mddev->parallel_resync
5735                         &&  mddev2->curr_resync
5736                         &&  match_mddev_units(mddev, mddev2)) {
5737                                 DEFINE_WAIT(wq);
5738                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5739                                         /* arbitrarily yield */
5740                                         mddev->curr_resync = 1;
5741                                         wake_up(&resync_wait);
5742                                 }
5743                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5744                                         /* no need to wait here, we can wait the next
5745                                          * time 'round when curr_resync == 2
5746                                          */
5747                                         continue;
5748                                 /* We need to wait 'interruptible' so as not to
5749                                  * contribute to the load average, and not to
5750                                  * be caught by 'softlockup'
5751                                  */
5752                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
5753                                 if (!kthread_should_stop() &&
5754                                     mddev2->curr_resync >= mddev->curr_resync) {
5755                                         printk(KERN_INFO "md: delaying %s of %s"
5756                                                " until %s has finished (they"
5757                                                " share one or more physical units)\n",
5758                                                desc, mdname(mddev), mdname(mddev2));
5759                                         mddev_put(mddev2);
5760                                         if (signal_pending(current))
5761                                                 flush_signals(current);
5762                                         schedule();
5763                                         finish_wait(&resync_wait, &wq);
5764                                         goto try_again;
5765                                 }
5766                                 finish_wait(&resync_wait, &wq);
5767                         }
5768                 }
5769         } while (mddev->curr_resync < 2);
5770
5771         j = 0;
5772         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5773                 /* resync follows the size requested by the personality,
5774                  * which defaults to physical size, but can be virtual size
5775                  */
5776                 max_sectors = mddev->resync_max_sectors;
5777                 mddev->resync_mismatches = 0;
5778                 /* we don't use the checkpoint if there's a bitmap */
5779                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5780                         j = mddev->resync_min;
5781                 else if (!mddev->bitmap)
5782                         j = mddev->recovery_cp;
5783
5784         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5785                 max_sectors = mddev->size << 1;
5786         else {
5787                 /* recovery follows the physical size of devices */
5788                 max_sectors = mddev->size << 1;
5789                 j = MaxSector;
5790                 rdev_for_each(rdev, rtmp, mddev)
5791                         if (rdev->raid_disk >= 0 &&
5792                             !test_bit(Faulty, &rdev->flags) &&
5793                             !test_bit(In_sync, &rdev->flags) &&
5794                             rdev->recovery_offset < j)
5795                                 j = rdev->recovery_offset;
5796         }
5797
5798         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5799         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5800                 " %d KB/sec/disk.\n", speed_min(mddev));
5801         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5802                "(but not more than %d KB/sec) for %s.\n",
5803                speed_max(mddev), desc);
5804
5805         is_mddev_idle(mddev); /* this also initializes IO event counters */
5806
5807         io_sectors = 0;
5808         for (m = 0; m < SYNC_MARKS; m++) {
5809                 mark[m] = jiffies;
5810                 mark_cnt[m] = io_sectors;
5811         }
5812         last_mark = 0;
5813         mddev->resync_mark = mark[last_mark];
5814         mddev->resync_mark_cnt = mark_cnt[last_mark];
5815
5816         /*
5817          * Tune reconstruction:
5818          */
5819         window = 32*(PAGE_SIZE/512);
5820         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5821                 window/2,(unsigned long long) max_sectors/2);
5822
5823         atomic_set(&mddev->recovery_active, 0);
5824         last_check = 0;
5825
5826         if (j>2) {
5827                 printk(KERN_INFO 
5828                        "md: resuming %s of %s from checkpoint.\n",
5829                        desc, mdname(mddev));
5830                 mddev->curr_resync = j;
5831         }
5832
5833         while (j < max_sectors) {
5834                 sector_t sectors;
5835
5836                 skipped = 0;
5837                 if (j >= mddev->resync_max) {
5838                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5839                         wait_event(mddev->recovery_wait,
5840                                    mddev->resync_max > j
5841                                    || kthread_should_stop());
5842                 }
5843                 if (kthread_should_stop())
5844                         goto interrupted;
5845                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5846                                                   currspeed < speed_min(mddev));
5847                 if (sectors == 0) {
5848                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5849                         goto out;
5850                 }
5851
5852                 if (!skipped) { /* actual IO requested */
5853                         io_sectors += sectors;
5854                         atomic_add(sectors, &mddev->recovery_active);
5855                 }
5856
5857                 j += sectors;
5858                 if (j>1) mddev->curr_resync = j;
5859                 mddev->curr_mark_cnt = io_sectors;
5860                 if (last_check == 0)
5861                         /* this is the earliers that rebuilt will be
5862                          * visible in /proc/mdstat
5863                          */
5864                         md_new_event(mddev);
5865
5866                 if (last_check + window > io_sectors || j == max_sectors)
5867                         continue;
5868
5869                 last_check = io_sectors;
5870
5871                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5872                         break;
5873
5874         repeat:
5875                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5876                         /* step marks */
5877                         int next = (last_mark+1) % SYNC_MARKS;
5878
5879                         mddev->resync_mark = mark[next];
5880                         mddev->resync_mark_cnt = mark_cnt[next];
5881                         mark[next] = jiffies;
5882                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5883                         last_mark = next;
5884                 }
5885
5886
5887                 if (kthread_should_stop())
5888                         goto interrupted;
5889
5890
5891                 /*
5892                  * this loop exits only if either when we are slower than
5893                  * the 'hard' speed limit, or the system was IO-idle for
5894                  * a jiffy.
5895                  * the system might be non-idle CPU-wise, but we only care
5896                  * about not overloading the IO subsystem. (things like an
5897                  * e2fsck being done on the RAID array should execute fast)
5898                  */
5899                 blk_unplug(mddev->queue);
5900                 cond_resched();
5901
5902                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5903                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
5904
5905                 if (currspeed > speed_min(mddev)) {
5906                         if ((currspeed > speed_max(mddev)) ||
5907                                         !is_mddev_idle(mddev)) {
5908                                 msleep(500);
5909                                 goto repeat;
5910                         }
5911                 }
5912         }
5913         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5914         /*
5915          * this also signals 'finished resyncing' to md_stop
5916          */
5917  out:
5918         blk_unplug(mddev->queue);
5919
5920         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5921
5922         /* tell personality that we are finished */
5923         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5924
5925         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5926             mddev->curr_resync > 2) {
5927                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5928                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5929                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5930                                         printk(KERN_INFO
5931                                                "md: checkpointing %s of %s.\n",
5932                                                desc, mdname(mddev));
5933                                         mddev->recovery_cp = mddev->curr_resync;
5934                                 }
5935                         } else
5936                                 mddev->recovery_cp = MaxSector;
5937                 } else {
5938                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5939                                 mddev->curr_resync = MaxSector;
5940                         rdev_for_each(rdev, rtmp, mddev)
5941                                 if (rdev->raid_disk >= 0 &&
5942                                     !test_bit(Faulty, &rdev->flags) &&
5943                                     !test_bit(In_sync, &rdev->flags) &&
5944                                     rdev->recovery_offset < mddev->curr_resync)
5945                                         rdev->recovery_offset = mddev->curr_resync;
5946                 }
5947         }
5948         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5949
5950  skip:
5951         mddev->curr_resync = 0;
5952         mddev->resync_min = 0;
5953         mddev->resync_max = MaxSector;
5954         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5955         wake_up(&resync_wait);
5956         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5957         md_wakeup_thread(mddev->thread);
5958         return;
5959
5960  interrupted:
5961         /*
5962          * got a signal, exit.
5963          */
5964         printk(KERN_INFO
5965                "md: md_do_sync() got signal ... exiting\n");
5966         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5967         goto out;
5968
5969 }
5970 EXPORT_SYMBOL_GPL(md_do_sync);
5971
5972
5973 static int remove_and_add_spares(mddev_t *mddev)
5974 {
5975         mdk_rdev_t *rdev;
5976         struct list_head *rtmp;
5977         int spares = 0;
5978
5979         rdev_for_each(rdev, rtmp, mddev)
5980                 if (rdev->raid_disk >= 0 &&
5981                     !test_bit(Blocked, &rdev->flags) &&
5982                     (test_bit(Faulty, &rdev->flags) ||
5983                      ! test_bit(In_sync, &rdev->flags)) &&
5984                     atomic_read(&rdev->nr_pending)==0) {
5985                         if (mddev->pers->hot_remove_disk(
5986                                     mddev, rdev->raid_disk)==0) {
5987                                 char nm[20];
5988                                 sprintf(nm,"rd%d", rdev->raid_disk);
5989                                 sysfs_remove_link(&mddev->kobj, nm);
5990                                 rdev->raid_disk = -1;
5991                         }
5992                 }
5993
5994         if (mddev->degraded && ! mddev->ro) {
5995                 rdev_for_each(rdev, rtmp, mddev) {
5996                         if (rdev->raid_disk >= 0 &&
5997                             !test_bit(In_sync, &rdev->flags) &&
5998                             !test_bit(Blocked, &rdev->flags))
5999                                 spares++;
6000                         if (rdev->raid_disk < 0
6001                             && !test_bit(Faulty, &rdev->flags)) {
6002                                 rdev->recovery_offset = 0;
6003                                 if (mddev->pers->
6004                                     hot_add_disk(mddev, rdev) == 0) {
6005                                         char nm[20];
6006                                         sprintf(nm, "rd%d", rdev->raid_disk);
6007                                         if (sysfs_create_link(&mddev->kobj,
6008                                                               &rdev->kobj, nm))
6009                                                 printk(KERN_WARNING
6010                                                        "md: cannot register "
6011                                                        "%s for %s\n",
6012                                                        nm, mdname(mddev));
6013                                         spares++;
6014                                         md_new_event(mddev);
6015                                 } else
6016                                         break;
6017                         }
6018                 }
6019         }
6020         return spares;
6021 }
6022 /*
6023  * This routine is regularly called by all per-raid-array threads to
6024  * deal with generic issues like resync and super-block update.
6025  * Raid personalities that don't have a thread (linear/raid0) do not
6026  * need this as they never do any recovery or update the superblock.
6027  *
6028  * It does not do any resync itself, but rather "forks" off other threads
6029  * to do that as needed.
6030  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6031  * "->recovery" and create a thread at ->sync_thread.
6032  * When the thread finishes it sets MD_RECOVERY_DONE
6033  * and wakeups up this thread which will reap the thread and finish up.
6034  * This thread also removes any faulty devices (with nr_pending == 0).
6035  *
6036  * The overall approach is:
6037  *  1/ if the superblock needs updating, update it.
6038  *  2/ If a recovery thread is running, don't do anything else.
6039  *  3/ If recovery has finished, clean up, possibly marking spares active.
6040  *  4/ If there are any faulty devices, remove them.
6041  *  5/ If array is degraded, try to add spares devices
6042  *  6/ If array has spares or is not in-sync, start a resync thread.
6043  */
6044 void md_check_recovery(mddev_t *mddev)
6045 {
6046         mdk_rdev_t *rdev;
6047         struct list_head *rtmp;
6048
6049
6050         if (mddev->bitmap)
6051                 bitmap_daemon_work(mddev->bitmap);
6052
6053         if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
6054                 sysfs_notify(&mddev->kobj, NULL, "array_state");
6055
6056         if (mddev->ro)
6057                 return;
6058
6059         if (signal_pending(current)) {
6060                 if (mddev->pers->sync_request && !mddev->external) {
6061                         printk(KERN_INFO "md: %s in immediate safe mode\n",
6062                                mdname(mddev));
6063                         mddev->safemode = 2;
6064                 }
6065                 flush_signals(current);
6066         }
6067
6068         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6069                 return;
6070         if ( ! (
6071                 (mddev->flags && !mddev->external) ||
6072                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6073                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6074                 (mddev->external == 0 && mddev->safemode == 1) ||
6075                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6076                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6077                 ))
6078                 return;
6079
6080         if (mddev_trylock(mddev)) {
6081                 int spares = 0;
6082
6083                 if (mddev->ro) {
6084                         /* Only thing we do on a ro array is remove
6085                          * failed devices.
6086                          */
6087                         remove_and_add_spares(mddev);
6088                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6089                         goto unlock;
6090                 }
6091
6092                 if (!mddev->external) {
6093                         int did_change = 0;
6094                         spin_lock_irq(&mddev->write_lock);
6095                         if (mddev->safemode &&
6096                             !atomic_read(&mddev->writes_pending) &&
6097                             !mddev->in_sync &&
6098                             mddev->recovery_cp == MaxSector) {
6099                                 mddev->in_sync = 1;
6100                                 did_change = 1;
6101                                 if (mddev->persistent)
6102                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6103                         }
6104                         if (mddev->safemode == 1)
6105                                 mddev->safemode = 0;
6106                         spin_unlock_irq(&mddev->write_lock);
6107                         if (did_change)
6108                                 sysfs_notify(&mddev->kobj, NULL, "array_state");
6109                 }
6110
6111                 if (mddev->flags)
6112                         md_update_sb(mddev, 0);
6113
6114                 rdev_for_each(rdev, rtmp, mddev)
6115                         if (test_and_clear_bit(StateChanged, &rdev->flags))
6116                                 sysfs_notify(&rdev->kobj, NULL, "state");
6117
6118
6119                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6120                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6121                         /* resync/recovery still happening */
6122                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6123                         goto unlock;
6124                 }
6125                 if (mddev->sync_thread) {
6126                         /* resync has finished, collect result */
6127                         md_unregister_thread(mddev->sync_thread);
6128                         mddev->sync_thread = NULL;
6129                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6130                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
6131                                 /* success...*/
6132                                 /* activate any spares */
6133                                 if (mddev->pers->spare_active(mddev))
6134                                         sysfs_notify(&mddev->kobj, NULL,
6135                                                      "degraded");
6136                         }
6137                         md_update_sb(mddev, 1);
6138
6139                         /* if array is no-longer degraded, then any saved_raid_disk
6140                          * information must be scrapped
6141                          */
6142                         if (!mddev->degraded)
6143                                 rdev_for_each(rdev, rtmp, mddev)
6144                                         rdev->saved_raid_disk = -1;
6145
6146                         mddev->recovery = 0;
6147                         /* flag recovery needed just to double check */
6148                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6149                         sysfs_notify(&mddev->kobj, NULL, "sync_action");
6150                         md_new_event(mddev);
6151                         goto unlock;
6152                 }
6153                 /* Set RUNNING before clearing NEEDED to avoid
6154                  * any transients in the value of "sync_action".
6155                  */
6156                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6157                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6158                 /* Clear some bits that don't mean anything, but
6159                  * might be left set
6160                  */
6161                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6162                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6163
6164                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6165                         goto unlock;
6166                 /* no recovery is running.
6167                  * remove any failed drives, then
6168                  * add spares if possible.
6169                  * Spare are also removed and re-added, to allow
6170                  * the personality to fail the re-add.
6171                  */
6172
6173                 if (mddev->reshape_position != MaxSector) {
6174                         if (mddev->pers->check_reshape(mddev) != 0)
6175                                 /* Cannot proceed */
6176                                 goto unlock;
6177                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6178                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6179                 } else if ((spares = remove_and_add_spares(mddev))) {
6180                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6181                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6182                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
6183                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6184                 } else if (mddev->recovery_cp < MaxSector) {
6185                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6186                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6187                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6188                         /* nothing to be done ... */
6189                         goto unlock;
6190
6191                 if (mddev->pers->sync_request) {
6192                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6193                                 /* We are adding a device or devices to an array
6194                                  * which has the bitmap stored on all devices.
6195                                  * So make sure all bitmap pages get written
6196                                  */
6197                                 bitmap_write_all(mddev->bitmap);
6198                         }
6199                         mddev->sync_thread = md_register_thread(md_do_sync,
6200                                                                 mddev,
6201                                                                 "%s_resync");
6202                         if (!mddev->sync_thread) {
6203                                 printk(KERN_ERR "%s: could not start resync"
6204                                         " thread...\n", 
6205                                         mdname(mddev));
6206                                 /* leave the spares where they are, it shouldn't hurt */
6207                                 mddev->recovery = 0;
6208                         } else
6209                                 md_wakeup_thread(mddev->sync_thread);
6210                         sysfs_notify(&mddev->kobj, NULL, "sync_action");
6211                         md_new_event(mddev);
6212                 }
6213         unlock:
6214                 if (!mddev->sync_thread) {
6215                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6216                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6217                                                &mddev->recovery))
6218                                 sysfs_notify(&mddev->kobj, NULL, "sync_action");
6219                 }
6220                 mddev_unlock(mddev);
6221         }
6222 }
6223
6224 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6225 {
6226         sysfs_notify(&rdev->kobj, NULL, "state");
6227         wait_event_timeout(rdev->blocked_wait,
6228                            !test_bit(Blocked, &rdev->flags),
6229                            msecs_to_jiffies(5000));
6230         rdev_dec_pending(rdev, mddev);
6231 }
6232 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
6233
6234 static int md_notify_reboot(struct notifier_block *this,
6235                             unsigned long code, void *x)
6236 {
6237         struct list_head *tmp;
6238         mddev_t *mddev;
6239
6240         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6241
6242                 printk(KERN_INFO "md: stopping all md devices.\n");
6243
6244                 for_each_mddev(mddev, tmp)
6245                         if (mddev_trylock(mddev)) {
6246                                 /* Force a switch to readonly even array
6247                                  * appears to still be in use.  Hence
6248                                  * the '100'.
6249                                  */
6250                                 do_md_stop(mddev, 1, 100);
6251                                 mddev_unlock(mddev);
6252                         }
6253                 /*
6254                  * certain more exotic SCSI devices are known to be
6255                  * volatile wrt too early system reboots. While the
6256                  * right place to handle this issue is the given
6257                  * driver, we do want to have a safe RAID driver ...
6258                  */
6259                 mdelay(1000*1);
6260         }
6261         return NOTIFY_DONE;
6262 }
6263
6264 static struct notifier_block md_notifier = {
6265         .notifier_call  = md_notify_reboot,
6266         .next           = NULL,
6267         .priority       = INT_MAX, /* before any real devices */
6268 };
6269
6270 static void md_geninit(void)
6271 {
6272         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6273
6274         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6275 }
6276
6277 static int __init md_init(void)
6278 {
6279         if (register_blkdev(MAJOR_NR, "md"))
6280                 return -1;
6281         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6282                 unregister_blkdev(MAJOR_NR, "md");
6283                 return -1;
6284         }
6285         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
6286                             md_probe, NULL, NULL);
6287         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
6288                             md_probe, NULL, NULL);
6289
6290         register_reboot_notifier(&md_notifier);
6291         raid_table_header = register_sysctl_table(raid_root_table);
6292
6293         md_geninit();
6294         return 0;
6295 }
6296
6297
6298 #ifndef MODULE
6299
6300 /*
6301  * Searches all registered partitions for autorun RAID arrays
6302  * at boot time.
6303  */
6304
6305 static LIST_HEAD(all_detected_devices);
6306 struct detected_devices_node {
6307         struct list_head list;
6308         dev_t dev;
6309 };
6310
6311 void md_autodetect_dev(dev_t dev)
6312 {
6313         struct detected_devices_node *node_detected_dev;
6314
6315         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
6316         if (node_detected_dev) {
6317                 node_detected_dev->dev = dev;
6318                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
6319         } else {
6320                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
6321                         ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
6322         }
6323 }
6324
6325
6326 static void autostart_arrays(int part)
6327 {
6328         mdk_rdev_t *rdev;
6329         struct detected_devices_node *node_detected_dev;
6330         dev_t dev;
6331         int i_scanned, i_passed;
6332
6333         i_scanned = 0;
6334         i_passed = 0;
6335
6336         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
6337
6338         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
6339                 i_scanned++;
6340                 node_detected_dev = list_entry(all_detected_devices.next,
6341                                         struct detected_devices_node, list);
6342                 list_del(&node_detected_dev->list);
6343                 dev = node_detected_dev->dev;
6344                 kfree(node_detected_dev);
6345                 rdev = md_import_device(dev,0, 90);
6346                 if (IS_ERR(rdev))
6347                         continue;
6348
6349                 if (test_bit(Faulty, &rdev->flags)) {
6350                         MD_BUG();
6351                         continue;
6352                 }
6353                 set_bit(AutoDetected, &rdev->flags);
6354                 list_add(&rdev->same_set, &pending_raid_disks);
6355                 i_passed++;
6356         }
6357
6358         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
6359                                                 i_scanned, i_passed);
6360
6361         autorun_devices(part);
6362 }
6363
6364 #endif /* !MODULE */
6365
6366 static __exit void md_exit(void)
6367 {
6368         mddev_t *mddev;
6369         struct list_head *tmp;
6370
6371         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
6372         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
6373
6374         unregister_blkdev(MAJOR_NR,"md");
6375         unregister_blkdev(mdp_major, "mdp");
6376         unregister_reboot_notifier(&md_notifier);
6377         unregister_sysctl_table(raid_table_header);
6378         remove_proc_entry("mdstat", NULL);
6379         for_each_mddev(mddev, tmp) {
6380                 struct gendisk *disk = mddev->gendisk;
6381                 if (!disk)
6382                         continue;
6383                 export_array(mddev);
6384                 del_gendisk(disk);
6385                 put_disk(disk);
6386                 mddev->gendisk = NULL;
6387                 mddev_put(mddev);
6388         }
6389 }
6390
6391 subsys_initcall(md_init);
6392 module_exit(md_exit)
6393
6394 static int get_ro(char *buffer, struct kernel_param *kp)
6395 {
6396         return sprintf(buffer, "%d", start_readonly);
6397 }
6398 static int set_ro(const char *val, struct kernel_param *kp)
6399 {
6400         char *e;
6401         int num = simple_strtoul(val, &e, 10);
6402         if (*val && (*e == '\0' || *e == '\n')) {
6403                 start_readonly = num;
6404                 return 0;
6405         }
6406         return -EINVAL;
6407 }
6408
6409 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
6410 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
6411
6412
6413 EXPORT_SYMBOL(register_md_personality);
6414 EXPORT_SYMBOL(unregister_md_personality);
6415 EXPORT_SYMBOL(md_error);
6416 EXPORT_SYMBOL(md_done_sync);
6417 EXPORT_SYMBOL(md_write_start);
6418 EXPORT_SYMBOL(md_write_end);
6419 EXPORT_SYMBOL(md_register_thread);
6420 EXPORT_SYMBOL(md_unregister_thread);
6421 EXPORT_SYMBOL(md_wakeup_thread);
6422 EXPORT_SYMBOL(md_check_recovery);
6423 MODULE_LICENSE("GPL");
6424 MODULE_ALIAS("md");
6425 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);