/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/freezer.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
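
/* Worked example (illustrative note, not from the original source): with
 * MdpMinorShift == 6 each mdp device owns a block of 64 minors, so minors
 * 0-63 map to md_d0 (the whole device plus up to 63 partitions), 64-127
 * map to md_d1, and in general md_minor = MINOR(unit) >> MdpMinorShift,
 * as mddev_find() below computes for non-MD_MAJOR units.
 */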

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}
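
/* Illustrative tuning sketch (assumes an array named md0 exists):
 *
 *   # per-array floor, overrides the sysctl default below
 *   echo 5000 > /sys/block/md0/md/sync_speed_min
 *   # system-wide default, used whenever sync_speed_min is 0
 *   echo 2000 > /proc/sys/dev/raid/speed_limit_min
 *
 * speed_min()/speed_max() above return the per-array value when it is
 * non-zero and fall back to the sysctl limits otherwise.
 */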

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
        {
                .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
        {
                .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
        sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)                                        \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
                mddev = NULL;});                                        \
             ({ if (tmp != &all_mddevs)                                 \
                        mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (mddev) mddev_put(mddev);                            \
                mddev = list_entry(tmp, mddev_t, all_mddevs);           \
                tmp != &all_mddevs;});                                  \
             ({ spin_lock(&all_mddevs_lock);                            \
                tmp = tmp->next;})                                      \
                )
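
/* Illustrative use of the iterator (sketch): callers declare a cursor and
 * a scratch list pointer, e.g.
 *
 *   mddev_t *mddev;
 *   struct list_head *tmp;
 *
 *   ITERATE_MDDEV(mddev, tmp)
 *           consider(mddev);            // hypothetical loop body
 *
 * Each body runs with a reference held on mddev but without
 * all_mddevs_lock, so it may sleep; breaking out early leaves the caller
 * holding that reference, which it must drop with mddev_put().
 */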


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
        bio_io_error(bio, bio->bi_size);
        return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_put(mddev_t *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
                spin_unlock(&all_mddevs_lock);
                blk_cleanup_queue(mddev->queue);
                kobject_unregister(&mddev->kobj);
        } else
                spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
        mddev_t *mddev, *new = NULL;

 retry:
        spin_lock(&all_mddevs_lock);
        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                if (mddev->unit == unit) {
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
                        kfree(new);
                        return mddev;
                }

        if (new) {
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mutex_init(&new->reconfig_mutex);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);

        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }
        set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);

        blk_queue_make_request(new->queue, md_fail_request);

        goto retry;
}
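
/* Sketch of the lookup pattern above (illustrative): the allocation is done
 * with all_mddevs_lock dropped, then the search is retried from scratch, so
 * two racing callers of mddev_find() for the same unit converge on one
 * mddev and the loser's allocation is kfree()d. Callers own a reference on
 * the returned mddev and release it with mddev_put():
 *
 *   mddev_t *mddev = mddev_find(dev);
 *   if (mddev) {
 *           ...
 *           mddev_put(mddev);
 *   }
 */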

static inline int mddev_lock(mddev_t * mddev)
{
        return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
        return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
        mutex_unlock(&mddev->reconfig_mutex);

        md_wakeup_thread(mddev->thread);
}
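
/* Typical reconfiguration pattern (illustrative sketch): mddev_lock() is
 * interruptible and returns non-zero if a signal arrived, so callers check
 * the return value before touching the array:
 *
 *   if (mddev_lock(mddev) == 0) {
 *           ... reconfigure under reconfig_mutex ...
 *           mddev_unlock(mddev);   // also pokes the per-array thread
 *   }
 */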

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
        mdk_rdev_t * rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->desc_nr == nr)
                        return rdev;
        }
        return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
        }
        return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
        struct mdk_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
        sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
        sector_t size;

        size = rdev->sb_offset;

        if (chunk_size)
                size &= ~((sector_t)chunk_size/1024 - 1);
        return size;
}
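
/* Worked example for calc_dev_size() (illustrative): with a 64KB chunk_size
 * the mask is ~(sector_t)63, so an sb_offset of 9999KB is rounded down to
 * 9984KB; the usable size is always a whole number of chunks. Note that
 * both sb_offset and the result are in 1KB units here.
 */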

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -EINVAL;
        }

        return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_offset = 0;
                rdev->size = 0;
        }
}


static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
        mdk_rdev_t *rdev = bio->bi_private;
        mddev_t *mddev = rdev->mddev;
        if (bio->bi_size)
                return 1;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk("md: super_written gets error=%d, uptodate=%d\n",
                       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
                WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
                md_error(mddev, rdev);
        }

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        bio_put(bio);
        return 0;
}

static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
{
        struct bio *bio2 = bio->bi_private;
        mdk_rdev_t *rdev = bio2->bi_private;
        mddev_t *mddev = rdev->mddev;
        if (bio->bi_size)
                return 1;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
            error == -EOPNOTSUPP) {
                unsigned long flags;
                /* barriers don't appear to be supported :-( */
                set_bit(BarriersNotsupp, &rdev->flags);
                mddev->barriers_work = 0;
                spin_lock_irqsave(&mddev->write_lock, flags);
                bio2->bi_next = mddev->biolist;
                mddev->biolist = bio2;
                spin_unlock_irqrestore(&mddev->write_lock, flags);
                wake_up(&mddev->sb_wait);
                bio_put(bio);
                return 0;
        }
        bio_put(bio2);
        bio->bi_private = rdev;
        return super_written(bio, bytes_done, error);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
                   sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         *
         * As we might need to resubmit the request if BIO_RW_BARRIER
         * causes -EOPNOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
        bio->bi_rw = rw;

        atomic_inc(&mddev->pending_writes);
        if (!test_bit(BarriersNotsupp, &rdev->flags)) {
                struct bio *rbio;
                rw |= (1<<BIO_RW_BARRIER);
                rbio = bio_clone(bio, GFP_NOIO);
                rbio->bi_private = bio;
                rbio->bi_end_io = super_written_barrier;
                submit_bio(rw, rbio);
        } else
                submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
        /* wait for all superblock writes that were scheduled to complete.
         * if any had to be retried (due to BARRIER problems), retry them
         */
        DEFINE_WAIT(wq);
        for(;;) {
                prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&mddev->pending_writes)==0)
                        break;
                while (mddev->biolist) {
                        struct bio *bio;
                        spin_lock_irq(&mddev->write_lock);
                        bio = mddev->biolist;
                        mddev->biolist = bio->bi_next;
                        bio->bi_next = NULL;
                        spin_unlock_irq(&mddev->write_lock);
                        submit_bio(bio->bi_rw, bio);
                }
                schedule();
        }
        finish_wait(&mddev->sb_wait, &wq);
}
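
/* Expected pairing (illustrative sketch): superblock updates queue one
 * write per device and then block until all of them, including any
 * barrier retries, have completed:
 *
 *   md_super_write(mddev, rdev, rdev->sb_offset << 1,
 *                  rdev->sb_size, rdev->sb_page);  // offset KB -> sectors
 *   md_super_wait(mddev);
 */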

static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;

        complete((struct completion*)bio->bi_private);
        return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        struct completion event;
        int ret;

        rw |= (1 << BIO_RW_SYNC);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        init_completion(&event);
        bio->bi_private = &event;
        bio->bi_end_io = bi_complete;
        submit_bio(rw, bio);
        wait_for_completion(&event);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
                MD_BUG();
                return -EINVAL;
        }
        if (rdev->sb_loaded)
                return 0;


        if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
                (sb1->set_uuid1 == sb2->set_uuid1) &&
                (sb1->set_uuid2 == sb2->set_uuid2) &&
                (sb1->set_uuid3 == sb2->set_uuid3))

                return 1;

        return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
                ret = 0;
        else
                ret = 1;

abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}


static u32 md_csum_fold(u32 csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}
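
/* Worked example for md_csum_fold() (illustrative): folding 0x0001ffff
 * gives 0xffff + 0x0001 = 0x10000 after the first pass, whose high half
 * is still non-zero, and 0x0000 + 0x0001 = 0x0001 after the second; two
 * passes guarantee the result fits in 16 bits.
 */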

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
        u64 newcsum = 0;
        u32 *sb32 = (u32*)sb;
        int i;
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;

        for (i = 0; i < MD_SB_BYTES/4 ; i++)
                newcsum += sb32[i];
        csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
        /* This used to use csum_partial, which was wrong for several
         * reasons including that different results are returned on
         * different architectures.  It isn't critical that we get exactly
         * the same return value as before (we always csum_fold before
         * testing, and that removes any differences).  However as we
         * know that csum_partial always returned a 16bit value on
         * alphas, do a fold to maximise conformity to previous behaviour.
         */
        sb->sb_csum = md_csum_fold(disk_csum);
#else
        sb->sb_csum = disk_csum;
#endif
        return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
        char            *name;
        struct module   *owner;
        int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
        int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        sector_t sb_offset;

        /*
         * Calculate the position of the superblock,
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        sb_offset = calc_dev_sboffset(rdev->bdev);
        rdev->sb_offset = sb_offset;

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = (mdp_super_t*)page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version < 90 ||
            sb->minor_version > 91) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;

        if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
                if (sb->level != 1 && sb->level != 4
                    && sb->level != 5 && sb->level != 6
                    && sb->level != 10) {
                        /* FIXME use a better test */
                        printk(KERN_WARNING
                               "md: bitmaps not supported for this level.\n");
                        goto abort;
                }
        }

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (refdev == 0)
                ret = 1;
        else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->size = calc_dev_size(rdev, sb->chunk_size);

        if (rdev->size < sb->size && sb->level > 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
        __u64 ev1 = md_event(sb);

        rdev->raid_disk = -1;
        rdev->flags = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->persistent = ! sb->not_persistent;
                mddev->chunk_size = sb->chunk_size;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->size = sb->size;
                mddev->events = ev1;
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
                        mddev->delta_disks = sb->delta_disks;
                        mddev->new_level = sb->new_level;
                        mddev->new_layout = sb->new_layout;
                        mddev->new_chunk = sb->new_chunk;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk = mddev->chunk_size;
                }

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_file == NULL)
                        mddev->bitmap_offset = mddev->default_bitmap_offset;

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }

        if (mddev->level != LEVEL_MULTIPATH) {
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        set_bit(Faulty, &rdev->flags);
                else if (desc->state & (1<<MD_DISK_SYNC) /* &&
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_super_t *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;


        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;

        rdev->sb_size = MD_SB_BYTES;

        sb = (mdp_super_t*)page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = mddev->ctime;
        sb->level = mddev->level;
        sb->size  = mddev->size;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
        sb->not_persistent = !mddev->persistent;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->reshape_position == MaxSector)
                sb->minor_version = 90;
        else {
                sb->minor_version = 91;
                sb->reshape_position = mddev->reshape_position;
                sb->new_level = mddev->new_level;
                sb->delta_disks = mddev->delta_disks;
                sb->new_layout = mddev->new_layout;
                sb->new_chunk = mddev->new_chunk;
        }
        mddev->minor_version = sb->minor_version;
        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_size;

        if (mddev->bitmap && mddev->bitmap_file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        ITERATE_RDEV(mddev,rdev2,tmp) {
                mdp_disk_t *d;
                int desc_nr;
                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
                    && !test_bit(Faulty, &rdev2->flags))
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
                rdev2->desc_nr = desc_nr;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
                    && !test_bit(Faulty, &rdev2->flags))
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
                else if (test_bit(In_sync, &rdev2->flags)) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }
        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                        d->state |= (1<<MD_DISK_FAULTY);
                        failed++;
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
        __le32 disk_csum;
        u32 csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
        __le32 *isuper = (__le32*)sb;
        int i;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        newcsum = 0;
        for (i=0; size>=4; size -= 4 )
                newcsum += le32_to_cpu(*isuper++);

        if (size == 2)
                newcsum += le16_to_cpu(*(__le16*) isuper);

        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
        return cpu_to_le32(csum);
}
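
/* Worked example for calc_sb_1_csum() (illustrative): with max_dev == 3 the
 * checksummed region is 256 + 3*2 = 262 bytes; the loop consumes 260 bytes
 * in 32-bit words and the le16 branch picks up the final 2 bytes, so odd
 * role counts are still summed completely.
 */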

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_offset;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;

        /*
         * Calculate the position of the superblock.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
        switch(minor_version) {
        case 0:
                sb_offset = rdev->bdev->bd_inode->i_size >> 9;
                sb_offset -= 8*2;
                sb_offset &= ~(sector_t)(4*2-1);
                /* convert from sectors to K */
                sb_offset /= 2;
                break;
        case 1:
                sb_offset = 0;
                break;
        case 2:
                sb_offset = 4;
                break;
        default:
                return -EINVAL;
        }
        rdev->sb_offset = sb_offset;

        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
         */
        ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;


        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                printk("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
                printk("md: data_size too small on %s\n",
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
                if (sb->level != cpu_to_le32(1) &&
                    sb->level != cpu_to_le32(4) &&
                    sb->level != cpu_to_le32(5) &&
                    sb->level != cpu_to_le32(6) &&
                    sb->level != cpu_to_le32(10)) {
                        printk(KERN_WARNING
                               "md: bitmaps not supported for this level.\n");
                        return -EINVAL;
                }
        }

        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
        atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
                rdev->sb_size = (rdev->sb_size | bmask)+1;

        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = le32_to_cpu(sb->dev_number);

        if (refdev == 0)
                ret = 1;
        else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb =
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        printk(KERN_WARNING "md: %s has strangely different"
                                " superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
                rdev->size = rdev->sb_offset;
        if (rdev->size < le64_to_cpu(sb->data_size)/2)
                return -EINVAL;
        rdev->size = le64_to_cpu(sb->data_size)/2;
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

        if (le64_to_cpu(sb->size) > rdev->size*2)
                return -EINVAL;
        return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
        __u64 ev1 = le64_to_cpu(sb->events);

        rdev->raid_disk = -1;
        rdev->flags = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
                mddev->persistent = 1;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
                mddev->clevel[0] = 0;
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->size = le64_to_cpu(sb->size)/2;
                mddev->events = ev1;
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = 1024 >> 9;

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks =  (4096-256)/2;

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                    mddev->bitmap_file == NULL )
                        mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        mddev->delta_disks = le32_to_cpu(sb->delta_disks);
                        mddev->new_level = le32_to_cpu(sb->new_level);
                        mddev->new_layout = le32_to_cpu(sb->new_layout);
                        mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk = mddev->chunk_size;
                }

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* If adding to array with a bitmap, then we can accept an
                 * older device, but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }
        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
                        break;
                case 0xfffe: /* faulty */
                        set_bit(Faulty, &rdev->flags);
                        break;
                default:
                        if ((le32_to_cpu(sb->feature_map) &
                             MD_FEATURE_RECOVERY_OFFSET))
                                rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
                        else
                                set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = role;
                        break;
                }
                if (sb->devflags & WriteMostly1)
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);

        return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int max_dev, i;
        /* make rdev->sb match mddev and rdev data. */

        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        sb->feature_map = 0;
        sb->pad0 = 0;
        sb->recovery_offset = cpu_to_le64(0);
        memset(sb->pad1, 0, sizeof(sb->pad1));
        memset(sb->pad2, 0, sizeof(sb->pad2));
        memset(sb->pad3, 0, sizeof(sb->pad3));

        sb->utime = cpu_to_le64((__u64)mddev->utime);
        sb->events = cpu_to_le64(mddev->events);
        if (mddev->in_sync)
                sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
        else
                sb->resync_offset = cpu_to_le64(0);

        sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

        sb->raid_disks = cpu_to_le32(mddev->raid_disks);
        sb->size = cpu_to_le64(mddev->size<<1);

        if (mddev->bitmap && mddev->bitmap_file == NULL) {
                sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
        }

        if (rdev->raid_disk >= 0 &&
            !test_bit(In_sync, &rdev->flags) &&
            rdev->recovery_offset > 0) {
                sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
                sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
        }

        if (mddev->reshape_position != MaxSector) {
                sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
                sb->reshape_position = cpu_to_le64(mddev->reshape_position);
                sb->new_layout = cpu_to_le32(mddev->new_layout);
                sb->delta_disks = cpu_to_le32(mddev->delta_disks);
                sb->new_level = cpu_to_le32(mddev->new_level);
                sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
        }

        max_dev = 0;
        ITERATE_RDEV(mddev,rdev2,tmp)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;

        sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);

        ITERATE_RDEV(mddev,rdev2,tmp) {
                i = rdev2->desc_nr;
                if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (test_bit(In_sync, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
        }

        sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
                .load_super     = super_90_load,
                .validate_super = super_90_validate,
                .sync_super     = super_90_sync,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
                .load_super     = super_1_load,
                .validate_super = super_1_validate,
                .sync_super     = super_1_sync,
        },
};
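
/* Dispatch sketch (illustrative, assumes the usual caller pattern): the
 * table is indexed by the on-disk major version, so a superblock update
 * looks roughly like
 *
 *   super_types[mddev->major_version].sync_super(mddev, rdev);
 *   md_super_write(mddev, rdev, rdev->sb_offset << 1,
 *                  rdev->sb_size, rdev->sb_page);
 *
 * keeping the 0.90 and 1.x formats behind one interface.
 */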

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev, *rdev2;

        ITERATE_RDEV(mddev1,rdev,tmp)
                ITERATE_RDEV(mddev2, rdev2, tmp2)
                        if (rdev->bdev->bd_contains ==
                            rdev2->bdev->bd_contains)
                                return 1;

        return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
        char b[BDEVNAME_SIZE];
        struct kobject *ko;
        char *s;
        int err;

        if (rdev->mddev) {
                MD_BUG();
                return -EINVAL;
        }
        /* make sure rdev->size exceeds mddev->size */
        if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
                if (mddev->pers)
                        /* Cannot change size, so fail */
                        return -ENOSPC;
                else
                        mddev->size = rdev->size;
        }

        /* Verify rdev->desc_nr is unique.
         * If it is -1, assign a free number, else
         * check number is not in use
         */
        if (rdev->desc_nr < 0) {
                int choice = 0;
                if (mddev->pers) choice = mddev->raid_disks;
                while (find_rdev_nr(mddev, choice))
                        choice++;
                rdev->desc_nr = choice;
        } else {
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }
        bdevname(rdev->bdev,b);
        if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
                return -ENOMEM;
        while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
                *s = '!';

        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", b);

        rdev->kobj.parent = &mddev->kobj;
        if ((err = kobject_add(&rdev->kobj)))
                goto fail;

        if (rdev->bdev->bd_part)
                ko = &rdev->bdev->bd_part->kobj;
        else
                ko = &rdev->bdev->bd_disk->kobj;
        if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
                kobject_del(&rdev->kobj);
                goto fail;
        }
        list_add(&rdev->same_set, &mddev->disks);
        bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
        return 0;

 fail:
        printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
               b, mdname(mddev));
        return err;
}

static void delayed_delete(struct work_struct *ws)
{
        mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
        kobject_del(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->mddev) {
                MD_BUG();
                return;
        }
        bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
        list_del_init(&rdev->same_set);
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
        sysfs_remove_link(&rdev->kobj, "block");

        /* We need to delay this, otherwise we can deadlock when
         * writing 'remove' to "dev/state"
         */
        INIT_WORK(&rdev->del_work, delayed_delete);
        schedule_work(&rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
        int err = 0;
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
        if (IS_ERR(bdev)) {
                printk(KERN_ERR "md: could not open %s.\n",
                        __bdevname(dev, b));
                return PTR_ERR(bdev);
        }
        err = bd_claim(bdev, rdev);
        if (err) {
                printk(KERN_ERR "md: could not bd_claim %s.\n",
                        bdevname(bdev, b));
                blkdev_put(bdev);
                return err;
        }
        rdev->bdev = bdev;
        return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
        struct block_device *bdev = rdev->bdev;
        rdev->bdev = NULL;
        if (!bdev)
                MD_BUG();
        bd_release(bdev);
        blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: export_rdev(%s)\n",
                bdevname(rdev->bdev,b));
        if (rdev->mddev)
                MD_BUG();
        free_disk_sb(rdev);
        list_del_init(&rdev->same_set);
#ifndef MODULE
        md_autodetect_dev(rdev->bdev->bd_dev);
#endif
        unlock_rdev(rdev);
        kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
        unbind_rdev_from_array(rdev);
        export_rdev(rdev);
}
1506
1507 static void export_array(mddev_t *mddev)
1508 {
1509         struct list_head *tmp;
1510         mdk_rdev_t *rdev;
1511
1512         ITERATE_RDEV(mddev,rdev,tmp) {
1513                 if (!rdev->mddev) {
1514                         MD_BUG();
1515                         continue;
1516                 }
1517                 kick_rdev_from_array(rdev);
1518         }
1519         if (!list_empty(&mddev->disks))
1520                 MD_BUG();
1521         mddev->raid_disks = 0;
1522         mddev->major_version = 0;
1523 }
1524
1525 static void print_desc(mdp_disk_t *desc)
1526 {
1527         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1528                 desc->major,desc->minor,desc->raid_disk,desc->state);
1529 }
1530
1531 static void print_sb(mdp_super_t *sb)
1532 {
1533         int i;
1534
1535         printk(KERN_INFO 
1536                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1537                 sb->major_version, sb->minor_version, sb->patch_version,
1538                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1539                 sb->ctime);
1540         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1541                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1542                 sb->md_minor, sb->layout, sb->chunk_size);
1543         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1544                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1545                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1546                 sb->failed_disks, sb->spare_disks,
1547                 sb->sb_csum, (unsigned long)sb->events_lo);
1548
1549         printk(KERN_INFO);
1550         for (i = 0; i < MD_SB_DISKS; i++) {
1551                 mdp_disk_t *desc;
1552
1553                 desc = sb->disks + i;
1554                 if (desc->number || desc->major || desc->minor ||
1555                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1556                         printk("     D %2d: ", i);
1557                         print_desc(desc);
1558                 }
1559         }
1560         printk(KERN_INFO "md:     THIS: ");
1561         print_desc(&sb->this_disk);
1562
1563 }
1564
1565 static void print_rdev(mdk_rdev_t *rdev)
1566 {
1567         char b[BDEVNAME_SIZE];
1568         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1569                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1570                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1571                 rdev->desc_nr);
1572         if (rdev->sb_loaded) {
1573                 printk(KERN_INFO "md: rdev superblock:\n");
1574                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1575         } else
1576                 printk(KERN_INFO "md: no rdev superblock!\n");
1577 }
1578
1579 static void md_print_devices(void)
1580 {
1581         struct list_head *tmp, *tmp2;
1582         mdk_rdev_t *rdev;
1583         mddev_t *mddev;
1584         char b[BDEVNAME_SIZE];
1585
1586         printk("\n");
1587         printk("md:     **********************************\n");
1588         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1589         printk("md:     **********************************\n");
1590         ITERATE_MDDEV(mddev,tmp) {
1591
1592                 if (mddev->bitmap)
1593                         bitmap_print_sb(mddev->bitmap);
1594                 else
1595                         printk("%s: ", mdname(mddev));
1596                 ITERATE_RDEV(mddev,rdev,tmp2)
1597                         printk("<%s>", bdevname(rdev->bdev,b));
1598                 printk("\n");
1599
1600                 ITERATE_RDEV(mddev,rdev,tmp2)
1601                         print_rdev(rdev);
1602         }
1603         printk("md:     **********************************\n");
1604         printk("\n");
1605 }
1606
1607
1608 static void sync_sbs(mddev_t * mddev, int nospares)
1609 {
1610         /* Update each superblock (in-memory image), but
1611          * if we are allowed to, skip spares which already
1612          * have the right event counter, or have one earlier
1613          * (which would mean they aren't being marked as dirty
1614          * with the rest of the array)
1615          */
1616         mdk_rdev_t *rdev;
1617         struct list_head *tmp;
1618
1619         ITERATE_RDEV(mddev,rdev,tmp) {
1620                 if (rdev->sb_events == mddev->events ||
1621                     (nospares &&
1622                      rdev->raid_disk < 0 &&
1623                      (rdev->sb_events&1)==0 &&
1624                      rdev->sb_events+1 == mddev->events)) {
1625                         /* Don't update this superblock */
1626                         rdev->sb_loaded = 2;
1627                 } else {
1628                         super_types[mddev->major_version].
1629                                 sync_super(mddev, rdev);
1630                         rdev->sb_loaded = 1;
1631                 }
1632         }
1633 }
1634
1635 static void md_update_sb(mddev_t * mddev, int force_change)
1636 {
1637         int err;
1638         struct list_head *tmp;
1639         mdk_rdev_t *rdev;
1640         int sync_req;
1641         int nospares = 0;
1642
1643 repeat:
1644         spin_lock_irq(&mddev->write_lock);
1645
1646         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1647         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1648                 force_change = 1;
1649         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1650                 /* just a clean <-> dirty transition, possibly leave spares alone,
1651                  * though if events isn't the right even/odd, we will have to
1652                  * update the spares after all
1653                  */
1654                 nospares = 1;
1655         if (force_change)
1656                 nospares = 0;
1657         if (mddev->degraded)
1658                 /* If the array is degraded, then skipping spares is both
1659                  * dangerous and fairly pointless.
1660                  * Dangerous because a device that was removed from the array
1661                  * might have an event count that still looks up-to-date,
1662                  * so it can be re-added without a resync.
1663                  * Pointless because if there are any spares to skip,
1664                  * then a recovery will happen, the array will soon no longer
1665                  * be degraded, and the spare can go back to sleep.
1666                  */
1667                 nospares = 0;
1668
1669         sync_req = mddev->in_sync;
1670         mddev->utime = get_seconds();
1671
1672         /* If this is just a dirty<->clean transition, and the array is clean
1673          * and 'events' is odd, we can roll back to the previous clean state */
1674         if (nospares
1675             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1676             && (mddev->events & 1)
1677             && mddev->events != 1)
1678                 mddev->events--;
1679         else {
1680                 /* otherwise we have to go forward and ... */
1681                 mddev->events ++;
1682                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1683                         /* .. if the array isn't clean, insist on an odd 'events' */
1684                         if ((mddev->events&1)==0) {
1685                                 mddev->events++;
1686                                 nospares = 0;
1687                         }
1688                 } else {
1689                         /* otherwise insist on an even 'events' (for clean states) */
1690                         if ((mddev->events&1)) {
1691                                 mddev->events++;
1692                                 nospares = 0;
1693                         }
1694                 }
1695         }
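        /* Net effect of the above (as the code stands): an even 'events'
         * count records a clean array, an odd count a dirty one.  E.g. a
         * clean array at events==100 goes to 101 on a dirty transition;
         * going clean again it either advances to 102 or, if nothing else
         * changed, rolls back to 100 so that spare superblocks still
         * holding 100 need not be rewritten.
         */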
1696
1697         if (!mddev->events) {
1698                 /*
1699                  * oops, this 64-bit counter should never wrap.
1700                  * Either we are around ~1 trillion A.C., assuming
1701                  * 1 reboot per second, or we have a bug:
1702                  */
1703                 MD_BUG();
1704                 mddev->events --;
1705         }
1706         sync_sbs(mddev, nospares);
1707
1708         /*
1709          * do not write anything to disk if using
1710          * nonpersistent superblocks
1711          */
1712         if (!mddev->persistent) {
1713                 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1714                 spin_unlock_irq(&mddev->write_lock);
1715                 wake_up(&mddev->sb_wait);
1716                 return;
1717         }
1718         spin_unlock_irq(&mddev->write_lock);
1719
1720         dprintk(KERN_INFO 
1721                 "md: updating %s RAID superblock on device (in sync %d)\n",
1722                 mdname(mddev),mddev->in_sync);
1723
1724         err = bitmap_update_sb(mddev->bitmap);
1725         ITERATE_RDEV(mddev,rdev,tmp) {
1726                 char b[BDEVNAME_SIZE];
1727                 dprintk(KERN_INFO "md: ");
1728                 if (rdev->sb_loaded != 1)
1729                         continue; /* no noise on spare devices */
1730                 if (test_bit(Faulty, &rdev->flags))
1731                         dprintk("(skipping faulty ");
1732
1733                 dprintk("%s ", bdevname(rdev->bdev,b));
1734                 if (!test_bit(Faulty, &rdev->flags)) {
1735                         md_super_write(mddev,rdev,
1736                                        rdev->sb_offset<<1, rdev->sb_size,
1737                                        rdev->sb_page);
1738                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1739                                 bdevname(rdev->bdev,b),
1740                                 (unsigned long long)rdev->sb_offset);
1741                         rdev->sb_events = mddev->events;
1742
1743                 } else
1744                         dprintk(")\n");
1745                 if (mddev->level == LEVEL_MULTIPATH)
1746                         /* only need to write one superblock... */
1747                         break;
1748         }
1749         md_super_wait(mddev);
1750         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1751
1752         spin_lock_irq(&mddev->write_lock);
1753         if (mddev->in_sync != sync_req ||
1754             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1755                 /* have to write it out again */
1756                 spin_unlock_irq(&mddev->write_lock);
1757                 goto repeat;
1758         }
1759         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1760         spin_unlock_irq(&mddev->write_lock);
1761         wake_up(&mddev->sb_wait);
1762
1763 }
1764
1765 /* words written to sysfs files may, or may not, be \n terminated.
1766  * We want to accept either case. For this we use cmd_match.
1767  */
1768 static int cmd_match(const char *cmd, const char *str)
1769 {
1770         /* See if cmd, written into a sysfs file, matches
1771          * str.  They must either be the same, or cmd can
1772          * have a trailing newline
1773          */
1774         while (*cmd && *str && *cmd == *str) {
1775                 cmd++;
1776                 str++;
1777         }
1778         if (*cmd == '\n')
1779                 cmd++;
1780         if (*str || *cmd)
1781                 return 0;
1782         return 1;
1783 }
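/* Illustrative cases, following the logic above:
 *   cmd_match("idle\n", "idle") -> 1   (trailing newline tolerated)
 *   cmd_match("idle",   "idle") -> 1
 *   cmd_match("idler",  "idle") -> 0   (any other extra character rejects)
 */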
1784
1785 struct rdev_sysfs_entry {
1786         struct attribute attr;
1787         ssize_t (*show)(mdk_rdev_t *, char *);
1788         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1789 };
1790
1791 static ssize_t
1792 state_show(mdk_rdev_t *rdev, char *page)
1793 {
1794         char *sep = "";
1795         int len=0;
1796
1797         if (test_bit(Faulty, &rdev->flags)) {
1798                 len+= sprintf(page+len, "%sfaulty",sep);
1799                 sep = ",";
1800         }
1801         if (test_bit(In_sync, &rdev->flags)) {
1802                 len += sprintf(page+len, "%sin_sync",sep);
1803                 sep = ",";
1804         }
1805         if (test_bit(WriteMostly, &rdev->flags)) {
1806                 len += sprintf(page+len, "%swrite_mostly",sep);
1807                 sep = ",";
1808         }
1809         if (!test_bit(Faulty, &rdev->flags) &&
1810             !test_bit(In_sync, &rdev->flags)) {
1811                 len += sprintf(page+len, "%sspare", sep);
1812                 sep = ",";
1813         }
1814         return len+sprintf(page+len, "\n");
1815 }
1816
1817 static ssize_t
1818 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1819 {
1820         /* can write
1821          *  faulty  - simulates an error
1822          *  remove  - disconnects the device
1823          *  writemostly - sets write_mostly
1824          *  -writemostly - clears write_mostly
1825          */
1826         int err = -EINVAL;
1827         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1828                 md_error(rdev->mddev, rdev);
1829                 err = 0;
1830         } else if (cmd_match(buf, "remove")) {
1831                 if (rdev->raid_disk >= 0)
1832                         err = -EBUSY;
1833                 else {
1834                         mddev_t *mddev = rdev->mddev;
1835                         kick_rdev_from_array(rdev);
1836                         if (mddev->pers)
1837                                 md_update_sb(mddev, 1);
1838                         md_new_event(mddev);
1839                         err = 0;
1840                 }
1841         } else if (cmd_match(buf, "writemostly")) {
1842                 set_bit(WriteMostly, &rdev->flags);
1843                 err = 0;
1844         } else if (cmd_match(buf, "-writemostly")) {
1845                 clear_bit(WriteMostly, &rdev->flags);
1846                 err = 0;
1847         }
1848         return err ? err : len;
1849 }
1850 static struct rdev_sysfs_entry rdev_state =
1851 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
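/* Illustrative usage from userspace (paths assumed, not taken from this
 * file): each bound device appears as a "dev-<name>" directory, e.g.
 *   echo faulty > /sys/block/md0/md/dev-sda1/state
 * simulates a failure, and once the device holds no slot (raid_disk < 0)
 *   echo remove > /sys/block/md0/md/dev-sda1/state
 * unbinds and exports it; while still slotted, "remove" returns -EBUSY.
 */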
1852
1853 static ssize_t
1854 super_show(mdk_rdev_t *rdev, char *page)
1855 {
1856         if (rdev->sb_loaded && rdev->sb_size) {
1857                 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1858                 return rdev->sb_size;
1859         } else
1860                 return 0;
1861 }
1862 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1863
1864 static ssize_t
1865 errors_show(mdk_rdev_t *rdev, char *page)
1866 {
1867         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1868 }
1869
1870 static ssize_t
1871 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1872 {
1873         char *e;
1874         unsigned long n = simple_strtoul(buf, &e, 10);
1875         if (*buf && (*e == 0 || *e == '\n')) {
1876                 atomic_set(&rdev->corrected_errors, n);
1877                 return len;
1878         }
1879         return -EINVAL;
1880 }
1881 static struct rdev_sysfs_entry rdev_errors =
1882 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1883
1884 static ssize_t
1885 slot_show(mdk_rdev_t *rdev, char *page)
1886 {
1887         if (rdev->raid_disk < 0)
1888                 return sprintf(page, "none\n");
1889         else
1890                 return sprintf(page, "%d\n", rdev->raid_disk);
1891 }
1892
1893 static ssize_t
1894 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1895 {
1896         char *e;
1897         int slot = simple_strtoul(buf, &e, 10);
1898         if (strncmp(buf, "none", 4)==0)
1899                 slot = -1;
1900         else if (e==buf || (*e && *e!= '\n'))
1901                 return -EINVAL;
1902         if (rdev->mddev->pers)
1903                 /* Cannot set slot in active array (yet) */
1904                 return -EBUSY;
1905         if (slot >= rdev->mddev->raid_disks)
1906                 return -ENOSPC;
1907         rdev->raid_disk = slot;
1908         /* assume it is working */
1909         rdev->flags = 0;
1910         set_bit(In_sync, &rdev->flags);
1911         return len;
1912 }
1913
1914
1915 static struct rdev_sysfs_entry rdev_slot =
1916 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
1917
1918 static ssize_t
1919 offset_show(mdk_rdev_t *rdev, char *page)
1920 {
1921         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1922 }
1923
1924 static ssize_t
1925 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1926 {
1927         char *e;
1928         unsigned long long offset = simple_strtoull(buf, &e, 10);
1929         if (e==buf || (*e && *e != '\n'))
1930                 return -EINVAL;
1931         if (rdev->mddev->pers)
1932                 return -EBUSY;
1933         rdev->data_offset = offset;
1934         return len;
1935 }
1936
1937 static struct rdev_sysfs_entry rdev_offset =
1938 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1939
1940 static ssize_t
1941 rdev_size_show(mdk_rdev_t *rdev, char *page)
1942 {
1943         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1944 }
1945
1946 static ssize_t
1947 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1948 {
1949         char *e;
1950         unsigned long long size = simple_strtoull(buf, &e, 10);
1951         if (e==buf || (*e && *e != '\n'))
1952                 return -EINVAL;
1953         if (rdev->mddev->pers)
1954                 return -EBUSY;
1955         rdev->size = size;
1956         if (size < rdev->mddev->size || rdev->mddev->size == 0)
1957                 rdev->mddev->size = size;
1958         return len;
1959 }
1960
1961 static struct rdev_sysfs_entry rdev_size =
1962 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1963
1964 static struct attribute *rdev_default_attrs[] = {
1965         &rdev_state.attr,
1966         &rdev_super.attr,
1967         &rdev_errors.attr,
1968         &rdev_slot.attr,
1969         &rdev_offset.attr,
1970         &rdev_size.attr,
1971         NULL,
1972 };
1973 static ssize_t
1974 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1975 {
1976         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1977         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1978
1979         if (!entry->show)
1980                 return -EIO;
1981         return entry->show(rdev, page);
1982 }
1983
1984 static ssize_t
1985 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1986               const char *page, size_t length)
1987 {
1988         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1989         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1990
1991         if (!entry->store)
1992                 return -EIO;
1993         if (!capable(CAP_SYS_ADMIN))
1994                 return -EACCES;
1995         return entry->store(rdev, page, length);
1996 }
1997
1998 static void rdev_free(struct kobject *ko)
1999 {
2000         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2001         kfree(rdev);
2002 }
2003 static struct sysfs_ops rdev_sysfs_ops = {
2004         .show           = rdev_attr_show,
2005         .store          = rdev_attr_store,
2006 };
2007 static struct kobj_type rdev_ktype = {
2008         .release        = rdev_free,
2009         .sysfs_ops      = &rdev_sysfs_ops,
2010         .default_attrs  = rdev_default_attrs,
2011 };
2012
2013 /*
2014  * Import a device. If 'super_format' >= 0, then sanity-check the superblock.
2015  *
2016  * Mark the device faulty if:
2017  *
2018  *   - the device is nonexistent (zero size)
2019  *   - the device has no valid superblock
2020  *
2021  * a faulty rdev _never_ has rdev->sb set.
2022  */
2023 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2024 {
2025         char b[BDEVNAME_SIZE];
2026         int err;
2027         mdk_rdev_t *rdev;
2028         sector_t size;
2029
2030         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2031         if (!rdev) {
2032                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2033                 return ERR_PTR(-ENOMEM);
2034         }
2035
2036         if ((err = alloc_disk_sb(rdev)))
2037                 goto abort_free;
2038
2039         err = lock_rdev(rdev, newdev);
2040         if (err)
2041                 goto abort_free;
2042
2043         rdev->kobj.parent = NULL;
2044         rdev->kobj.ktype = &rdev_ktype;
2045         kobject_init(&rdev->kobj);
2046
2047         rdev->desc_nr = -1;
2048         rdev->saved_raid_disk = -1;
2049         rdev->raid_disk = -1;
2050         rdev->flags = 0;
2051         rdev->data_offset = 0;
2052         rdev->sb_events = 0;
2053         atomic_set(&rdev->nr_pending, 0);
2054         atomic_set(&rdev->read_errors, 0);
2055         atomic_set(&rdev->corrected_errors, 0);
2056
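        /* i_size is in bytes; BLOCK_SIZE_BITS is 10, so 'size' is in 1K blocks */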
2057         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2058         if (!size) {
2059                 printk(KERN_WARNING 
2060                         "md: %s has zero or unknown size, marking faulty!\n",
2061                         bdevname(rdev->bdev,b));
2062                 err = -EINVAL;
2063                 goto abort_free;
2064         }
2065
2066         if (super_format >= 0) {
2067                 err = super_types[super_format].
2068                         load_super(rdev, NULL, super_minor);
2069                 if (err == -EINVAL) {
2070                         printk(KERN_WARNING 
2071                                 "md: %s has invalid sb, not importing!\n",
2072                                 bdevname(rdev->bdev,b));
2073                         goto abort_free;
2074                 }
2075                 if (err < 0) {
2076                         printk(KERN_WARNING 
2077                                 "md: could not read %s's sb, not importing!\n",
2078                                 bdevname(rdev->bdev,b));
2079                         goto abort_free;
2080                 }
2081         }
2082         INIT_LIST_HEAD(&rdev->same_set);
2083
2084         return rdev;
2085
2086 abort_free:
2087         if (rdev->sb_page) {
2088                 if (rdev->bdev)
2089                         unlock_rdev(rdev);
2090                 free_disk_sb(rdev);
2091         }
2092         kfree(rdev);
2093         return ERR_PTR(err);
2094 }
2095
2096 /*
2097  * Check a full RAID array for plausibility
2098  */
2099
2100
2101 static void analyze_sbs(mddev_t * mddev)
2102 {
2103         int i;
2104         struct list_head *tmp;
2105         mdk_rdev_t *rdev, *freshest;
2106         char b[BDEVNAME_SIZE];
2107
2108         freshest = NULL;
2109         ITERATE_RDEV(mddev,rdev,tmp)
2110                 switch (super_types[mddev->major_version].
2111                         load_super(rdev, freshest, mddev->minor_version)) {
2112                 case 1:
2113                         freshest = rdev;
2114                         break;
2115                 case 0:
2116                         break;
2117                 default:
2118                         printk(KERN_ERR
2119                                 "md: fatal superblock inconsistency in %s"
2120                                 " -- removing from array\n", 
2121                                 bdevname(rdev->bdev,b));
2122                         kick_rdev_from_array(rdev);
2123                 }
2124
2125
2126         super_types[mddev->major_version].
2127                 validate_super(mddev, freshest);
2128
2129         i = 0;
2130         ITERATE_RDEV(mddev,rdev,tmp) {
2131                 if (rdev != freshest)
2132                         if (super_types[mddev->major_version].
2133                             validate_super(mddev, rdev)) {
2134                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2135                                         " from array!\n",
2136                                         bdevname(rdev->bdev,b));
2137                                 kick_rdev_from_array(rdev);
2138                                 continue;
2139                         }
2140                 if (mddev->level == LEVEL_MULTIPATH) {
2141                         rdev->desc_nr = i++;
2142                         rdev->raid_disk = rdev->desc_nr;
2143                         set_bit(In_sync, &rdev->flags);
2144                 }
2145         }
2146
2147
2148
2149         if (mddev->recovery_cp != MaxSector &&
2150             mddev->level >= 1)
2151                 printk(KERN_ERR "md: %s: raid array is not clean"
2152                        " -- starting background reconstruction\n",
2153                        mdname(mddev));
2154
2155 }
2156
2157 static ssize_t
2158 safe_delay_show(mddev_t *mddev, char *page)
2159 {
2160         int msec = (mddev->safemode_delay*1000)/HZ;
2161         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2162 }
2163 static ssize_t
2164 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2165 {
2166         int scale=1;
2167         int dot=0;
2168         int i;
2169         unsigned long msec;
2170         char buf[30];
2171         char *e;
2172         /* remove a period, and count digits after it */
2173         if (len >= sizeof(buf))
2174                 return -EINVAL;
2175         strlcpy(buf, cbuf, len);
2176         buf[len] = 0;
2177         for (i=0; i<len; i++) {
2178                 if (dot) {
2179                         if (isdigit(buf[i])) {
2180                                 buf[i-1] = buf[i];
2181                                 scale *= 10;
2182                         }
2183                         buf[i] = 0;
2184                 } else if (buf[i] == '.') {
2185                         dot=1;
2186                         buf[i] = 0;
2187                 }
2188         }
2189         msec = simple_strtoul(buf, &e, 10);
2190         if (e == buf || (*e && *e != '\n'))
2191                 return -EINVAL;
2192         msec = (msec * 1000) / scale;
2193         if (msec == 0)
2194                 mddev->safemode_delay = 0;
2195         else {
2196                 mddev->safemode_delay = (msec*HZ)/1000;
2197                 if (mddev->safemode_delay == 0)
2198                         mddev->safemode_delay = 1;
2199         }
2200         return len;
2201 }
2202 static struct md_sysfs_entry md_safe_delay =
2203 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
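/* Illustrative usage (path assumed): writing "0.200", e.g. via
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 * parses to msec=200 and sets safemode_delay to (200*HZ)/1000 jiffies.
 * Note the strlcpy() above keeps only len-1 characters, which relies on
 * sysfs writes normally carrying a trailing newline.
 */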
2204
2205 static ssize_t
2206 level_show(mddev_t *mddev, char *page)
2207 {
2208         struct mdk_personality *p = mddev->pers;
2209         if (p)
2210                 return sprintf(page, "%s\n", p->name);
2211         else if (mddev->clevel[0])
2212                 return sprintf(page, "%s\n", mddev->clevel);
2213         else if (mddev->level != LEVEL_NONE)
2214                 return sprintf(page, "%d\n", mddev->level);
2215         else
2216                 return 0;
2217 }
2218
2219 static ssize_t
2220 level_store(mddev_t *mddev, const char *buf, size_t len)
2221 {
2222         int rv = len;
2223         if (mddev->pers)
2224                 return -EBUSY;
2225         if (len == 0)
2226                 return 0;
2227         if (len >= sizeof(mddev->clevel))
2228                 return -ENOSPC;
2229         strncpy(mddev->clevel, buf, len);
2230         if (mddev->clevel[len-1] == '\n')
2231                 len--;
2232         mddev->clevel[len] = 0;
2233         mddev->level = LEVEL_NONE;
2234         return rv;
2235 }
2236
2237 static struct md_sysfs_entry md_level =
2238 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2239
2240
2241 static ssize_t
2242 layout_show(mddev_t *mddev, char *page)
2243 {
2244         /* just a number, not meaningful for all levels */
2245         return sprintf(page, "%d\n", mddev->layout);
2246 }
2247
2248 static ssize_t
2249 layout_store(mddev_t *mddev, const char *buf, size_t len)
2250 {
2251         char *e;
2252         unsigned long n = simple_strtoul(buf, &e, 10);
2253         if (mddev->pers)
2254                 return -EBUSY;
2255
2256         if (!*buf || (*e && *e != '\n'))
2257                 return -EINVAL;
2258
2259         mddev->layout = n;
2260         return len;
2261 }
2262 static struct md_sysfs_entry md_layout =
2263 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2264
2265
2266 static ssize_t
2267 raid_disks_show(mddev_t *mddev, char *page)
2268 {
2269         if (mddev->raid_disks == 0)
2270                 return 0;
2271         return sprintf(page, "%d\n", mddev->raid_disks);
2272 }
2273
2274 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2275
2276 static ssize_t
2277 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2278 {
2279         char *e;
2280         int rv = 0;
2281         unsigned long n = simple_strtoul(buf, &e, 10);
2282
2283         if (!*buf || (*e && *e != '\n'))
2284                 return -EINVAL;
2285
2286         if (mddev->pers)
2287                 rv = update_raid_disks(mddev, n);
2288         else
2289                 mddev->raid_disks = n;
2290         return rv ? rv : len;
2291 }
2292 static struct md_sysfs_entry md_raid_disks =
2293 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2294
2295 static ssize_t
2296 chunk_size_show(mddev_t *mddev, char *page)
2297 {
2298         return sprintf(page, "%d\n", mddev->chunk_size);
2299 }
2300
2301 static ssize_t
2302 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2303 {
2304         /* can only set chunk_size if array is not yet active */
2305         char *e;
2306         unsigned long n = simple_strtoul(buf, &e, 10);
2307
2308         if (mddev->pers)
2309                 return -EBUSY;
2310         if (!*buf || (*e && *e != '\n'))
2311                 return -EINVAL;
2312
2313         mddev->chunk_size = n;
2314         return len;
2315 }
2316 static struct md_sysfs_entry md_chunk_size =
2317 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2318
2319 static ssize_t
2320 resync_start_show(mddev_t *mddev, char *page)
2321 {
2322         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2323 }
2324
2325 static ssize_t
2326 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2327 {
2328         /* can only set resync_start if array is not yet active */
2329         char *e;
2330         unsigned long long n = simple_strtoull(buf, &e, 10);
2331
2332         if (mddev->pers)
2333                 return -EBUSY;
2334         if (!*buf || (*e && *e != '\n'))
2335                 return -EINVAL;
2336
2337         mddev->recovery_cp = n;
2338         return len;
2339 }
2340 static struct md_sysfs_entry md_resync_start =
2341 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2342
2343 /*
2344  * The array state can be:
2345  *
2346  * clear
2347  *     No devices, no size, no level
2348  *     Equivalent to STOP_ARRAY ioctl
2349  * inactive
2350  *     May have some settings, but array is not active
2351  *        all IO results in error
2352  *     When written, doesn't tear down array, but just stops it
2353  * suspended (not supported yet)
2354  *     All IO requests will block. The array can be reconfigured.
2355  *     Writing this, if accepted, will block until the array is quiescent
2356  * readonly
2357  *     no resync can happen.  no superblocks get written.
2358  *     write requests fail
2359  * read-auto
2360  *     like readonly, but behaves like 'clean' on a write request.
2361  *
2362  * clean - no pending writes, but otherwise active.
2363  *     When written to inactive array, starts without resync
2364  *     If a write request arrives then
2365  *       if metadata is known, mark 'dirty' and switch to 'active'.
2366  *       if not known, block and switch to write-pending
2367  *     If written to an active array that has pending writes, then fails.
2368  * active
2369  *     fully active: IO and resync can be happening.
2370  *     When written to inactive array, starts with resync
2371  *
2372  * write-pending
2373  *     clean, but writes are blocked waiting for 'active' to be written.
2374  *
2375  * active-idle
2376  *     like active, but no writes have been seen for a while (100msec).
2377  *
2378  */
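/* Illustrative transitions (sysfs paths assumed):
 *   echo readonly > /sys/block/md0/md/array_state   # stop writes, keep array up
 *   echo clean    > /sys/block/md0/md/array_state   # start an inactive array without resync
 *   echo clear    > /sys/block/md0/md/array_state   # tear the array down completely
 */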
2379 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2380                    write_pending, active_idle, bad_word};
2381 static char *array_states[] = {
2382         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2383         "write-pending", "active-idle", NULL };
2384
2385 static int match_word(const char *word, char **list)
2386 {
2387         int n;
2388         for (n=0; list[n]; n++)
2389                 if (cmd_match(word, list[n]))
2390                         break;
2391         return n;
2392 }
2393
2394 static ssize_t
2395 array_state_show(mddev_t *mddev, char *page)
2396 {
2397         enum array_state st = inactive;
2398
2399         if (mddev->pers)
2400                 switch(mddev->ro) {
2401                 case 1:
2402                         st = readonly;
2403                         break;
2404                 case 2:
2405                         st = read_auto;
2406                         break;
2407                 case 0:
2408                         if (mddev->in_sync)
2409                                 st = clean;
2410                         else if (mddev->safemode)
2411                                 st = active_idle;
2412                         else
2413                                 st = active;
2414                 }
2415         else {
2416                 if (list_empty(&mddev->disks) &&
2417                     mddev->raid_disks == 0 &&
2418                     mddev->size == 0)
2419                         st = clear;
2420                 else
2421                         st = inactive;
2422         }
2423         return sprintf(page, "%s\n", array_states[st]);
2424 }
2425
2426 static int do_md_stop(mddev_t * mddev, int ro);
2427 static int do_md_run(mddev_t * mddev);
2428 static int restart_array(mddev_t *mddev);
2429
2430 static ssize_t
2431 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2432 {
2433         int err = -EINVAL;
2434         enum array_state st = match_word(buf, array_states);
2435         switch(st) {
2436         case bad_word:
2437                 break;
2438         case clear:
2439                 /* stopping an active array */
2440                 if (mddev->pers) {
2441                         if (atomic_read(&mddev->active) > 1)
2442                                 return -EBUSY;
2443                         err = do_md_stop(mddev, 0);
2444                 }
2445                 break;
2446         case inactive:
2447                 /* stopping an active array */
2448                 if (mddev->pers) {
2449                         if (atomic_read(&mddev->active) > 1)
2450                                 return -EBUSY;
2451                         err = do_md_stop(mddev, 2);
2452                 }
2453                 break;
2454         case suspended:
2455                 break; /* not supported yet */
2456         case readonly:
2457                 if (mddev->pers)
2458                         err = do_md_stop(mddev, 1);
2459                 else {
2460                         mddev->ro = 1;
2461                         err = do_md_run(mddev);
2462                 }
2463                 break;
2464         case read_auto:
2465                 /* stopping an active array */
2466                 if (mddev->pers) {
2467                         err = do_md_stop(mddev, 1);
2468                         if (err == 0)
2469                                 mddev->ro = 2; /* FIXME mark devices writable */
2470                 } else {
2471                         mddev->ro = 2;
2472                         err = do_md_run(mddev);
2473                 }
2474                 break;
2475         case clean:
2476                 if (mddev->pers) {
2477                         restart_array(mddev);
2478                         spin_lock_irq(&mddev->write_lock);
2479                         if (atomic_read(&mddev->writes_pending) == 0) {
2480                                 mddev->in_sync = 1;
2481                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                                        err = 0;
2482                         } else
                                        err = -EBUSY; /* writes still in flight */
2483                         spin_unlock_irq(&mddev->write_lock);
2484                 } else {
2485                         mddev->ro = 0;
2486                         mddev->recovery_cp = MaxSector;
2487                         err = do_md_run(mddev);
2488                 }
2489                 break;
2490         case active:
2491                 if (mddev->pers) {
2492                         restart_array(mddev);
2493                         clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2494                         wake_up(&mddev->sb_wait);
2495                         err = 0;
2496                 } else {
2497                         mddev->ro = 0;
2498                         err = do_md_run(mddev);
2499                 }
2500                 break;
2501         case write_pending:
2502         case active_idle:
2503                 /* these cannot be set */
2504                 break;
2505         }
2506         if (err)
2507                 return err;
2508         else
2509                 return len;
2510 }
2511 static struct md_sysfs_entry md_array_state =
2512 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2513
2514 static ssize_t
2515 null_show(mddev_t *mddev, char *page)
2516 {
2517         return -EINVAL;
2518 }
2519
2520 static ssize_t
2521 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2522 {
2523         /* buf must be "%d:%d" with an optional trailing newline, giving major and minor numbers */
2524         /* The new device is added to the array.
2525          * If the array has a persistent superblock, we read the
2526          * superblock to initialise info and check validity.
2527          * Otherwise, only checking done is that in bind_rdev_to_array,
2528          * which mainly checks size.
2529          */
2530         char *e;
2531         int major = simple_strtoul(buf, &e, 10);
2532         int minor;
2533         dev_t dev;
2534         mdk_rdev_t *rdev;
2535         int err;
2536
2537         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2538                 return -EINVAL;
2539         minor = simple_strtoul(e+1, &e, 10);
2540         if (*e && *e != '\n')
2541                 return -EINVAL;
2542         dev = MKDEV(major, minor);
2543         if (major != MAJOR(dev) ||
2544             minor != MINOR(dev))
2545                 return -EOVERFLOW;
2546
2547
2548         if (mddev->persistent) {
2549                 rdev = md_import_device(dev, mddev->major_version,
2550                                         mddev->minor_version);
2551                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2552                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2553                                                        mdk_rdev_t, same_set);
2554                         err = super_types[mddev->major_version]
2555                                 .load_super(rdev, rdev0, mddev->minor_version);
2556                         if (err < 0)
2557                                 goto out;
2558                 }
2559         } else
2560                 rdev = md_import_device(dev, -1, -1);
2561
2562         if (IS_ERR(rdev))
2563                 return PTR_ERR(rdev);
2564         err = bind_rdev_to_array(rdev, mddev);
2565  out:
2566         if (err)
2567                 export_rdev(rdev);
2568         return err ? err : len;
2569 }
2570
2571 static struct md_sysfs_entry md_new_device =
2572 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
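/* Illustrative usage (device numbers assumed): major 8, minor 16 is
 * conventionally /dev/sdb, so
 *   echo 8:16 > /sys/block/md0/md/new_dev
 * imports that device and binds it to the array.
 */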
2573
2574 static ssize_t
2575 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2576 {
2577         char *end;
2578         unsigned long chunk, end_chunk;
2579
2580         if (!mddev->bitmap)
2581                 goto out;
2582         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2583         while (*buf) {
2584                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2585                 if (buf == end) break;
2586                 if (*end == '-') { /* range */
2587                         buf = end + 1;
2588                         end_chunk = simple_strtoul(buf, &end, 0);
2589                         if (buf == end) break;
2590                 }
2591                 if (*end && !isspace(*end)) break;
2592                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2593                 buf = end;
2594                 while (isspace(*buf)) buf++;
2595         }
2596         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2597 out:
2598         return len;
2599 }
2600
2601 static struct md_sysfs_entry md_bitmap =
2602 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
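/* Illustrative usage (assumed): writing "10 40-50" marks bitmap chunks 10
 * and 40 through 50 dirty, so those regions will be resynced.
 */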
2603
2604 static ssize_t
2605 size_show(mddev_t *mddev, char *page)
2606 {
2607         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2608 }
2609
2610 static int update_size(mddev_t *mddev, unsigned long size);
2611
2612 static ssize_t
2613 size_store(mddev_t *mddev, const char *buf, size_t len)
2614 {
2615         /* If array is inactive, we can reduce the component size, but
2616          * not increase it (except from 0).
2617          * If array is active, we can try an on-line resize
2618          */
2619         char *e;
2620         int err = 0;
2621         unsigned long long size = simple_strtoull(buf, &e, 10);
2622         if (!*buf || *buf == '\n' ||
2623             (*e && *e != '\n'))
2624                 return -EINVAL;
2625
2626         if (mddev->pers) {
2627                 err = update_size(mddev, size);
2628                 md_update_sb(mddev, 1);
2629         } else {
2630                 if (mddev->size == 0 ||
2631                     mddev->size > size)
2632                         mddev->size = size;
2633                 else
2634                         err = -ENOSPC;
2635         }
2636         return err ? err : len;
2637 }
2638
2639 static struct md_sysfs_entry md_size =
2640 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2641
2642
2643 /* Metadata version.
2644  * This is either 'none' for arrays with externally managed metadata,
2645  * or N.M for internally known formats
2646  */
2647 static ssize_t
2648 metadata_show(mddev_t *mddev, char *page)
2649 {
2650         if (mddev->persistent)
2651                 return sprintf(page, "%d.%d\n",
2652                                mddev->major_version, mddev->minor_version);
2653         else
2654                 return sprintf(page, "none\n");
2655 }
2656
2657 static ssize_t
2658 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2659 {
2660         int major, minor;
2661         char *e;
2662         if (!list_empty(&mddev->disks))
2663                 return -EBUSY;
2664
2665         if (cmd_match(buf, "none")) {
2666                 mddev->persistent = 0;
2667                 mddev->major_version = 0;
2668                 mddev->minor_version = 90;
2669                 return len;
2670         }
2671         major = simple_strtoul(buf, &e, 10);
2672         if (e==buf || *e != '.')
2673                 return -EINVAL;
2674         buf = e+1;
2675         minor = simple_strtoul(buf, &e, 10);
2676         if (e==buf || (*e && *e != '\n') )
2677                 return -EINVAL;
2678         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2679                 return -ENOENT;
2680         mddev->major_version = major;
2681         mddev->minor_version = minor;
2682         mddev->persistent = 1;
2683         return len;
2684 }
2685
2686 static struct md_sysfs_entry md_metadata =
2687 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2688
2689 static ssize_t
2690 action_show(mddev_t *mddev, char *page)
2691 {
2692         char *type = "idle";
2693         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2694             test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2695                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2696                         type = "reshape";
2697                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2698                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2699                                 type = "resync";
2700                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2701                                 type = "check";
2702                         else
2703                                 type = "repair";
2704                 } else
2705                         type = "recover";
2706         }
2707         return sprintf(page, "%s\n", type);
2708 }
2709
2710 static ssize_t
2711 action_store(mddev_t *mddev, const char *page, size_t len)
2712 {
2713         if (!mddev->pers || !mddev->pers->sync_request)
2714                 return -EINVAL;
2715
2716         if (cmd_match(page, "idle")) {
2717                 if (mddev->sync_thread) {
2718                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2719                         md_unregister_thread(mddev->sync_thread);
2720                         mddev->sync_thread = NULL;
2721                         mddev->recovery = 0;
2722                 }
2723         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2724                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2725                 return -EBUSY;
2726         else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2727                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2728         else if (cmd_match(page, "reshape")) {
2729                 int err;
2730                 if (mddev->pers->start_reshape == NULL)
2731                         return -EINVAL;
2732                 err = mddev->pers->start_reshape(mddev);
2733                 if (err)
2734                         return err;
2735         } else {
2736                 if (cmd_match(page, "check"))
2737                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2738                 else if (!cmd_match(page, "repair"))
2739                         return -EINVAL;
2740                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2741                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2742         }
2743         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2744         md_wakeup_thread(mddev->thread);
2745         return len;
2746 }
2747
2748 static ssize_t
2749 mismatch_cnt_show(mddev_t *mddev, char *page)
2750 {
2751         return sprintf(page, "%llu\n",
2752                        (unsigned long long) mddev->resync_mismatches);
2753 }
2754
2755 static struct md_sysfs_entry md_scan_mode =
2756 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
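/* Illustrative usage (path assumed):
 *   echo check > /sys/block/md0/md/sync_action
 * starts a read-only scrub (MD_RECOVERY_CHECK|MD_RECOVERY_REQUESTED);
 * "repair" additionally rewrites inconsistencies.  The mismatch_cnt
 * attribute reports how much disagreement the last such pass found.
 */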
2757
2758
2759 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2760
2761 static ssize_t
2762 sync_min_show(mddev_t *mddev, char *page)
2763 {
2764         return sprintf(page, "%d (%s)\n", speed_min(mddev),
2765                        mddev->sync_speed_min ? "local": "system");
2766 }
2767
2768 static ssize_t
2769 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2770 {
2771         int min;
2772         char *e;
2773         if (strncmp(buf, "system", 6)==0) {
2774                 mddev->sync_speed_min = 0;
2775                 return len;
2776         }
2777         min = simple_strtoul(buf, &e, 10);
2778         if (buf == e || (*e && *e != '\n') || min <= 0)
2779                 return -EINVAL;
2780         mddev->sync_speed_min = min;
2781         return len;
2782 }
2783
2784 static struct md_sysfs_entry md_sync_min =
2785 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2786
2787 static ssize_t
2788 sync_max_show(mddev_t *mddev, char *page)
2789 {
2790         return sprintf(page, "%d (%s)\n", speed_max(mddev),
2791                        mddev->sync_speed_max ? "local": "system");
2792 }
2793
2794 static ssize_t
2795 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2796 {
2797         int max;
2798         char *e;
2799         if (strncmp(buf, "system", 6)==0) {
2800                 mddev->sync_speed_max = 0;
2801                 return len;
2802         }
2803         max = simple_strtoul(buf, &e, 10);
2804         if (buf == e || (*e && *e != '\n') || max <= 0)
2805                 return -EINVAL;
2806         mddev->sync_speed_max = max;
2807         return len;
2808 }
2809
2810 static struct md_sysfs_entry md_sync_max =
2811 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2812
2813
2814 static ssize_t
2815 sync_speed_show(mddev_t *mddev, char *page)
2816 {
2817         unsigned long resync, dt, db;
2818         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2819         dt = ((jiffies - mddev->resync_mark) / HZ);
2820         if (!dt) dt++;
2821         db = resync - (mddev->resync_mark_cnt);
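        /* db is a delta in 512-byte sectors; dividing by 2 converts to KB */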
2822         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2823 }
2824
2825 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2826
2827 static ssize_t
2828 sync_completed_show(mddev_t *mddev, char *page)
2829 {
2830         unsigned long max_blocks, resync;
2831
2832         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2833                 max_blocks = mddev->resync_max_sectors;
2834         else
2835                 max_blocks = mddev->size << 1;
2836
2837         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2838         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2839 }
2840
2841 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2842
2843 static ssize_t
2844 suspend_lo_show(mddev_t *mddev, char *page)
2845 {
2846         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2847 }
2848
2849 static ssize_t
2850 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2851 {
2852         char *e;
2853         unsigned long long new = simple_strtoull(buf, &e, 10);
2854
2855         if (mddev->pers->quiesce == NULL)
2856                 return -EINVAL;
2857         if (buf == e || (*e && *e != '\n'))
2858                 return -EINVAL;
2859         if (new >= mddev->suspend_hi ||
2860             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2861                 mddev->suspend_lo = new;
2862                 mddev->pers->quiesce(mddev, 2);
2863                 return len;
2864         } else
2865                 return -EINVAL;
2866 }
2867 static struct md_sysfs_entry md_suspend_lo =
2868 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2869
2870
2871 static ssize_t
2872 suspend_hi_show(mddev_t *mddev, char *page)
2873 {
2874         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2875 }
2876
2877 static ssize_t
2878 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2879 {
2880         char *e;
2881         unsigned long long new = simple_strtoull(buf, &e, 10);
2882
2883         if (mddev->pers->quiesce == NULL)
2884                 return -EINVAL;
2885         if (buf == e || (*e && *e != '\n'))
2886                 return -EINVAL;
2887         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2888             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2889                 mddev->suspend_hi = new;
2890                 mddev->pers->quiesce(mddev, 1);
2891                 mddev->pers->quiesce(mddev, 0);
2892                 return len;
2893         } else
2894                 return -EINVAL;
2895 }
2896 static struct md_sysfs_entry md_suspend_hi =
2897 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2898
2899
2900 static struct attribute *md_default_attrs[] = {
2901         &md_level.attr,
2902         &md_layout.attr,
2903         &md_raid_disks.attr,
2904         &md_chunk_size.attr,
2905         &md_size.attr,
2906         &md_resync_start.attr,
2907         &md_metadata.attr,
2908         &md_new_device.attr,
2909         &md_safe_delay.attr,
2910         &md_array_state.attr,
2911         NULL,
2912 };
2913
2914 static struct attribute *md_redundancy_attrs[] = {
2915         &md_scan_mode.attr,
2916         &md_mismatches.attr,
2917         &md_sync_min.attr,
2918         &md_sync_max.attr,
2919         &md_sync_speed.attr,
2920         &md_sync_completed.attr,
2921         &md_suspend_lo.attr,
2922         &md_suspend_hi.attr,
2923         &md_bitmap.attr,
2924         NULL,
2925 };
2926 static struct attribute_group md_redundancy_group = {
2927         .name = NULL,
2928         .attrs = md_redundancy_attrs,
2929 };
2930
2931
2932 static ssize_t
2933 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2934 {
2935         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2936         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2937         ssize_t rv;
2938
2939         if (!entry->show)
2940                 return -EIO;
2941         rv = mddev_lock(mddev);
2942         if (!rv) {
2943                 rv = entry->show(mddev, page);
2944                 mddev_unlock(mddev);
2945         }
2946         return rv;
2947 }
2948
2949 static ssize_t
2950 md_attr_store(struct kobject *kobj, struct attribute *attr,
2951               const char *page, size_t length)
2952 {
2953         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2954         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2955         ssize_t rv;
2956
2957         if (!entry->store)
2958                 return -EIO;
2959         if (!capable(CAP_SYS_ADMIN))
2960                 return -EACCES;
2961         rv = mddev_lock(mddev);
2962         if (!rv) {
2963                 rv = entry->store(mddev, page, length);
2964                 mddev_unlock(mddev);
2965         }
2966         return rv;
2967 }
2968
2969 static void md_free(struct kobject *ko)
2970 {
2971         mddev_t *mddev = container_of(ko, mddev_t, kobj);
2972         kfree(mddev);
2973 }
2974
2975 static struct sysfs_ops md_sysfs_ops = {
2976         .show   = md_attr_show,
2977         .store  = md_attr_store,
2978 };
2979 static struct kobj_type md_ktype = {
2980         .release        = md_free,
2981         .sysfs_ops      = &md_sysfs_ops,
2982         .default_attrs  = md_default_attrs,
2983 };
2984
2985 int mdp_major = 0;
2986
2987 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2988 {
2989         static DEFINE_MUTEX(disks_mutex);
2990         mddev_t *mddev = mddev_find(dev);
2991         struct gendisk *disk;
2992         int partitioned = (MAJOR(dev) != MD_MAJOR);
2993         int shift = partitioned ? MdpMinorShift : 0;
2994         int unit = MINOR(dev) >> shift;
2995
2996         if (!mddev)
2997                 return NULL;
2998
2999         mutex_lock(&disks_mutex);
3000         if (mddev->gendisk) {
3001                 mutex_unlock(&disks_mutex);
3002                 mddev_put(mddev);
3003                 return NULL;
3004         }
3005         disk = alloc_disk(1 << shift);
3006         if (!disk) {
3007                 mutex_unlock(&disks_mutex);
3008                 mddev_put(mddev);
3009                 return NULL;
3010         }
3011         disk->major = MAJOR(dev);
3012         disk->first_minor = unit << shift;
3013         if (partitioned)
3014                 sprintf(disk->disk_name, "md_d%d", unit);
3015         else
3016                 sprintf(disk->disk_name, "md%d", unit);
3017         disk->fops = &md_fops;
3018         disk->private_data = mddev;
3019         disk->queue = mddev->queue;
3020         add_disk(disk);
3021         mddev->gendisk = disk;
3022         mutex_unlock(&disks_mutex);
3023         mddev->kobj.parent = &disk->kobj;
3024         mddev->kobj.k_name = NULL;
3025         snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
3026         mddev->kobj.ktype = &md_ktype;
3027         if (kobject_register(&mddev->kobj))
3028                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3029                        disk->disk_name);
3030         return NULL;
3031 }
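/* For a partitioned (mdp) device, alloc_disk(1 << shift) above reserves
 * 1 << MdpMinorShift minors: one for the whole disk ("md_dN") and the
 * rest for its partitions.
 */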
3032
3033 static void md_safemode_timeout(unsigned long data)
3034 {
3035         mddev_t *mddev = (mddev_t *) data;
3036
3037         mddev->safemode = 1;
3038         md_wakeup_thread(mddev->thread);
3039 }
3040
3041 static int start_dirty_degraded;
3042
3043 static int do_md_run(mddev_t * mddev)
3044 {
3045         int err;
3046         int chunk_size;
3047         struct list_head *tmp;
3048         mdk_rdev_t *rdev;
3049         struct gendisk *disk;
3050         struct mdk_personality *pers;
3051         char b[BDEVNAME_SIZE];
3052
3053         if (list_empty(&mddev->disks))
3054                 /* cannot run an array with no devices. */
3055                 return -EINVAL;
3056
3057         if (mddev->pers)
3058                 return -EBUSY;
3059
3060         /*
3061          * Analyze all RAID superblock(s)
3062          */
3063         if (!mddev->raid_disks)
3064                 analyze_sbs(mddev);
3065
3066         chunk_size = mddev->chunk_size;
3067
3068         if (chunk_size) {
3069                 if (chunk_size > MAX_CHUNK_SIZE) {
3070                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3071                                 chunk_size, MAX_CHUNK_SIZE);
3072                         return -EINVAL;
3073                 }
3074                 /*
3075          * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3076                  */
3077                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3078                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3079                         return -EINVAL;
3080                 }
3081                 if (chunk_size < PAGE_SIZE) {
3082                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3083                                 chunk_size, PAGE_SIZE);
3084                         return -EINVAL;
3085                 }
3086
3087                 /* devices must have a minimum size of one chunk */
3088                 ITERATE_RDEV(mddev,rdev,tmp) {
3089                         if (test_bit(Faulty, &rdev->flags))
3090                                 continue;
3091                         if (rdev->size < chunk_size / 1024) {
3092                                 printk(KERN_WARNING
3093                                         "md: Dev %s smaller than chunk_size:"
3094                                         " %lluk < %dk\n",
3095                                         bdevname(rdev->bdev,b),
3096                                         (unsigned long long)rdev->size,
3097                                         chunk_size / 1024);
3098                                 return -EINVAL;
3099                         }
3100                 }
3101         }
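        /*
         * A note on the power-of-two test above: ffz(~x) yields the index
         * of the lowest set bit of x, so (1 << ffz(~chunk_size)) equals
         * chunk_size only when exactly one bit is set.  For example
         * 0x10000 (64k) passes (lowest set bit 16, 1 << 16 == 0x10000),
         * while 0x18000 (96k) is rejected (lowest set bit 15,
         * 1 << 15 == 0x8000 != 0x18000).
         */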
3102
3103 #ifdef CONFIG_KMOD
3104         if (mddev->level != LEVEL_NONE)
3105                 request_module("md-level-%d", mddev->level);
3106         else if (mddev->clevel[0])
3107                 request_module("md-%s", mddev->clevel);
3108 #endif
3109
3110         /*
3111          * Drop all container device buffers, from now on
3112          * the only valid external interface is through the md
3113          * device.
3114          * Also find largest hardsector size
3115          */
3116         ITERATE_RDEV(mddev,rdev,tmp) {
3117                 if (test_bit(Faulty, &rdev->flags))
3118                         continue;
3119                 sync_blockdev(rdev->bdev);
3120                 invalidate_bdev(rdev->bdev);
3121         }
3122
3123         md_probe(mddev->unit, NULL, NULL);
3124         disk = mddev->gendisk;
3125         if (!disk)
3126                 return -ENOMEM;
3127
3128         spin_lock(&pers_lock);
3129         pers = find_pers(mddev->level, mddev->clevel);
3130         if (!pers || !try_module_get(pers->owner)) {
3131                 spin_unlock(&pers_lock);
3132                 if (mddev->level != LEVEL_NONE)
3133                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3134                                mddev->level);
3135                 else
3136                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3137                                mddev->clevel);
3138                 return -EINVAL;
3139         }
3140         mddev->pers = pers;
3141         spin_unlock(&pers_lock);
3142         mddev->level = pers->level;
3143         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3144
3145         if (mddev->reshape_position != MaxSector &&
3146             pers->start_reshape == NULL) {
3147                 /* This personality cannot handle reshaping... */
3148                 mddev->pers = NULL;
3149                 module_put(pers->owner);
3150                 return -EINVAL;
3151         }
3152
3153         if (pers->sync_request) {
3154                 /* Warn if this is a potentially silly
3155                  * configuration.
3156                  */
3157                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3158                 mdk_rdev_t *rdev2;
3159                 struct list_head *tmp2;
3160                 int warned = 0;
3161                 ITERATE_RDEV(mddev, rdev, tmp) {
3162                         ITERATE_RDEV(mddev, rdev2, tmp2) {
3163                                 if (rdev < rdev2 &&
3164                                     rdev->bdev->bd_contains ==
3165                                     rdev2->bdev->bd_contains) {
3166                                         printk(KERN_WARNING
3167                                                "%s: WARNING: %s appears to be"
3168                                                " on the same physical disk as"
3169                                                " %s.\n",
3170                                                mdname(mddev),
3171                                                bdevname(rdev->bdev,b),
3172                                                bdevname(rdev2->bdev,b2));
3173                                         warned = 1;
3174                                 }
3175                         }
3176                 }
3177                 if (warned)
3178                         printk(KERN_WARNING
3179                                "True protection against single-disk"
3180                                " failure might be compromised.\n");
3181         }
3182
3183         mddev->recovery = 0;
3184         mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
3185         mddev->barriers_work = 1;
3186         mddev->ok_start_degraded = start_dirty_degraded;
3187
3188         if (start_readonly)
3189                 mddev->ro = 2; /* read-only, but switch on first write */
3190
3191         err = mddev->pers->run(mddev);
3192         if (!err && mddev->pers->sync_request) {
3193                 err = bitmap_create(mddev);
3194                 if (err) {
3195                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3196                                mdname(mddev), err);
3197                         mddev->pers->stop(mddev);
3198                 }
3199         }
3200         if (err) {
3201                 printk(KERN_ERR "md: pers->run() failed ...\n");
3202                 module_put(mddev->pers->owner);
3203                 mddev->pers = NULL;
3204                 bitmap_destroy(mddev);
3205                 return err;
3206         }
3207         if (mddev->pers->sync_request) {
3208                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3209                         printk(KERN_WARNING
3210                                "md: cannot register extra attributes for %s\n",
3211                                mdname(mddev));
3212         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3213                 mddev->ro = 0;
3214
3215         atomic_set(&mddev->writes_pending,0);
3216         mddev->safemode = 0;
3217         mddev->safemode_timer.function = md_safemode_timeout;
3218         mddev->safemode_timer.data = (unsigned long) mddev;
3219         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3220         mddev->in_sync = 1;
3221
3222         ITERATE_RDEV(mddev,rdev,tmp)
3223                 if (rdev->raid_disk >= 0) {
3224                         char nm[20];
3225                         sprintf(nm, "rd%d", rdev->raid_disk);
3226                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3227                                 printk("md: cannot register %s for %s\n",
3228                                        nm, mdname(mddev));
3229                 }
3230
3231         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3232
3233         if (mddev->flags)
3234                 md_update_sb(mddev, 0);
3235
3236         set_capacity(disk, mddev->array_size<<1);
3237
3238         /* If we call blk_queue_make_request here, it will
3239          * re-initialise max_sectors etc which may have been
3240          * refined inside -> run.  So just set the bits we need to set.
3241          * Most initialisation happened when we called
3242          * blk_queue_make_request(..., md_fail_request)
3243          * earlier.
3244          */
3245         mddev->queue->queuedata = mddev;
3246         mddev->queue->make_request_fn = mddev->pers->make_request;
3247
3248         /* If there is a partially-recovered drive we need to
3249          * start recovery here.  If we leave it to md_check_recovery,
3250          * it will remove the drives and not do the right thing
3251          */
3252         if (mddev->degraded && !mddev->sync_thread) {
3253                 struct list_head *rtmp;
3254                 int spares = 0;
3255                 ITERATE_RDEV(mddev,rdev,rtmp)
3256                         if (rdev->raid_disk >= 0 &&
3257                             !test_bit(In_sync, &rdev->flags) &&
3258                             !test_bit(Faulty, &rdev->flags))
3259                                 /* complete an interrupted recovery */
3260                                 spares++;
3261                 if (spares && mddev->pers->sync_request) {
3262                         mddev->recovery = 0;
3263                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3264                         mddev->sync_thread = md_register_thread(md_do_sync,
3265                                                                 mddev,
3266                                                                 "%s_resync");
3267                         if (!mddev->sync_thread) {
3268                                 printk(KERN_ERR "%s: could not start resync"
3269                                        " thread...\n",
3270                                        mdname(mddev));
3271                                 /* leave the spares where they are, it shouldn't hurt */
3272                                 mddev->recovery = 0;
3273                         }
3274                 }
3275         }
3276         md_wakeup_thread(mddev->thread);
3277         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3278
3279         mddev->changed = 1;
3280         md_new_event(mddev);
3281         kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3282         return 0;
3283 }
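/*
 * The safemode delay set up in do_md_run() is in jiffies: at HZ == 250,
 * (200 * HZ) / 1000 + 1 == 51 jiffies, just over the intended 200 msec.
 * Once writes drain and the timer fires, md_safemode_timeout() above sets
 * mddev->safemode so the md thread can mark the array clean again.
 */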
3284
3285 static int restart_array(mddev_t *mddev)
3286 {
3287         struct gendisk *disk = mddev->gendisk;
3288         int err;
3289
3290         /*
3291          * Complain if it has no devices
3292          */
3293         err = -ENXIO;
3294         if (list_empty(&mddev->disks))
3295                 goto out;
3296
3297         if (mddev->pers) {
3298                 err = -EBUSY;
3299                 if (!mddev->ro)
3300                         goto out;
3301
3302                 mddev->safemode = 0;
3303                 mddev->ro = 0;
3304                 set_disk_ro(disk, 0);
3305
3306                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3307                         mdname(mddev));
3308                 /*
3309                  * Kick recovery or resync if necessary
3310                  */
3311                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3312                 md_wakeup_thread(mddev->thread);
3313                 md_wakeup_thread(mddev->sync_thread);
3314                 err = 0;
3315         } else
3316                 err = -EINVAL;
3317
3318 out:
3319         return err;
3320 }
3321
3322 /* similar to deny_write_access, but accounts for our holding a reference
3323  * to the file ourselves */
3324 static int deny_bitmap_write_access(struct file * file)
3325 {
3326         struct inode *inode = file->f_mapping->host;
3327
3328         spin_lock(&inode->i_lock);
3329         if (atomic_read(&inode->i_writecount) > 1) {
3330                 spin_unlock(&inode->i_lock);
3331                 return -ETXTBSY;
3332         }
3333         atomic_set(&inode->i_writecount, -1);
3334         spin_unlock(&inode->i_lock);
3335
3336         return 0;
3337 }
3338
3339 static void restore_bitmap_write_access(struct file *file)
3340 {
3341         struct inode *inode = file->f_mapping->host;
3342
3343         spin_lock(&inode->i_lock);
3344         atomic_set(&inode->i_writecount, 1);
3345         spin_unlock(&inode->i_lock);
3346 }
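/*
 * The i_writecount protocol used by the two helpers above mirrors
 * deny_write_access() for ETXTBSY: a negative count means writes are
 * denied.  Because md itself holds a reference to the bitmap file, a
 * count of exactly 1 (only us) is the largest value that may safely be
 * replaced by -1; anything greater means another writer has the file open.
 */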
3347
3348 /* mode:
3349  *   0 - completely stop and disassemble array
3350  *   1 - switch to readonly
3351  *   2 - stop but do not disassemble array
3352  */
3353 static int do_md_stop(mddev_t * mddev, int mode)
3354 {
3355         int err = 0;
3356         struct gendisk *disk = mddev->gendisk;
3357
3358         if (mddev->pers) {
3359                 if (atomic_read(&mddev->active)>2) {
3360                         printk("md: %s still in use.\n",mdname(mddev));
3361                         return -EBUSY;
3362                 }
3363
3364                 if (mddev->sync_thread) {
3365                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3366                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3367                         md_unregister_thread(mddev->sync_thread);
3368                         mddev->sync_thread = NULL;
3369                 }
3370
3371                 del_timer_sync(&mddev->safemode_timer);
3372
3373                 invalidate_partition(disk, 0);
3374
3375                 switch(mode) {
3376                 case 1: /* readonly */
3377                         err  = -ENXIO;
3378                         if (mddev->ro==1)
3379                                 goto out;
3380                         mddev->ro = 1;
3381                         break;
3382                 case 0: /* disassemble */
3383                 case 2: /* stop */
3384                         bitmap_flush(mddev);
3385                         md_super_wait(mddev);
3386                         if (mddev->ro)
3387                                 set_disk_ro(disk, 0);
3388                         blk_queue_make_request(mddev->queue, md_fail_request);
3389                         mddev->pers->stop(mddev);
3390                         mddev->queue->merge_bvec_fn = NULL;
3391                         mddev->queue->unplug_fn = NULL;
3392                         mddev->queue->issue_flush_fn = NULL;
3393                         mddev->queue->backing_dev_info.congested_fn = NULL;
3394                         if (mddev->pers->sync_request)
3395                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3396
3397                         module_put(mddev->pers->owner);
3398                         mddev->pers = NULL;
3399
3400                         set_capacity(disk, 0);
3401                         mddev->changed = 1;
3402
3403                         if (mddev->ro)
3404                                 mddev->ro = 0;
3405                 }
3406                 if (!mddev->in_sync || mddev->flags) {
3407                         /* mark array as shutdown cleanly */
3408                         mddev->in_sync = 1;
3409                         md_update_sb(mddev, 1);
3410                 }
3411                 if (mode == 1)
3412                         set_disk_ro(disk, 1);
3413                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3414         }
3415
3416         /*
3417          * Free resources if final stop
3418          */
3419         if (mode == 0) {
3420                 mdk_rdev_t *rdev;
3421                 struct list_head *tmp;
3422
3423                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3424
3425                 bitmap_destroy(mddev);
3426                 if (mddev->bitmap_file) {
3427                         restore_bitmap_write_access(mddev->bitmap_file);
3428                         fput(mddev->bitmap_file);
3429                         mddev->bitmap_file = NULL;
3430                 }
3431                 mddev->bitmap_offset = 0;
3432
3433                 ITERATE_RDEV(mddev,rdev,tmp)
3434                         if (rdev->raid_disk >= 0) {
3435                                 char nm[20];
3436                                 sprintf(nm, "rd%d", rdev->raid_disk);
3437                                 sysfs_remove_link(&mddev->kobj, nm);
3438                         }
3439
3440                 /* make sure all delayed_delete calls have finished */
3441                 flush_scheduled_work();
3442
3443                 export_array(mddev);
3444
3445                 mddev->array_size = 0;
3446                 mddev->size = 0;
3447                 mddev->raid_disks = 0;
3448                 mddev->recovery_cp = 0;
3449
3450         } else if (mddev->pers)
3451                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3452                         mdname(mddev));
3453         err = 0;
3454         md_new_event(mddev);
3455 out:
3456         return err;
3457 }
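/*
 * Hypothetical userspace sketch of the stop modes handled above
 * (illustrative only; mdadm issues the same ioctls):
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	ioctl(fd, STOP_ARRAY_RO, 0);	mode 1: switch to read-only
 *	ioctl(fd, STOP_ARRAY, 0);	mode 0: stop and disassemble
 *	close(fd);
 *
 * Mode 2 (stop without disassembling) has no ioctl of its own here; it is
 * only reached from other callers of do_md_stop() in this file.
 */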
3458
3459 #ifndef MODULE
3460 static void autorun_array(mddev_t *mddev)
3461 {
3462         mdk_rdev_t *rdev;
3463         struct list_head *tmp;
3464         int err;
3465
3466         if (list_empty(&mddev->disks))
3467                 return;
3468
3469         printk(KERN_INFO "md: running: ");
3470
3471         ITERATE_RDEV(mddev,rdev,tmp) {
3472                 char b[BDEVNAME_SIZE];
3473                 printk("<%s>", bdevname(rdev->bdev,b));
3474         }
3475         printk("\n");
3476
3477         err = do_md_run (mddev);
3478         if (err) {
3479                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3480                 do_md_stop (mddev, 0);
3481         }
3482 }
3483
3484 /*
3485  * let's try to run arrays based on all disks that have arrived
3486  * until now. (those are in pending_raid_disks)
3487  *
3488  * the method: pick the first pending disk, collect all disks with
3489  * the same UUID, remove all from the pending list and put them into
3490  * the 'same_array' list. Then order this list based on superblock
3491  * update time (freshest comes first), kick out 'old' disks and
3492  * compare superblocks. If everything's fine then run it.
3493  *
3494  * If "unit" is allocated, then bump its reference count
3495  */
3496 static void autorun_devices(int part)
3497 {
3498         struct list_head *tmp;
3499         mdk_rdev_t *rdev0, *rdev;
3500         mddev_t *mddev;
3501         char b[BDEVNAME_SIZE];
3502
3503         printk(KERN_INFO "md: autorun ...\n");
3504         while (!list_empty(&pending_raid_disks)) {
3505                 int unit;
3506                 dev_t dev;
3507                 LIST_HEAD(candidates);
3508                 rdev0 = list_entry(pending_raid_disks.next,
3509                                          mdk_rdev_t, same_set);
3510
3511                 printk(KERN_INFO "md: considering %s ...\n",
3512                         bdevname(rdev0->bdev,b));
3514                 ITERATE_RDEV_PENDING(rdev,tmp)
3515                         if (super_90_load(rdev, rdev0, 0) >= 0) {
3516                                 printk(KERN_INFO "md:  adding %s ...\n",
3517                                         bdevname(rdev->bdev,b));
3518                                 list_move(&rdev->same_set, &candidates);
3519                         }
3520                 /*
3521                  * now we have a set of devices, with all of them having
3522                  * mostly sane superblocks. It's time to allocate the
3523                  * mddev.
3524                  */
3525                 if (part) {
3526                         dev = MKDEV(mdp_major,
3527                                     rdev0->preferred_minor << MdpMinorShift);
3528                         unit = MINOR(dev) >> MdpMinorShift;
3529                 } else {
3530                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3531                         unit = MINOR(dev);
3532                 }
3533                 if (rdev0->preferred_minor != unit) {
3534                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3535                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3536                         break;
3537                 }
3538
3539                 md_probe(dev, NULL, NULL);
3540                 mddev = mddev_find(dev);
3541                 if (!mddev) {
3542                         printk(KERN_ERR 
3543                                 "md: cannot allocate memory for md drive.\n");
3544                         break;
3545                 }
3546                 if (mddev_lock(mddev)) 
3547                         printk(KERN_WARNING "md: %s locked, cannot run\n",
3548                                mdname(mddev));
3549                 else if (mddev->raid_disks || mddev->major_version
3550                          || !list_empty(&mddev->disks)) {
3551                         printk(KERN_WARNING 
3552                                 "md: %s already running, cannot run %s\n",
3553                                 mdname(mddev), bdevname(rdev0->bdev,b));
3554                         mddev_unlock(mddev);
3555                 } else {
3556                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
3557                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3558                                 list_del_init(&rdev->same_set);
3559                                 if (bind_rdev_to_array(rdev, mddev))
3560                                         export_rdev(rdev);
3561                         }
3562                         autorun_array(mddev);
3563                         mddev_unlock(mddev);
3564                 }
3565                 /* on success, candidates will be empty, on error
3566                  * it won't...
3567                  */
3568                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3569                         export_rdev(rdev);
3570                 mddev_put(mddev);
3571         }
3572         printk(KERN_INFO "md: ... autorun DONE.\n");
3573 }
3574 #endif /* !MODULE */
3575
3576 static int get_version(void __user * arg)
3577 {
3578         mdu_version_t ver;
3579
3580         ver.major = MD_MAJOR_VERSION;
3581         ver.minor = MD_MINOR_VERSION;
3582         ver.patchlevel = MD_PATCHLEVEL_VERSION;
3583
3584         if (copy_to_user(arg, &ver, sizeof(ver)))
3585                 return -EFAULT;
3586
3587         return 0;
3588 }
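/*
 * Matching userspace side of get_version() (illustrative sketch):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	mdu_version_t ver;
 *	if (ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n",
 *		       ver.major, ver.minor, ver.patchlevel);
 */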
3589
3590 static int get_array_info(mddev_t * mddev, void __user * arg)
3591 {
3592         mdu_array_info_t info;
3593         int nr, working, active, failed, spare;
3594         mdk_rdev_t *rdev;
3595         struct list_head *tmp;
3596
3597         nr = working = active = failed = spare = 0;
3598         ITERATE_RDEV(mddev,rdev,tmp) {
3599                 nr++;
3600                 if (test_bit(Faulty, &rdev->flags))
3601                         failed++;
3602                 else {
3603                         working++;
3604                         if (test_bit(In_sync, &rdev->flags))
3605                                 active++;
3606                         else
3607                                 spare++;
3608                 }
3609         }
3610
3611         info.major_version = mddev->major_version;
3612         info.minor_version = mddev->minor_version;
3613         info.patch_version = MD_PATCHLEVEL_VERSION;
3614         info.ctime         = mddev->ctime;
3615         info.level         = mddev->level;
3616         info.size          = mddev->size;
3617         if (info.size != mddev->size) /* overflow */
3618                 info.size = -1;
3619         info.nr_disks      = nr;
3620         info.raid_disks    = mddev->raid_disks;
3621         info.md_minor      = mddev->md_minor;
3622         info.not_persistent= !mddev->persistent;
3623
3624         info.utime         = mddev->utime;
3625         info.state         = 0;
3626         if (mddev->in_sync)
3627                 info.state = (1<<MD_SB_CLEAN);
3628         if (mddev->bitmap && mddev->bitmap_offset)
3629                 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* don't clobber MD_SB_CLEAN */
3630         info.active_disks  = active;
3631         info.working_disks = working;
3632         info.failed_disks  = failed;
3633         info.spare_disks   = spare;
3634
3635         info.layout        = mddev->layout;
3636         info.chunk_size    = mddev->chunk_size;
3637
3638         if (copy_to_user(arg, &info, sizeof(info)))
3639                 return -EFAULT;
3640
3641         return 0;
3642 }
3643
3644 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3645 {
3646         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3647         char *ptr, *buf = NULL;
3648         int err = -ENOMEM;
3649
3650         md_allow_write(mddev);
3651
3652         file = kmalloc(sizeof(*file), GFP_KERNEL);
3653         if (!file)
3654                 goto out;
3655
3656         /* bitmap disabled, zero the first byte and copy out */
3657         if (!mddev->bitmap || !mddev->bitmap->file) {
3658                 file->pathname[0] = '\0';
3659                 goto copy_out;
3660         }
3661
3662         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3663         if (!buf)
3664                 goto out;
3665
3666         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3667         if (!ptr)
3668                 goto out;
3669
3670         strcpy(file->pathname, ptr);
3671
3672 copy_out:
3673         err = 0;
3674         if (copy_to_user(arg, file, sizeof(*file)))
3675                 err = -EFAULT;
3676 out:
3677         kfree(buf);
3678         kfree(file);
3679         return err;
3680 }
3681
3682 static int get_disk_info(mddev_t * mddev, void __user * arg)
3683 {
3684         mdu_disk_info_t info;
3685         unsigned int nr;
3686         mdk_rdev_t *rdev;
3687
3688         if (copy_from_user(&info, arg, sizeof(info)))
3689                 return -EFAULT;
3690
3691         nr = info.number;
3692
3693         rdev = find_rdev_nr(mddev, nr);
3694         if (rdev) {
3695                 info.major = MAJOR(rdev->bdev->bd_dev);
3696                 info.minor = MINOR(rdev->bdev->bd_dev);
3697                 info.raid_disk = rdev->raid_disk;
3698                 info.state = 0;
3699                 if (test_bit(Faulty, &rdev->flags))
3700                         info.state |= (1<<MD_DISK_FAULTY);
3701                 else if (test_bit(In_sync, &rdev->flags)) {
3702                         info.state |= (1<<MD_DISK_ACTIVE);
3703                         info.state |= (1<<MD_DISK_SYNC);
3704                 }
3705                 if (test_bit(WriteMostly, &rdev->flags))
3706                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
3707         } else {
3708                 info.major = info.minor = 0;
3709                 info.raid_disk = -1;
3710                 info.state = (1<<MD_DISK_REMOVED);
3711         }
3712
3713         if (copy_to_user(arg, &info, sizeof(info)))
3714                 return -EFAULT;
3715
3716         return 0;
3717 }
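/*
 * get_disk_info() keys off info.number alone, so userspace can enumerate
 * all slots with a simple loop (illustrative sketch; mdadm walks the
 * table in essentially this way):
 *
 *	mdu_disk_info_t dinfo;
 *	int i;
 *	for (i = 0; i < MD_SB_DISKS; i++) {
 *		dinfo.number = i;
 *		if (ioctl(fd, GET_DISK_INFO, &dinfo) == 0 &&
 *		    !(dinfo.state & (1 << MD_DISK_REMOVED)))
 *			printf("slot %d: dev %d:%d state %x\n", i,
 *			       dinfo.major, dinfo.minor, dinfo.state);
 *	}
 */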
3718
3719 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3720 {
3721         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3722         mdk_rdev_t *rdev;
3723         dev_t dev = MKDEV(info->major,info->minor);
3724
3725         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3726                 return -EOVERFLOW;
3727
3728         if (!mddev->raid_disks) {
3729                 int err;
3730                 /* expecting a device which has a superblock */
3731                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3732                 if (IS_ERR(rdev)) {
3733                         printk(KERN_WARNING 
3734                                 "md: md_import_device returned %ld\n",
3735                                 PTR_ERR(rdev));
3736                         return PTR_ERR(rdev);
3737                 }
3738                 if (!list_empty(&mddev->disks)) {
3739                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3740                                                         mdk_rdev_t, same_set);
3741                         int err = super_types[mddev->major_version]
3742                                 .load_super(rdev, rdev0, mddev->minor_version);
3743                         if (err < 0) {
3744                                 printk(KERN_WARNING 
3745                                         "md: %s has different UUID to %s\n",
3746                                         bdevname(rdev->bdev,b), 
3747                                         bdevname(rdev0->bdev,b2));
3748                                 export_rdev(rdev);
3749                                 return -EINVAL;
3750                         }
3751                 }
3752                 err = bind_rdev_to_array(rdev, mddev);
3753                 if (err)
3754                         export_rdev(rdev);
3755                 return err;
3756         }
3757
3758         /*
3759          * add_new_disk can be used once the array is assembled
3760          * to add "hot spares".  They must already have a superblock
3761          * written
3762          */
3763         if (mddev->pers) {
3764                 int err;
3765                 if (!mddev->pers->hot_add_disk) {
3766                         printk(KERN_WARNING 
3767                                 "%s: personality does not support diskops!\n",
3768                                mdname(mddev));
3769                         return -EINVAL;
3770                 }
3771                 if (mddev->persistent)
3772                         rdev = md_import_device(dev, mddev->major_version,
3773                                                 mddev->minor_version);
3774                 else
3775                         rdev = md_import_device(dev, -1, -1);
3776                 if (IS_ERR(rdev)) {
3777                         printk(KERN_WARNING 
3778                                 "md: md_import_device returned %ld\n",
3779                                 PTR_ERR(rdev));
3780                         return PTR_ERR(rdev);
3781                 }
3782                 /* set save_raid_disk if appropriate */
3783                 if (!mddev->persistent) {
3784                         if (info->state & (1<<MD_DISK_SYNC)  &&
3785                             info->raid_disk < mddev->raid_disks)
3786                                 rdev->raid_disk = info->raid_disk;
3787                         else
3788                                 rdev->raid_disk = -1;
3789                 } else
3790                         super_types[mddev->major_version].
3791                                 validate_super(mddev, rdev);
3792                 rdev->saved_raid_disk = rdev->raid_disk;
3793
3794                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3795                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3796                         set_bit(WriteMostly, &rdev->flags);
3797
3798                 rdev->raid_disk = -1;
3799                 err = bind_rdev_to_array(rdev, mddev);
3800                 if (!err && !mddev->pers->hot_remove_disk) {
3801                         /* If there is hot_add_disk but no hot_remove_disk
3802                          * then added disks are for geometry changes,
3803                          * and should be added immediately.
3804                          */
3805                         super_types[mddev->major_version].
3806                                 validate_super(mddev, rdev);
3807                         err = mddev->pers->hot_add_disk(mddev, rdev);
3808                         if (err)
3809                                 unbind_rdev_from_array(rdev);
3810                 }
3811                 if (err)
3812                         export_rdev(rdev);
3813
3814                 md_update_sb(mddev, 1);
3815                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3816                 md_wakeup_thread(mddev->thread);
3817                 return err;
3818         }
3819
3820         /* otherwise, add_new_disk is only allowed
3821          * for major_version==0 superblocks
3822          */
3823         if (mddev->major_version != 0) {
3824                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3825                        mdname(mddev));
3826                 return -EINVAL;
3827         }
3828
3829         if (!(info->state & (1<<MD_DISK_FAULTY))) {
3830                 int err;
3831                 rdev = md_import_device (dev, -1, 0);
3832                 if (IS_ERR(rdev)) {
3833                         printk(KERN_WARNING 
3834                                 "md: error, md_import_device() returned %ld\n",
3835                                 PTR_ERR(rdev));
3836                         return PTR_ERR(rdev);
3837                 }
3838                 rdev->desc_nr = info->number;
3839                 if (info->raid_disk < mddev->raid_disks)
3840                         rdev->raid_disk = info->raid_disk;
3841                 else
3842                         rdev->raid_disk = -1;
3843
3844                 rdev->flags = 0;
3845
3846                 if (rdev->raid_disk < mddev->raid_disks)
3847                         if (info->state & (1<<MD_DISK_SYNC))
3848                                 set_bit(In_sync, &rdev->flags);
3849
3850                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3851                         set_bit(WriteMostly, &rdev->flags);
3852
3853                 if (!mddev->persistent) {
3854                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
3855                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3856                 } else 
3857                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3858                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3859
3860                 err = bind_rdev_to_array(rdev, mddev);
3861                 if (err) {
3862                         export_rdev(rdev);
3863                         return err;
3864                 }
3865         }
3866
3867         return 0;
3868 }
3869
3870 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3871 {
3872         char b[BDEVNAME_SIZE];
3873         mdk_rdev_t *rdev;
3874
3875         if (!mddev->pers)
3876                 return -ENODEV;
3877
3878         rdev = find_rdev(mddev, dev);
3879         if (!rdev)
3880                 return -ENXIO;
3881
3882         if (rdev->raid_disk >= 0)
3883                 goto busy;
3884
3885         kick_rdev_from_array(rdev);
3886         md_update_sb(mddev, 1);
3887         md_new_event(mddev);
3888
3889         return 0;
3890 busy:
3891         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3892                 bdevname(rdev->bdev,b), mdname(mddev));
3893         return -EBUSY;
3894 }
3895
3896 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3897 {
3898         char b[BDEVNAME_SIZE];
3899         int err;
3900         unsigned int size;
3901         mdk_rdev_t *rdev;
3902
3903         if (!mddev->pers)
3904                 return -ENODEV;
3905
3906         if (mddev->major_version != 0) {
3907                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3908                         " version-0 superblocks.\n",
3909                         mdname(mddev));
3910                 return -EINVAL;
3911         }
3912         if (!mddev->pers->hot_add_disk) {
3913                 printk(KERN_WARNING 
3914                         "%s: personality does not support diskops!\n",
3915                         mdname(mddev));
3916                 return -EINVAL;
3917         }
3918
3919         rdev = md_import_device (dev, -1, 0);
3920         if (IS_ERR(rdev)) {
3921                 printk(KERN_WARNING 
3922                         "md: error, md_import_device() returned %ld\n",
3923                         PTR_ERR(rdev));
3924                 return -EINVAL;
3925         }
3926
3927         if (mddev->persistent)
3928                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3929         else
3930                 rdev->sb_offset =
3931                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3932
3933         size = calc_dev_size(rdev, mddev->chunk_size);
3934         rdev->size = size;
3935
3936         if (test_bit(Faulty, &rdev->flags)) {
3937                 printk(KERN_WARNING 
3938                         "md: can not hot-add faulty %s disk to %s!\n",
3939                         bdevname(rdev->bdev,b), mdname(mddev));
3940                 err = -EINVAL;
3941                 goto abort_export;
3942         }
3943         clear_bit(In_sync, &rdev->flags);
3944         rdev->desc_nr = -1;
3945         rdev->saved_raid_disk = -1;
3946         err = bind_rdev_to_array(rdev, mddev);
3947         if (err)
3948                 goto abort_export;
3949
3950         /*
3951          * The rest had better be atomic; disk failures can be
3952          * noticed in interrupt context ...
3953          */
3954
3955         if (rdev->desc_nr == mddev->max_disks) {
3956                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3957                         mdname(mddev));
3958                 err = -EBUSY;
3959                 goto abort_unbind_export;
3960         }
3961
3962         rdev->raid_disk = -1;
3963
3964         md_update_sb(mddev, 1);
3965
3966         /*
3967          * Kick recovery, maybe this spare has to be added to the
3968          * array immediately.
3969          */
3970         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3971         md_wakeup_thread(mddev->thread);
3972         md_new_event(mddev);
3973         return 0;
3974
3975 abort_unbind_export:
3976         unbind_rdev_from_array(rdev);
3977
3978 abort_export:
3979         export_rdev(rdev);
3980         return err;
3981 }
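/*
 * Hypothetical userspace counterpart to the hot-plug paths above
 * (illustrative only).  Both ioctls encode the component device in the
 * argument itself rather than in a struct:
 *
 *	dev_t dev = makedev(8, 16);			e.g. /dev/sdb
 *	ioctl(fd, HOT_ADD_DISK, (unsigned long)dev);	add as a spare
 *	ioctl(fd, HOT_REMOVE_DISK, (unsigned long)dev);	only if inactive
 */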
3982
3983 static int set_bitmap_file(mddev_t *mddev, int fd)
3984 {
3985         int err;
3986
3987         if (mddev->pers) {
3988                 if (!mddev->pers->quiesce)
3989                         return -EBUSY;
3990                 if (mddev->recovery || mddev->sync_thread)
3991                         return -EBUSY;
3992                 /* we should be able to change the bitmap... */
3993         }
3994
3995
3996         if (fd >= 0) {
3997                 if (mddev->bitmap)
3998                         return -EEXIST; /* cannot add when bitmap is present */
3999                 mddev->bitmap_file = fget(fd);
4000
4001                 if (mddev->bitmap_file == NULL) {
4002                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4003                                mdname(mddev));
4004                         return -EBADF;
4005                 }
4006
4007                 err = deny_bitmap_write_access(mddev->bitmap_file);
4008                 if (err) {
4009                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4010                                mdname(mddev));
4011                         fput(mddev->bitmap_file);
4012                         mddev->bitmap_file = NULL;
4013                         return err;
4014                 }
4015                 mddev->bitmap_offset = 0; /* file overrides offset */
4016         } else if (mddev->bitmap == NULL)
4017                 return -ENOENT; /* cannot remove what isn't there */
4018         err = 0;
4019         if (mddev->pers) {
4020                 mddev->pers->quiesce(mddev, 1);
4021                 if (fd >= 0)
4022                         err = bitmap_create(mddev);
4023                 if (fd < 0 || err) {
4024                         bitmap_destroy(mddev);
4025                         fd = -1; /* make sure to put the file */
4026                 }
4027                 mddev->pers->quiesce(mddev, 0);
4028         }
4029         if (fd < 0) {
4030                 if (mddev->bitmap_file) {
4031                         restore_bitmap_write_access(mddev->bitmap_file);
4032                         fput(mddev->bitmap_file);
4033                 }
4034                 mddev->bitmap_file = NULL;
4035         }
4036
4037         return err;
4038 }
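/*
 * Driving set_bitmap_file() from userspace (hypothetical sketch; the
 * bitmap path below is made up): pass an open fd to attach an external
 * bitmap file, or -1 to detach it again.
 *
 *	int bfd = open("/var/md0-bitmap", O_RDWR);
 *	ioctl(fd, SET_BITMAP_FILE, bfd);	attach file-backed bitmap
 *	ioctl(fd, SET_BITMAP_FILE, -1);		drop it again
 */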
4039
4040 /*
4041  * set_array_info is used in two different ways.
4042  * The original usage is when creating a new array.
4043  * In this usage, raid_disks is > 0 and it together with
4044  *  level, size, not_persistent, layout, chunksize determine the
4045  *  shape of the array.
4046  *  This will always create an array with a type-0.90.0 superblock.
4047  * The newer usage is when assembling an array.
4048  *  In this case raid_disks will be 0, and the major_version field is
4049  *  used to determine which style super-blocks are to be found on the devices.
4050  *  The minor and patch _version numbers are also kept in case the
4051  *  super_block handler wishes to interpret them.
4052  */
4053 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4054 {
4055
4056         if (info->raid_disks == 0) {
4057                 /* just setting version number for superblock loading */
4058                 if (info->major_version < 0 ||
4059                     info->major_version >= ARRAY_SIZE(super_types) ||
4060                     super_types[info->major_version].name == NULL) {
4061                         /* maybe try to auto-load a module? */
4062                         printk(KERN_INFO 
4063                                 "md: superblock version %d not known\n",
4064                                 info->major_version);
4065                         return -EINVAL;
4066                 }
4067                 mddev->major_version = info->major_version;
4068                 mddev->minor_version = info->minor_version;
4069                 mddev->patch_version = info->patch_version;
4070                 mddev->persistent = !info->not_persistent;
4071                 return 0;
4072         }
4073         mddev->major_version = MD_MAJOR_VERSION;
4074         mddev->minor_version = MD_MINOR_VERSION;
4075         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4076         mddev->ctime         = get_seconds();
4077
4078         mddev->level         = info->level;
4079         mddev->clevel[0]     = 0;
4080         mddev->size          = info->size;
4081         mddev->raid_disks    = info->raid_disks;
4082         /* don't set md_minor, it is determined by which /dev/md* was
4083          * opened
4084          */
4085         if (info->state & (1<<MD_SB_CLEAN))
4086                 mddev->recovery_cp = MaxSector;
4087         else
4088                 mddev->recovery_cp = 0;
4089         mddev->persistent    = ! info->not_persistent;
4090
4091         mddev->layout        = info->layout;
4092         mddev->chunk_size    = info->chunk_size;
4093
4094         mddev->max_disks     = MD_SB_DISKS;
4095
4096         mddev->flags         = 0;
4097         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4098
4099         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4100         mddev->bitmap_offset = 0;
4101
4102         mddev->reshape_position = MaxSector;
4103
4104         /*
4105          * Generate a 128 bit UUID
4106          */
4107         get_random_bytes(mddev->uuid, 16);
4108
4109         mddev->new_level = mddev->level;
4110         mddev->new_chunk = mddev->chunk_size;
4111         mddev->new_layout = mddev->layout;
4112         mddev->delta_disks = 0;
4113
4114         return 0;
4115 }
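/*
 * The two usages described above map to two userspace call sequences
 * (illustrative sketch, close to what mdadm does).  Assembling from
 * existing superblocks:
 *
 *	mdu_array_info_t info = { 0 };
 *	info.major_version = 0;
 *	info.minor_version = 90;		raid_disks stays 0
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *	... one ADD_NEW_DISK per component, then RUN_ARRAY ...
 *
 * Creating a fresh v0.90 array instead passes raid_disks > 0 together
 * with level, size, layout and chunk_size.
 */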
4116
4117 static int update_size(mddev_t *mddev, unsigned long size)
4118 {
4119         mdk_rdev_t * rdev;
4120         int rv;
4121         struct list_head *tmp;
4122         int fit = (size == 0);
4123
4124         if (mddev->pers->resize == NULL)
4125                 return -EINVAL;
4126         /* The "size" is the amount of each device that is used.
4127          * This can only make sense for arrays with redundancy.
4128          * linear and raid0 always use whatever space is available.
4129          * We can only consider changing the size if no resync
4130          * or reconstruction is happening, and if the new size
4131          * is acceptable. It must fit before the sb_offset or,
4132          * if that is <data_offset, it must fit before the
4133          * size of each device.
4134          * If size is zero, we find the largest size that fits.
4135          */
4136         if (mddev->sync_thread)
4137                 return -EBUSY;
4138         ITERATE_RDEV(mddev,rdev,tmp) {
4139                 sector_t avail;
4140                 avail = rdev->size * 2;
4141
4142                 if (fit && (size == 0 || size > avail/2))
4143                         size = avail/2;
4144                 if (avail < ((sector_t)size << 1))
4145                         return -ENOSPC;
4146         }
4147         rv = mddev->pers->resize(mddev, (sector_t)size *2);
4148         if (!rv) {
4149                 struct block_device *bdev;
4150
4151                 bdev = bdget_disk(mddev->gendisk, 0);
4152                 if (bdev) {
4153                         mutex_lock(&bdev->bd_inode->i_mutex);
4154                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4155                         mutex_unlock(&bdev->bd_inode->i_mutex);
4156                         bdput(bdev);
4157                 }
4158         }
4159         return rv;
4160 }
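/*
 * Units in update_size() are easy to trip over: "size" and rdev->size are
 * both in KiB per component device, and sectors are obtained by shifting
 * left one bit.  For a 100GiB component, avail = rdev->size * 2 =
 * 209715200 sectors, and ->resize() is called with (sector_t)size * 2.
 * A size of 0 requests the largest value every device can accommodate.
 */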
4161
4162 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4163 {
4164         int rv;
4165         /* change the number of raid disks */
4166         if (mddev->pers->check_reshape == NULL)
4167                 return -EINVAL;
4168         if (raid_disks <= 0 ||
4169             raid_disks >= mddev->max_disks)
4170                 return -EINVAL;
4171         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4172                 return -EBUSY;
4173         mddev->delta_disks = raid_disks - mddev->raid_disks;
4174
4175         rv = mddev->pers->check_reshape(mddev);
4176         return rv;
4177 }
4178
4179
4180 /*
4181  * update_array_info is used to change the configuration of an
4182  * on-line array.
4183  * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
4184  * fields in the info are checked against the array.
4185  * Any differences that cannot be handled will cause an error.
4186  * Normally, only one change can be managed at a time.
4187  */
4188 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4189 {
4190         int rv = 0;
4191         int cnt = 0;
4192         int state = 0;
4193
4194         /* calculate expected state, ignoring low bits */
4195         if (mddev->bitmap && mddev->bitmap_offset)
4196                 state |= (1 << MD_SB_BITMAP_PRESENT);
4197
4198         if (mddev->major_version != info->major_version ||
4199             mddev->minor_version != info->minor_version ||
4200 /*          mddev->patch_version != info->patch_version || */
4201             mddev->ctime         != info->ctime         ||
4202             mddev->level         != info->level         ||
4203 /*          mddev->layout        != info->layout        || */
4204             !mddev->persistent   != info->not_persistent||
4205             mddev->chunk_size    != info->chunk_size    ||
4206             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4207             ((state^info->state) & 0xfffffe00)
4208                 )
4209                 return -EINVAL;
4210         /* Check there is only one change */
4211         if (info->size >= 0 && mddev->size != info->size) cnt++;
4212         if (mddev->raid_disks != info->raid_disks) cnt++;
4213         if (mddev->layout != info->layout) cnt++;
4214         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4215         if (cnt == 0) return 0;
4216         if (cnt > 1) return -EINVAL;
4217
4218         if (mddev->layout != info->layout) {
4219                 /* Change layout
4220                  * we don't need to do anything at the md level, the
4221                  * personality will take care of it all.
4222                  */
4223                 if (mddev->pers->reconfig == NULL)
4224                         return -EINVAL;
4225                 else
4226                         return mddev->pers->reconfig(mddev, info->layout, -1);
4227         }
4228         if (info->size >= 0 && mddev->size != info->size)
4229                 rv = update_size(mddev, info->size);
4230
4231         if (mddev->raid_disks    != info->raid_disks)
4232                 rv = update_raid_disks(mddev, info->raid_disks);
4233
4234         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4235                 if (mddev->pers->quiesce == NULL)
4236                         return -EINVAL;
4237                 if (mddev->recovery || mddev->sync_thread)
4238                         return -EBUSY;
4239                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4240                         /* add the bitmap */
4241                         if (mddev->bitmap)
4242                                 return -EEXIST;
4243                         if (mddev->default_bitmap_offset == 0)
4244                                 return -EINVAL;
4245                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4246                         mddev->pers->quiesce(mddev, 1);
4247                         rv = bitmap_create(mddev);
4248                         if (rv)
4249                                 bitmap_destroy(mddev);
4250                         mddev->pers->quiesce(mddev, 0);
4251                 } else {
4252                         /* remove the bitmap */
4253                         if (!mddev->bitmap)
4254                                 return -ENOENT;
4255                         if (mddev->bitmap->file)
4256                                 return -EINVAL;
4257                         mddev->pers->quiesce(mddev, 1);
4258                         bitmap_destroy(mddev);
4259                         mddev->pers->quiesce(mddev, 0);
4260                         mddev->bitmap_offset = 0;
4261                 }
4262         }
4263         md_update_sb(mddev, 1);
4264         return rv;
4265 }
4266
4267 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4268 {
4269         mdk_rdev_t *rdev;
4270
4271         if (mddev->pers == NULL)
4272                 return -ENODEV;
4273
4274         rdev = find_rdev(mddev, dev);
4275         if (!rdev)
4276                 return -ENODEV;
4277
4278         md_error(mddev, rdev);
4279         return 0;
4280 }
4281
4282 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4283 {
4284         mddev_t *mddev = bdev->bd_disk->private_data;
4285
4286         geo->heads = 2;
4287         geo->sectors = 4;
4288         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4289         return 0;
4290 }
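/*
 * The fake geometry above keeps the arithmetic trivial: 2 heads * 4
 * sectors = 8 sectors per cylinder, hence cylinders = capacity / 8.
 * A 1TiB array (2147483648 sectors) thus reports 268435456 cylinders,
 * which is what the CHS comment in md_ioctl() below jokes about.
 */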
4291
4292 static int md_ioctl(struct inode *inode, struct file *file,
4293                         unsigned int cmd, unsigned long arg)
4294 {
4295         int err = 0;
4296         void __user *argp = (void __user *)arg;
4297         mddev_t *mddev = NULL;
4298
4299         if (!capable(CAP_SYS_ADMIN))
4300                 return -EACCES;
4301
4302         /*
4303          * Commands dealing with the RAID driver but not any
4304          * particular array:
4305          */
4306         switch (cmd)
4307         {
4308                 case RAID_VERSION:
4309                         err = get_version(argp);
4310                         goto done;
4311
4312                 case PRINT_RAID_DEBUG:
4313                         err = 0;
4314                         md_print_devices();
4315                         goto done;
4316
4317 #ifndef MODULE
4318                 case RAID_AUTORUN:
4319                         err = 0;
4320                         autostart_arrays(arg);
4321                         goto done;
4322 #endif
4323                 default:;
4324         }
4325
4326         /*
4327          * Commands creating/starting a new array:
4328          */
4329
4330         mddev = inode->i_bdev->bd_disk->private_data;
4331
4332         if (!mddev) {
4333                 BUG();
4334                 goto abort;
4335         }
4336
4337         err = mddev_lock(mddev);
4338         if (err) {
4339                 printk(KERN_INFO 
4340                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4341                         err, cmd);
4342                 goto abort;
4343         }
4344
4345         switch (cmd)
4346         {
4347                 case SET_ARRAY_INFO:
4348                         {
4349                                 mdu_array_info_t info;
4350                                 if (!arg)
4351                                         memset(&info, 0, sizeof(info));
4352                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4353                                         err = -EFAULT;
4354                                         goto abort_unlock;
4355                                 }
4356                                 if (mddev->pers) {
4357                                         err = update_array_info(mddev, &info);
4358                                         if (err) {
4359                                                 printk(KERN_WARNING "md: couldn't update"
4360                                                        " array info. %d\n", err);
4361                                                 goto abort_unlock;
4362                                         }
4363                                         goto done_unlock;
4364                                 }
4365                                 if (!list_empty(&mddev->disks)) {
4366                                         printk(KERN_WARNING
4367                                                "md: array %s already has disks!\n",
4368                                                mdname(mddev));
4369                                         err = -EBUSY;
4370                                         goto abort_unlock;
4371                                 }
4372                                 if (mddev->raid_disks) {
4373                                         printk(KERN_WARNING
4374                                                "md: array %s already initialised!\n",
4375                                                mdname(mddev));
4376                                         err = -EBUSY;
4377                                         goto abort_unlock;
4378                                 }
4379                                 err = set_array_info(mddev, &info);
4380                                 if (err) {
4381                                         printk(KERN_WARNING "md: couldn't set"
4382                                                " array info. %d\n", err);
4383                                         goto abort_unlock;
4384                                 }
4385                         }
4386                         goto done_unlock;
4387
4388                 default:;
4389         }
4390
4391         /*
4392          * Commands querying/configuring an existing array:
4393          */
4394         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4395          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4396         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4397                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4398                         && cmd != GET_BITMAP_FILE) {
4399                 err = -ENODEV;
4400                 goto abort_unlock;
4401         }
4402
4403         /*
4404          * Commands even a read-only array can execute:
4405          */
4406         switch (cmd)
4407         {
4408                 case GET_ARRAY_INFO:
4409                         err = get_array_info(mddev, argp);
4410                         goto done_unlock;
4411
4412                 case GET_BITMAP_FILE:
4413                         err = get_bitmap_file(mddev, argp);
4414                         goto done_unlock;
4415
4416                 case GET_DISK_INFO:
4417                         err = get_disk_info(mddev, argp);
4418                         goto done_unlock;
4419
4420                 case RESTART_ARRAY_RW:
4421                         err = restart_array(mddev);
4422                         goto done_unlock;
4423
4424                 case STOP_ARRAY:
4425                         err = do_md_stop (mddev, 0);
4426                         goto done_unlock;
4427
4428                 case STOP_ARRAY_RO:
4429                         err = do_md_stop(mddev, 1);
4430                         goto done_unlock;
4431
4432         /*
4433          * We have a problem here: there is no easy way to give a CHS
4434          * virtual geometry. We currently pretend that we have 2 heads and
4435          * 4 sectors (with a BIG number of cylinders...); this drives
4436          * dosfs just mad... ;-)  (See the illustrative ->getgeo sketch after md_fops below.)
4437          */
4438         }
4439
4440         /*
4441          * The remaining ioctls are changing the state of the
4442          * superblock, so we do not allow them on read-only arrays.
4443          * However non-MD ioctls (e.g. get-size) will still come through
4444          * here and hit the 'default' below, so only disallow
4445          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4446          */
4447         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4448             mddev->ro && mddev->pers) {
4449                 if (mddev->ro == 2) {
4450                         mddev->ro = 0;
4451                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4452                         md_wakeup_thread(mddev->thread);
4453
4454                 } else {
4455                         err = -EROFS;
4456                         goto abort_unlock;
4457                 }
4458         }
4459
4460         switch (cmd)
4461         {
4462                 case ADD_NEW_DISK:
4463                 {
4464                         mdu_disk_info_t info;
4465                         if (copy_from_user(&info, argp, sizeof(info)))
4466                                 err = -EFAULT;
4467                         else
4468                                 err = add_new_disk(mddev, &info);
4469                         goto done_unlock;
4470                 }
4471
4472                 case HOT_REMOVE_DISK:
4473                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4474                         goto done_unlock;
4475
4476                 case HOT_ADD_DISK:
4477                         err = hot_add_disk(mddev, new_decode_dev(arg));
4478                         goto done_unlock;
4479
4480                 case SET_DISK_FAULTY:
4481                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4482                         goto done_unlock;
4483
4484                 case RUN_ARRAY:
4485                         err = do_md_run(mddev);
4486                         goto done_unlock;
4487
4488                 case SET_BITMAP_FILE:
4489                         err = set_bitmap_file(mddev, (int)arg);
4490                         goto done_unlock;
4491
4492                 default:
4493                         err = -EINVAL;
4494                         goto abort_unlock;
4495         }
4496
4497 done_unlock:
4498 abort_unlock:
4499         mddev_unlock(mddev);
4500
4501         return err;
4502 done:
4503         if (err)
4504                 MD_BUG();
4505 abort:
4506         return err;
4507 }
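/*
 * Illustrative sketch (editor's addition, not part of the driver): how a
 * userspace tool might exercise one of the read-only ioctls handled above.
 * GET_ARRAY_INFO and mdu_array_info_t come from <linux/raid/md_u.h>; the
 * device path is only an example.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int dump_array_info(const char *path)
 *	{
 *		mdu_array_info_t info;
 *		int fd = open(path, O_RDONLY);	// e.g. "/dev/md0"
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *			printf("level %d, %d raid disks, %d active\n",
 *			       info.level, info.raid_disks, info.active_disks);
 *		close(fd);
 *		return 0;
 *	}
 */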
4508
4509 static int md_open(struct inode *inode, struct file *file)
4510 {
4511         /*
4512          * Succeed if we can lock the mddev, which confirms that
4513          * it isn't being stopped right now.
4514          */
4515         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4516         int err;
4517
4518         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4519                 goto out;
4520
4521         err = 0;
4522         mddev_get(mddev);
4523         mddev_unlock(mddev);
4524
4525         check_disk_change(inode->i_bdev);
4526  out:
4527         return err;
4528 }
4529
4530 static int md_release(struct inode *inode, struct file *file)
4531 {
4532         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4533
4534         BUG_ON(!mddev);
4535         mddev_put(mddev);
4536
4537         return 0;
4538 }
4539
4540 static int md_media_changed(struct gendisk *disk)
4541 {
4542         mddev_t *mddev = disk->private_data;
4543
4544         return mddev->changed;
4545 }
4546
4547 static int md_revalidate(struct gendisk *disk)
4548 {
4549         mddev_t *mddev = disk->private_data;
4550
4551         mddev->changed = 0;
4552         return 0;
4553 }
4554 static struct block_device_operations md_fops =
4555 {
4556         .owner          = THIS_MODULE,
4557         .open           = md_open,
4558         .release        = md_release,
4559         .ioctl          = md_ioctl,
4560         .getgeo         = md_getgeo,
4561         .media_changed  = md_media_changed,
4562         .revalidate_disk= md_revalidate,
4563 };
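/*
 * Editor's note, illustrative only: md_getgeo above is defined earlier in
 * this file.  Consistent with the CHS comment in md_ioctl() (2 heads,
 * 4 sectors, hence 8 sectors per cylinder), a sketch of such a ->getgeo
 * method would look essentially like:
 *
 *	static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 *	{
 *		mddev_t *mddev = bdev->bd_disk->private_data;
 *
 *		geo->heads = 2;
 *		geo->sectors = 4;
 *		geo->cylinders = get_capacity(mddev->gendisk) / 8;
 *		return 0;
 *	}
 */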
4564
4565 static int md_thread(void * arg)
4566 {
4567         mdk_thread_t *thread = arg;
4568
4569         /*
4570          * md_thread is a 'system thread'; its priority should be very
4571          * high. We avoid resource deadlocks individually in each
4572          * raid personality. (RAID5 does preallocation) We also use RR and
4573          * the very same RT priority as kswapd, thus we will never get
4574          * into a priority inversion deadlock.
4575          *
4576          * we definitely have to have equal or higher priority than
4577          * bdflush, otherwise bdflush will deadlock if there are too
4578          * many dirty RAID5 blocks.
4579          */
4580
4581         current->flags |= PF_NOFREEZE;
4582         allow_signal(SIGKILL);
4583         while (!kthread_should_stop()) {
4584
4585                 /* We need to wait INTERRUPTIBLE so that
4586                  * we don't add to the load-average.
4587                  * That means we need to be sure no signals are
4588                  * pending
4589                  */
4590                 if (signal_pending(current))
4591                         flush_signals(current);
4592
4593                 wait_event_interruptible_timeout
4594                         (thread->wqueue,
4595                          test_bit(THREAD_WAKEUP, &thread->flags)
4596                          || kthread_should_stop(),
4597                          thread->timeout);
4598
4599                 clear_bit(THREAD_WAKEUP, &thread->flags);
4600
4601                 thread->run(thread->mddev);
4602         }
4603
4604         return 0;
4605 }
4606
4607 void md_wakeup_thread(mdk_thread_t *thread)
4608 {
4609         if (thread) {
4610                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4611                 set_bit(THREAD_WAKEUP, &thread->flags);
4612                 wake_up(&thread->wqueue);
4613         }
4614 }
4615
4616 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4617                                  const char *name)
4618 {
4619         mdk_thread_t *thread;
4620
4621         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4622         if (!thread)
4623                 return NULL;
4624
4625         init_waitqueue_head(&thread->wqueue);
4626
4627         thread->run = run;
4628         thread->mddev = mddev;
4629         thread->timeout = MAX_SCHEDULE_TIMEOUT;
4630         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4631         if (IS_ERR(thread->tsk)) {
4632                 kfree(thread);
4633                 return NULL;
4634         }
4635         return thread;
4636 }
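/*
 * Usage sketch (editor's addition): callers pass a 'run' callback plus a
 * printf-style name containing %s for the array name, and later poke the
 * thread with md_wakeup_thread().  This mirrors the "%s_resync" registration
 * in md_check_recovery() below; my_run here is hypothetical.
 *
 *	static void my_run(mddev_t *mddev)
 *	{
 *		// called each time THREAD_WAKEUP gets set on the thread
 *	}
 *	...
 *	mddev->thread = md_register_thread(my_run, mddev, "%s_example");
 *	if (mddev->thread)
 *		md_wakeup_thread(mddev->thread);
 */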
4637
4638 void md_unregister_thread(mdk_thread_t *thread)
4639 {
4640         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4641
4642         kthread_stop(thread->tsk);
4643         kfree(thread);
4644 }
4645
4646 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4647 {
4648         if (!mddev) {
4649                 MD_BUG();
4650                 return;
4651         }
4652
4653         if (!rdev || test_bit(Faulty, &rdev->flags))
4654                 return;
4655 /*
4656         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4657                 mdname(mddev),
4658                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4659                 __builtin_return_address(0),__builtin_return_address(1),
4660                 __builtin_return_address(2),__builtin_return_address(3));
4661 */
4662         if (!mddev->pers)
4663                 return;
4664         if (!mddev->pers->error_handler)
4665                 return;
4666         mddev->pers->error_handler(mddev,rdev);
4667         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4668         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4669         md_wakeup_thread(mddev->thread);
4670         md_new_event_inintr(mddev);
4671 }
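/*
 * Usage sketch (editor's addition): a personality typically calls md_error()
 * from its IO completion path when a write to a member device fails; the
 * error_handler it supplied marks the rdev Faulty, and the wakeup above lets
 * the array thread pick up any needed recovery.  Hypothetical fragment:
 *
 *	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 *		md_error(mddev, rdev);
 */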
4672
4673 /* seq_file implementation /proc/mdstat */
4674
4675 static void status_unused(struct seq_file *seq)
4676 {
4677         int i = 0;
4678         mdk_rdev_t *rdev;
4679         struct list_head *tmp;
4680
4681         seq_printf(seq, "unused devices: ");
4682
4683         ITERATE_RDEV_PENDING(rdev,tmp) {
4684                 char b[BDEVNAME_SIZE];
4685                 i++;
4686                 seq_printf(seq, "%s ",
4687                               bdevname(rdev->bdev,b));
4688         }
4689         if (!i)
4690                 seq_printf(seq, "<none>");
4691
4692         seq_printf(seq, "\n");
4693 }
4694
4695
4696 static void status_resync(struct seq_file *seq, mddev_t *mddev)
4697 {
4698         sector_t max_blocks, resync, res;
4699         unsigned long dt, db, rt;
4700         int scale;
4701         unsigned int per_milli;
4702
4703         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4704
4705         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4706                 max_blocks = mddev->resync_max_sectors >> 1;
4707         else
4708                 max_blocks = mddev->size;
4709
4710         /*
4711          * Should not happen.
4712          */
4713         if (!max_blocks) {
4714                 MD_BUG();
4715                 return;
4716         }
4717         /* Pick 'scale' such that (resync>>scale)*1000 will fit
4718          * in a sector_t, and (max_blocks>>scale) will fit in a
4719          * u32, as those are the requirements for sector_div.
4720          * Thus 'scale' must be at least 10
4721          */
4722         scale = 10;
4723         if (sizeof(sector_t) > sizeof(unsigned long)) {
4724                 while ( max_blocks/2 > (1ULL<<(scale+32)))
4725                         scale++;
4726         }
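        /*
         * Worked example (editor's addition): for a 16TB array,
         * max_blocks = 2^34 1K blocks, so max_blocks/2 = 2^33 never
         * exceeds 1ULL<<42 and scale stays at 10.  Then
         * max_blocks>>10 = 2^24 fits a u32, and (resync>>10)*1000
         * stays below 2^34, well inside a sector_t; 'res' below is
         * therefore progress in thousandths.
         */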
4727         res = (resync>>scale)*1000;
4728         sector_div(res, (u32)((max_blocks>>scale)+1));
4729
4730         per_milli = res;
4731         {
4732                 int i, x = per_milli/50, y = 20-x;
4733                 seq_printf(seq, "[");
4734                 for (i = 0; i < x; i++)
4735                         seq_printf(seq, "=");
4736                 seq_printf(seq, ">");
4737                 for (i = 0; i < y; i++)
4738                         seq_printf(seq, ".");
4739                 seq_printf(seq, "] ");
4740         }
4741         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4742                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4743                     "reshape" :
4744                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4745                      "check" :
4746                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4747                       "resync" : "recovery"))),
4748                    per_milli/10, per_milli % 10,
4749                    (unsigned long long) resync,
4750                    (unsigned long long) max_blocks);
4751
4752         /*
4753          * We do not want to overflow, so the order of operands and
4754          * the * 100 / 100 trick are important. We do a +1 to be
4755          * safe against division by zero. We only estimate anyway.
4756          *
4757          * dt: time from mark until now
4758          * db: blocks written from mark until now
4759          * rt: remaining time
4760          */
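        /*
         * Worked example (editor's addition, assumed figures): with dt = 30s
         * and db = 614400 sectors since the mark (i.e. 10240 KB/sec) and
         * 3,000,000 blocks still to go, rt = (30 * (3000000/3073))/100 ~= 292
         * seconds, printed below as "finish=4.8min".
         */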
4761         dt = ((jiffies - mddev->resync_mark) / HZ);
4762         if (!dt) dt++;
4763         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4764                 - mddev->resync_mark_cnt;
4765         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4766
4767         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4768
4769         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4770 }
4771
4772 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4773 {
4774         struct list_head *tmp;
4775         loff_t l = *pos;
4776         mddev_t *mddev;
4777
4778         if (l >= 0x10000)
4779                 return NULL;
4780         if (!l--)
4781                 /* header */
4782                 return (void*)1;
4783
4784         spin_lock(&all_mddevs_lock);
4785         list_for_each(tmp,&all_mddevs)
4786                 if (!l--) {
4787                         mddev = list_entry(tmp, mddev_t, all_mddevs);
4788                         mddev_get(mddev);
4789                         spin_unlock(&all_mddevs_lock);
4790                         return mddev;
4791                 }
4792         spin_unlock(&all_mddevs_lock);
4793         if (!l--)
4794                 return (void*)2;/* tail */
4795         return NULL;
4796 }
4797
4798 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4799 {
4800         struct list_head *tmp;
4801         mddev_t *next_mddev, *mddev = v;
4802         
4803         ++*pos;
4804         if (v == (void*)2)
4805                 return NULL;
4806
4807         spin_lock(&all_mddevs_lock);
4808         if (v == (void*)1)
4809                 tmp = all_mddevs.next;
4810         else
4811                 tmp = mddev->all_mddevs.next;
4812         if (tmp != &all_mddevs)
4813                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4814         else {
4815                 next_mddev = (void*)2;
4816                 *pos = 0x10000;
4817         }               
4818         spin_unlock(&all_mddevs_lock);
4819
4820         if (v != (void*)1)
4821                 mddev_put(mddev);
4822         return next_mddev;
4823
4824 }
4825
4826 static void md_seq_stop(struct seq_file *seq, void *v)
4827 {
4828         mddev_t *mddev = v;
4829
4830         if (mddev && v != (void*)1 && v != (void*)2)
4831                 mddev_put(mddev);
4832 }
4833
4834 struct mdstat_info {
4835         int event;
4836 };
4837
4838 static int md_seq_show(struct seq_file *seq, void *v)
4839 {
4840         mddev_t *mddev = v;
4841         sector_t size;
4842         struct list_head *tmp2;
4843         mdk_rdev_t *rdev;
4844         struct mdstat_info *mi = seq->private;
4845         struct bitmap *bitmap;
4846
4847         if (v == (void*)1) {
4848                 struct mdk_personality *pers;
4849                 seq_printf(seq, "Personalities : ");
4850                 spin_lock(&pers_lock);
4851                 list_for_each_entry(pers, &pers_list, list)
4852                         seq_printf(seq, "[%s] ", pers->name);
4853
4854                 spin_unlock(&pers_lock);
4855                 seq_printf(seq, "\n");
4856                 mi->event = atomic_read(&md_event_count);
4857                 return 0;
4858         }
4859         if (v == (void*)2) {
4860                 status_unused(seq);
4861                 return 0;
4862         }
4863
4864         if (mddev_lock(mddev) < 0)
4865                 return -EINTR;
4866
4867         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4868                 seq_printf(seq, "%s : %sactive", mdname(mddev),
4869                                                 mddev->pers ? "" : "in");
4870                 if (mddev->pers) {
4871                         if (mddev->ro==1)
4872                                 seq_printf(seq, " (read-only)");
4873                         if (mddev->ro==2)
4874                                 seq_printf(seq, " (auto-read-only)");
4875                         seq_printf(seq, " %s", mddev->pers->name);
4876                 }
4877
4878                 size = 0;
4879                 ITERATE_RDEV(mddev,rdev,tmp2) {
4880                         char b[BDEVNAME_SIZE];
4881                         seq_printf(seq, " %s[%d]",
4882                                 bdevname(rdev->bdev,b), rdev->desc_nr);
4883                         if (test_bit(WriteMostly, &rdev->flags))
4884                                 seq_printf(seq, "(W)");
4885                         if (test_bit(Faulty, &rdev->flags)) {
4886                                 seq_printf(seq, "(F)");
4887                                 continue;
4888                         } else if (rdev->raid_disk < 0)
4889                                 seq_printf(seq, "(S)"); /* spare */
4890                         size += rdev->size;
4891                 }
4892
4893                 if (!list_empty(&mddev->disks)) {
4894                         if (mddev->pers)
4895                                 seq_printf(seq, "\n      %llu blocks",
4896                                         (unsigned long long)mddev->array_size);
4897                         else
4898                                 seq_printf(seq, "\n      %llu blocks",
4899                                         (unsigned long long)size);
4900                 }
4901                 if (mddev->persistent) {
4902                         if (mddev->major_version != 0 ||
4903                             mddev->minor_version != 90) {
4904                                 seq_printf(seq," super %d.%d",
4905                                            mddev->major_version,
4906                                            mddev->minor_version);
4907                         }
4908                 } else
4909                         seq_printf(seq, " super non-persistent");
4910
4911                 if (mddev->pers) {
4912                         mddev->pers->status(seq, mddev);
4913                         seq_printf(seq, "\n      ");
4914                         if (mddev->pers->sync_request) {
4915                                 if (mddev->curr_resync > 2) {
4916                                         status_resync(seq, mddev);
4917                                         seq_printf(seq, "\n      ");
4918                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4919                                         seq_printf(seq, "\tresync=DELAYED\n      ");
4920                                 else if (mddev->recovery_cp < MaxSector)
4921                                         seq_printf(seq, "\tresync=PENDING\n      ");
4922                         }
4923                 } else
4924                         seq_printf(seq, "\n       ");
4925
4926                 if ((bitmap = mddev->bitmap)) {
4927                         unsigned long chunk_kb;
4928                         unsigned long flags;
4929                         spin_lock_irqsave(&bitmap->lock, flags);
4930                         chunk_kb = bitmap->chunksize >> 10;
4931                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4932                                 "%lu%s chunk",
4933                                 bitmap->pages - bitmap->missing_pages,
4934                                 bitmap->pages,
4935                                 (bitmap->pages - bitmap->missing_pages)
4936                                         << (PAGE_SHIFT - 10),
4937                                 chunk_kb ? chunk_kb : bitmap->chunksize,
4938                                 chunk_kb ? "KB" : "B");
4939                         if (bitmap->file) {
4940                                 seq_printf(seq, ", file: ");
4941                                 seq_path(seq, bitmap->file->f_path.mnt,
4942                                          bitmap->file->f_path.dentry," \t\n");
4943                         }
4944
4945                         seq_printf(seq, "\n");
4946                         spin_unlock_irqrestore(&bitmap->lock, flags);
4947                 }
4948
4949                 seq_printf(seq, "\n");
4950         }
4951         mddev_unlock(mddev);
4952         
4953         return 0;
4954 }
4955
4956 static struct seq_operations md_seq_ops = {
4957         .start  = md_seq_start,
4958         .next   = md_seq_next,
4959         .stop   = md_seq_stop,
4960         .show   = md_seq_show,
4961 };
4962
4963 static int md_seq_open(struct inode *inode, struct file *file)
4964 {
4965         int error;
4966         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4967         if (mi == NULL)
4968                 return -ENOMEM;
4969
4970         error = seq_open(file, &md_seq_ops);
4971         if (error)
4972                 kfree(mi);
4973         else {
4974                 struct seq_file *p = file->private_data;
4975                 p->private = mi;
4976                 mi->event = atomic_read(&md_event_count);
4977         }
4978         return error;
4979 }
4980
4981 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4982 {
4983         struct seq_file *m = filp->private_data;
4984         struct mdstat_info *mi = m->private;
4985         int mask;
4986
4987         poll_wait(filp, &md_event_waiters, wait);
4988
4989         /* always allow read */
4990         mask = POLLIN | POLLRDNORM;
4991
4992         if (mi->event != atomic_read(&md_event_count))
4993                 mask |= POLLERR | POLLPRI;
4994         return mask;
4995 }
4996
4997 static const struct file_operations md_seq_fops = {
4998         .owner          = THIS_MODULE,
4999         .open           = md_seq_open,
5000         .read           = seq_read,
5001         .llseek         = seq_lseek,
5002         .release        = seq_release_private,
5003         .poll           = mdstat_poll,
5004 };
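/*
 * Illustrative sketch (editor's addition): mdstat_poll() above lets a
 * monitor sleep until the array state changes by polling /proc/mdstat for
 * POLLPRI, much as mdadm's monitor mode does.  Userspace fragment:
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	poll(&pfd, 1, -1);	// returns once md_event_count moves on
 */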
5005
5006 int register_md_personality(struct mdk_personality *p)
5007 {
5008         spin_lock(&pers_lock);
5009         list_add_tail(&p->list, &pers_list);
5010         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
5011         spin_unlock(&pers_lock);
5012         return 0;
5013 }
5014
5015 int unregister_md_personality(struct mdk_personality *p)
5016 {
5017         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
5018         spin_lock(&pers_lock);
5019         list_del_init(&p->list);
5020         spin_unlock(&pers_lock);
5021         return 0;
5022 }
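/*
 * Usage sketch (editor's addition): a personality module pairs these two
 * calls in its init/exit hooks; raid1, raid5 and friends follow this
 * pattern.  The names below are hypothetical.
 *
 *	static struct mdk_personality example_personality = {
 *		.name	= "example",
 *		.level	= 1,
 *		.owner	= THIS_MODULE,
 *		// .make_request, .run, .stop, .status, ... as required
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */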
5023
5024 static int is_mddev_idle(mddev_t *mddev)
5025 {
5026         mdk_rdev_t *rdev;
5027         struct list_head *tmp;
5028         int idle;
5029         unsigned long curr_events;
5030
5031         idle = 1;
5032         ITERATE_RDEV(mddev,rdev,tmp) {
5033                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5034                 curr_events = disk_stat_read(disk, sectors[0]) + 
5035                                 disk_stat_read(disk, sectors[1]) - 
5036                                 atomic_read(&disk->sync_io);
5037                 /* The difference between curr_events and last_events
5038                  * will be affected by any new non-sync IO (making
5039                  * curr_events bigger) and any difference in the amount of
5040                  * in-flight sync IO (making curr_events bigger or smaller)
5041                  * The amount in-flight is currently limited to
5042                  * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
5043                  * which is at most 4096 sectors.
5044                  * These numbers are fairly fragile and should be made
5045                  * more robust, probably by enforcing the
5046                  * 'window size' that md_do_sync sort-of uses.
5047                  *
5048                  * Note: the following is an unsigned comparison.
5049                  */
5050                 if ((curr_events - rdev->last_events + 4096) > 8192) {
5051                         rdev->last_events = curr_events;
5052                         idle = 0;
5053                 }
5054         }
5055         return idle;
5056 }
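/*
 * Worked example (editor's addition): the unsigned test above accepts any
 * drift of at most 4096 sectors in either direction as "still idle".  With
 * last_events = 10000, curr_events = 13000 gives 3000 + 4096 = 7096 <= 8192
 * (idle: the change fits inside the in-flight sync IO window), while
 * curr_events = 15000 gives 9096 > 8192 and flags the array busy; a drop
 * below 5904 wraps around to a huge value and flags busy as well.
 */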
5057
5058 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5059 {
5060         /* another 'blocks' 512-byte blocks have been synced */
5061         atomic_sub(blocks, &mddev->recovery_active);
5062         wake_up(&mddev->recovery_wait);
5063         if (!ok) {
5064                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5065                 md_wakeup_thread(mddev->thread);
5066                 /* stop recovery, signal do_sync ... */
5067         }
5068 }
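/*
 * Usage sketch (editor's addition): personalities call this from the
 * completion path of each chunk of resync IO issued via ->sync_request(),
 * passing the sector count and whether the IO succeeded.  Hypothetical
 * raid1-style fragment:
 *
 *	md_done_sync(mddev, r1_bio->sectors, uptodate);
 */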
5069
5070
5071 /* md_write_start(mddev, bi)
5072  * If we need to update some array metadata (e.g. 'active' flag
5073  * in superblock) before writing, schedule a superblock update
5074  * and wait for it to complete.
5075  */
5076 void md_write_start(mddev_t *mddev, struct bio *bi)
5077 {
5078         if (bio_data_dir(bi) != WRITE)
5079                 return;
5080
5081         BUG_ON(mddev->ro == 1);
5082         if (mddev->ro == 2) {
5083                 /* need to switch to read/write */
5084                 mddev->ro = 0;
5085                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5086                 md_wakeup_thread(mddev->thread);
5087         }
5088         atomic_inc(&mddev->writes_pending);
5089         if (mddev->in_sync) {
5090                 spin_lock_irq(&mddev->write_lock);
5091                 if (mddev->in_sync) {
5092                         mddev->in_sync = 0;
5093                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5094                         md_wakeup_thread(mddev->thread);
5095                 }
5096                 spin_unlock_irq(&mddev->write_lock);
5097         }
5098         wait_event(mddev->sb_wait, mddev->flags==0);
5099 }
5100
5101 void md_write_end(mddev_t *mddev)
5102 {
5103         if (atomic_dec_and_test(&mddev->writes_pending)) {
5104                 if (mddev->safemode == 2)
5105                         md_wakeup_thread(mddev->thread);
5106                 else if (mddev->safemode_delay)
5107                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5108         }
5109 }
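/*
 * Usage sketch (editor's addition): per the comment above, a personality
 * brackets every array write between these calls so writes_pending and the
 * superblock 'active' state stay accurate.  Simplified make_request flow
 * (hypothetical):
 *
 *	md_write_start(mddev, bio);	// may block on a superblock update
 *	...submit the write(s) to the member devices...
 *	// then, from the completion path once the write finishes:
 *	md_write_end(mddev);
 */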
5110
5111 /* md_allow_write(mddev)
5112  * Calling this ensures that the array is marked 'active' so that writes
5113  * may proceed without blocking.  It is important to call this before
5114  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5115  * Must be called with mddev_lock held.
5116  */
5117 void md_allow_write(mddev_t *mddev)
5118 {
5119         if (!mddev->pers)
5120                 return;
5121         if (mddev->ro)
5122                 return;
5123
5124         spin_lock_irq(&mddev->write_lock);
5125         if (mddev->in_sync) {
5126                 mddev->in_sync = 0;
5127                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5128                 if (mddev->safemode_delay &&
5129                     mddev->safemode == 0)
5130                         mddev->safemode = 1;
5131                 spin_unlock_irq(&mddev->write_lock);
5132                 md_update_sb(mddev, 0);
5133         } else
5134                 spin_unlock_irq(&mddev->write_lock);
5135 }
5136 EXPORT_SYMBOL_GPL(md_allow_write);
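/*
 * Usage sketch (editor's addition): as the comment above says, call this
 * while holding the mddev lock, before a GFP_KERNEL allocation, so the
 * allocator's writeback cannot block forever behind a superblock update.
 * Hypothetical fragment:
 *
 *	md_allow_write(mddev);
 *	conf->buffer = kzalloc(size, GFP_KERNEL);	// names assumed
 */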
5137
5138 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5139
5140 #define SYNC_MARKS      10
5141 #define SYNC_MARK_STEP  (3*HZ)
5142 void md_do_sync(mddev_t *mddev)
5143 {
5144         mddev_t *mddev2;
5145         unsigned int currspeed = 0,
5146                  window;
5147         sector_t max_sectors,j, io_sectors;
5148         unsigned long mark[SYNC_MARKS];
5149         sector_t mark_cnt[SYNC_MARKS];
5150         int last_mark,m;
5151         struct list_head *tmp;
5152         sector_t last_check;
5153         int skipped = 0;
5154         struct list_head *rtmp;
5155         mdk_rdev_t *rdev;
5156         char *desc;
5157
5158         /* just in case the thread restarts... */
5159         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5160                 return;
5161         if (mddev->ro) /* never try to sync a read-only array */
5162                 return;
5163
5164         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5165                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5166                         desc = "data-check";
5167                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5168                         desc = "requested-resync";
5169                 else
5170                         desc = "resync";
5171         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5172                 desc = "reshape";
5173         else
5174                 desc = "recovery";
5175
5176         /* we overload curr_resync somewhat here.
5177          * 0 == not engaged in resync at all
5178          * 2 == checking that there is no conflict with another sync
5179          * 1 == like 2, but have yielded to allow conflicting resync to
5180          *              commence
5181          * other == active in resync - this many blocks
5182          *
5183          * Before starting a resync we must have set curr_resync to
5184          * 2, and then checked that every "conflicting" array has curr_resync
5185          * less than ours.  When we find one that is the same or higher
5186          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5187          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
5188          * This will mean we have to start checking from the beginning again.
5189          *
5190          */
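        /*
         * Worked example (editor's addition): suppose arrays a and b share
         * a physical device and both reach curr_resync == 2.  With &a < &b,
         * a drops itself to 1 (yields) and sleeps on resync_wait; b sees no
         * array at or above its own value and proceeds.  When b finishes it
         * wakes resync_wait, and a restarts the check from curr_resync == 2.
         */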
5191
5192         do {
5193                 mddev->curr_resync = 2;
5194
5195         try_again:
5196                 if (kthread_should_stop()) {
5197                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5198                         goto skip;
5199                 }
5200                 ITERATE_MDDEV(mddev2,tmp) {
5201                         if (mddev2 == mddev)
5202                                 continue;
5203                         if (mddev2->curr_resync && 
5204                             match_mddev_units(mddev,mddev2)) {
5205                                 DEFINE_WAIT(wq);
5206                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5207                                         /* arbitrarily yield */
5208                                         mddev->curr_resync = 1;
5209                                         wake_up(&resync_wait);
5210                                 }
5211                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5212                                         /* no need to wait here, we can wait the next
5213                                          * time 'round when curr_resync == 2
5214                                          */
5215                                         continue;
5216                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5217                                 if (!kthread_should_stop() &&
5218                                     mddev2->curr_resync >= mddev->curr_resync) {
5219                                         printk(KERN_INFO "md: delaying %s of %s"
5220                                                " until %s has finished (they"
5221                                                " share one or more physical units)\n",
5222                                                desc, mdname(mddev), mdname(mddev2));
5223                                         mddev_put(mddev2);
5224                                         schedule();
5225                                         finish_wait(&resync_wait, &wq);
5226                                         goto try_again;
5227                                 }
5228                                 finish_wait(&resync_wait, &wq);
5229                         }
5230                 }
5231         } while (mddev->curr_resync < 2);
5232
5233         j = 0;
5234         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5235                 /* resync follows the size requested by the personality,
5236                  * which defaults to physical size, but can be virtual size
5237                  */
5238                 max_sectors = mddev->resync_max_sectors;
5239                 mddev->resync_mismatches = 0;
5240                 /* we don't use the checkpoint if there's a bitmap */
5241                 if (!mddev->bitmap &&
5242                     !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5243                         j = mddev->recovery_cp;
5244         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5245                 max_sectors = mddev->size << 1;
5246         else {
5247                 /* recovery follows the physical size of devices */
5248                 max_sectors = mddev->size << 1;
5249                 j = MaxSector;
5250                 ITERATE_RDEV(mddev,rdev,rtmp)
5251                         if (rdev->raid_disk >= 0 &&
5252                             !test_bit(Faulty, &rdev->flags) &&
5253                             !test_bit(In_sync, &rdev->flags) &&
5254                             rdev->recovery_offset < j)
5255                                 j = rdev->recovery_offset;
5256         }
5257
5258         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5259         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5260                 " %d KB/sec/disk.\n", speed_min(mddev));
5261         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5262                "(but not more than %d KB/sec) for %s.\n",
5263                speed_max(mddev), desc);
5264
5265         is_mddev_idle(mddev); /* this also initializes IO event counters */
5266
5267         io_sectors = 0;
5268         for (m = 0; m < SYNC_MARKS; m++) {
5269                 mark[m] = jiffies;
5270                 mark_cnt[m] = io_sectors;
5271         }
5272         last_mark = 0;
5273         mddev->resync_mark = mark[last_mark];
5274         mddev->resync_mark_cnt = mark_cnt[last_mark];
5275
5276         /*
5277          * Tune reconstruction:
5278          */
5279         window = 32*(PAGE_SIZE/512);
5280         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5281                 window/2,(unsigned long long) max_sectors/2);
5282
5283         atomic_set(&mddev->recovery_active, 0);
5284         init_waitqueue_head(&mddev->recovery_wait);
5285         last_check = 0;
5286
5287         if (j>2) {
5288                 printk(KERN_INFO 
5289                        "md: resuming %s of %s from checkpoint.\n",
5290                        desc, mdname(mddev));
5291                 mddev->curr_resync = j;
5292         }
5293
5294         while (j < max_sectors) {
5295                 sector_t sectors;
5296
5297                 skipped = 0;
5298                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5299                                             currspeed < speed_min(mddev));
5300                 if (sectors == 0) {
5301                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5302                         goto out;
5303                 }
5304
5305                 if (!skipped) { /* actual IO requested */
5306                         io_sectors += sectors;
5307                         atomic_add(sectors, &mddev->recovery_active);
5308                 }
5309
5310                 j += sectors;
5311                 if (j>1) mddev->curr_resync = j;
5312                 mddev->curr_mark_cnt = io_sectors;
5313                 if (last_check == 0)
5314                         /* this is the earliest that the rebuild will be
5315                          * visible in /proc/mdstat
5316                          */
5317                         md_new_event(mddev);
5318
5319                 if (last_check + window > io_sectors || j == max_sectors)
5320                         continue;
5321
5322                 last_check = io_sectors;
5323
5324                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5325                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5326                         break;
5327
5328         repeat:
5329                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5330                         /* step marks */
5331                         int next = (last_mark+1) % SYNC_MARKS;
5332
5333                         mddev->resync_mark = mark[next];
5334                         mddev->resync_mark_cnt = mark_cnt[next];
5335                         mark[next] = jiffies;
5336                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5337                         last_mark = next;
5338                 }
5339
5340
5341                 if (kthread_should_stop()) {
5342                         /*
5343                          * got a signal, exit.
5344                          */
5345                         printk(KERN_INFO 
5346                                 "md: md_do_sync() got signal ... exiting\n");
5347                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5348                         goto out;
5349                 }
5350
5351                 /*
5352                  * this loop exits only when either we are slower than
5353                  * the 'hard' speed limit, or the system was IO-idle for
5354                  * a jiffy.
5355                  * The system might be non-idle CPU-wise, but we only care
5356                  * about not overloading the IO subsystem. (things like an
5357                  * e2fsck being done on the RAID array should execute fast)
5358                  */
5359                 mddev->queue->unplug_fn(mddev->queue);
5360                 cond_resched();
5361
5362                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5363                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
5364
5365                 if (currspeed > speed_min(mddev)) {
5366                         if ((currspeed > speed_max(mddev)) ||
5367                                         !is_mddev_idle(mddev)) {
5368                                 msleep(500);
5369                                 goto repeat;
5370                         }
5371                 }
5372         }
5373         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5374         /*
5375          * this also signals 'finished resyncing' to md_stop
5376          */
5377  out:
5378         mddev->queue->unplug_fn(mddev->queue);
5379
5380         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5381
5382         /* tell personality that we are finished */
5383         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5384
5385         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5386             !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5387             mddev->curr_resync > 2) {
5388                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5389                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5390                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5391                                         printk(KERN_INFO
5392                                                "md: checkpointing %s of %s.\n",
5393                                                desc, mdname(mddev));
5394                                         mddev->recovery_cp = mddev->curr_resync;
5395                                 }
5396                         } else
5397                                 mddev->recovery_cp = MaxSector;
5398                 } else {
5399                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5400                                 mddev->curr_resync = MaxSector;
5401                         ITERATE_RDEV(mddev,rdev,rtmp)
5402                                 if (rdev->raid_disk >= 0 &&
5403                                     !test_bit(Faulty, &rdev->flags) &&
5404                                     !test_bit(In_sync, &rdev->flags) &&
5405                                     rdev->recovery_offset < mddev->curr_resync)
5406                                         rdev->recovery_offset = mddev->curr_resync;
5407                 }
5408         }
5409         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5410
5411  skip:
5412         mddev->curr_resync = 0;
5413         wake_up(&resync_wait);
5414         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5415         md_wakeup_thread(mddev->thread);
5416 }
5417 EXPORT_SYMBOL_GPL(md_do_sync);
5418
5419
5420 static int remove_and_add_spares(mddev_t *mddev)
5421 {
5422         mdk_rdev_t *rdev;
5423         struct list_head *rtmp;
5424         int spares = 0;
5425
5426         ITERATE_RDEV(mddev,rdev,rtmp)
5427                 if (rdev->raid_disk >= 0 &&
5428                     (test_bit(Faulty, &rdev->flags) ||
5429                      ! test_bit(In_sync, &rdev->flags)) &&
5430                     atomic_read(&rdev->nr_pending)==0) {
5431                         if (mddev->pers->hot_remove_disk(
5432                                     mddev, rdev->raid_disk)==0) {
5433                                 char nm[20];
5434                                 sprintf(nm,"rd%d", rdev->raid_disk);
5435                                 sysfs_remove_link(&mddev->kobj, nm);
5436                                 rdev->raid_disk = -1;
5437                         }
5438                 }
5439
5440         if (mddev->degraded) {
5441                 ITERATE_RDEV(mddev,rdev,rtmp)
5442                         if (rdev->raid_disk < 0
5443                             && !test_bit(Faulty, &rdev->flags)) {
5444                                 rdev->recovery_offset = 0;
5445                                 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5446                                         char nm[20];
5447                                         sprintf(nm, "rd%d", rdev->raid_disk);
5448                                         if (sysfs_create_link(&mddev->kobj,
5449                                                               &rdev->kobj, nm))
5450                                                 printk(KERN_WARNING
5451                                                        "md: cannot register "
5452                                                        "%s for %s\n",
5453                                                        nm, mdname(mddev));
5454                                         spares++;
5455                                         md_new_event(mddev);
5456                                 } else
5457                                         break;
5458                         }
5459         }
5460         return spares;
5461 }
5462 /*
5463  * This routine is regularly called by all per-raid-array threads to
5464  * deal with generic issues like resync and super-block update.
5465  * Raid personalities that don't have a thread (linear/raid0) do not
5466  * need this as they never do any recovery or update the superblock.
5467  *
5468  * It does not do any resync itself, but rather "forks" off other threads
5469  * to do that as needed.
5470  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5471  * "->recovery" and create a thread at ->sync_thread.
5472  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5473  * and wakes up this thread, which will reap it and finish up.
5474  * This thread also removes any faulty devices (with nr_pending == 0).
5475  *
5476  * The overall approach is:
5477  *  1/ if the superblock needs updating, update it.
5478  *  2/ If a recovery thread is running, don't do anything else.
5479  *  3/ If recovery has finished, clean up, possibly marking spares active.
5480  *  4/ If there are any faulty devices, remove them.
5481  *  5/ If the array is degraded, try to add spare devices.
5482  *  6/ If array has spares or is not in-sync, start a resync thread.
5483  */
5484 void md_check_recovery(mddev_t *mddev)
5485 {
5486         mdk_rdev_t *rdev;
5487         struct list_head *rtmp;
5488
5489
5490         if (mddev->bitmap)
5491                 bitmap_daemon_work(mddev->bitmap);
5492
5493         if (mddev->ro)
5494                 return;
5495
5496         if (signal_pending(current)) {
5497                 if (mddev->pers->sync_request) {
5498                         printk(KERN_INFO "md: %s in immediate safe mode\n",
5499                                mdname(mddev));
5500                         mddev->safemode = 2;
5501                 }
5502                 flush_signals(current);
5503         }
5504
5505         if ( ! (
5506                 mddev->flags ||
5507                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5508                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5509                 (mddev->safemode == 1) ||
5510                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5511                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5512                 ))
5513                 return;
5514
5515         if (mddev_trylock(mddev)) {
5516                 int spares = 0;
5517
5518                 spin_lock_irq(&mddev->write_lock);
5519                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5520                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5521                         mddev->in_sync = 1;
5522                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5523                 }
5524                 if (mddev->safemode == 1)
5525                         mddev->safemode = 0;
5526                 spin_unlock_irq(&mddev->write_lock);
5527
5528                 if (mddev->flags)
5529                         md_update_sb(mddev, 0);
5530
5531
5532                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5533                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5534                         /* resync/recovery still happening */
5535                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5536                         goto unlock;
5537                 }
5538                 if (mddev->sync_thread) {
5539                         /* resync has finished, collect result */
5540                         md_unregister_thread(mddev->sync_thread);
5541                         mddev->sync_thread = NULL;
5542                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5543                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5544                                 /* success...*/
5545                                 /* activate any spares */
5546                                 mddev->pers->spare_active(mddev);
5547                         }
5548                         md_update_sb(mddev, 1);
5549
5550                         /* if array is no longer degraded, then any saved_raid_disk
5551                          * information must be scrapped
5552                          */
5553                         if (!mddev->degraded)
5554                                 ITERATE_RDEV(mddev,rdev,rtmp)
5555                                         rdev->saved_raid_disk = -1;
5556
5557                         mddev->recovery = 0;
5558                         /* flag recovery needed just to double check */
5559                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5560                         md_new_event(mddev);
5561                         goto unlock;
5562                 }
5563                 /* Clear some bits that don't mean anything, but
5564                  * might be left set
5565                  */
5566                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5567                 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5568                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5569                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5570
5571                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5572                         goto unlock;
5573                 /* no recovery is running.
5574                  * remove any failed drives, then
5575                  * add spares if possible.
5576                  * Spares are also removed and re-added, to allow
5577                  * the personality to fail the re-add.
5578                  */
5579
5580                 if (mddev->reshape_position != MaxSector) {
5581                         if (mddev->pers->check_reshape(mddev) != 0)
5582                                 /* Cannot proceed */
5583                                 goto unlock;
5584                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5585                 } else if ((spares = remove_and_add_spares(mddev))) {
5586                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5587                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5588                 } else if (mddev->recovery_cp < MaxSector) {
5589                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5590                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5591                         /* nothing to be done ... */
5592                         goto unlock;
5593
5594                 if (mddev->pers->sync_request) {
5595                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5596                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5597                                 /* We are adding a device or devices to an array
5598                                  * which has the bitmap stored on all devices.
5599                                  * So make sure all bitmap pages get written
5600                                  */
5601                                 bitmap_write_all(mddev->bitmap);
5602                         }
5603                         mddev->sync_thread = md_register_thread(md_do_sync,
5604                                                                 mddev,
5605                                                                 "%s_resync");
5606                         if (!mddev->sync_thread) {
5607                                 printk(KERN_ERR "%s: could not start resync"
5608                                         " thread...\n", 
5609                                         mdname(mddev));
5610                                 /* leave the spares where they are, it shouldn't hurt */
5611                                 mddev->recovery = 0;
5612                         } else
5613                                 md_wakeup_thread(mddev->sync_thread);
5614                         md_new_event(mddev);
5615                 }
5616         unlock:
5617                 mddev_unlock(mddev);
5618         }
5619 }
5620
5621 static int md_notify_reboot(struct notifier_block *this,
5622                             unsigned long code, void *x)
5623 {
5624         struct list_head *tmp;
5625         mddev_t *mddev;
5626
5627         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5628
5629                 printk(KERN_INFO "md: stopping all md devices.\n");
5630
5631                 ITERATE_MDDEV(mddev,tmp)
5632                         if (mddev_trylock(mddev)) {
5633                                 do_md_stop(mddev, 1);
5634                                 mddev_unlock(mddev);
5635                         }
5636                 /*
5637                  * certain more exotic SCSI devices are known to be
5638                  * volatile wrt too early system reboots. While the
5639                  * right place to handle this issue is the given
5640                  * driver, we do want to have a safe RAID driver ...
5641                  */
5642                 mdelay(1000*1);
5643         }
5644         return NOTIFY_DONE;
5645 }
5646
5647 static struct notifier_block md_notifier = {
5648         .notifier_call  = md_notify_reboot,
5649         .next           = NULL,
5650         .priority       = INT_MAX, /* before any real devices */
5651 };
5652
5653 static void md_geninit(void)
5654 {
5655         struct proc_dir_entry *p;
5656
5657         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5658
5659         p = create_proc_entry("mdstat", S_IRUGO, NULL);
5660         if (p)
5661                 p->proc_fops = &md_seq_fops;
5662 }
5663
5664 static int __init md_init(void)
5665 {
5666         if (register_blkdev(MAJOR_NR, "md"))
5667                 return -1;
5668         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5669                 unregister_blkdev(MAJOR_NR, "md");
5670                 return -1;
5671         }
5672         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
5673                             md_probe, NULL, NULL);
5674         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
5675                             md_probe, NULL, NULL);
5676
5677         register_reboot_notifier(&md_notifier);
5678         raid_table_header = register_sysctl_table(raid_root_table);
5679
5680         md_geninit();
5681         return 0;
5682 }
5683
5684
5685 #ifndef MODULE
5686
5687 /*
5688  * Searches all registered partitions for autorun RAID arrays
5689  * at boot time.
5690  */
5691 static dev_t detected_devices[128];
5692 static int dev_cnt;
5693
5694 void md_autodetect_dev(dev_t dev)
5695 {
5696         if (dev_cnt >= 0 && dev_cnt < 127)
5697                 detected_devices[dev_cnt++] = dev;
5698 }
5699
5700
5701 static void autostart_arrays(int part)
5702 {
5703         mdk_rdev_t *rdev;
5704         int i;
5705
5706         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5707
5708         for (i = 0; i < dev_cnt; i++) {
5709                 dev_t dev = detected_devices[i];
5710
5711                 rdev = md_import_device(dev, 0, 0);
5712                 if (IS_ERR(rdev))
5713                         continue;
5714
5715                 if (test_bit(Faulty, &rdev->flags)) {
5716                         MD_BUG();
5717                         continue;
5718                 }
5719                 list_add(&rdev->same_set, &pending_raid_disks);
5720         }
5721         dev_cnt = 0;
5722
5723         autorun_devices(part);
5724 }
5725
5726 #endif /* !MODULE */
5727
5728 static __exit void md_exit(void)
5729 {
5730         mddev_t *mddev;
5731         struct list_head *tmp;
5732
5733         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
5734         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
5735
5736         unregister_blkdev(MAJOR_NR,"md");
5737         unregister_blkdev(mdp_major, "mdp");
5738         unregister_reboot_notifier(&md_notifier);
5739         unregister_sysctl_table(raid_table_header);
5740         remove_proc_entry("mdstat", NULL);
5741         ITERATE_MDDEV(mddev,tmp) {
5742                 struct gendisk *disk = mddev->gendisk;
5743                 if (!disk)
5744                         continue;
5745                 export_array(mddev);
5746                 del_gendisk(disk);
5747                 put_disk(disk);
5748                 mddev->gendisk = NULL;
5749                 mddev_put(mddev);
5750         }
5751 }
5752
5753 module_init(md_init)
5754 module_exit(md_exit)
5755
5756 static int get_ro(char *buffer, struct kernel_param *kp)
5757 {
5758         return sprintf(buffer, "%d", start_readonly);
5759 }
5760 static int set_ro(const char *val, struct kernel_param *kp)
5761 {
5762         char *e;
5763         int num = simple_strtoul(val, &e, 10);
5764         if (*val && (*e == '\0' || *e == '\n')) {
5765                 start_readonly = num;
5766                 return 0;
5767         }
5768         return -EINVAL;
5769 }
5770
5771 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
5772 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
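/*
 * Usage note (editor's addition): when md is built as a module these show
 * up under /sys/module/md_mod/parameters/ (start_ro, start_dirty_degraded)
 * and can also be given at load time, e.g. "modprobe md-mod start_ro=1";
 * the md_mod/md-mod naming is an assumption from this file's usual Kbuild
 * setup.
 */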
5773
5774
5775 EXPORT_SYMBOL(register_md_personality);
5776 EXPORT_SYMBOL(unregister_md_personality);
5777 EXPORT_SYMBOL(md_error);
5778 EXPORT_SYMBOL(md_done_sync);
5779 EXPORT_SYMBOL(md_write_start);
5780 EXPORT_SYMBOL(md_write_end);
5781 EXPORT_SYMBOL(md_register_thread);
5782 EXPORT_SYMBOL(md_unregister_thread);
5783 EXPORT_SYMBOL(md_wakeup_thread);
5784 EXPORT_SYMBOL(md_check_recovery);
5785 MODULE_LICENSE("GPL");
5786 MODULE_ALIAS("md");
5787 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);