diff --git a/fs/buffer.c b/fs/buffer.c
index 9f697419ed8e34ec041c92d8f6794c8b4b06ab33..a2fd743d97cb85380b5140e5d84ba9c50c383b84 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -165,151 +165,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        put_bh(bh);
 }
 
-/*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping.  Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-       int ret = 0;
-
-       if (bdev)
-               ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-       return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device.   Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-       struct super_block *sb = get_super(bdev);
-       if (sb) {
-               int res = fsync_super(sb);
-               drop_super(sb);
-               return res;
-       }
-       return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev  --  lock a filesystem and force it into a consistent state
- * @bdev:      blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-       struct super_block *sb;
-       int error = 0;
-
-       mutex_lock(&bdev->bd_fsfreeze_mutex);
-       if (bdev->bd_fsfreeze_count > 0) {
-               bdev->bd_fsfreeze_count++;
-               sb = get_super(bdev);
-               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               return sb;
-       }
-       bdev->bd_fsfreeze_count++;
-
-       down(&bdev->bd_mount_sem);
-       sb = get_super(bdev);
-       if (sb && !(sb->s_flags & MS_RDONLY)) {
-               sb->s_frozen = SB_FREEZE_WRITE;
-               smp_wmb();
-
-               __fsync_super(sb);
-
-               sb->s_frozen = SB_FREEZE_TRANS;
-               smp_wmb();
-
-               sync_blockdev(sb->s_bdev);
-
-               if (sb->s_op->freeze_fs) {
-                       error = sb->s_op->freeze_fs(sb);
-                       if (error) {
-                               printk(KERN_ERR
-                                       "VFS:Filesystem freeze failed\n");
-                               sb->s_frozen = SB_UNFROZEN;
-                               drop_super(sb);
-                               up(&bdev->bd_mount_sem);
-                               bdev->bd_fsfreeze_count--;
-                               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                               return ERR_PTR(error);
-                       }
-               }
-       }
-
-       sync_blockdev(bdev);
-       mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
-       return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev  -- unlock filesystem
- * @bdev:      blockdevice to unlock
- * @sb:                associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-       int error = 0;
-
-       mutex_lock(&bdev->bd_fsfreeze_mutex);
-       if (!bdev->bd_fsfreeze_count) {
-               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               return -EINVAL;
-       }
-
-       bdev->bd_fsfreeze_count--;
-       if (bdev->bd_fsfreeze_count > 0) {
-               if (sb)
-                       drop_super(sb);
-               mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               return 0;
-       }
-
-       if (sb) {
-               BUG_ON(sb->s_bdev != bdev);
-               if (!(sb->s_flags & MS_RDONLY)) {
-                       if (sb->s_op->unfreeze_fs) {
-                               error = sb->s_op->unfreeze_fs(sb);
-                               if (error) {
-                                       printk(KERN_ERR
-                                               "VFS:Filesystem thaw failed\n");
-                                       sb->s_frozen = SB_FREEZE_TRANS;
-                                       bdev->bd_fsfreeze_count++;
-                                       mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                                       return error;
-                               }
-                       }
-                       sb->s_frozen = SB_UNFROZEN;
-                       smp_wmb();
-                       wake_up(&sb->s_wait_unfrozen);
-               }
-               drop_super(sb);
-       }
-
-       up(&bdev->bd_mount_sem);
-       mutex_unlock(&bdev->bd_fsfreeze_mutex);
-       return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers.  To get around this,
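The hunk above removes sync_blockdev(), fsync_bdev(), freeze_bdev() and thaw_bdev() from fs/buffer.c (in mainline this block-device code was consolidated into fs/block_dev.c around the same time; that destination is inferred from kernel history, not shown in this diff). The deleted kerneldoc describes the bd_fsfreeze_count protocol: only the first freeze_bdev() call actually freezes the filesystem, later callers just bump the count, and only the last thaw_bdev() really unfreezes. Below is a minimal userspace sketch of that reference-counting pattern; every name in it (fake_bdev, bdev_freeze, bdev_thaw) is invented for illustration and is not kernel API.

/* Minimal userspace model of the bd_fsfreeze_count idea described in the
 * removed kerneldoc: the first freezer does the real work, later freezers
 * only bump the count, and only the final thaw actually unfreezes.
 * All names here are illustrative, not kernel API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_bdev {
	pthread_mutex_t fsfreeze_mutex;
	int fsfreeze_count;
	bool frozen;
};

static void bdev_freeze(struct fake_bdev *bdev)
{
	pthread_mutex_lock(&bdev->fsfreeze_mutex);
	if (bdev->fsfreeze_count++ == 0) {
		/* First freezer: flush and mark the "filesystem" frozen. */
		bdev->frozen = true;
		printf("frozen\n");
	}
	pthread_mutex_unlock(&bdev->fsfreeze_mutex);
}

static int bdev_thaw(struct fake_bdev *bdev)
{
	int ret = 0;

	pthread_mutex_lock(&bdev->fsfreeze_mutex);
	if (bdev->fsfreeze_count == 0) {
		ret = -1;		/* nothing to thaw, cf. -EINVAL */
	} else if (--bdev->fsfreeze_count == 0) {
		/* Last thawer: actually unfreeze. */
		bdev->frozen = false;
		printf("thawed\n");
	}
	pthread_mutex_unlock(&bdev->fsfreeze_mutex);
	return ret;
}

int main(void)
{
	struct fake_bdev bdev = {
		.fsfreeze_mutex = PTHREAD_MUTEX_INITIALIZER,
	};

	bdev_freeze(&bdev);	/* prints "frozen" */
	bdev_freeze(&bdev);	/* only bumps the count */
	bdev_thaw(&bdev);	/* only drops the count */
	bdev_thaw(&bdev);	/* prints "thawed" */
	return 0;
}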
@@ -760,15 +615,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  */
-static int __set_page_dirty(struct page *page,
+static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
 {
-       if (unlikely(!mapping))
-               return !TestSetPageDirty(page);
-
-       if (TestSetPageDirty(page))
-               return 0;
-
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -785,8 +634,6 @@ static int __set_page_dirty(struct page *page,
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-
-       return 1;
 }
 
 /*
@@ -816,6 +663,7 @@ static int __set_page_dirty(struct page *page,
  */
 int __set_page_dirty_buffers(struct page *page)
 {
+       int newly_dirty;
        struct address_space *mapping = page_mapping(page);
 
        if (unlikely(!mapping))
@@ -831,9 +679,12 @@ int __set_page_dirty_buffers(struct page *page)
                        bh = bh->b_this_page;
                } while (bh != head);
        }
+       newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);
 
-       return __set_page_dirty(page, mapping, 1);
+       if (newly_dirty)
+               __set_page_dirty(page, mapping, 1);
+       return newly_dirty;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
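The three hunks above restructure the dirty-page path: __set_page_dirty() no longer performs the TestSetPageDirty() test-and-set itself, and __set_page_dirty_buffers() now does the test-and-set while still holding mapping->private_lock, calling __set_page_dirty() for the radix-tree tagging and inode dirtying only when the page actually went from clean to dirty. A rough userspace model of that "atomic test-and-set first, expensive path only on the clean-to-dirty transition" ordering is sketched below; the types and function names are invented for the example.

/* Userspace sketch of the ordering introduced above: an atomic
 * test-and-set decides whether the page is newly dirty, and only a
 * newly-dirty page pays for the expensive bookkeeping path.
 * Names are illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	atomic_flag dirty;	/* clear = clean, set = dirty */
};

static void expensive_dirty_bookkeeping(struct fake_page *page)
{
	/* Stand-in for tagging the radix tree and dirtying the inode. */
	printf("bookkeeping for newly dirty page %p\n", (void *)page);
}

static int set_page_dirty_model(struct fake_page *page)
{
	/* atomic_flag_test_and_set() returns the old value, so "newly
	 * dirty" means the flag was previously clear. */
	int newly_dirty = !atomic_flag_test_and_set(&page->dirty);

	if (newly_dirty)
		expensive_dirty_bookkeeping(page);
	return newly_dirty;
}

int main(void)
{
	struct fake_page page = { .dirty = ATOMIC_FLAG_INIT };

	printf("%d\n", set_page_dirty_model(&page));	/* 1: did the work */
	printf("%d\n", set_page_dirty_model(&page));	/* 0: already dirty */
	return 0;
}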
@@ -1262,8 +1113,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
                        return;
        }
 
-       if (!test_set_buffer_dirty(bh))
-               __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
+       if (!test_set_buffer_dirty(bh)) {
+               struct page *page = bh->b_page;
+               if (!TestSetPageDirty(page))
+                       __set_page_dirty(page, page_mapping(page), 0);
+       }
 }
 
 /*