[XFS] kill vn_to_inode
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1b60e46f527fa2104c271105e177fe270f86db3e..71ac3a6162e9f59d1a3af06982f871151ec7623e 100644
 #include "xfs_version.h"
 #include "xfs_log_priv.h"
 #include "xfs_trans_priv.h"
+#include "xfs_filestream.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_extfree_item.h"
+#include "xfs_mru_cache.h"
+#include "xfs_inode_item.h"
 
 #include <linux/namei.h>
 #include <linux/init.h>
@@ -60,6 +66,7 @@
 #include <linux/writeback.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/parser.h>
 
 static struct quotactl_ops xfs_quotactl_operations;
 static struct super_operations xfs_super_operations;
@@ -74,7 +81,10 @@ xfs_args_allocate(
 {
        struct xfs_mount_args   *args;
 
-       args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
+       args = kzalloc(sizeof(struct xfs_mount_args), GFP_KERNEL);
+       if (!args)
+               return NULL;
+
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);
 
@@ -138,6 +148,23 @@ xfs_args_allocate(
 #define MNTOPT_XDSM    "xdsm"          /* DMI enabled (DMAPI / XDSM) */
 #define MNTOPT_DMI     "dmi"           /* DMI enabled (DMAPI / XDSM) */
 
+/*
+ * Table-driven mount option parser.
+ *
+ * Currently only used for remount, but it will be used for the initial
+ * mount in the future, too.
+ */
+enum {
+       Opt_barrier, Opt_nobarrier, Opt_err
+};
+
+static match_table_t tokens = {
+       {Opt_barrier, "barrier"},
+       {Opt_nobarrier, "nobarrier"},
+       {Opt_err, NULL}
+};
+
+
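For reference, a minimal sketch of how such a table is consumed with the <linux/parser.h> helpers (strsep() plus match_token()); the helper name below is made up, and the remount path added later in this patch follows the same pattern using the tokens table above:

#include <linux/errno.h>
#include <linux/parser.h>
#include <linux/string.h>

/* Illustrative only: walk a comma-separated option string and map each
 * word to a token from the tokens table defined above. */
static int example_parse_options(char *options)
{
        substring_t     args[MAX_OPT_ARGS];
        char            *p;

        while ((p = strsep(&options, ",")) != NULL) {
                int     token;

                if (!*p)
                        continue;       /* skip empty fields such as ",," */

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_barrier:
                        /* enable write barriers */
                        break;
                case Opt_nobarrier:
                        /* disable write barriers */
                        break;
                default:
                        /* Opt_err or anything unrecognised */
                        return -EINVAL;
                }
        }
        return 0;
}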
 STATIC unsigned long
 suffix_strtoul(char *s, char **endp, unsigned int base)
 {
@@ -565,7 +592,10 @@ xfs_set_inodeops(
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
-               inode->i_op = &xfs_dir_inode_operations;
+               if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+                       inode->i_op = &xfs_dir_ci_inode_operations;
+               else
+                       inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
@@ -583,10 +613,9 @@ xfs_set_inodeops(
 STATIC_INLINE void
 xfs_revalidate_inode(
        xfs_mount_t             *mp,
-       bhv_vnode_t             *vp,
+       struct inode            *inode,
        xfs_inode_t             *ip)
 {
-       struct inode            *inode = vn_to_inode(vp);
 
        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
@@ -635,13 +664,12 @@ xfs_revalidate_inode(
 void
 xfs_initialize_vnode(
        struct xfs_mount        *mp,
-       bhv_vnode_t             *vp,
+       struct inode            *inode,
        struct xfs_inode        *ip)
 {
-       struct inode            *inode = vn_to_inode(vp);
 
        if (!ip->i_vnode) {
-               ip->i_vnode = vp;
+               ip->i_vnode = inode;
                inode->i_private = ip;
        }
 
@@ -653,7 +681,7 @@ xfs_initialize_vnode(
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
-               xfs_revalidate_inode(mp, vp, ip);
+               xfs_revalidate_inode(mp, inode, ip);
                xfs_set_inodeops(inode);
 
                xfs_iflags_clear(ip, XFS_INEW);
@@ -734,14 +762,6 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
                return;
        }
 
-       if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
-                                       QUEUE_ORDERED_NONE) {
-               xfs_fs_cmn_err(CE_NOTE, mp,
-                 "Disabling barriers, not supported by the underlying device");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
        if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, underlying device is readonly");
@@ -765,6 +785,139 @@ xfs_blkdev_issue_flush(
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
 }
 
+STATIC void
+xfs_close_devices(
+       struct xfs_mount        *mp)
+{
+       if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
+               struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
+               xfs_free_buftarg(mp->m_logdev_targp);
+               xfs_blkdev_put(logdev);
+       }
+       if (mp->m_rtdev_targp) {
+               struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
+               xfs_free_buftarg(mp->m_rtdev_targp);
+               xfs_blkdev_put(rtdev);
+       }
+       xfs_free_buftarg(mp->m_ddev_targp);
+}
+
+/*
+ * The file system configurations are:
+ *     (1) device (partition) with data and internal log
+ *     (2) logical volume with data and log subvolumes.
+ *     (3) logical volume with data, log, and realtime subvolumes.
+ *
+ * We only have to handle opening the log and realtime volumes here if
+ * they are present.  The data subvolume has already been opened by
+ * get_sb_bdev() and is stored in sb->s_bdev.
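+ *
+ * Configurations (2) and (3) are selected with the logdev= and rtdev=
+ * mount options, which arrive here in args->logname and args->rtname.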
+ */
+STATIC int
+xfs_open_devices(
+       struct xfs_mount        *mp,
+       struct xfs_mount_args   *args)
+{
+       struct block_device     *ddev = mp->m_super->s_bdev;
+       struct block_device     *logdev = NULL, *rtdev = NULL;
+       int                     error;
+
+       /*
+        * Open real time and log devices - order is important.
+        */
+       if (args->logname[0]) {
+               error = xfs_blkdev_get(mp, args->logname, &logdev);
+               if (error)
+                       goto out;
+       }
+
+       if (args->rtname[0]) {
+               error = xfs_blkdev_get(mp, args->rtname, &rtdev);
+               if (error)
+                       goto out_close_logdev;
+
+               if (rtdev == ddev || rtdev == logdev) {
+                       cmn_err(CE_WARN,
+       "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
+                       error = EINVAL;
+                       goto out_close_rtdev;
+               }
+       }
+
+       /*
+        * Setup xfs_mount buffer target pointers
+        */
+       error = ENOMEM;
+       mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+       if (!mp->m_ddev_targp)
+               goto out_close_rtdev;
+
+       if (rtdev) {
+               mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+               if (!mp->m_rtdev_targp)
+                       goto out_free_ddev_targ;
+       }
+
+       if (logdev && logdev != ddev) {
+               mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
+               if (!mp->m_logdev_targp)
+                       goto out_free_rtdev_targ;
+       } else {
+               mp->m_logdev_targp = mp->m_ddev_targp;
+       }
+
+       return 0;
+
+ out_free_rtdev_targ:
+       if (mp->m_rtdev_targp)
+               xfs_free_buftarg(mp->m_rtdev_targp);
+ out_free_ddev_targ:
+       xfs_free_buftarg(mp->m_ddev_targp);
+ out_close_rtdev:
+       if (rtdev)
+               xfs_blkdev_put(rtdev);
+ out_close_logdev:
+       if (logdev && logdev != ddev)
+               xfs_blkdev_put(logdev);
+ out:
+       return error;
+}
+
+/*
+ * Setup xfs_mount buffer target pointers based on superblock
+ */
+STATIC int
+xfs_setup_devices(
+       struct xfs_mount        *mp)
+{
+       int                     error;
+
+       error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
+                                   mp->m_sb.sb_sectsize);
+       if (error)
+               return error;
+
+       if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
+               unsigned int    log_sector_size = BBSIZE;
+
+               if (xfs_sb_version_hassector(&mp->m_sb))
+                       log_sector_size = mp->m_sb.sb_logsectsize;
+               error = xfs_setsize_buftarg(mp->m_logdev_targp,
+                                           mp->m_sb.sb_blocksize,
+                                           log_sector_size);
+               if (error)
+                       return error;
+       }
+       if (mp->m_rtdev_targp) {
+               error = xfs_setsize_buftarg(mp->m_rtdev_targp,
+                                           mp->m_sb.sb_blocksize,
+                                           mp->m_sb.sb_sectsize);
+               if (error)
+                       return error;
+       }
+
+       return 0;
+}
+
 /*
  * XFS AIL push thread support
  */
@@ -832,57 +985,21 @@ xfs_fs_alloc_inode(
        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
-       return vn_to_inode(vp);
+       return vp;
 }
 
 STATIC void
 xfs_fs_destroy_inode(
        struct inode            *inode)
 {
-       kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
+       kmem_zone_free(xfs_vnode_zone, inode);
 }
 
 STATIC void
 xfs_fs_inode_init_once(
        void                    *vnode)
 {
-       inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
-}
-
-STATIC int __init
-xfs_init_zones(void)
-{
-       xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
-                                       KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
-                                       KM_ZONE_SPREAD,
-                                       xfs_fs_inode_init_once);
-       if (!xfs_vnode_zone)
-               goto out;
-
-       xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
-       if (!xfs_ioend_zone)
-               goto out_destroy_vnode_zone;
-
-       xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
-                                                 xfs_ioend_zone);
-       if (!xfs_ioend_pool)
-               goto out_free_ioend_zone;
-       return 0;
-
- out_free_ioend_zone:
-       kmem_zone_destroy(xfs_ioend_zone);
- out_destroy_vnode_zone:
-       kmem_zone_destroy(xfs_vnode_zone);
- out:
-       return -ENOMEM;
-}
-
-STATIC void
-xfs_destroy_zones(void)
-{
-       mempool_destroy(xfs_ioend_pool);
-       kmem_zone_destroy(xfs_vnode_zone);
-       kmem_zone_destroy(xfs_ioend_zone);
+       inode_init_once((struct inode *)vnode);
 }
 
 /*
@@ -987,7 +1104,7 @@ void
 xfs_flush_inode(
        xfs_inode_t     *ip)
 {
-       struct inode    *inode = ip->i_vnode;
+       struct inode    *inode = VFS_I(ip);
 
        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
@@ -1012,7 +1129,7 @@ void
 xfs_flush_device(
        xfs_inode_t     *ip)
 {
-       struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
+       struct inode    *inode = VFS_I(ip);
 
        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
@@ -1074,26 +1191,85 @@ xfssyncd(
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
-                       kmem_free(work, sizeof(struct bhv_vfs_sync_work));
+                       kmem_free(work);
                }
        }
 
        return 0;
 }
 
+STATIC void
+xfs_free_fsname(
+       struct xfs_mount        *mp)
+{
+       kfree(mp->m_fsname);
+       kfree(mp->m_rtname);
+       kfree(mp->m_logname);
+}
+
 STATIC void
 xfs_fs_put_super(
        struct super_block      *sb)
 {
        struct xfs_mount        *mp = XFS_M(sb);
+       struct xfs_inode        *rip = mp->m_rootip;
+       int                     unmount_event_flags = 0;
        int                     error;
 
        kthread_stop(mp->m_sync_task);
 
        xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
-       error = xfs_unmount(mp, 0, NULL);
-       if (error)
-               printk("XFS: unmount got error=%d\n", error);
+
+#ifdef HAVE_DMAPI
+       if (mp->m_flags & XFS_MOUNT_DMAPI) {
+               unmount_event_flags =
+                       (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ?
+                               0 : DM_FLAGS_UNWANTED;
+               /*
+                * Ignore errors from dmapi here: first, unmount is not
+                * allowed to fail anyway, and second, we wouldn't want to
+                * fail an unmount because of dmapi.
+                */
+               XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
+                               NULL, NULL, 0, 0, unmount_event_flags);
+       }
+#endif
+
+       /*
+        * Blow away any referenced inode in the filestreams cache.
+        * This can and will cause log traffic as inodes go inactive
+        * here.
+        */
+       xfs_filestream_unmount(mp);
+
+       XFS_bflush(mp->m_ddev_targp);
+       error = xfs_unmount_flush(mp, 0);
+       WARN_ON(error);
+
+       IRELE(rip);
+
+       /*
+        * If we're forcing a shutdown, typically because of a media error,
+        * we want to make sure we invalidate dirty pages that belong to
+        * referenced vnodes as well.
+        */
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               error = xfs_sync(mp, SYNC_WAIT | SYNC_CLOSE);
+               ASSERT(error != EFSCORRUPTED);
+       }
+
+       if (mp->m_flags & XFS_MOUNT_DMAPI) {
+               XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0,
+                               unmount_event_flags);
+       }
+
+       xfs_unmountfs(mp);
+       xfs_icsb_destroy_counters(mp);
+       xfs_close_devices(mp);
+       xfs_qmops_put(mp);
+       xfs_dmops_put(mp);
+       xfs_free_fsname(mp);
+       kfree(mp);
 }
 
 STATIC void
@@ -1216,14 +1392,54 @@ xfs_fs_remount(
        char                    *options)
 {
        struct xfs_mount        *mp = XFS_M(sb);
-       struct xfs_mount_args   *args = xfs_args_allocate(sb, 0);
-       int                     error;
+       substring_t             args[MAX_OPT_ARGS];
+       char                    *p;
 
-       error = xfs_parseargs(mp, options, args, 1);
-       if (!error)
-               error = xfs_mntupdate(mp, flags, args);
-       kmem_free(args, sizeof(*args));
-       return -error;
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+
+               if (!*p)
+                       continue;
+
+               token = match_token(p, tokens, args);
+               switch (token) {
+               case Opt_barrier:
+                       mp->m_flags |= XFS_MOUNT_BARRIER;
+
+                       /*
+                        * Test if barriers are actually working if we can,
+                        * else delay this check until the filesystem is
+                        * marked writeable.
+                        */
+                       if (!(mp->m_flags & XFS_MOUNT_RDONLY))
+                               xfs_mountfs_check_barriers(mp);
+                       break;
+               case Opt_nobarrier:
+                       mp->m_flags &= ~XFS_MOUNT_BARRIER;
+                       break;
+               default:
+                       printk(KERN_INFO
+       "XFS: mount option \"%s\" not supported for remount\n", p);
+                       return -EINVAL;
+               }
+       }
+
+       /* rw/ro -> rw */
+       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+               mp->m_flags &= ~XFS_MOUNT_RDONLY;
+               if (mp->m_flags & XFS_MOUNT_BARRIER)
+                       xfs_mountfs_check_barriers(mp);
+       }
+
+       /* rw -> ro */
+       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+               xfs_filestream_flush(mp);
+               xfs_sync(mp, SYNC_DATA_QUIESCE);
+               xfs_attr_quiesce(mp);
+               mp->m_flags |= XFS_MOUNT_RDONLY;
+       }
+
+       return 0;
 }
 
 /*
@@ -1300,6 +1516,245 @@ xfs_fs_setxquota(
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
 }
 
+/*
+ * This function fills in xfs_mount_t fields based on mount args.
+ * Note: the superblock has _not_ yet been read in.
+ */
+STATIC int
+xfs_start_flags(
+       struct xfs_mount_args   *ap,
+       struct xfs_mount        *mp)
+{
+       int                     error;
+
+       /* Values are in BBs */
+       if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
+               /*
+                * At this point the superblock has not been read
+                * in, therefore we do not know the block size.
+                * Before the mount call ends we will convert
+                * these to FSBs.
+                */
+               mp->m_dalign = ap->sunit;
+               mp->m_swidth = ap->swidth;
+       }
+
+       if (ap->logbufs != -1 &&
+           ap->logbufs != 0 &&
+           (ap->logbufs < XLOG_MIN_ICLOGS ||
+            ap->logbufs > XLOG_MAX_ICLOGS)) {
+               cmn_err(CE_WARN,
+                       "XFS: invalid logbufs value: %d [not %d-%d]",
+                       ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
+               return XFS_ERROR(EINVAL);
+       }
+       mp->m_logbufs = ap->logbufs;
+       if (ap->logbufsize != -1 &&
+           ap->logbufsize !=  0 &&
+           (ap->logbufsize < XLOG_MIN_RECORD_BSIZE ||
+            ap->logbufsize > XLOG_MAX_RECORD_BSIZE ||
+            !is_power_of_2(ap->logbufsize))) {
+               cmn_err(CE_WARN,
+       "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
+                       ap->logbufsize);
+               return XFS_ERROR(EINVAL);
+       }
+
+       error = ENOMEM;
+
+       mp->m_logbsize = ap->logbufsize;
+       mp->m_fsname_len = strlen(ap->fsname) + 1;
+
+       mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL);
+       if (!mp->m_fsname)
+               goto out;
+
+       if (ap->rtname[0]) {
+               mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL);
+               if (!mp->m_rtname)
+                       goto out_free_fsname;
+
+       }
+
+       if (ap->logname[0]) {
+               mp->m_logname = kstrdup(ap->logname, GFP_KERNEL);
+               if (!mp->m_logname)
+                       goto out_free_rtname;
+       }
+
+       if (ap->flags & XFSMNT_WSYNC)
+               mp->m_flags |= XFS_MOUNT_WSYNC;
+#if XFS_BIG_INUMS
+       if (ap->flags & XFSMNT_INO64) {
+               mp->m_flags |= XFS_MOUNT_INO64;
+               mp->m_inoadd = XFS_INO64_OFFSET;
+       }
+#endif
+       if (ap->flags & XFSMNT_RETERR)
+               mp->m_flags |= XFS_MOUNT_RETERR;
+       if (ap->flags & XFSMNT_NOALIGN)
+               mp->m_flags |= XFS_MOUNT_NOALIGN;
+       if (ap->flags & XFSMNT_SWALLOC)
+               mp->m_flags |= XFS_MOUNT_SWALLOC;
+       if (ap->flags & XFSMNT_OSYNCISOSYNC)
+               mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
+       if (ap->flags & XFSMNT_32BITINODES)
+               mp->m_flags |= XFS_MOUNT_32BITINODES;
+
+       if (ap->flags & XFSMNT_IOSIZE) {
+               if (ap->iosizelog > XFS_MAX_IO_LOG ||
+                   ap->iosizelog < XFS_MIN_IO_LOG) {
+                       cmn_err(CE_WARN,
+               "XFS: invalid log iosize: %d [not %d-%d]",
+                               ap->iosizelog, XFS_MIN_IO_LOG,
+                               XFS_MAX_IO_LOG);
+                       return XFS_ERROR(EINVAL);
+               }
+
+               mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
+               mp->m_readio_log = mp->m_writeio_log = ap->iosizelog;
+       }
+
+       if (ap->flags & XFSMNT_IKEEP)
+               mp->m_flags |= XFS_MOUNT_IKEEP;
+       if (ap->flags & XFSMNT_DIRSYNC)
+               mp->m_flags |= XFS_MOUNT_DIRSYNC;
+       if (ap->flags & XFSMNT_ATTR2)
+               mp->m_flags |= XFS_MOUNT_ATTR2;
+       if (ap->flags & XFSMNT_NOATTR2)
+               mp->m_flags |= XFS_MOUNT_NOATTR2;
+
+       if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
+               mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
+
+       /*
+        * no recovery flag requires a read-only mount
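+        * -- without replaying the log the on-disk state may be
+        * inconsistent, so it must not be written to.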
+        */
+       if (ap->flags & XFSMNT_NORECOVERY) {
+               if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+                       cmn_err(CE_WARN,
+       "XFS: tried to mount a FS read-write without recovery!");
+                       return XFS_ERROR(EINVAL);
+               }
+               mp->m_flags |= XFS_MOUNT_NORECOVERY;
+       }
+
+       if (ap->flags & XFSMNT_NOUUID)
+               mp->m_flags |= XFS_MOUNT_NOUUID;
+       if (ap->flags & XFSMNT_BARRIER)
+               mp->m_flags |= XFS_MOUNT_BARRIER;
+       else
+               mp->m_flags &= ~XFS_MOUNT_BARRIER;
+
+       if (ap->flags2 & XFSMNT2_FILESTREAMS)
+               mp->m_flags |= XFS_MOUNT_FILESTREAMS;
+
+       if (ap->flags & XFSMNT_DMAPI)
+               mp->m_flags |= XFS_MOUNT_DMAPI;
+       return 0;
+
+ out_free_rtname:
+       kfree(mp->m_rtname);
+ out_free_fsname:
+       kfree(mp->m_fsname);
+ out:
+       return error;
+}
+
+/*
+ * This function fills in xfs_mount_t fields based on mount args.
+ * Note: the superblock _has_ now been read in.
+ */
+STATIC int
+xfs_finish_flags(
+       struct xfs_mount_args   *ap,
+       struct xfs_mount        *mp)
+{
+       int                     ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
+
+       /* Fail a mount where the logbuf is smaller than the log stripe size */
+       if (xfs_sb_version_haslogv2(&mp->m_sb)) {
+               if ((ap->logbufsize <= 0) &&
+                   (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
+                       mp->m_logbsize = mp->m_sb.sb_logsunit;
+               } else if (ap->logbufsize > 0 &&
+                          ap->logbufsize < mp->m_sb.sb_logsunit) {
+                       cmn_err(CE_WARN,
+       "XFS: logbuf size must be greater than or equal to log stripe size");
+                       return XFS_ERROR(EINVAL);
+               }
+       } else {
+               /* Fail a mount if the logbuf is larger than 32K */
+               if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) {
+                       cmn_err(CE_WARN,
+       "XFS: logbuf size for version 1 logs must be 16K or 32K");
+                       return XFS_ERROR(EINVAL);
+               }
+       }
+
+       /*
+        * An attr2 feature bit set by mkfs turns on the attr2 mount
+        * option unless it is explicitly disabled with noattr2.
+        */
+       if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+           !(ap->flags & XFSMNT_NOATTR2))
+               mp->m_flags |= XFS_MOUNT_ATTR2;
+
+       /*
+        * prohibit r/w mounts of read-only filesystems
+        */
+       if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
+               cmn_err(CE_WARN,
+       "XFS: cannot mount a read-only filesystem as read-write");
+               return XFS_ERROR(EROFS);
+       }
+
+       /*
+        * check for shared mount.
+        */
+       if (ap->flags & XFSMNT_SHARED) {
+               if (!xfs_sb_version_hasshared(&mp->m_sb))
+                       return XFS_ERROR(EINVAL);
+
+               /*
+                * For IRIX 6.5, shared mounts must have the shared
+                * version bit set, have the persistent readonly field
+                * set, have a shared version number of 0, and can only
+                * be mounted read-only.
+                */
+               if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) ||
+                    (mp->m_sb.sb_shared_vn != 0))
+                       return XFS_ERROR(EINVAL);
+
+               mp->m_flags |= XFS_MOUNT_SHARED;
+
+               /*
+                * Shared XFS V0 can't deal with DMI.  Return EINVAL.
+                */
+               if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI))
+                       return XFS_ERROR(EINVAL);
+       }
+
+       if (ap->flags & XFSMNT_UQUOTA) {
+               mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
+               if (ap->flags & XFSMNT_UQUOTAENF)
+                       mp->m_qflags |= XFS_UQUOTA_ENFD;
+       }
+
+       if (ap->flags & XFSMNT_GQUOTA) {
+               mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
+               if (ap->flags & XFSMNT_GQUOTAENF)
+                       mp->m_qflags |= XFS_OQUOTA_ENFD;
+       } else if (ap->flags & XFSMNT_PQUOTA) {
+               mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
+               if (ap->flags & XFSMNT_PQUOTAENF)
+                       mp->m_qflags |= XFS_OQUOTA_ENFD;
+       }
+
+       return 0;
+}
+
 STATIC int
 xfs_fs_fill_super(
        struct super_block      *sb,
@@ -1308,11 +1763,21 @@ xfs_fs_fill_super(
 {
        struct inode            *root;
        struct xfs_mount        *mp = NULL;
-       struct xfs_mount_args   *args = xfs_args_allocate(sb, silent);
-       int                     error;
+       struct xfs_mount_args   *args;
+       int                     flags = 0, error = ENOMEM;
+
+       args = xfs_args_allocate(sb, silent);
+       if (!args)
+               return -ENOMEM;
 
-       mp = xfs_mount_init();
+       mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
+       if (!mp)
+               goto out_free_args;
 
+       spin_lock_init(&mp->m_sb_lock);
+       mutex_init(&mp->m_ilock);
+       mutex_init(&mp->m_growlock);
+       atomic_set(&mp->m_active_trans, 0);
        INIT_LIST_HEAD(&mp->m_sync_list);
        spin_lock_init(&mp->m_sync_lock);
        init_waitqueue_head(&mp->m_wait_single_sync_task);
@@ -1325,16 +1790,60 @@ xfs_fs_fill_super(
 
        error = xfs_parseargs(mp, (char *)data, args, 0);
        if (error)
-               goto fail_vfsop;
+               goto out_free_mp;
 
        sb_min_blocksize(sb, BBSIZE);
+       sb->s_xattr = xfs_xattr_handlers;
        sb->s_export_op = &xfs_export_operations;
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;
 
-       error = xfs_mount(mp, args, NULL);
+       error = xfs_dmops_get(mp, args);
+       if (error)
+               goto out_free_mp;
+       error = xfs_qmops_get(mp, args);
+       if (error)
+               goto out_put_dmops;
+
+       if (args->flags & XFSMNT_QUIET)
+               flags |= XFS_MFSI_QUIET;
+
+       error = xfs_open_devices(mp, args);
        if (error)
-               goto fail_vfsop;
+               goto out_put_qmops;
+
+       if (xfs_icsb_init_counters(mp))
+               mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
+
+       /*
+        * Setup flags based on mount(2) options and then the superblock
+        */
+       error = xfs_start_flags(args, mp);
+       if (error)
+               goto out_free_fsname;
+       error = xfs_readsb(mp, flags);
+       if (error)
+               goto out_free_fsname;
+       error = xfs_finish_flags(args, mp);
+       if (error)
+               goto out_free_sb;
+
+       error = xfs_setup_devices(mp);
+       if (error)
+               goto out_free_sb;
+
+       if (mp->m_flags & XFS_MOUNT_BARRIER)
+               xfs_mountfs_check_barriers(mp);
+
+       error = xfs_filestream_mount(mp);
+       if (error)
+               goto out_free_sb;
+
+       error = xfs_mountfs(mp, flags);
+       if (error)
+               goto out_filestream_unmount;
+
+       XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname);
 
        sb->s_dirt = 1;
        sb->s_magic = XFS_SB_MAGIC;
@@ -1344,7 +1853,7 @@ xfs_fs_fill_super(
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);
 
-       root = igrab(mp->m_rootip->i_vnode);
+       root = igrab(VFS_I(mp->m_rootip));
        if (!root) {
                error = ENOENT;
                goto fail_unmount;
@@ -1369,10 +1878,28 @@ xfs_fs_fill_super(
 
        xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
 
-       kmem_free(args, sizeof(*args));
+       kfree(args);
        return 0;
 
-fail_vnrele:
+ out_filestream_unmount:
+       xfs_filestream_unmount(mp);
+ out_free_sb:
+       xfs_freesb(mp);
+ out_free_fsname:
+       xfs_free_fsname(mp);
+       xfs_icsb_destroy_counters(mp);
+       xfs_close_devices(mp);
+ out_put_qmops:
+       xfs_qmops_put(mp);
+ out_put_dmops:
+       xfs_dmops_put(mp);
+ out_free_mp:
+       kfree(mp);
+ out_free_args:
+       kfree(args);
+       return -error;
+
+ fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
@@ -1380,12 +1907,22 @@ fail_vnrele:
                iput(root);
        }
 
-fail_unmount:
-       xfs_unmount(mp, 0, NULL);
+ fail_unmount:
+       /*
+        * Blow away any referenced inode in the filestreams cache.
+        * This can and will cause log traffic as inodes go inactive
+        * here.
+        */
+       xfs_filestream_unmount(mp);
 
-fail_vfsop:
-       kmem_free(args, sizeof(*args));
-       return -error;
+       XFS_bflush(mp->m_ddev_targp);
+       error = xfs_unmount_flush(mp, 0);
+       WARN_ON(error);
+
+       IRELE(mp->m_rootip);
+
+       xfs_unmountfs(mp);
+       goto out_free_fsname;
 }
 
 STATIC int
@@ -1430,9 +1967,235 @@ static struct file_system_type xfs_fs_type = {
        .fs_flags               = FS_REQUIRES_DEV,
 };
 
+STATIC int __init
+xfs_alloc_trace_bufs(void)
+{
+#ifdef XFS_ALLOC_TRACE
+       xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
+       if (!xfs_alloc_trace_buf)
+               goto out;
+#endif
+#ifdef XFS_BMAP_TRACE
+       xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
+       if (!xfs_bmap_trace_buf)
+               goto out_free_alloc_trace;
+#endif
+#ifdef XFS_BMBT_TRACE
+       xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
+       if (!xfs_bmbt_trace_buf)
+               goto out_free_bmap_trace;
+#endif
+#ifdef XFS_ATTR_TRACE
+       xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
+       if (!xfs_attr_trace_buf)
+               goto out_free_bmbt_trace;
+#endif
+#ifdef XFS_DIR2_TRACE
+       xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
+       if (!xfs_dir2_trace_buf)
+               goto out_free_attr_trace;
+#endif
+
+       return 0;
+
+#ifdef XFS_DIR2_TRACE
+ out_free_attr_trace:
+#endif
+#ifdef XFS_ATTR_TRACE
+       ktrace_free(xfs_attr_trace_buf);
+ out_free_bmbt_trace:
+#endif
+#ifdef XFS_BMBT_TRACE
+       ktrace_free(xfs_bmbt_trace_buf);
+ out_free_bmap_trace:
+#endif
+#ifdef XFS_BMAP_TRACE
+       ktrace_free(xfs_bmap_trace_buf);
+ out_free_alloc_trace:
+#endif
+#ifdef XFS_ALLOC_TRACE
+       ktrace_free(xfs_alloc_trace_buf);
+ out:
+#endif
+       return -ENOMEM;
+}
+
+STATIC void
+xfs_free_trace_bufs(void)
+{
+#ifdef XFS_DIR2_TRACE
+       ktrace_free(xfs_dir2_trace_buf);
+#endif
+#ifdef XFS_ATTR_TRACE
+       ktrace_free(xfs_attr_trace_buf);
+#endif
+#ifdef XFS_BMBT_TRACE
+       ktrace_free(xfs_bmbt_trace_buf);
+#endif
+#ifdef XFS_BMAP_TRACE
+       ktrace_free(xfs_bmap_trace_buf);
+#endif
+#ifdef XFS_ALLOC_TRACE
+       ktrace_free(xfs_alloc_trace_buf);
+#endif
+}
+
+STATIC int __init
+xfs_init_zones(void)
+{
+       xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
+                                       KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
+                                       KM_ZONE_SPREAD,
+                                       xfs_fs_inode_init_once);
+       if (!xfs_vnode_zone)
+               goto out;
+
+       xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
+       if (!xfs_ioend_zone)
+               goto out_destroy_vnode_zone;
+
+       xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
+                                                 xfs_ioend_zone);
+       if (!xfs_ioend_pool)
+               goto out_destroy_ioend_zone;
+
+       xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
+                                               "xfs_log_ticket");
+       if (!xfs_log_ticket_zone)
+               goto out_destroy_ioend_pool;
+
+       xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
+                                               "xfs_bmap_free_item");
+       if (!xfs_bmap_free_item_zone)
+               goto out_destroy_log_ticket_zone;
+       xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
+                                               "xfs_btree_cur");
+       if (!xfs_btree_cur_zone)
+               goto out_destroy_bmap_free_item_zone;
+
+       xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
+                                               "xfs_da_state");
+       if (!xfs_da_state_zone)
+               goto out_destroy_btree_cur_zone;
+
+       xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
+       if (!xfs_dabuf_zone)
+               goto out_destroy_da_state_zone;
+
+       xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+       if (!xfs_ifork_zone)
+               goto out_destroy_dabuf_zone;
+
+       xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
+       if (!xfs_trans_zone)
+               goto out_destroy_ifork_zone;
+
+       /*
+        * The size of the zone allocated buf log item is the maximum
+        * size possible under XFS.  This wastes a little bit of memory,
+        * but it is much faster.
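+        * (With the 64k maximum block size and 128 byte dirty chunks this
+        * is 512 bitmap bits, i.e. about 64 bytes of bitmap per item.)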
+        */
+       xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
+                               (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
+                                 NBWORD) * sizeof(int))), "xfs_buf_item");
+       if (!xfs_buf_item_zone)
+               goto out_destroy_trans_zone;
+
+       xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+                       ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
+                                sizeof(xfs_extent_t))), "xfs_efd_item");
+       if (!xfs_efd_zone)
+               goto out_destroy_buf_item_zone;
+
+       xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+                       ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
+                               sizeof(xfs_extent_t))), "xfs_efi_item");
+       if (!xfs_efi_zone)
+               goto out_destroy_efd_zone;
+
+       xfs_inode_zone =
+               kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+                                       KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
+                                       KM_ZONE_SPREAD, NULL);
+       if (!xfs_inode_zone)
+               goto out_destroy_efi_zone;
+
+       xfs_ili_zone =
+               kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
+                                       KM_ZONE_SPREAD, NULL);
+       if (!xfs_ili_zone)
+               goto out_destroy_inode_zone;
+
+#ifdef CONFIG_XFS_POSIX_ACL
+       xfs_acl_zone = kmem_zone_init(sizeof(xfs_acl_t), "xfs_acl");
+       if (!xfs_acl_zone)
+               goto out_destroy_ili_zone;
+#endif
+
+       return 0;
+
+#ifdef CONFIG_XFS_POSIX_ACL
+ out_destroy_ili_zone:
+#endif
+       kmem_zone_destroy(xfs_ili_zone);
+ out_destroy_inode_zone:
+       kmem_zone_destroy(xfs_inode_zone);
+ out_destroy_efi_zone:
+       kmem_zone_destroy(xfs_efi_zone);
+ out_destroy_efd_zone:
+       kmem_zone_destroy(xfs_efd_zone);
+ out_destroy_buf_item_zone:
+       kmem_zone_destroy(xfs_buf_item_zone);
+ out_destroy_trans_zone:
+       kmem_zone_destroy(xfs_trans_zone);
+ out_destroy_ifork_zone:
+       kmem_zone_destroy(xfs_ifork_zone);
+ out_destroy_dabuf_zone:
+       kmem_zone_destroy(xfs_dabuf_zone);
+ out_destroy_da_state_zone:
+       kmem_zone_destroy(xfs_da_state_zone);
+ out_destroy_btree_cur_zone:
+       kmem_zone_destroy(xfs_btree_cur_zone);
+ out_destroy_bmap_free_item_zone:
+       kmem_zone_destroy(xfs_bmap_free_item_zone);
+ out_destroy_log_ticket_zone:
+       kmem_zone_destroy(xfs_log_ticket_zone);
+ out_destroy_ioend_pool:
+       mempool_destroy(xfs_ioend_pool);
+ out_destroy_ioend_zone:
+       kmem_zone_destroy(xfs_ioend_zone);
+ out_destroy_vnode_zone:
+       kmem_zone_destroy(xfs_vnode_zone);
+ out:
+       return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_zones(void)
+{
+#ifdef CONFIG_XFS_POSIX_ACL
+       kmem_zone_destroy(xfs_acl_zone);
+#endif
+       kmem_zone_destroy(xfs_ili_zone);
+       kmem_zone_destroy(xfs_inode_zone);
+       kmem_zone_destroy(xfs_efi_zone);
+       kmem_zone_destroy(xfs_efd_zone);
+       kmem_zone_destroy(xfs_buf_item_zone);
+       kmem_zone_destroy(xfs_trans_zone);
+       kmem_zone_destroy(xfs_ifork_zone);
+       kmem_zone_destroy(xfs_dabuf_zone);
+       kmem_zone_destroy(xfs_da_state_zone);
+       kmem_zone_destroy(xfs_btree_cur_zone);
+       kmem_zone_destroy(xfs_bmap_free_item_zone);
+       kmem_zone_destroy(xfs_log_ticket_zone);
+       mempool_destroy(xfs_ioend_pool);
+       kmem_zone_destroy(xfs_ioend_zone);
+       kmem_zone_destroy(xfs_vnode_zone);
+
+}
 
 STATIC int __init
-init_xfs_fs( void )
+init_xfs_fs(void)
 {
        int                     error;
        static char             message[] __initdata = KERN_INFO \
@@ -1441,42 +2204,73 @@ init_xfs_fs( void )
        printk(message);
 
        ktrace_init(64);
+       vn_init();
+       xfs_dir_startup();
 
        error = xfs_init_zones();
-       if (error < 0)
-               goto undo_zones;
+       if (error)
+               goto out;
+
+       error = xfs_alloc_trace_bufs();
+       if (error)
+               goto out_destroy_zones;
+
+       error = xfs_mru_cache_init();
+       if (error)
+               goto out_free_trace_buffers;
+
+       error = xfs_filestream_init();
+       if (error)
+               goto out_mru_cache_uninit;
 
        error = xfs_buf_init();
-       if (error < 0)
-               goto undo_buffers;
+       if (error)
+               goto out_filestream_uninit;
+
+       error = xfs_init_procfs();
+       if (error)
+               goto out_buf_terminate;
+
+       error = xfs_sysctl_register();
+       if (error)
+               goto out_cleanup_procfs;
 
-       vn_init();
-       xfs_init();
-       uuid_init();
        vfs_initquota();
 
        error = register_filesystem(&xfs_fs_type);
        if (error)
-               goto undo_register;
+               goto out_sysctl_unregister;
        return 0;
 
-undo_register:
+ out_sysctl_unregister:
+       xfs_sysctl_unregister();
+ out_cleanup_procfs:
+       xfs_cleanup_procfs();
+ out_buf_terminate:
        xfs_buf_terminate();
-
-undo_buffers:
+ out_filestream_uninit:
+       xfs_filestream_uninit();
+ out_mru_cache_uninit:
+       xfs_mru_cache_uninit();
+ out_free_trace_buffers:
+       xfs_free_trace_bufs();
+ out_destroy_zones:
        xfs_destroy_zones();
-
-undo_zones:
+ out:
        return error;
 }
 
 STATIC void __exit
-exit_xfs_fs( void )
+exit_xfs_fs(void)
 {
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
-       xfs_cleanup();
+       xfs_sysctl_unregister();
+       xfs_cleanup_procfs();
        xfs_buf_terminate();
+       xfs_filestream_uninit();
+       xfs_mru_cache_uninit();
+       xfs_free_trace_bufs();
        xfs_destroy_zones();
        ktrace_uninit();
 }