www.pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - fs/xfs/xfs_iget.c
[XFS] sanitize xfs_initialize_vnode
[linux-2.6-omap-h63xx.git] / fs / xfs / xfs_iget.c
index 8e09b71f4104a91e0e6e2ba00ce8384d1b780499..539c2dd8eae8b8d9f42b706d42d1ebc4727c53ec 100644 (file)
@@ -78,7 +78,6 @@ xfs_iget_core(
        xfs_inode_t     *ip;
        xfs_inode_t     *iq;
        int             error;
-       xfs_icluster_t  *icl, *new_icl = NULL;
        unsigned long   first_index, mask;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;
@@ -229,11 +228,9 @@ finish_inode:
        }
 
        /*
-        * This is a bit messy - we preallocate everything we _might_
-        * need before we pick up the ici lock. That way we don't have to
-        * juggle locks and go all the way back to the start.
+        * Preload the radix tree so we can insert safely under the
+        * write spinlock.
         */
-       new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP);
        if (radix_tree_preload(GFP_KERNEL)) {
                xfs_idestroy(ip);
                delay(1);
@@ -242,17 +239,6 @@ finish_inode:
        mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
        first_index = agino & mask;
        write_lock(&pag->pag_ici_lock);
-
-       /*
-        * Find the cluster if it exists
-        */
-       icl = NULL;
-       if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq,
-                                                       first_index, 1)) {
-               if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) == first_index)
-                       icl = iq->i_cluster;
-       }
-
        /*
         * insert the new inode
         */
@@ -267,30 +253,13 @@ finish_inode:
        }
 
        /*
-        * These values _must_ be set before releasing ihlock!
+        * These values _must_ be set before releasing the radix tree lock!
         */
        ip->i_udquot = ip->i_gdquot = NULL;
        xfs_iflags_set(ip, XFS_INEW);
 
-       ASSERT(ip->i_cluster == NULL);
-
-       if (!icl) {
-               spin_lock_init(&new_icl->icl_lock);
-               INIT_HLIST_HEAD(&new_icl->icl_inodes);
-               icl = new_icl;
-               new_icl = NULL;
-       } else {
-               ASSERT(!hlist_empty(&icl->icl_inodes));
-       }
-       spin_lock(&icl->icl_lock);
-       hlist_add_head(&ip->i_cnode, &icl->icl_inodes);
-       ip->i_cluster = icl;
-       spin_unlock(&icl->icl_lock);
-
        write_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
-       if (new_icl)
-               kmem_zone_free(xfs_icluster_zone, new_icl);
 
        /*
         * Link ip to its mount and thread it on the mount's inode list.
@@ -318,11 +287,18 @@ finish_inode:
        xfs_iflags_set(ip, XFS_IMODIFIED);
        *ipp = ip;
 
+       /*
+        * Set up the Linux inode and link it with the XFS inode.
+        */
+       ip->i_vnode = inode;
+       inode->i_private = ip;
+
        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
-       xfs_initialize_vnode(mp, inode, ip);
+       if (ip->i_d.di_mode != 0)
+               xfs_setup_inode(ip);
        return 0;
 }
 
@@ -442,10 +418,11 @@ xfs_iput(xfs_inode_t      *ip,
  * Special iput for brand-new inodes that are still locked
  */
 void
-xfs_iput_new(xfs_inode_t       *ip,
-            uint               lock_flags)
+xfs_iput_new(
+       xfs_inode_t     *ip,
+       uint            lock_flags)
 {
-       struct inode    *inode = ip->i_vnode;
+       struct inode    *inode = VFS_I(ip);
 
        xfs_itrace_entry(ip);
 
@@ -528,18 +505,6 @@ xfs_iextract(
        write_unlock(&pag->pag_ici_lock);
        xfs_put_perag(mp, pag);
 
-       /*
-        * Remove from cluster list
-        */
-       mp = ip->i_mount;
-       spin_lock(&ip->i_cluster->icl_lock);
-       hlist_del(&ip->i_cnode);
-       spin_unlock(&ip->i_cluster->icl_lock);
-
-       /* was last inode in cluster? */
-       if (hlist_empty(&ip->i_cluster->icl_inodes))
-               kmem_zone_free(xfs_icluster_zone, ip->i_cluster);
-
        /*
         * Remove from mount's inode list.
         */
@@ -636,8 +601,9 @@ xfs_iunlock_map_shared(
  *             XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
  */
 void
-xfs_ilock(xfs_inode_t  *ip,
-         uint          lock_flags)
+xfs_ilock(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
        /*
         * You can't set both SHARED and EXCL for the same lock,
@@ -650,16 +616,16 @@ xfs_ilock(xfs_inode_t     *ip,
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 
-       if (lock_flags & XFS_IOLOCK_EXCL) {
+       if (lock_flags & XFS_IOLOCK_EXCL)
                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-       } else if (lock_flags & XFS_IOLOCK_SHARED) {
+       else if (lock_flags & XFS_IOLOCK_SHARED)
                mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-       }
-       if (lock_flags & XFS_ILOCK_EXCL) {
+
+       if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-       } else if (lock_flags & XFS_ILOCK_SHARED) {
+       else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-       }
+
        xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
 }
 
@@ -674,15 +640,12 @@ xfs_ilock(xfs_inode_t     *ip,
  * lock_flags -- this parameter indicates the inode's locks to be
  *       to be locked.  See the comment for xfs_ilock() for a list
  *      of valid values.
- *
  */
 int
-xfs_ilock_nowait(xfs_inode_t   *ip,
-                uint           lock_flags)
+xfs_ilock_nowait(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
-       int     iolocked;
-       int     ilocked;
-
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
@@ -694,37 +657,30 @@ xfs_ilock_nowait(xfs_inode_t      *ip,
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 
-       iolocked = 0;
        if (lock_flags & XFS_IOLOCK_EXCL) {
-               iolocked = mrtryupdate(&ip->i_iolock);
-               if (!iolocked) {
-                       return 0;
-               }
+               if (!mrtryupdate(&ip->i_iolock))
+                       goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
-               iolocked = mrtryaccess(&ip->i_iolock);
-               if (!iolocked) {
-                       return 0;
-               }
+               if (!mrtryaccess(&ip->i_iolock))
+                       goto out;
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
-               ilocked = mrtryupdate(&ip->i_lock);
-               if (!ilocked) {
-                       if (iolocked) {
-                               mrunlock(&ip->i_iolock);
-                       }
-                       return 0;
-               }
+               if (!mrtryupdate(&ip->i_lock))
+                       goto out_undo_iolock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
-               ilocked = mrtryaccess(&ip->i_lock);
-               if (!ilocked) {
-                       if (iolocked) {
-                               mrunlock(&ip->i_iolock);
-                       }
-                       return 0;
-               }
+               if (!mrtryaccess(&ip->i_lock))
+                       goto out_undo_iolock;
        }
        xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
        return 1;
+
+ out_undo_iolock:
+       if (lock_flags & XFS_IOLOCK_EXCL)
+               mrunlock_excl(&ip->i_iolock);
+       else if (lock_flags & XFS_IOLOCK_SHARED)
+               mrunlock_shared(&ip->i_iolock);
+ out:
+       return 0;
 }
 
 /*
@@ -740,8 +696,9 @@ xfs_ilock_nowait(xfs_inode_t        *ip,
  *
  */
 void
-xfs_iunlock(xfs_inode_t        *ip,
-           uint        lock_flags)
+xfs_iunlock(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
        /*
         * You can't set both SHARED and EXCL for the same lock,
@@ -756,31 +713,25 @@ xfs_iunlock(xfs_inode_t   *ip,
                        XFS_LOCK_DEP_MASK)) == 0);
        ASSERT(lock_flags != 0);
 
-       if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
-               ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
-                      (ismrlocked(&ip->i_iolock, MR_ACCESS)));
-               ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
-                      (ismrlocked(&ip->i_iolock, MR_UPDATE)));
-               mrunlock(&ip->i_iolock);
-       }
+       if (lock_flags & XFS_IOLOCK_EXCL)
+               mrunlock_excl(&ip->i_iolock);
+       else if (lock_flags & XFS_IOLOCK_SHARED)
+               mrunlock_shared(&ip->i_iolock);
 
-       if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
-               ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
-                      (ismrlocked(&ip->i_lock, MR_ACCESS)));
-               ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
-                      (ismrlocked(&ip->i_lock, MR_UPDATE)));
-               mrunlock(&ip->i_lock);
+       if (lock_flags & XFS_ILOCK_EXCL)
+               mrunlock_excl(&ip->i_lock);
+       else if (lock_flags & XFS_ILOCK_SHARED)
+               mrunlock_shared(&ip->i_lock);
 
+       if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
+           !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
                /*
                 * Let the AIL know that this item has been unlocked in case
                 * it is in the AIL and anyone is waiting on it.  Don't do
                 * this if the caller has asked us not to.
                 */
-               if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
-                    ip->i_itemp != NULL) {
-                       xfs_trans_unlocked_item(ip->i_mount,
-                                               (xfs_log_item_t*)(ip->i_itemp));
-               }
+               xfs_trans_unlocked_item(ip->i_mount,
+                                       (xfs_log_item_t*)(ip->i_itemp));
        }
        xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
 }
@@ -790,21 +741,47 @@ xfs_iunlock(xfs_inode_t   *ip,
  * if it is being demoted.
  */
 void
-xfs_ilock_demote(xfs_inode_t   *ip,
-                uint           lock_flags)
+xfs_ilock_demote(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 
-       if (lock_flags & XFS_ILOCK_EXCL) {
-               ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
-       }
-       if (lock_flags & XFS_IOLOCK_EXCL) {
-               ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+       if (lock_flags & XFS_IOLOCK_EXCL)
                mrdemote(&ip->i_iolock);
+}
+
+#ifdef DEBUG
+/*
+ * Debug-only routine, without additional rw_semaphore APIs, we can
+ * now only answer requests regarding whether we hold the lock for write
+ * (reader state is outside our visibility, we only track writer state).
+ *
+ * Note: this means !xfs_isilocked would give false positives, so don't do that.
+ */
+int
+xfs_isilocked(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
+{
+       if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
+                       XFS_ILOCK_EXCL) {
+               if (!ip->i_lock.mr_writer)
+                       return 0;
        }
+
+       if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
+                       XFS_IOLOCK_EXCL) {
+               if (!ip->i_iolock.mr_writer)
+                       return 0;
+       }
+
+       return 1;
 }
+#endif
 
 /*
  * The following three routines simply manage the i_flock