 int  fs_nosys(void) { return ENOSYS; }
 void fs_noval(void) { return; }
 
+/*
+ * note: all filemap functions return negative error codes. These
+ * need to be inverted before returning to the xfs core functions.
+ */
 void
 xfs_tosspages(
        xfs_inode_t     *ip,
                if (!ret)
                        truncate_inode_pages(mapping, first);
        }
-       return ret;
+       return -ret;
 }
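
(Side note, not part of the patch: a minimal sketch of the sign convention the new comment describes, assuming the usual Linux rules. The helper name example_flush_mapping is made up for illustration; the generic filemap calls return 0 or a negative errno, while the XFS core expects 0 or a positive errno, hence the negation at the boundary.)

#include <linux/fs.h>

/* illustration only: negate the -errno from the filemap layer for XFS */
static int example_flush_mapping(struct address_space *mapping)
{
	int	ret = filemap_fdatawrite(mapping);	/* 0 or -errno */

	return -ret;					/* 0 or +errno */
}
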
 
 int
                xfs_iflags_clear(ip, XFS_ITRUNCATED);
                ret = filemap_fdatawrite(mapping);
                if (flags & XFS_B_ASYNC)
-                       return ret;
+                       return -ret;
                ret2 = filemap_fdatawait(mapping);
                if (!ret)
                        ret = ret2;
        }
-       return ret;
+       return -ret;
+}
+
+int
+xfs_wait_on_pages(
+       xfs_inode_t     *ip,
+       xfs_off_t       first,
+       xfs_off_t       last)
+{
+       struct address_space *mapping = VFS_I(ip)->i_mapping;
+
+       if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+               return -filemap_fdatawait(mapping);
+       return 0;
 }
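
(Side note, not part of the patch: a rough caller-side sketch of what the mapping_tagged() check buys. The function name example_wait_for_data is hypothetical; the point is that a mapping with no pages tagged PAGECACHE_TAG_WRITEBACK skips filemap_fdatawait() entirely, so the common clean-file case costs only a tag lookup.)

#include <linux/fs.h>
#include <linux/pagemap.h>

/* illustration only: wait for writeback, skipping clean mappings */
static int example_wait_for_data(struct address_space *mapping)
{
	if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return 0;			/* nothing in flight */
	return -filemap_fdatawait(mapping);	/* 0 or +errno, XFS style */
}
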
 
 
        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages)
-                       ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
+                       ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
                                                    -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
 
        struct inode            *inode,
        int                     sync)
 {
+       struct xfs_inode        *ip = XFS_I(inode);
        int                     error = 0;
        int                     flags = 0;
 
-       xfs_itrace_entry(XFS_I(inode));
+       xfs_itrace_entry(ip);
        if (sync) {
-               filemap_fdatawait(inode->i_mapping);
+               error = xfs_wait_on_pages(ip, 0, -1);
+               if (error)
+                       goto out_error;
                flags |= FLUSH_SYNC;
        }
-       error = xfs_inode_flush(XFS_I(inode), flags);
+       error = xfs_inode_flush(ip, flags);
+
+out_error:
        /*
         * if we failed to write out the inode then mark
         * it dirty again so we'll try again later.
         */
        if (error)
-               xfs_mark_inode_dirty_sync(XFS_I(inode));
+               xfs_mark_inode_dirty_sync(ip);
 
        return -error;
 }
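
(Side note, not part of the patch: the behavioural change above is that a failed data wait is no longer dropped on the floor; it now takes the same path as a failed inode flush and re-dirties the inode. A generic sketch of that retry pattern, with the hypothetical name example_write_inode and an injected flush callback standing in for the XFS internals:)

#include <linux/fs.h>

/* illustration only: on any failure, re-dirty so writeback retries later */
static int example_write_inode(struct inode *inode,
			       int (*flush)(struct inode *))
{
	int	error = flush(inode);		/* 0 or +errno, XFS style */

	if (error)
		mark_inode_dirty_sync(inode);	/* retry on a later sync */
	return -error;				/* VFS expects 0 or -errno */
}
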
 
                return XFS_ERROR(EIO);
 
        /* capture size updates in I/O completion before writing the inode. */
-       error = filemap_fdatawait(VFS_I(ip)->i_mapping);
+       error = xfs_wait_on_pages(ip, 0, -1);
        if (error)
                return XFS_ERROR(error);
 
 
                xfs_off_t last, int fiopt);
 int xfs_flush_pages(struct xfs_inode *ip, xfs_off_t first,
                xfs_off_t last, uint64_t flags, int fiopt);
+int xfs_wait_on_pages(struct xfs_inode *ip, xfs_off_t first, xfs_off_t last);
 
 #endif /* _XFS_VNODEOPS_H */