[XFS] kill superfluous buffer locking (2nd attempt)
author     Christoph Hellwig <hch@infradead.org>
           Fri, 7 Dec 2007 03:07:08 +0000 (14:07 +1100)
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>
           Thu, 7 Feb 2008 07:18:50 +0000 (18:18 +1100)
There is no need to lock any page in xfs_buf.c because we operate on our
own address_space and all locking is covered by the buffer semaphore. If
we ever switch back to the main block device address_space, as suggested
e.g. for fsblock with a similar scheme, the locking will have to be
totally revised anyway because the current scheme is neither correct nor
coherent with itself.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:30156a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_buf.h
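
For context, a minimal sketch of the locking model the commit message relies
on: exclusive ownership of a buffer, and therefore of every page in its
b_pages[] array, comes from holding the buffer semaphore (b_sema, taken via
xfs_buf_lock()), and the pages live in the buftarg's own private address_space
rather than the block device mapping, so lock_page() adds nothing. The
function below is illustrative only and not part of the commit;
example_buffer_read() is a hypothetical caller, while xfs_buf_lock(),
xfs_buf_iorequest(), xfs_buf_iowait() and xfs_buf_unlock() are the existing
helpers declared in fs/xfs/linux-2.6/xfs_buf.h.

/*
 * Illustrative sketch, not part of this commit: read a metadata buffer
 * relying only on the buffer semaphore for exclusion.  Declarations come
 * from fs/xfs/linux-2.6/xfs_buf.h.
 */
static void
example_buffer_read(
	xfs_buf_t		*bp)
{
	xfs_buf_lock(bp);		/* down(&bp->b_sema): sole owner of bp */

	/*
	 * While the semaphore is held nobody else can touch bp or the
	 * pages in bp->b_pages[], so none of them needs lock_page().
	 */
	bp->b_flags |= XBF_READ;
	xfs_buf_iorequest(bp);		/* build and submit the bios */
	xfs_buf_iowait(bp);		/* sleep until b_io_remaining drains */

	xfs_buf_unlock(bp);		/* up(&bp->b_sema) */
}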

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 522cfaa7025801818c3f1ec79137f1b48e96d99a..302273f8e2a92399b27b13faf9d7ddf72359f518 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -387,8 +387,6 @@ _xfs_buf_lookup_pages(
                if (unlikely(page == NULL)) {
                        if (flags & XBF_READ_AHEAD) {
                                bp->b_page_count = i;
-                               for (i = 0; i < bp->b_page_count; i++)
-                                       unlock_page(bp->b_pages[i]);
                                return -ENOMEM;
                        }
 
@@ -418,24 +416,17 @@ _xfs_buf_lookup_pages(
                ASSERT(!PagePrivate(page));
                if (!PageUptodate(page)) {
                        page_count--;
-                       if (blocksize >= PAGE_CACHE_SIZE) {
-                               if (flags & XBF_READ)
-                                       bp->b_locked = 1;
-                       } else if (!PagePrivate(page)) {
+                       if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
                                if (test_page_region(page, offset, nbytes))
                                        page_count++;
                        }
                }
 
+               unlock_page(page);
                bp->b_pages[i] = page;
                offset = 0;
        }
 
-       if (!bp->b_locked) {
-               for (i = 0; i < bp->b_page_count; i++)
-                       unlock_page(bp->b_pages[i]);
-       }
-
        if (page_count == bp->b_page_count)
                bp->b_flags |= XBF_DONE;
 
@@ -752,7 +743,6 @@ xfs_buf_associate_memory(
                bp->b_pages[i] = mem_to_page((void *)pageaddr);
                pageaddr += PAGE_CACHE_SIZE;
        }
-       bp->b_locked = 0;
 
        bp->b_count_desired = len;
        bp->b_buffer_length = buflen;
@@ -1099,25 +1089,13 @@ xfs_buf_iostart(
        return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-       xfs_buf_t               *bp)
-{
-       ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-       if (bp->b_flags & XBF_READ)
-               return bp->b_locked;
-       return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
        xfs_buf_t               *bp,
        int                     schedule)
 {
-       if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-               bp->b_locked = 0;
+       if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
                xfs_buf_ioend(bp, schedule);
-       }
 }
 
 STATIC void
@@ -1148,10 +1126,6 @@ xfs_buf_bio_end_io(
 
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
-
-               if (_xfs_buf_iolocked(bp)) {
-                       unlock_page(page);
-               }
        } while (bvec >= bio->bi_io_vec);
 
        _xfs_buf_ioend(bp, 1);
@@ -1162,13 +1136,12 @@ STATIC void
 _xfs_buf_ioapply(
        xfs_buf_t               *bp)
 {
-       int                     i, rw, map_i, total_nr_pages, nr_pages;
+       int                     rw, map_i, total_nr_pages, nr_pages;
        struct bio              *bio;
        int                     offset = bp->b_offset;
        int                     size = bp->b_count_desired;
        sector_t                sector = bp->b_bn;
        unsigned int            blocksize = bp->b_target->bt_bsize;
-       int                     locking = _xfs_buf_iolocked(bp);
 
        total_nr_pages = bp->b_page_count;
        map_i = 0;
@@ -1191,7 +1164,7 @@ _xfs_buf_ioapply(
         * filesystem block size is not smaller than the page size.
         */
        if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-           (bp->b_flags & XBF_READ) && locking &&
+           (bp->b_flags & XBF_READ) &&
            (blocksize >= PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);
 
@@ -1208,24 +1181,6 @@ _xfs_buf_ioapply(
                goto submit_io;
        }
 
-       /* Lock down the pages which we need to for the request */
-       if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-               for (i = 0; size; i++) {
-                       int             nbytes = PAGE_CACHE_SIZE - offset;
-                       struct page     *page = bp->b_pages[i];
-
-                       if (nbytes > size)
-                               nbytes = size;
-
-                       lock_page(page);
-
-                       size -= nbytes;
-                       offset = 0;
-               }
-               offset = bp->b_offset;
-               size = bp->b_count_desired;
-       }
-
 next_chunk:
        atomic_inc(&bp->b_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index b5908a34b15d80916ac7ce307a76f02e319bcf73..a3d207de48b8b206e510a9b4009a6eb5cedad5f7 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -143,7 +143,6 @@ typedef struct xfs_buf {
        void                    *b_fspriv2;
        void                    *b_fspriv3;
        unsigned short          b_error;        /* error code on I/O */
-       unsigned short          b_locked;       /* page array is locked */
        unsigned int            b_page_count;   /* size of page array */
        unsigned int            b_offset;       /* page offset in first page */
        struct page             **b_pages;      /* array of page pointers */