	u64 nr_delalloc;
	u64 delalloc_end;

+
	WARN_ON(!PageLocked(page));
-	if (page->index > end_index) {
-		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
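+	/*
+	 * pages fully past i_size, and the tail page when i_size is exactly
+	 * page aligned, hold no valid data: invalidate instead of writing
+	 */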
+	page_offset = i_size & (PAGE_CACHE_SIZE - 1);
+	if (page->index > end_index ||
+	   (page->index == end_index && !page_offset)) {
+		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0;
	}
	if (page->index == end_index) {
		char *userpage;

-		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
-
		userpage = kmap_atomic(page, KM_USER0);
-		memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
-		flush_dcache_page(page);
+		memset(userpage + page_offset, 0,
+		       PAGE_CACHE_SIZE - page_offset);
		kunmap_atomic(userpage, KM_USER0);
+		flush_dcache_page(page);
	}
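+	/* the i_size math above is done; code below expects page_offset == 0 */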
+	page_offset = 0;

	set_page_extent_mapped(page);
	    (inline_size & (root->sectorsize - 1)) == 0 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		u64 last_end;
-		u64 existing_delalloc = 0;
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
		last_end = (u64)(pages[num_pages - 1]->index) <<
			PAGE_CACHE_SHIFT;
		last_end += PAGE_CACHE_SIZE - 1;
-		if (start_pos < isize) {
-			u64 delalloc_start = start_pos;
-			existing_delalloc = count_range_bits(io_tree,
-					&delalloc_start,
-					end_of_last_block, (u64)-1,
-					EXTENT_DELALLOC);
-		}
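+		/*
+		 * ranges that are already delalloc keep their bit when set
+		 * again, so the pre-existing delalloc needs no counting here
+		 */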
		set_extent_delalloc(io_tree, start_pos, end_of_last_block,
				GFP_NOFS);
		btrfs_add_ordered_inode(inode);
		goto out;
	ret = -ENOMEM;
+again:
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;
	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
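+		/*
+		 * btrfs_readpage unlocks the page when the read finishes, so
+		 * truncate may have removed it from the mapping in between;
+		 * if so, drop this page and start over
+		 */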
+		if (page->mapping != mapping) {
+			unlock_page(page);
+			page_cache_release(page);
+			goto again;
+		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out;
		}
	}
-	page_start = (u64)page->index << PAGE_CACHE_SHIFT;

+	page_start = (u64)page->index << PAGE_CACHE_SHIFT;
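+	/* don't start the cow below while writeback on this page is in flight */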
+	wait_on_page_writeback(page);
	ret = btrfs_cow_one_page(inode, page, offset);

	unlock_page(page);