1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * File open, close, extend, truncate
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/capability.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37 #include <linux/falloc.h>
39 #define MLOG_MASK_PREFIX ML_INODE
40 #include <cluster/masklog.h>
48 #include "extent_map.h"
60 #include "buffer_head_io.h"
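/*
 * Kick off writeback of any dirty pages for this inode and sync its
 * associated metadata buffers; the fsync path below builds on this.
 */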
62 static int ocfs2_sync_inode(struct inode *inode)
64 filemap_fdatawrite(inode->i_mapping);
65 return sync_mapping_buffers(inode->i_mapping);
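/*
 * Allocate the per-open file private data: a mutex plus the lock
 * resource used for cluster-aware file locking (flock).
 * ocfs2_free_file_private() below is the matching teardown.
 */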
68 static int ocfs2_init_file_private(struct inode *inode, struct file *file)
70 struct ocfs2_file_private *fp;
72 fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
77 mutex_init(&fp->fp_mutex);
78 ocfs2_file_lock_res_init(&fp->fp_flock, fp);
79 file->private_data = fp;
84 static void ocfs2_free_file_private(struct inode *inode, struct file *file)
86 struct ocfs2_file_private *fp = file->private_data;
87 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
90 ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
91 ocfs2_lock_res_free(&fp->fp_flock);
93 file->private_data = NULL;
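/*
 * Open is mostly bookkeeping: under ip_lock we refuse to open an
 * inode that has already been wiped from disk, and O_DIRECT openers
 * are noted via the OCFS2_INODE_OPEN_DIRECT flag.
 */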
97 static int ocfs2_file_open(struct inode *inode, struct file *file)
100 int mode = file->f_flags;
101 struct ocfs2_inode_info *oi = OCFS2_I(inode);
103 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
104 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
106 spin_lock(&oi->ip_lock);
108 /* Check that the inode hasn't been wiped from disk by another
109 * node. If it hasn't then we're safe as long as we hold the
110 * spin lock until our increment of open count. */
111 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
112 spin_unlock(&oi->ip_lock);
119 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
122 spin_unlock(&oi->ip_lock);
124 status = ocfs2_init_file_private(inode, file);
127 * We want to set open count back if we're failing the
130 spin_lock(&oi->ip_lock);
132 spin_unlock(&oi->ip_lock);
140 static int ocfs2_file_release(struct inode *inode, struct file *file)
142 struct ocfs2_inode_info *oi = OCFS2_I(inode);
144 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
145 file->f_path.dentry->d_name.len,
146 file->f_path.dentry->d_name.name);
148 spin_lock(&oi->ip_lock);
149 if (!--oi->ip_open_count)
150 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
151 spin_unlock(&oi->ip_lock);
153 ocfs2_free_file_private(inode, file);
160 static int ocfs2_dir_open(struct inode *inode, struct file *file)
162 return ocfs2_init_file_private(inode, file);
165 static int ocfs2_dir_release(struct inode *inode, struct file *file)
167 ocfs2_free_file_private(inode, file);
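/*
 * fsync: flush dirty data with ocfs2_sync_inode() and then force a
 * journal commit so the metadata changes are on disk too. Any
 * failure is reported back as -EIO.
 */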
171 static int ocfs2_sync_file(struct file *file,
172 struct dentry *dentry,
177 struct inode *inode = dentry->d_inode;
178 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
180 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
181 dentry->d_name.len, dentry->d_name.name);
183 err = ocfs2_sync_inode(dentry->d_inode);
187 journal = osb->journal->j_journal;
188 err = journal_force_commit(journal);
193 return (err < 0) ? -EIO : 0;
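/*
 * Decide whether this access should update atime. Besides the usual
 * noatime/nodiratime/relatime mount options, ocfs2 also honours an
 * atime_quantum: atime is updated at most once per quantum, so the
 * inode does not have to be dirtied (and journaled) on every access.
 */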
196 int ocfs2_should_update_atime(struct inode *inode,
197 struct vfsmount *vfsmnt)
200 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
202 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
205 if ((inode->i_flags & S_NOATIME) ||
206 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
210 * We can be called with no vfsmnt structure - NFSD will
213 * Note that our action here is different than touch_atime() -
214 * if we can't tell whether this is a noatime mount, then we
215 * don't know whether to trust the value of s_atime_quantum.
220 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
221 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
224 if (vfsmnt->mnt_flags & MNT_RELATIME) {
225 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
226 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
233 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
239 int ocfs2_update_inode_atime(struct inode *inode,
240 struct buffer_head *bh)
243 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
245 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
249 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
250 if (IS_ERR(handle)) {
256 ret = ocfs2_journal_access(handle, inode, bh,
257 OCFS2_JOURNAL_ACCESS_WRITE);
264 * Don't use ocfs2_mark_inode_dirty() here as we don't always
265 * have i_mutex to guard against concurrent changes to other
268 inode->i_atime = CURRENT_TIME;
269 di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
270 di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
272 ret = ocfs2_journal_dirty(handle, bh);
277 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
283 static int ocfs2_set_inode_size(handle_t *handle,
285 struct buffer_head *fe_bh,
291 i_size_write(inode, new_i_size);
292 inode->i_blocks = ocfs2_inode_sector_count(inode);
293 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
295 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
306 static int ocfs2_simple_size_update(struct inode *inode,
307 struct buffer_head *di_bh,
311 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
312 handle_t *handle = NULL;
314 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
315 if (IS_ERR(handle)) {
321 ret = ocfs2_set_inode_size(handle, inode, di_bh,
326 ocfs2_commit_trans(osb, handle);
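/*
 * Prepare a size-reducing truncate: zero the tail of the cluster
 * containing the new EOF, then write the smaller i_size and fresh
 * c/mtime into the dinode under a small transaction. Despite the
 * name, the orphaning is still a TODO (see below); the extent
 * removal itself happens later via ocfs2_commit_truncate().
 */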
331 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
333 struct buffer_head *fe_bh,
338 struct ocfs2_dinode *di;
343 /* TODO: This needs to actually orphan the inode in this
346 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
347 if (IS_ERR(handle)) {
348 status = PTR_ERR(handle);
353 status = ocfs2_journal_access(handle, inode, fe_bh,
354 OCFS2_JOURNAL_ACCESS_WRITE);
361 * Do this before setting i_size.
363 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
364 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
371 i_size_write(inode, new_i_size);
372 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
374 di = (struct ocfs2_dinode *) fe_bh->b_data;
375 di->i_size = cpu_to_le64(new_i_size);
376 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
377 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
379 status = ocfs2_journal_dirty(handle, fe_bh);
384 ocfs2_commit_trans(osb, handle);
391 static int ocfs2_truncate_file(struct inode *inode,
392 struct buffer_head *di_bh,
396 struct ocfs2_dinode *fe = NULL;
397 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
398 struct ocfs2_truncate_context *tc = NULL;
400 mlog_entry("(inode = %llu, new_i_size = %llu\n",
401 (unsigned long long)OCFS2_I(inode)->ip_blkno,
402 (unsigned long long)new_i_size);
404 fe = (struct ocfs2_dinode *) di_bh->b_data;
405 if (!OCFS2_IS_VALID_DINODE(fe)) {
406 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
411 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
412 "Inode %llu, inode i_size = %lld != di "
413 "i_size = %llu, i_flags = 0x%x\n",
414 (unsigned long long)OCFS2_I(inode)->ip_blkno,
416 (unsigned long long)le64_to_cpu(fe->i_size),
417 le32_to_cpu(fe->i_flags));
419 if (new_i_size > le64_to_cpu(fe->i_size)) {
420 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
421 (unsigned long long)le64_to_cpu(fe->i_size),
422 (unsigned long long)new_i_size);
428 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
429 (unsigned long long)le64_to_cpu(fe->i_blkno),
430 (unsigned long long)le64_to_cpu(fe->i_size),
431 (unsigned long long)new_i_size);
433 /* let's handle the simple truncate cases before doing any more
434 * cluster locking. */
435 if (new_i_size == le64_to_cpu(fe->i_size))
438 down_write(&OCFS2_I(inode)->ip_alloc_sem);
441 * The inode lock forced other nodes to sync and drop their
442 * pages, which (correctly) happens even if we have a truncate
443 * without allocation change - ocfs2 cluster sizes can be much
444 * greater than page size, so we have to truncate them
447 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
448 truncate_inode_pages(inode->i_mapping, new_i_size);
450 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
451 status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
452 i_size_read(inode), 1);
456 goto bail_unlock_sem;
459 /* alright, we're going to need to do a full blown alloc size
460 * change. Orphan the inode so that recovery can complete the
461 * truncate if necessary. This does the task of marking
463 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
466 goto bail_unlock_sem;
469 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
472 goto bail_unlock_sem;
475 status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
478 goto bail_unlock_sem;
481 /* TODO: orphan dir cleanup here. */
483 up_write(&OCFS2_I(inode)->ip_alloc_sem);
492 * extend file allocation only here.
493 * we'll update all the disk stuff, and oip->alloc_size
495 * expect stuff to be locked, a transaction started and enough data /
496 * metadata reservations in the contexts.
498 * Will return -EAGAIN, and a reason if a restart is needed.
499 * If passed in, *reason will always be set, even in error.
501 int ocfs2_add_inode_data(struct ocfs2_super *osb,
506 struct buffer_head *fe_bh,
508 struct ocfs2_alloc_context *data_ac,
509 struct ocfs2_alloc_context *meta_ac,
510 enum ocfs2_alloc_restarted *reason_ret)
513 struct ocfs2_extent_tree et;
515 ocfs2_get_dinode_extent_tree(&et, inode, fe_bh);
516 ret = ocfs2_add_clusters_in_btree(osb, inode, logical_offset,
517 clusters_to_add, mark_unwritten,
519 data_ac, meta_ac, reason_ret);
520 ocfs2_put_extent_tree(&et);
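/*
 * Grow the allocation by clusters_to_add clusters starting at
 * logical_start. Allocator reservations, journal credits and the
 * transaction are all managed here; if the btree code asks for a
 * restart (RESTART_TRANS / RESTART_META) we either extend the
 * running transaction or restart the whole function with whatever
 * is still left to add.
 */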
525 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
526 u32 clusters_to_add, int mark_unwritten)
529 int restart_func = 0;
532 struct buffer_head *bh = NULL;
533 struct ocfs2_dinode *fe = NULL;
534 handle_t *handle = NULL;
535 struct ocfs2_alloc_context *data_ac = NULL;
536 struct ocfs2_alloc_context *meta_ac = NULL;
537 enum ocfs2_alloc_restarted why;
538 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
539 struct ocfs2_extent_tree et;
541 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
544 * This function only exists for file systems which don't
547 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
549 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
550 OCFS2_BH_CACHED, inode);
556 fe = (struct ocfs2_dinode *) bh->b_data;
557 if (!OCFS2_IS_VALID_DINODE(fe)) {
558 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
564 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
566 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
567 "clusters_to_add = %u\n",
568 (unsigned long long)OCFS2_I(inode)->ip_blkno,
569 (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
571 ocfs2_get_dinode_extent_tree(&et, inode, bh);
572 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
574 ocfs2_put_extent_tree(&et);
580 credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
582 handle = ocfs2_start_trans(osb, credits);
583 if (IS_ERR(handle)) {
584 status = PTR_ERR(handle);
590 restarted_transaction:
591 /* reserve a write to the file entry early on - that way if we
592 * run out of credits in the allocation path, we can still
594 status = ocfs2_journal_access(handle, inode, bh,
595 OCFS2_JOURNAL_ACCESS_WRITE);
601 prev_clusters = OCFS2_I(inode)->ip_clusters;
603 status = ocfs2_add_inode_data(osb,
613 if ((status < 0) && (status != -EAGAIN)) {
614 if (status != -ENOSPC)
619 status = ocfs2_journal_dirty(handle, bh);
625 spin_lock(&OCFS2_I(inode)->ip_lock);
626 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
627 spin_unlock(&OCFS2_I(inode)->ip_lock);
629 if (why != RESTART_NONE && clusters_to_add) {
630 if (why == RESTART_META) {
631 mlog(0, "restarting function.\n");
634 BUG_ON(why != RESTART_TRANS);
636 mlog(0, "restarting transaction.\n");
637 /* TODO: This can be more intelligent. */
638 credits = ocfs2_calc_extend_credits(osb->sb,
641 status = ocfs2_extend_trans(handle, credits);
643 /* handle still has to be committed at
649 goto restarted_transaction;
653 mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
654 le32_to_cpu(fe->i_clusters),
655 (unsigned long long)le64_to_cpu(fe->i_size));
656 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
657 OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));
661 ocfs2_commit_trans(osb, handle);
665 ocfs2_free_alloc_context(data_ac);
669 ocfs2_free_alloc_context(meta_ac);
672 if ((!status) && restart_func) {
685 /* Some parts of this taken from generic_cont_expand, which turned out
686 * to be too fragile to do exactly what we need without us having to
687 * worry about recursive locking in ->prepare_write() and
688 * ->commit_write(). */
689 static int ocfs2_write_zero_page(struct inode *inode,
692 struct address_space *mapping = inode->i_mapping;
696 handle_t *handle = NULL;
699 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
700 /* ugh. in prepare/commit_write, if from==to==start of block, we
701 ** skip the prepare. make sure we never send an offset for the start
704 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
707 index = size >> PAGE_CACHE_SHIFT;
709 page = grab_cache_page(mapping, index);
716 ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
722 if (ocfs2_should_order_data(inode)) {
723 handle = ocfs2_start_walk_page_trans(inode, page, offset,
725 if (IS_ERR(handle)) {
726 ret = PTR_ERR(handle);
732 /* must not update i_size! */
733 ret = block_commit_write(page, offset, offset);
740 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
743 page_cache_release(page);
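/*
 * Zero the region between the current i_size and zero_to_size, one
 * filesystem block at a time, using ocfs2_write_zero_page() above.
 */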
748 static int ocfs2_zero_extend(struct inode *inode,
753 struct super_block *sb = inode->i_sb;
755 start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
756 while (start_off < zero_to_size) {
757 ret = ocfs2_write_zero_page(inode, start_off);
763 start_off += sb->s_blocksize;
766 * Very large extends have the potential to lock up
767 * the cpu for extended periods of time.
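/*
 * Extend path for filesystems without sparse file support: allocate
 * clusters out to new_i_size and then zero everything up to zero_to,
 * so the file never contains a hole.
 */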
776 int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
780 struct ocfs2_inode_info *oi = OCFS2_I(inode);
782 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
783 if (clusters_to_add < oi->ip_clusters)
786 clusters_to_add -= oi->ip_clusters;
788 if (clusters_to_add) {
789 ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
798 * Call this even if we don't add any clusters to the tree. We
799 * still need to zero the area between the old i_size and the
802 ret = ocfs2_zero_extend(inode, zero_to);
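/*
 * Grow a file to new_i_size. Inline-data inodes are left alone if
 * the new size still fits in the dinode, otherwise they are
 * converted to extents first; non-sparse filesystems additionally
 * allocate and zero the new region via ocfs2_extend_no_holes(). The
 * i_size update itself is done last in ocfs2_simple_size_update().
 */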
810 static int ocfs2_extend_file(struct inode *inode,
811 struct buffer_head *di_bh,
815 struct ocfs2_inode_info *oi = OCFS2_I(inode);
819 /* setattr sometimes calls us like this. */
823 if (i_size_read(inode) == new_i_size)
825 BUG_ON(new_i_size < i_size_read(inode));
828 * Fall through for converting inline data, even if the fs
829 * supports sparse files.
831 * The check for inline data here is legal - nobody can add
832 * the feature since we have i_mutex. We must check it again
833 * after acquiring ip_alloc_sem though, as paths like mmap
834 * might have raced us to converting the inode to extents.
836 if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
837 && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
838 goto out_update_size;
841 * The alloc sem blocks people in read/write from reading our
842 * allocation until we're done changing it. We depend on
843 * i_mutex to block other extend/truncate calls while we're
846 down_write(&oi->ip_alloc_sem);
848 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
850 * We can optimize small extends by keeping the inodes
853 if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
854 up_write(&oi->ip_alloc_sem);
855 goto out_update_size;
858 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
860 up_write(&oi->ip_alloc_sem);
867 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
868 ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
870 up_write(&oi->ip_alloc_sem);
878 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
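/*
 * setattr entry point. Size changes are handled up front with
 * ocfs2_truncate_file() / ocfs2_extend_file() while the rw lock and
 * the cluster inode lock are held exclusively; the remaining
 * attribute updates are then committed in a small transaction.
 */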
886 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
888 int status = 0, size_change;
889 struct inode *inode = dentry->d_inode;
890 struct super_block *sb = inode->i_sb;
891 struct ocfs2_super *osb = OCFS2_SB(sb);
892 struct buffer_head *bh = NULL;
893 handle_t *handle = NULL;
895 mlog_entry("(0x%p, '%.*s')\n", dentry,
896 dentry->d_name.len, dentry->d_name.name);
898 /* ensuring we don't even attempt to truncate a symlink */
899 if (S_ISLNK(inode->i_mode))
900 attr->ia_valid &= ~ATTR_SIZE;
902 if (attr->ia_valid & ATTR_MODE)
903 mlog(0, "mode change: %d\n", attr->ia_mode);
904 if (attr->ia_valid & ATTR_UID)
905 mlog(0, "uid change: %d\n", attr->ia_uid);
906 if (attr->ia_valid & ATTR_GID)
907 mlog(0, "gid change: %d\n", attr->ia_gid);
908 if (attr->ia_valid & ATTR_SIZE)
909 mlog(0, "size change...\n");
910 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
911 mlog(0, "time change...\n");
913 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
914 | ATTR_GID | ATTR_UID | ATTR_MODE)
915 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
916 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
920 status = inode_change_ok(inode, attr);
924 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
926 status = ocfs2_rw_lock(inode, 1);
933 status = ocfs2_inode_lock(inode, &bh, 1);
935 if (status != -ENOENT)
940 if (size_change && attr->ia_size != i_size_read(inode)) {
941 if (attr->ia_size > sb->s_maxbytes) {
946 if (i_size_read(inode) > attr->ia_size)
947 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
949 status = ocfs2_extend_file(inode, bh, attr->ia_size);
951 if (status != -ENOSPC)
958 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
959 if (IS_ERR(handle)) {
960 status = PTR_ERR(handle);
966 * This will intentionally not wind up calling vmtruncate(),
967 * since all the work for a size change has been done above.
968 * Otherwise, we could get into problems with truncate as
969 * ip_alloc_sem is used there to protect against i_size
972 status = inode_setattr(inode, attr);
978 status = ocfs2_mark_inode_dirty(handle, inode, bh);
983 ocfs2_commit_trans(osb, handle);
985 ocfs2_inode_unlock(inode, 1);
988 ocfs2_rw_unlock(inode, 1);
997 int ocfs2_getattr(struct vfsmount *mnt,
998 struct dentry *dentry,
1001 struct inode *inode = dentry->d_inode;
1002 struct super_block *sb = dentry->d_inode->i_sb;
1003 struct ocfs2_super *osb = sb->s_fs_info;
1008 err = ocfs2_inode_revalidate(dentry);
1015 generic_fillattr(inode, stat);
1017 /* We set the blksize from the cluster size for performance */
1018 stat->blksize = osb->s_clustersize;
1026 int ocfs2_permission(struct inode *inode, int mask)
1032 ret = ocfs2_inode_lock(inode, NULL, 0);
1039 ret = generic_permission(inode, mask, NULL);
1041 ocfs2_inode_unlock(inode, 0);
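/*
 * Clear the setuid (and, for group-executable files, setgid) bits in
 * both the in-memory inode and the on-disk dinode. ocfs2 does this
 * itself rather than relying on the generic remove_suid() path so
 * that it happens under our own cluster locks - see the comment in
 * ocfs2_prepare_inode_for_write() below.
 */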
1047 static int __ocfs2_write_remove_suid(struct inode *inode,
1048 struct buffer_head *bh)
1052 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1053 struct ocfs2_dinode *di;
1055 mlog_entry("(Inode %llu, mode 0%o)\n",
1056 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
1058 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1059 if (IS_ERR(handle)) {
1065 ret = ocfs2_journal_access(handle, inode, bh,
1066 OCFS2_JOURNAL_ACCESS_WRITE);
1072 inode->i_mode &= ~S_ISUID;
1073 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1074 inode->i_mode &= ~S_ISGID;
1076 di = (struct ocfs2_dinode *) bh->b_data;
1077 di->i_mode = cpu_to_le16(inode->i_mode);
1079 ret = ocfs2_journal_dirty(handle, bh);
1084 ocfs2_commit_trans(osb, handle);
1091 * Will look for holes and unwritten extents in the range starting at
1092 * pos for count bytes (inclusive).
1094 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1098 unsigned int extent_flags;
1099 u32 cpos, clusters, extent_len, phys_cpos;
1100 struct super_block *sb = inode->i_sb;
1102 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1103 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1106 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1113 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1118 if (extent_len > clusters)
1119 extent_len = clusters;
1121 clusters -= extent_len;
1128 static int ocfs2_write_remove_suid(struct inode *inode)
1131 struct buffer_head *bh = NULL;
1132 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1134 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1135 oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
1141 ret = __ocfs2_write_remove_suid(inode, bh);
1148 * Allocate enough extents to cover the region starting at byte offset
1149 * start for len bytes. Existing extents are skipped, any extents
1150 * added are marked as "unwritten".
1152 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1156 u32 cpos, phys_cpos, clusters, alloc_size;
1157 u64 end = start + len;
1158 struct buffer_head *di_bh = NULL;
1160 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1161 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1162 OCFS2_I(inode)->ip_blkno, &di_bh,
1163 OCFS2_BH_CACHED, inode);
1170 * Nothing to do if the requested reservation range
1171 * fits within the inode.
1173 if (ocfs2_size_fits_inline_data(di_bh, end))
1176 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1184 * We consider both start and len to be inclusive.
1186 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1187 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1191 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1199 * Hole or existing extent len can be arbitrary, so
1200 * cap it to our own allocation request.
1202 if (alloc_size > clusters)
1203 alloc_size = clusters;
1207 * We already have an allocation at this
1208 * region so we can safely skip it.
1213 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1222 clusters -= alloc_size;
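/*
 * Remove one contiguous run of len clusters starting at cpos from
 * the inode's extent tree: reserve allocator metadata, flush the
 * truncate log if it is getting full, remove the extent record and
 * hand the freed clusters to the truncate log for deferred freeing.
 */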
1232 static int __ocfs2_remove_inode_range(struct inode *inode,
1233 struct buffer_head *di_bh,
1234 u32 cpos, u32 phys_cpos, u32 len,
1235 struct ocfs2_cached_dealloc_ctxt *dealloc)
1238 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
1239 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1240 struct inode *tl_inode = osb->osb_tl_inode;
1242 struct ocfs2_alloc_context *meta_ac = NULL;
1243 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1244 struct ocfs2_extent_tree et;
1246 ocfs2_get_dinode_extent_tree(&et, inode, di_bh);
1248 ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
1250 ocfs2_put_extent_tree(&et);
1255 mutex_lock(&tl_inode->i_mutex);
1257 if (ocfs2_truncate_log_needs_flush(osb)) {
1258 ret = __ocfs2_flush_truncate_log(osb);
1265 handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS);
1266 if (IS_ERR(handle)) {
1272 ret = ocfs2_journal_access(handle, inode, di_bh,
1273 OCFS2_JOURNAL_ACCESS_WRITE);
1279 ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac,
1286 OCFS2_I(inode)->ip_clusters -= len;
1287 di->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
1289 ret = ocfs2_journal_dirty(handle, di_bh);
1295 ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len);
1300 ocfs2_commit_trans(osb, handle);
1302 mutex_unlock(&tl_inode->i_mutex);
1305 ocfs2_free_alloc_context(meta_ac);
1307 ocfs2_put_extent_tree(&et);
1312 * Truncate a byte range, avoiding pages within partial clusters. This
1313 * preserves those pages for the zeroing code to write to.
1315 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1318 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1320 struct address_space *mapping = inode->i_mapping;
1322 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1323 end = byte_start + byte_len;
1324 end = end & ~(osb->s_clustersize - 1);
1327 unmap_mapping_range(mapping, start, end - start, 0);
1328 truncate_inode_pages_range(mapping, start, end - 1);
1332 static int ocfs2_zero_partial_clusters(struct inode *inode,
1336 u64 tmpend, end = start + len;
1337 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1338 unsigned int csize = osb->s_clustersize;
1342 * The "start" and "end" values are NOT necessarily part of
1343 * the range whose allocation is being deleted. Rather, this
1344 * is what the user passed in with the request. We must zero
1345 * partial clusters here. There's no need to worry about
1346 * physical allocation - the zeroing code knows to skip holes.
1348 mlog(0, "byte start: %llu, end: %llu\n",
1349 (unsigned long long)start, (unsigned long long)end);
1352 * If both edges are on a cluster boundary then there's no
1353 * zeroing required as the region is part of the allocation to
1356 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1359 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1360 if (IS_ERR(handle)) {
1367 * We want to get the byte offset of the end of the 1st cluster.
1369 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1373 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1374 (unsigned long long)start, (unsigned long long)tmpend);
1376 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1382 * This may make start and end equal, but the zeroing
1383 * code will skip any work in that case so there's no
1384 * need to catch it up here.
1386 start = end & ~(osb->s_clustersize - 1);
1388 mlog(0, "2nd range: start: %llu, end: %llu\n",
1389 (unsigned long long)start, (unsigned long long)end);
1391 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1396 ocfs2_commit_trans(osb, handle);
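/*
 * Punch a hole: zero the partial clusters at either end of the byte
 * range, remove any whole clusters in the middle (existing holes are
 * simply skipped), then drop the affected page cache. Inline data is
 * handled with a simple in-dinode truncate instead.
 */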
1401 static int ocfs2_remove_inode_range(struct inode *inode,
1402 struct buffer_head *di_bh, u64 byte_start,
1406 u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size;
1407 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1408 struct ocfs2_cached_dealloc_ctxt dealloc;
1409 struct address_space *mapping = inode->i_mapping;
1411 ocfs2_init_dealloc_ctxt(&dealloc);
1416 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1417 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1418 byte_start + byte_len, 0);
1424 * There's no need to get fancy with the page cache
1425 * truncate of an inline-data inode. We're talking
1426 * about less than a page here, which will be cached
1427 * in the dinode buffer anyway.
1429 unmap_mapping_range(mapping, 0, 0, 0);
1430 truncate_inode_pages(mapping, 0);
1434 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1435 trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
1436 if (trunc_len >= trunc_start)
1437 trunc_len -= trunc_start;
1441 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n",
1442 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1443 (unsigned long long)byte_start,
1444 (unsigned long long)byte_len, trunc_start, trunc_len);
1446 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1454 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1461 if (alloc_size > trunc_len)
1462 alloc_size = trunc_len;
1464 /* Only do work for non-holes */
1465 if (phys_cpos != 0) {
1466 ret = __ocfs2_remove_inode_range(inode, di_bh, cpos,
1467 phys_cpos, alloc_size,
1476 trunc_len -= alloc_size;
1479 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1482 ocfs2_schedule_truncate_log_flush(osb, 1);
1483 ocfs2_run_deallocs(osb, &dealloc);
1489 * Parts of this function taken from xfs_change_file_space()
1491 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1492 loff_t f_pos, unsigned int cmd,
1493 struct ocfs2_space_resv *sr,
1499 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1500 struct buffer_head *di_bh = NULL;
1502 unsigned long long max_off = inode->i_sb->s_maxbytes;
1504 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1507 mutex_lock(&inode->i_mutex);
1510 * This prevents concurrent writes on other nodes
1512 ret = ocfs2_rw_lock(inode, 1);
1518 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1524 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1526 goto out_inode_unlock;
1529 switch (sr->l_whence) {
1530 case 0: /*SEEK_SET*/
1532 case 1: /*SEEK_CUR*/
1533 sr->l_start += f_pos;
1535 case 2: /*SEEK_END*/
1536 sr->l_start += i_size_read(inode);
1540 goto out_inode_unlock;
1544 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1547 || sr->l_start > max_off
1548 || (sr->l_start + llen) < 0
1549 || (sr->l_start + llen) > max_off) {
1551 goto out_inode_unlock;
1553 size = sr->l_start + sr->l_len;
1555 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1556 if (sr->l_len <= 0) {
1558 goto out_inode_unlock;
1562 if (file && should_remove_suid(file->f_path.dentry)) {
1563 ret = __ocfs2_write_remove_suid(inode, di_bh);
1566 goto out_inode_unlock;
1570 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1572 case OCFS2_IOC_RESVSP:
1573 case OCFS2_IOC_RESVSP64:
1575 * This takes unsigned offsets, but the signed ones we
1576 * pass have been checked against overflow above.
1578 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1581 case OCFS2_IOC_UNRESVSP:
1582 case OCFS2_IOC_UNRESVSP64:
1583 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1589 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1592 goto out_inode_unlock;
1596 * We update c/mtime for these changes
1598 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1599 if (IS_ERR(handle)) {
1600 ret = PTR_ERR(handle);
1602 goto out_inode_unlock;
1605 if (change_size && i_size_read(inode) < size)
1606 i_size_write(inode, size);
1608 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1609 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1613 ocfs2_commit_trans(osb, handle);
1617 ocfs2_inode_unlock(inode, 1);
1619 ocfs2_rw_unlock(inode, 1);
1622 mutex_unlock(&inode->i_mutex);
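/*
 * ocfs2_change_file_space() below is the entry point used by
 * ocfs2_ioctl() for the OCFS2_IOC_RESVSP/RESVSP64 and
 * OCFS2_IOC_UNRESVSP/UNRESVSP64 requests. As an illustrative sketch
 * only (not part of this file, error handling omitted), a userspace
 * caller reserving 1MB at the start of an open file descriptor fd
 * might do roughly:
 *
 *	struct ocfs2_space_resv sr = { .l_whence = 0, .l_start = 0,
 *				       .l_len = 1024 * 1024 };
 *	ioctl(fd, OCFS2_IOC_RESVSP64, &sr);
 */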
1626 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1627 struct ocfs2_space_resv *sr)
1629 struct inode *inode = file->f_path.dentry->d_inode;
1630 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1632 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1633 !ocfs2_writes_unwritten_extents(osb))
1635 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1636 !ocfs2_sparse_alloc(osb))
1639 if (!S_ISREG(inode->i_mode))
1642 if (!(file->f_mode & FMODE_WRITE))
1645 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1648 static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1651 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1652 struct ocfs2_space_resv sr;
1653 int change_size = 1;
1655 if (!ocfs2_writes_unwritten_extents(osb))
1658 if (S_ISDIR(inode->i_mode))
1661 if (mode & FALLOC_FL_KEEP_SIZE)
1665 sr.l_start = (s64)offset;
1666 sr.l_len = (s64)len;
1668 return __ocfs2_change_file_space(NULL, inode, offset,
1669 OCFS2_IOC_RESVSP64, &sr, change_size);
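/*
 * Called at the top of the write and splice-write paths. Takes the
 * cluster inode lock, clears suid/sgid if needed, resolves O_APPEND
 * to a real file position and decides whether an O_DIRECT request
 * can really go direct (no inline data, no holes in the range, not
 * extending i_size) - otherwise the caller falls back to buffered
 * I/O.
 */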
1672 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1678 int ret = 0, meta_level = 0;
1679 struct inode *inode = dentry->d_inode;
1680 loff_t saved_pos, end;
1683 * We start with a read level meta lock and only jump to an ex
1684 * if we need to make modifications here.
1687 ret = ocfs2_inode_lock(inode, NULL, meta_level);
1694 /* Clear suid / sgid if necessary. We do this here
1695 * instead of later in the write path because
1696 * remove_suid() calls ->setattr without any hint that
1697 * we may have already done our cluster locking. Since
1698 * ocfs2_setattr() *must* take cluster locks to
1699 * proceeed, this will lead us to recursively lock the
1700 * inode. There's also the dinode i_size state which
1701 * can be lost via setattr during extending writes (we
1702 * set inode->i_size at the end of a write. */
1703 if (should_remove_suid(dentry)) {
1704 if (meta_level == 0) {
1705 ocfs2_inode_unlock(inode, meta_level);
1710 ret = ocfs2_write_remove_suid(inode);
1717 /* work on a copy of ppos until we're sure that we won't have
1718 * to recalculate it due to relocking. */
1720 saved_pos = i_size_read(inode);
1721 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
1726 end = saved_pos + count;
1729 * Skip the O_DIRECT checks if we don't need
1732 if (!direct_io || !(*direct_io))
1736 * There's no sane way to do direct writes to an inode
1739 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1745 * Allowing concurrent direct writes means
1746 * i_size changes wouldn't be synchronized, so
1747 * one node could wind up truncating another
1750 if (end > i_size_read(inode)) {
1756 * We don't fill holes during direct io, so
1757 * check for them here. If any are found, the
1758 * caller will have to retake some cluster
1759 * locks and initiate the io as buffered.
1761 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
1774 ocfs2_inode_unlock(inode, meta_level);
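/*
 * Write path. Lock ordering is i_mutex -> i_alloc_sem (O_DIRECT
 * only) -> rw_lock; concurrent O_DIRECT writers take the rw lock
 * shared. If ocfs2_prepare_inode_for_write() rules out a direct
 * write we drop back to the buffered path, and for O_SYNC/IS_SYNC
 * writes that changed i_size or the cluster count we force a journal
 * commit by hand since ocfs2 does not use the dirty inode list.
 */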
1780 static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1781 const struct iovec *iov,
1782 unsigned long nr_segs,
1785 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1787 ssize_t written = 0;
1788 size_t ocount; /* original count */
1789 size_t count; /* after file limit checks */
1790 loff_t old_size, *ppos = &iocb->ki_pos;
1792 struct file *file = iocb->ki_filp;
1793 struct inode *inode = file->f_path.dentry->d_inode;
1794 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1796 mlog_entry("(0x%p, %u, '%.*s')\n", file,
1797 (unsigned int)nr_segs,
1798 file->f_path.dentry->d_name.len,
1799 file->f_path.dentry->d_name.name);
1801 if (iocb->ki_left == 0)
1804 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1806 appending = file->f_flags & O_APPEND ? 1 : 0;
1807 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
1809 mutex_lock(&inode->i_mutex);
1812 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
1814 down_read(&inode->i_alloc_sem);
1818 /* concurrent O_DIRECT writes are allowed */
1819 rw_level = !direct_io;
1820 ret = ocfs2_rw_lock(inode, rw_level);
1826 can_do_direct = direct_io;
1827 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1828 iocb->ki_left, appending,
1836 * We can't complete the direct I/O as requested, fall back to
1839 if (direct_io && !can_do_direct) {
1840 ocfs2_rw_unlock(inode, rw_level);
1841 up_read(&inode->i_alloc_sem);
1851 * To later detect whether a journal commit for sync writes is
1852 * necessary, we sample i_size, and cluster count here.
1854 old_size = i_size_read(inode);
1855 old_clusters = OCFS2_I(inode)->ip_clusters;
1857 /* communicate with ocfs2_dio_end_io */
1858 ocfs2_iocb_set_rw_locked(iocb, rw_level);
1861 ret = generic_segment_checks(iov, &nr_segs, &ocount,
1866 ret = generic_write_checks(file, ppos, &count,
1867 S_ISBLK(inode->i_mode));
1871 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
1872 ppos, count, ocount);
1878 written = generic_file_aio_write_nolock(iocb, iov, nr_segs,
1883 /* buffered aio wouldn't have proper lock coverage today */
1884 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
1886 if ((file->f_flags & O_SYNC && !direct_io) || IS_SYNC(inode)) {
1888 * The generic write paths have handled getting data
1889 * to disk, but since we don't make use of the dirty
1890 * inode list, a manual journal commit is necessary
1893 if (old_size != i_size_read(inode) ||
1894 old_clusters != OCFS2_I(inode)->ip_clusters) {
1895 ret = journal_force_commit(osb->journal->j_journal);
1902 * deep in generic_file_aio_write_nolock()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
1903 * function pointer which is called when o_direct io completes so that
1904 * it can unlock our rw lock. (it's the clustered equivalent of
1905 * i_alloc_sem; protects truncate from racing with pending ios).
1906 * Unfortunately there are error cases which call end_io and others
1907 * that don't. so we don't have to unlock the rw_lock if either an
1908 * async dio is going to do it in the future or an end_io after an
1909 * error has already done it.
1911 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
1918 ocfs2_rw_unlock(inode, rw_level);
1922 up_read(&inode->i_alloc_sem);
1924 mutex_unlock(&inode->i_mutex);
1927 return written ? written : ret;
1930 static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1937 struct inode *inode = out->f_path.dentry->d_inode;
1939 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
1941 out->f_path.dentry->d_name.len,
1942 out->f_path.dentry->d_name.name);
1944 inode_double_lock(inode, pipe->inode);
1946 ret = ocfs2_rw_lock(inode, 1);
1952 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
1959 ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
1962 ocfs2_rw_unlock(inode, 1);
1964 inode_double_unlock(inode, pipe->inode);
1970 static ssize_t ocfs2_file_splice_read(struct file *in,
1972 struct pipe_inode_info *pipe,
1977 struct inode *inode = in->f_path.dentry->d_inode;
1979 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
1981 in->f_path.dentry->d_name.len,
1982 in->f_path.dentry->d_name.name);
1985 * See the comment in ocfs2_file_aio_read()
1987 ret = ocfs2_inode_lock(inode, NULL, 0);
1992 ocfs2_inode_unlock(inode, 0);
1994 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
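/*
 * Read path. Buffered reads protect themselves in ->readpage(), so
 * only O_DIRECT readers take i_alloc_sem and a shared rw lock here.
 * The cluster inode lock is taken and dropped purely to refresh
 * i_size (and atime) before generic_file_aio_read() runs.
 */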
2001 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2002 const struct iovec *iov,
2003 unsigned long nr_segs,
2006 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2007 struct file *filp = iocb->ki_filp;
2008 struct inode *inode = filp->f_path.dentry->d_inode;
2010 mlog_entry("(0x%p, %u, '%.*s')\n", filp,
2011 (unsigned int)nr_segs,
2012 filp->f_path.dentry->d_name.len,
2013 filp->f_path.dentry->d_name.name);
2022 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2023 * need locks to protect pending reads from racing with truncate.
2025 if (filp->f_flags & O_DIRECT) {
2026 down_read(&inode->i_alloc_sem);
2029 ret = ocfs2_rw_lock(inode, 0);
2035 /* communicate with ocfs2_dio_end_io */
2036 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2040 * We're fine letting folks race truncates and extending
2041 * writes with read across the cluster, just like they can
2042 * locally. Hence no rw_lock during read.
2044 * Take and drop the meta data lock to update inode fields
2045 * like i_size. This allows the checks down below
2046 * generic_file_aio_read() a chance of actually working.
2048 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2053 ocfs2_inode_unlock(inode, lock_level);
2055 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2057 mlog(0, "generic_file_aio_read returned -EINVAL\n");
2059 /* buffered aio wouldn't have proper lock coverage today */
2060 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2062 /* see ocfs2_file_aio_write */
2063 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2070 up_read(&inode->i_alloc_sem);
2072 ocfs2_rw_unlock(inode, rw_level);
2078 const struct inode_operations ocfs2_file_iops = {
2079 .setattr = ocfs2_setattr,
2080 .getattr = ocfs2_getattr,
2081 .permission = ocfs2_permission,
2082 .setxattr = generic_setxattr,
2083 .getxattr = generic_getxattr,
2084 .listxattr = ocfs2_listxattr,
2085 .removexattr = generic_removexattr,
2086 .fallocate = ocfs2_fallocate,
2087 .fiemap = ocfs2_fiemap,
2090 const struct inode_operations ocfs2_special_file_iops = {
2091 .setattr = ocfs2_setattr,
2092 .getattr = ocfs2_getattr,
2093 .permission = ocfs2_permission,
2097 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2098 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2100 const struct file_operations ocfs2_fops = {
2101 .llseek = generic_file_llseek,
2102 .read = do_sync_read,
2103 .write = do_sync_write,
2105 .fsync = ocfs2_sync_file,
2106 .release = ocfs2_file_release,
2107 .open = ocfs2_file_open,
2108 .aio_read = ocfs2_file_aio_read,
2109 .aio_write = ocfs2_file_aio_write,
2110 .unlocked_ioctl = ocfs2_ioctl,
2111 #ifdef CONFIG_COMPAT
2112 .compat_ioctl = ocfs2_compat_ioctl,
2115 .flock = ocfs2_flock,
2116 .splice_read = ocfs2_file_splice_read,
2117 .splice_write = ocfs2_file_splice_write,
2120 const struct file_operations ocfs2_dops = {
2121 .llseek = generic_file_llseek,
2122 .read = generic_read_dir,
2123 .readdir = ocfs2_readdir,
2124 .fsync = ocfs2_sync_file,
2125 .release = ocfs2_dir_release,
2126 .open = ocfs2_dir_open,
2127 .unlocked_ioctl = ocfs2_ioctl,
2128 #ifdef CONFIG_COMPAT
2129 .compat_ioctl = ocfs2_compat_ioctl,
2132 .flock = ocfs2_flock,
2136 * POSIX-lockless variants of our file_operations.
2138 * These will be used if the underlying cluster stack does not support
2139 * posix file locking, if the user passes the "localflocks" mount
2140 * option, or if we have a local-only fs.
2142 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2143 * so we still want it in the case of no stack support for
2144 * plocks. Internally, it will do the right thing when asked to ignore
2147 const struct file_operations ocfs2_fops_no_plocks = {
2148 .llseek = generic_file_llseek,
2149 .read = do_sync_read,
2150 .write = do_sync_write,
2152 .fsync = ocfs2_sync_file,
2153 .release = ocfs2_file_release,
2154 .open = ocfs2_file_open,
2155 .aio_read = ocfs2_file_aio_read,
2156 .aio_write = ocfs2_file_aio_write,
2157 .unlocked_ioctl = ocfs2_ioctl,
2158 #ifdef CONFIG_COMPAT
2159 .compat_ioctl = ocfs2_compat_ioctl,
2161 .flock = ocfs2_flock,
2162 .splice_read = ocfs2_file_splice_read,
2163 .splice_write = ocfs2_file_splice_write,
2166 const struct file_operations ocfs2_dops_no_plocks = {
2167 .llseek = generic_file_llseek,
2168 .read = generic_read_dir,
2169 .readdir = ocfs2_readdir,
2170 .fsync = ocfs2_sync_file,
2171 .release = ocfs2_dir_release,
2172 .open = ocfs2_dir_open,
2173 .unlocked_ioctl = ocfs2_ioctl,
2174 #ifdef CONFIG_COMPAT
2175 .compat_ioctl = ocfs2_compat_ioctl,
2177 .flock = ocfs2_flock,