/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * buffer_head_io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"

#include "buffer_head_io.h"

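/*
 * Synchronously write a single buffer straight to disk, bypassing the
 * journal.  The buffer must not be managed by jbd.  The inode's
 * ip_io_mutex is held across the I/O, and on success the block is
 * recorded in the inode's uptodate cache.
 */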
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct inode *inode)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft read-only file system here.
	 * Non-journalled writes are only ever done on system files,
	 * which can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(inode, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
out:
	mlog_exit(ret);
	return ret;
}

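/*
 * Synchronously read 'nr' contiguous blocks starting at 'block',
 * without consulting any per-inode cache state.  NULL slots in bhs[]
 * are allocated with sb_getblk(); buffers already managed by jbd or
 * marked dirty are skipped.  On I/O error the failed buffers are
 * released and their slots reset to NULL.
 */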
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;

	if (!nr) {
		mlog(ML_BH_IO, "No buffers will be read!\n");
		goto bail;
	}

	for (i = 0; i < nr; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "trying to sync read a jbd "
			     "managed bh (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
		}

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
	}

	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "the journal got the buffer while it was "
			     "locked for io! (blocknr = %llu)\n",
			     (unsigned long long)bh->b_blocknr);
			BUG();
		}

		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to clean up the other buffers. */
			status = -EIO;
			put_bh(bh);
			bhs[i - 1] = NULL;
		}
	}

bail:
	return status;
}

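/*
 * Read 'nr' contiguous blocks starting at 'block', honouring the
 * OCFS2_BH_CACHED and OCFS2_BH_READAHEAD flags.  Cached reads consult
 * the inode's uptodate cache and only hit the disk when a buffer is
 * not known to be valid; readahead requests are submitted without
 * waiting for completion.  When an inode is given, ip_io_mutex is held
 * while the buffers are in flight.
 *
 * A minimal usage sketch (hypothetical caller, shown for illustration
 * only -- real callers typically hold the relevant cluster locks first):
 *
 *	struct buffer_head *bh = NULL;
 *	int ret;
 *
 *	ret = ocfs2_read_blocks(osb, blkno, 1, &bh, OCFS2_BH_CACHED, inode);
 *	if (ret < 0)
 *		mlog_errno(ret);
 *	else
 *		brelse(bh);
 */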
int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      struct inode *inode)
{
	int status = 0;
	struct super_block *sb;
	int i, ignore_cache = 0;
	struct buffer_head *bh;

	mlog_entry("(block=(%llu), nr=(%d), flags=%d, inode=%p)\n",
		   (unsigned long long)block, nr, flags, inode);

	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (!inode || !(flags & OCFS2_BH_CACHED)));

	if (osb == NULL || osb->sb == NULL || bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		mlog(ML_BH_IO, "No buffers will be read!\n");
		status = 0;
		goto bail;
	}

	sb = osb->sb;

	if (flags & OCFS2_BH_CACHED && !inode)
		flags &= ~OCFS2_BH_CACHED;

	if (inode)
		mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
	for (i = 0; i < nr; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				if (inode)
					mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];
		ignore_cache = 0;

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short-circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (flags & OCFS2_BH_CACHED &&
		    !ocfs2_buffer_uptodate(inode, bh)) {
			mlog(ML_UPTODATE,
			     "bh (%llu), inode %llu not uptodate\n",
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			ignore_cache = 1;
		}

		/* XXX: Can we ever get this and *not* have the cached
		 * flag set? */
		if (buffer_jbd(bh)) {
			if (!(flags & OCFS2_BH_CACHED) || ignore_cache)
				mlog(ML_BH_IO, "trying to sync read a jbd "
					       "managed bh (blocknr = %llu)\n",
				     (unsigned long long)bh->b_blocknr);
			continue;
		}

		if (!(flags & OCFS2_BH_CACHED) || ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				mlog(ML_BH_IO, "asking me to sync read a dirty "
					       "buffer! (blocknr = %llu)\n",
				     (unsigned long long)bh->b_blocknr);
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request, then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(inode, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if ((flags & OCFS2_BH_CACHED)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(inode, bh)) {
				unlock_buffer(bh);
				continue;
			}

			clear_buffer_uptodate(bh);
			get_bh(bh); /* for end_buffer_read_sync() */
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			continue;
		}
	}

	status = 0;

	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			/* We know this can't have changed as we hold the
			 * inode's ip_io_mutex. Avoid doing any work on the
			 * bh if the journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to clean up the other buffers. We don't
				 * need to remove the clustered uptodate
				 * information for this bh as it's not marked
				 * locally uptodate. */
				status = -EIO;
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		if (inode)
			ocfs2_set_buffer_uptodate(inode, bh);
	}
	if (inode)
		mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
	     (unsigned long long)block, nr,
	     (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes", flags);

bail:

	mlog_exit(status);
	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and its backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and an inode doesn't
 * need to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;

	mlog_entry_void();

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		put_bh(bh);
	}

out:
	mlog_exit(ret);
	return ret;
}