1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/crc32.h>
31 #include <linux/kthread.h>
32 #include <linux/pagemap.h>
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
36 #include <cluster/heartbeat.h>
37 #include <cluster/nodemanager.h>
38 #include <cluster/tcp.h>
40 #include <dlm/dlmapi.h>
42 #define MLOG_MASK_PREFIX ML_DLM_GLUE
43 #include <cluster/masklog.h>
50 #include "extent_map.h"
52 #include "heartbeat.h"
59 #include "buffer_head_io.h"
61 struct ocfs2_mask_waiter {
62 struct list_head mw_item;
64 struct completion mw_complete;
65 unsigned long mw_mask;
66 unsigned long mw_goal;
69 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
70 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 * Return value from ->downconvert_worker functions.
75 * These control the precise actions of ocfs2_unblock_lock()
76 * and ocfs2_process_blocked_lock()
79 enum ocfs2_unblock_action {
80 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
81 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
82 * ->post_unlock callback */
83 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
84 * ->post_unlock() callback. */
87 struct ocfs2_unblock_ctl {
89 enum ocfs2_unblock_action unblock_action;
92 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
94 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
96 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
99 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
102 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
103 struct ocfs2_lock_res *lockres);
106 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
108 /* This aids in debugging situations where a bad LVB might be involved. */
109 static void ocfs2_dump_meta_lvb_info(u64 level,
110 const char *function,
112 struct ocfs2_lock_res *lockres)
114 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
116 mlog(level, "LVB information for %s (called from %s:%u):\n",
117 lockres->l_name, function, line);
118 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
119 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
120 be32_to_cpu(lvb->lvb_igeneration));
121 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
122 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
123 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
124 be16_to_cpu(lvb->lvb_imode));
125 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
126 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
127 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
128 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
129 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
130 be32_to_cpu(lvb->lvb_iattr));
135 * OCFS2 Lock Resource Operations
137 * These fine tune the behavior of the generic dlmglue locking infrastructure.
139 * The most basic of lock types can point ->l_priv to their respective
140 * struct ocfs2_super and allow the default actions to manage things.
142 * Right now, each lock type also needs to implement an init function,
143 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
144 * should be called when the lock is no longer needed (i.e., object destruction time).
147 struct ocfs2_lock_res_ops {
149 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
150 * this callback if ->l_priv is not an ocfs2_super pointer
152 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
155 * Optionally called in the downconvert thread after a
156 * successful downconvert. The lockres will not be referenced
157 * after this callback is called, so it is safe to free
160 * The exact semantics of when this is called are controlled
161 * by ->downconvert_worker()
163 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
166 * Allow a lock type to add checks to determine whether it is
167 * safe to downconvert a lock. Return 0 to re-queue the
168 * downconvert at a later time, nonzero to continue.
170 * For most locks, the default checks that there are no
171 * incompatible holders are sufficient.
173 * Called with the lockres spinlock held.
175 int (*check_downconvert)(struct ocfs2_lock_res *, int);
178 * Allows a lock type to populate the lock value block. This
179 * is called on downconvert, and when we drop a lock.
181 * Locks that want to use this should set LOCK_TYPE_USES_LVB
182 * in the flags field.
184 * Called with the lockres spinlock held.
186 void (*set_lvb)(struct ocfs2_lock_res *);
189 * Called from the downconvert thread when it is determined
190 * that a lock will be downconverted. This is called without
191 * any locks held so the function can do work that might
192 * schedule (syncing out data, etc).
194 * This should return any one of the ocfs2_unblock_action
195 * values, depending on what it wants the thread to do.
197 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
200 * LOCK_TYPE_* flags which describe the specific requirements
201 * of a lock type. Descriptions of each individual flag follow.
207 * Some locks want to "refresh" potentially stale data when a
208 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
209 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
210 * individual lockres l_flags member from the ast function. It is
211 * expected that the locking wrapper will clear the
212 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
214 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
217 * Indicate that a lock type makes use of the lock value block. The
218 * ->set_lvb lock type callback must be defined.
220 #define LOCK_TYPE_USES_LVB 0x2
222 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
223 .get_osb = ocfs2_get_inode_osb,
227 static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
228 .get_osb = ocfs2_get_inode_osb,
229 .check_downconvert = ocfs2_check_meta_downconvert,
230 .set_lvb = ocfs2_set_meta_lvb,
231 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
234 static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
235 .get_osb = ocfs2_get_inode_osb,
236 .downconvert_worker = ocfs2_data_convert_worker,
240 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
241 .flags = LOCK_TYPE_REQUIRES_REFRESH,
244 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
248 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
249 .get_osb = ocfs2_get_dentry_osb,
250 .post_unlock = ocfs2_dentry_post_unlock,
251 .downconvert_worker = ocfs2_dentry_convert_worker,
255 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
256 .get_osb = ocfs2_get_inode_osb,
260 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
262 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
263 lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
264 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
265 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
268 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
270 BUG_ON(!ocfs2_is_inode_lock(lockres));
272 return (struct inode *) lockres->l_priv;
275 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
277 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
279 return (struct ocfs2_dentry_lock *)lockres->l_priv;
282 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
284 if (lockres->l_ops->get_osb)
285 return lockres->l_ops->get_osb(lockres);
287 return (struct ocfs2_super *)lockres->l_priv;
290 static int ocfs2_lock_create(struct ocfs2_super *osb,
291 struct ocfs2_lock_res *lockres,
294 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
296 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
297 struct ocfs2_lock_res *lockres,
299 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
300 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
301 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
302 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
303 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
304 struct ocfs2_lock_res *lockres);
305 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
307 #define ocfs2_log_dlm_error(_func, _stat, _lockres) do { \
308 mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
309 "resource %s: %s\n", dlm_errname(_stat), _func, \
310 _lockres->l_name, dlm_errmsg(_stat)); \
312 static int ocfs2_downconvert_thread(void *arg);
313 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
314 struct ocfs2_lock_res *lockres);
315 static int ocfs2_meta_lock_update(struct inode *inode,
316 struct buffer_head **bh);
317 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
318 static inline int ocfs2_highest_compat_lock_level(int level);
320 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
329 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
331 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
332 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
333 (long long)blkno, generation);
335 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
337 mlog(0, "built lock resource with name: %s\n", name);
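/*
 * Illustrative note (added commentary, not in the original source): per the
 * snprintf format above, the name is one type character, the
 * OCFS2_LOCK_ID_PAD string, the block number as 16 hex digits and the
 * generation as 8 hex digits. For example, a metadata lock on block 0x1234
 * with generation 0x5678 would come out roughly as
 * "M<pad>000000000000123400005678". The BUG_ON above relies on this always
 * filling exactly OCFS2_LOCK_ID_MAX_LEN - 1 characters.
 */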
342 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
344 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
345 struct ocfs2_dlm_debug *dlm_debug)
347 mlog(0, "Add tracking for lockres %s\n", res->l_name);
349 spin_lock(&ocfs2_dlm_tracking_lock);
350 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
351 spin_unlock(&ocfs2_dlm_tracking_lock);
354 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
356 spin_lock(&ocfs2_dlm_tracking_lock);
357 if (!list_empty(&res->l_debug_list))
358 list_del_init(&res->l_debug_list);
359 spin_unlock(&ocfs2_dlm_tracking_lock);
362 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
363 struct ocfs2_lock_res *res,
364 enum ocfs2_lock_type type,
365 struct ocfs2_lock_res_ops *ops,
372 res->l_level = LKM_IVMODE;
373 res->l_requested = LKM_IVMODE;
374 res->l_blocking = LKM_IVMODE;
375 res->l_action = OCFS2_AST_INVALID;
376 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
378 res->l_flags = OCFS2_LOCK_INITIALIZED;
380 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
383 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
385 /* This also clears out the lock status block */
386 memset(res, 0, sizeof(struct ocfs2_lock_res));
387 spin_lock_init(&res->l_lock);
388 init_waitqueue_head(&res->l_event);
389 INIT_LIST_HEAD(&res->l_blocked_list);
390 INIT_LIST_HEAD(&res->l_mask_waiters);
393 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
394 enum ocfs2_lock_type type,
395 unsigned int generation,
398 struct ocfs2_lock_res_ops *ops;
401 case OCFS2_LOCK_TYPE_RW:
402 ops = &ocfs2_inode_rw_lops;
404 case OCFS2_LOCK_TYPE_META:
405 ops = &ocfs2_inode_meta_lops;
407 case OCFS2_LOCK_TYPE_DATA:
408 ops = &ocfs2_inode_data_lops;
410 case OCFS2_LOCK_TYPE_OPEN:
411 ops = &ocfs2_inode_open_lops;
414 mlog_bug_on_msg(1, "type: %d\n", type);
415 ops = NULL; /* thanks, gcc */
419 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
420 generation, res->l_name);
421 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
424 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
426 struct inode *inode = ocfs2_lock_res_inode(lockres);
428 return OCFS2_SB(inode->i_sb);
431 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
433 __be64 inode_blkno_be;
435 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
438 return be64_to_cpu(inode_blkno_be);
441 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
443 struct ocfs2_dentry_lock *dl = lockres->l_priv;
445 return OCFS2_SB(dl->dl_inode->i_sb);
448 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
449 u64 parent, struct inode *inode)
452 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
453 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
454 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
456 ocfs2_lock_res_init_once(lockres);
459 * Unfortunately, the standard lock naming scheme won't work
460 * here because we have two 16 byte values to use. Instead,
461 * we'll stuff the inode number as a binary value. We still
462 * want error prints to show something without garbling the
463 * display, so drop a null byte in there before the inode
464 * number. A future version of OCFS2 will likely use all
465 * binary lock names. The stringified names have been a
466 * tremendous aid in debugging, but now that the debugfs
467 * interface exists, we can mangle things there if need be.
469 * NOTE: We also drop the standard "pad" value (the total lock
470 * name size stays the same though - the last part is all
471 * zeros due to the memset in ocfs2_lock_res_init_once()
473 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
475 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
478 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
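/*
 * Layout note (added commentary, not in the original source): the first
 * OCFS2_DENTRY_LOCK_INO_START - 1 bytes hold the stringified type character
 * and parent information, the next byte is the NUL mentioned above, and the
 * memcpy below drops the child inode block number in as a raw big-endian
 * u64 -- which is exactly what ocfs2_get_dentry_lock_ino() pulls back out.
 */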
480 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
483 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
484 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
488 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
489 struct ocfs2_super *osb)
491 /* Superblock lockres doesn't come from a slab so we call init
492 * once on it manually. */
493 ocfs2_lock_res_init_once(res);
494 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
496 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
497 &ocfs2_super_lops, osb);
500 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
501 struct ocfs2_super *osb)
503 /* Rename lockres doesn't come from a slab so we call init
504 * once on it manually. */
505 ocfs2_lock_res_init_once(res);
506 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
507 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
508 &ocfs2_rename_lops, osb);
511 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
515 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
518 ocfs2_remove_lockres_tracking(res);
520 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
521 "Lockres %s is on the blocked list\n",
523 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
524 "Lockres %s has mask waiters pending\n",
526 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
527 "Lockres %s is locked\n",
529 mlog_bug_on_msg(res->l_ro_holders,
530 "Lockres %s has %u ro holders\n",
531 res->l_name, res->l_ro_holders);
532 mlog_bug_on_msg(res->l_ex_holders,
533 "Lockres %s has %u ex holders\n",
534 res->l_name, res->l_ex_holders);
536 /* Need to clear out the lock status block for the dlm */
537 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
543 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
552 lockres->l_ex_holders++;
555 lockres->l_ro_holders++;
564 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
573 BUG_ON(!lockres->l_ex_holders);
574 lockres->l_ex_holders--;
577 BUG_ON(!lockres->l_ro_holders);
578 lockres->l_ro_holders--;
586 /* WARNING: This function lives in a world where the only three lock
587 * levels are EX, PR, and NL. It *will* have to be adjusted when more
588 * lock types are added. */
589 static inline int ocfs2_highest_compat_lock_level(int level)
591 int new_level = LKM_EXMODE;
593 if (level == LKM_EXMODE)
594 new_level = LKM_NLMODE;
595 else if (level == LKM_PRMODE)
596 new_level = LKM_PRMODE;
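/*
 * Summarizing the mapping above (a descriptive note, not in the original
 * source): against an EX lock we can hold at most NL, against a PR lock we
 * can hold at most PR, and anything is compatible with NL, so the default
 * of LKM_EXMODE stands in that case.
 */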
600 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
601 unsigned long newflags)
603 struct ocfs2_mask_waiter *mw, *tmp;
605 assert_spin_locked(&lockres->l_lock);
607 lockres->l_flags = newflags;
609 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
610 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
613 list_del_init(&mw->mw_item);
615 complete(&mw->mw_complete);
618 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
620 lockres_set_flags(lockres, lockres->l_flags | or);
622 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
625 lockres_set_flags(lockres, lockres->l_flags & ~clear);
628 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
632 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
633 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
634 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
635 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
637 lockres->l_level = lockres->l_requested;
638 if (lockres->l_level <=
639 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
640 lockres->l_blocking = LKM_NLMODE;
641 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
643 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
648 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
652 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
653 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
655 /* Convert from RO to EX doesn't really need anything as our
656 * information is already up to date. Convert from NL to
657 * *anything* however should mark ourselves as needing an
659 if (lockres->l_level == LKM_NLMODE &&
660 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
661 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
663 lockres->l_level = lockres->l_requested;
664 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
669 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
673 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
674 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
676 if (lockres->l_requested > LKM_NLMODE &&
677 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
678 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
679 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
681 lockres->l_level = lockres->l_requested;
682 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
683 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
688 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
691 int needs_downconvert = 0;
694 assert_spin_locked(&lockres->l_lock);
696 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
698 if (level > lockres->l_blocking) {
699 /* only schedule a downconvert if we haven't already scheduled
700 * one that goes low enough to satisfy the level we're
701 * blocking. this also catches the case where we get
703 if (ocfs2_highest_compat_lock_level(level) <
704 ocfs2_highest_compat_lock_level(lockres->l_blocking))
705 needs_downconvert = 1;
707 lockres->l_blocking = level;
710 mlog_exit(needs_downconvert);
711 return needs_downconvert;
714 static void ocfs2_blocking_ast(void *opaque, int level)
716 struct ocfs2_lock_res *lockres = opaque;
717 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
718 int needs_downconvert;
721 BUG_ON(level <= LKM_NLMODE);
723 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
724 lockres->l_name, level, lockres->l_level,
725 ocfs2_lock_type_string(lockres->l_type));
727 spin_lock_irqsave(&lockres->l_lock, flags);
728 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
729 if (needs_downconvert)
730 ocfs2_schedule_blocked_lock(osb, lockres);
731 spin_unlock_irqrestore(&lockres->l_lock, flags);
733 wake_up(&lockres->l_event);
735 ocfs2_wake_downconvert_thread(osb);
738 static void ocfs2_locking_ast(void *opaque)
740 struct ocfs2_lock_res *lockres = opaque;
741 struct dlm_lockstatus *lksb = &lockres->l_lksb;
744 spin_lock_irqsave(&lockres->l_lock, flags);
746 if (lksb->status != DLM_NORMAL) {
747 mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
748 lockres->l_name, lksb->status);
749 spin_unlock_irqrestore(&lockres->l_lock, flags);
753 switch(lockres->l_action) {
754 case OCFS2_AST_ATTACH:
755 ocfs2_generic_handle_attach_action(lockres);
756 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
758 case OCFS2_AST_CONVERT:
759 ocfs2_generic_handle_convert_action(lockres);
761 case OCFS2_AST_DOWNCONVERT:
762 ocfs2_generic_handle_downconvert_action(lockres);
765 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
766 "lockres flags = 0x%lx, unlock action: %u\n",
767 lockres->l_name, lockres->l_action, lockres->l_flags,
768 lockres->l_unlock_action);
772 /* set it to something invalid so if we get called again we
774 lockres->l_action = OCFS2_AST_INVALID;
776 wake_up(&lockres->l_event);
777 spin_unlock_irqrestore(&lockres->l_lock, flags);
780 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
786 spin_lock_irqsave(&lockres->l_lock, flags);
787 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
789 lockres->l_action = OCFS2_AST_INVALID;
791 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
792 spin_unlock_irqrestore(&lockres->l_lock, flags);
794 wake_up(&lockres->l_event);
798 /* Note: If we detect another process working on the lock (i.e.,
799 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
800 * to do the right thing in that case.
802 static int ocfs2_lock_create(struct ocfs2_super *osb,
803 struct ocfs2_lock_res *lockres,
808 enum dlm_status status = DLM_NORMAL;
813 mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
816 spin_lock_irqsave(&lockres->l_lock, flags);
817 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
818 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
819 spin_unlock_irqrestore(&lockres->l_lock, flags);
823 lockres->l_action = OCFS2_AST_ATTACH;
824 lockres->l_requested = level;
825 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
826 spin_unlock_irqrestore(&lockres->l_lock, flags);
828 status = dlmlock(osb->dlm,
833 OCFS2_LOCK_ID_MAX_LEN - 1,
837 if (status != DLM_NORMAL) {
838 ocfs2_log_dlm_error("dlmlock", status, lockres);
840 ocfs2_recover_from_dlm_error(lockres, 1);
843 mlog(0, "lock %s, successful return from dlmlock\n", lockres->l_name);
850 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
856 spin_lock_irqsave(&lockres->l_lock, flags);
857 ret = lockres->l_flags & flag;
858 spin_unlock_irqrestore(&lockres->l_lock, flags);
863 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
866 wait_event(lockres->l_event,
867 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
870 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
873 wait_event(lockres->l_event,
874 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
877 /* predict what lock level we'll be dropping down to on behalf
878 * of another node, and return true if the currently wanted
879 * level will be compatible with it. */
880 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
883 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
885 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
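/*
 * Worked example (illustrative, not from the original source): if another
 * node has asked for PR (l_blocking == LKM_PRMODE), the most we will drop
 * to on its behalf is PR, so a wanted level of PR can proceed while a
 * wanted EX cannot; if the other node wants EX, only NL requests get
 * through here.
 */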
888 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
890 INIT_LIST_HEAD(&mw->mw_item);
891 init_completion(&mw->mw_complete);
894 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
896 wait_for_completion(&mw->mw_complete);
897 /* Re-arm the completion in case we want to wait on it again */
898 INIT_COMPLETION(mw->mw_complete);
899 return mw->mw_status;
902 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
903 struct ocfs2_mask_waiter *mw,
907 BUG_ON(!list_empty(&mw->mw_item));
909 assert_spin_locked(&lockres->l_lock);
911 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
916 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
917 * if the mask still hadn't reached its goal */
918 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
919 struct ocfs2_mask_waiter *mw)
924 spin_lock_irqsave(&lockres->l_lock, flags);
925 if (!list_empty(&mw->mw_item)) {
926 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
929 list_del_init(&mw->mw_item);
930 init_completion(&mw->mw_complete);
932 spin_unlock_irqrestore(&lockres->l_lock, flags);
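/*
 * Descriptive note (added commentary, not in the original source): the mask
 * waiter pattern used by ocfs2_cluster_lock() below is to initialize a
 * waiter, queue it on l_mask_waiters with the flag mask and goal it cares
 * about (e.g. OCFS2_LOCK_BUSY with a goal of 0), drop the spinlock, and
 * then block in ocfs2_wait_for_mask() until lockres_set_flags() sees the
 * goal satisfied and completes it.
 */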
938 static int ocfs2_cluster_lock(struct ocfs2_super *osb,
939 struct ocfs2_lock_res *lockres,
944 struct ocfs2_mask_waiter mw;
945 enum dlm_status status;
946 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
947 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
952 ocfs2_init_mask_waiter(&mw);
954 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
955 lkm_flags |= LKM_VALBLK;
960 if (catch_signals && signal_pending(current)) {
965 spin_lock_irqsave(&lockres->l_lock, flags);
967 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
968 "Cluster lock called on freeing lockres %s! flags "
969 "0x%lx\n", lockres->l_name, lockres->l_flags);
971 /* We only compare against the currently granted level
972 * here. If the lock is blocked waiting on a downconvert,
973 * we'll get caught below. */
974 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
975 level > lockres->l_level) {
976 /* is someone sitting in dlm_lock? If so, wait on
978 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
983 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
984 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
985 /* is the lock currently blocked on behalf of
987 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
992 if (level > lockres->l_level) {
993 if (lockres->l_action != OCFS2_AST_INVALID)
994 mlog(ML_ERROR, "lockres %s has action %u pending\n",
995 lockres->l_name, lockres->l_action);
997 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
998 lockres->l_action = OCFS2_AST_ATTACH;
999 lkm_flags &= ~LKM_CONVERT;
1001 lockres->l_action = OCFS2_AST_CONVERT;
1002 lkm_flags |= LKM_CONVERT;
1005 lockres->l_requested = level;
1006 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1007 spin_unlock_irqrestore(&lockres->l_lock, flags);
1009 BUG_ON(level == LKM_IVMODE);
1010 BUG_ON(level == LKM_NLMODE);
1012 mlog(0, "lock %s, convert from %d to level = %d\n",
1013 lockres->l_name, lockres->l_level, level);
1015 /* call dlm_lock to upgrade lock now */
1016 status = dlmlock(osb->dlm,
1021 OCFS2_LOCK_ID_MAX_LEN - 1,
1024 ocfs2_blocking_ast);
1025 if (status != DLM_NORMAL) {
1026 if ((lkm_flags & LKM_NOQUEUE) &&
1027 (status == DLM_NOTQUEUED))
1030 ocfs2_log_dlm_error("dlmlock", status,
1034 ocfs2_recover_from_dlm_error(lockres, 1);
1038 mlog(0, "lock %s, successful return from dlmlock\n",
1041 /* At this point we've gone inside the dlm and need to
1042 * complete our work regardless. */
1045 /* wait for busy to clear and carry on */
1049 /* Ok, if we get here then we're good to go. */
1050 ocfs2_inc_holders(lockres, level);
1054 spin_unlock_irqrestore(&lockres->l_lock, flags);
1057 * This is helping work around a lock inversion between the page lock
1058 * and dlm locks. One path holds the page lock while calling aops
1059 * which block acquiring dlm locks. The downconvert thread holds dlm
1060 * locks while acquiring page locks while downconverting data locks.
1061 * This block is helping an aop path notice the inversion and back
1062 * off to unlock its page lock before trying the dlm lock again.
1064 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1065 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1067 if (lockres_remove_mask_waiter(lockres, &mw))
1073 ret = ocfs2_wait_for_mask(&mw);
1083 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1084 struct ocfs2_lock_res *lockres,
1087 unsigned long flags;
1090 spin_lock_irqsave(&lockres->l_lock, flags);
1091 ocfs2_dec_holders(lockres, level);
1092 ocfs2_downconvert_on_unlock(osb, lockres);
1093 spin_unlock_irqrestore(&lockres->l_lock, flags);
1097 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1098 struct ocfs2_lock_res *lockres,
1102 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1103 unsigned long flags;
1104 int lkm_flags = local ? LKM_LOCAL : 0;
1106 spin_lock_irqsave(&lockres->l_lock, flags);
1107 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1108 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1109 spin_unlock_irqrestore(&lockres->l_lock, flags);
1111 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1114 /* Grants us an EX lock on the data and metadata resources, skipping
1115 * the normal cluster directory lookup. Use this ONLY on newly created
1116 * inodes which other nodes can't possibly see, and which haven't been
1117 * hashed in the inode hash yet. This can give us a good performance
1118 * increase as it'll skip the network broadcast normally associated
1119 * with creating a new lock resource. */
1120 int ocfs2_create_new_inode_locks(struct inode *inode)
1123 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1126 BUG_ON(!ocfs2_inode_is_new(inode));
1130 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1132 /* NOTE: We don't increment any of the holder counts, nor
1133 * do we add anything to a journal handle. Since this is
1134 * supposed to be a new inode which the cluster doesn't know
1135 * about yet, there is no need to. As far as the LVB handling
1136 * is concerned, this is basically like acquiring an EX lock
1137 * on a resource which has an invalid one -- we'll set it
1138 * valid when we release the EX. */
1140 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1147 * We don't want to use LKM_LOCAL on meta data locks as they
1148 * don't use a generation in their lock names.
1150 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_meta_lockres, 1, 0);
1156 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_data_lockres, 1, 1);
1162 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1173 int ocfs2_rw_lock(struct inode *inode, int write)
1176 struct ocfs2_lock_res *lockres;
1177 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1183 mlog(0, "inode %llu take %s RW lock\n",
1184 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1185 write ? "EXMODE" : "PRMODE");
1187 if (ocfs2_mount_local(osb))
1190 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1192 level = write ? LKM_EXMODE : LKM_PRMODE;
1194 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1203 void ocfs2_rw_unlock(struct inode *inode, int write)
1205 int level = write ? LKM_EXMODE : LKM_PRMODE;
1206 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1207 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1211 mlog(0, "inode %llu drop %s RW lock\n",
1212 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1213 write ? "EXMODE" : "PRMODE");
1215 if (!ocfs2_mount_local(osb))
1216 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1222 * ocfs2_open_lock always gets a PR mode lock.
1224 int ocfs2_open_lock(struct inode *inode)
1227 struct ocfs2_lock_res *lockres;
1228 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1234 mlog(0, "inode %llu take PRMODE open lock\n",
1235 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1237 if (ocfs2_mount_local(osb))
1240 lockres = &OCFS2_I(inode)->ip_open_lockres;
1242 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1252 int ocfs2_try_open_lock(struct inode *inode, int write)
1254 int status = 0, level;
1255 struct ocfs2_lock_res *lockres;
1256 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1262 mlog(0, "inode %llu try to take %s open lock\n",
1263 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1264 write ? "EXMODE" : "PRMODE");
1266 if (ocfs2_mount_local(osb))
1269 lockres = &OCFS2_I(inode)->ip_open_lockres;
1271 level = write ? LKM_EXMODE : LKM_PRMODE;
1274 * The file system may already be holding a PRMODE/EXMODE open lock.
1275 * Since we pass LKM_NOQUEUE, the request won't block waiting on
1276 * other nodes and the -EAGAIN will indicate to the caller that
1277 * this inode is still in use.
1279 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1280 level, LKM_NOQUEUE, 0);
1288 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1290 void ocfs2_open_unlock(struct inode *inode)
1292 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1293 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1297 mlog(0, "inode %llu drop open lock\n",
1298 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1300 if (ocfs2_mount_local(osb))
1303 if(lockres->l_ro_holders)
1304 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1306 if(lockres->l_ex_holders)
1307 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1314 int ocfs2_data_lock_full(struct inode *inode,
1318 int status = 0, level;
1319 struct ocfs2_lock_res *lockres;
1320 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1326 mlog(0, "inode %llu take %s DATA lock\n",
1327 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1328 write ? "EXMODE" : "PRMODE");
1330 /* We'll allow faking a readonly data lock for
1332 if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
1340 if (ocfs2_mount_local(osb))
1343 lockres = &OCFS2_I(inode)->ip_data_lockres;
1345 level = write ? LKM_EXMODE : LKM_PRMODE;
1347 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
1349 if (status < 0 && status != -EAGAIN)
1357 /* see ocfs2_meta_lock_with_page() */
1358 int ocfs2_data_lock_with_page(struct inode *inode,
1364 ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
1365 if (ret == -EAGAIN) {
1367 if (ocfs2_data_lock(inode, write) == 0)
1368 ocfs2_data_unlock(inode, write);
1369 ret = AOP_TRUNCATED_PAGE;
1375 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1376 struct ocfs2_lock_res *lockres)
1382 /* If we know that another node is waiting on our lock, kick
1383 * the downconvert thread pre-emptively when we reach a release
1385 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1386 switch(lockres->l_blocking) {
1388 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1392 if (!lockres->l_ex_holders)
1401 ocfs2_wake_downconvert_thread(osb);
1406 void ocfs2_data_unlock(struct inode *inode,
1409 int level = write ? LKM_EXMODE : LKM_PRMODE;
1410 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
1411 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1415 mlog(0, "inode %llu drop %s DATA lock\n",
1416 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1417 write ? "EXMODE" : "PRMODE");
1419 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
1420 !ocfs2_mount_local(osb))
1421 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1426 #define OCFS2_SEC_BITS 34
1427 #define OCFS2_SEC_SHIFT (64 - 34)
1428 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
1430 /* LVB only has room for 64 bits of time here so we pack it for
1432 static u64 ocfs2_pack_timespec(struct timespec *spec)
1435 u64 sec = spec->tv_sec;
1436 u32 nsec = spec->tv_nsec;
1438 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
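/*
 * Worked example (illustrative, not from the original source): with
 * OCFS2_SEC_SHIFT == 30, a timespec of 1 second and 2 nanoseconds packs to
 * (1ULL << 30) | 2 == 0x40000002, and ocfs2_unpack_timespec() below
 * reverses this by shifting and masking.
 */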
1443 /* Call this with the lockres locked. I am reasonably sure we don't
1444 * need ip_lock in this function as anyone who would be changing those
1445 * values is supposed to be blocked in ocfs2_meta_lock right now. */
1446 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1448 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1449 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1450 struct ocfs2_meta_lvb *lvb;
1454 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1457 * Invalidate the LVB of a deleted inode - this way other
1458 * nodes are forced to go to disk and discover the new inode
1461 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1462 lvb->lvb_version = 0;
1466 lvb->lvb_version = OCFS2_LVB_VERSION;
1467 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1468 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1469 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1470 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1471 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1472 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1473 lvb->lvb_iatime_packed =
1474 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1475 lvb->lvb_ictime_packed =
1476 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
1477 lvb->lvb_imtime_packed =
1478 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
1479 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
1480 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
1481 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
1484 mlog_meta_lvb(0, lockres);
1489 static void ocfs2_unpack_timespec(struct timespec *spec,
1492 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
1493 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
1496 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
1498 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1499 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1500 struct ocfs2_meta_lvb *lvb;
1504 mlog_meta_lvb(0, lockres);
1506 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1508 /* We're safe here without the lockres lock... */
1509 spin_lock(&oi->ip_lock);
1510 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
1511 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
1513 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
1514 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
1515 ocfs2_set_inode_flags(inode);
1517 /* fast-symlinks are a special case */
1518 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
1519 inode->i_blocks = 0;
1521 inode->i_blocks = ocfs2_inode_sector_count(inode);
1523 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
1524 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
1525 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
1526 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
1527 ocfs2_unpack_timespec(&inode->i_atime,
1528 be64_to_cpu(lvb->lvb_iatime_packed));
1529 ocfs2_unpack_timespec(&inode->i_mtime,
1530 be64_to_cpu(lvb->lvb_imtime_packed));
1531 ocfs2_unpack_timespec(&inode->i_ctime,
1532 be64_to_cpu(lvb->lvb_ictime_packed));
1533 spin_unlock(&oi->ip_lock);
1538 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
1539 struct ocfs2_lock_res *lockres)
1541 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1543 if (lvb->lvb_version == OCFS2_LVB_VERSION
1544 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
1549 /* Determine whether a lock resource needs to be refreshed, and
1550 * arbitrate who gets to refresh it.
1552 * 0 means no refresh needed.
1554 * > 0 means you need to refresh this and you MUST call
1555 * ocfs2_complete_lock_res_refresh afterwards. */
1556 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
1558 unsigned long flags;
1564 spin_lock_irqsave(&lockres->l_lock, flags);
1565 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
1566 spin_unlock_irqrestore(&lockres->l_lock, flags);
1570 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
1571 spin_unlock_irqrestore(&lockres->l_lock, flags);
1573 ocfs2_wait_on_refreshing_lock(lockres);
1577 /* Ok, I'll be the one to refresh this lock. */
1578 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
1579 spin_unlock_irqrestore(&lockres->l_lock, flags);
1587 /* If status is non-zero, I'll mark it as not being in refresh
1588 * anymore, but I won't clear the needs refresh flag. */
1589 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
1592 unsigned long flags;
1595 spin_lock_irqsave(&lockres->l_lock, flags);
1596 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
1598 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
1599 spin_unlock_irqrestore(&lockres->l_lock, flags);
1601 wake_up(&lockres->l_event);
1606 /* may or may not return a bh if it went to disk. */
1607 static int ocfs2_meta_lock_update(struct inode *inode,
1608 struct buffer_head **bh)
1611 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1612 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1613 struct ocfs2_dinode *fe;
1614 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1618 if (ocfs2_mount_local(osb))
1621 spin_lock(&oi->ip_lock);
1622 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1623 mlog(0, "Orphaned inode %llu was deleted while we "
1624 "were waiting on a lock. ip_flags = 0x%x\n",
1625 (unsigned long long)oi->ip_blkno, oi->ip_flags);
1626 spin_unlock(&oi->ip_lock);
1630 spin_unlock(&oi->ip_lock);
1632 if (!ocfs2_should_refresh_lock_res(lockres))
1635 /* This will discard any caching information we might have had
1636 * for the inode metadata. */
1637 ocfs2_metadata_cache_purge(inode);
1639 ocfs2_extent_map_trunc(inode, 0);
1641 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
1642 mlog(0, "Trusting LVB on inode %llu\n",
1643 (unsigned long long)oi->ip_blkno);
1644 ocfs2_refresh_inode_from_lvb(inode);
1646 /* Boo, we have to go to disk. */
1647 /* read bh, cast, ocfs2_refresh_inode */
1648 status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
1649 bh, OCFS2_BH_CACHED, inode);
1654 fe = (struct ocfs2_dinode *) (*bh)->b_data;
1656 /* This is a good chance to make sure we're not
1657 * locking an invalid object.
1659 * We bug on a stale inode here because we checked
1660 * above whether it was wiped from disk. The wiping
1661 * node provides a guarantee that we receive that
1662 * message and can mark the inode before dropping any
1663 * locks associated with it. */
1664 if (!OCFS2_IS_VALID_DINODE(fe)) {
1665 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
1669 mlog_bug_on_msg(inode->i_generation !=
1670 le32_to_cpu(fe->i_generation),
1671 "Invalid dinode %llu disk generation: %u "
1672 "inode->i_generation: %u\n",
1673 (unsigned long long)oi->ip_blkno,
1674 le32_to_cpu(fe->i_generation),
1675 inode->i_generation);
1676 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
1677 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
1678 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
1679 (unsigned long long)oi->ip_blkno,
1680 (unsigned long long)le64_to_cpu(fe->i_dtime),
1681 le32_to_cpu(fe->i_flags));
1683 ocfs2_refresh_inode(inode, fe);
1688 ocfs2_complete_lock_res_refresh(lockres, status);
1694 static int ocfs2_assign_bh(struct inode *inode,
1695 struct buffer_head **ret_bh,
1696 struct buffer_head *passed_bh)
1701 /* Ok, the update went to disk for us, use the
1703 *ret_bh = passed_bh;
1709 status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1710 OCFS2_I(inode)->ip_blkno,
1721 * returns < 0 error if the callback will never be called, otherwise
1722 * the result of the lock will be communicated via the callback.
1724 int ocfs2_meta_lock_full(struct inode *inode,
1725 struct buffer_head **ret_bh,
1729 int status, level, dlm_flags, acquired;
1730 struct ocfs2_lock_res *lockres = NULL;
1731 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1732 struct buffer_head *local_bh = NULL;
1738 mlog(0, "inode %llu, take %s META lock\n",
1739 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1740 ex ? "EXMODE" : "PRMODE");
1744 /* We'll allow faking a readonly metadata lock for
1746 if (ocfs2_is_hard_readonly(osb)) {
1752 if (ocfs2_mount_local(osb))
1755 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1756 wait_event(osb->recovery_event,
1757 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1759 lockres = &OCFS2_I(inode)->ip_meta_lockres;
1760 level = ex ? LKM_EXMODE : LKM_PRMODE;
1762 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
1763 dlm_flags |= LKM_NOQUEUE;
1765 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
1767 if (status != -EAGAIN && status != -EIOCBRETRY)
1772 /* Notify the error cleanup path to drop the cluster lock. */
1775 /* We wait twice because a node may have died while we were in
1776 * the lower dlm layers. The second time though, we've
1777 * committed to owning this lock so we don't allow signals to
1778 * abort the operation. */
1779 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1780 wait_event(osb->recovery_event,
1781 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1785 * We only see this flag if we're being called from
1786 * ocfs2_read_locked_inode(). It means we're locking an inode
1787 * which hasn't been populated yet, so clear the refresh flag
1788 * and let the caller handle it.
1790 if (inode->i_state & I_NEW) {
1793 ocfs2_complete_lock_res_refresh(lockres, 0);
1797 /* This is fun. The caller may want a bh back, or it may
1798 * not. ocfs2_meta_lock_update definitely wants one in, but
1799 * may or may not read one, depending on what's in the
1800 * LVB. The result of all of this is that we've *only* gone to
1801 * disk if we have to, so the complexity is worthwhile. */
1802 status = ocfs2_meta_lock_update(inode, &local_bh);
1804 if (status != -ENOENT)
1810 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
1819 if (ret_bh && (*ret_bh)) {
1824 ocfs2_meta_unlock(inode, ex);
1835 * This is working around a lock inversion between tasks acquiring DLM
1836 * locks while holding a page lock and the downconvert thread which
1837 * blocks dlm lock acquisition while acquiring page locks.
1839 * ** These _with_page variants are only intended to be called from aop
1840 * methods that hold page locks and return a very specific *positive* error
1841 * code that aop methods pass up to the VFS -- test for errors with != 0. **
1843 * The DLM is called such that it returns -EAGAIN if it would have
1844 * blocked waiting for the downconvert thread. In that case we unlock
1845 * our page so the downconvert thread can make progress. Once we've
1846 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
1847 * that called us can bubble that back up into the VFS, which will then
1848 * immediately retry the aop call.
1850 * We do a blocking lock and immediate unlock before returning, though, so that
1851 * the lock has a great chance of being cached on this node by the time the VFS
1852 * calls back to retry the aop. This has a potential to livelock as nodes
1853 * ping locks back and forth, but that's a risk we're willing to take in
1854 * exchange for a simple fix to the lock inversion.
1856 int ocfs2_meta_lock_with_page(struct inode *inode,
1857 struct buffer_head **ret_bh,
1863 ret = ocfs2_meta_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
1864 if (ret == -EAGAIN) {
1866 if (ocfs2_meta_lock(inode, ret_bh, ex) == 0)
1867 ocfs2_meta_unlock(inode, ex);
1868 ret = AOP_TRUNCATED_PAGE;
1874 int ocfs2_meta_lock_atime(struct inode *inode,
1875 struct vfsmount *vfsmnt,
1881 ret = ocfs2_meta_lock(inode, NULL, 0);
1888 * If we should update atime, we will get an EX lock,
1889 * otherwise we just get a PR lock.
1891 if (ocfs2_should_update_atime(inode, vfsmnt)) {
1892 struct buffer_head *bh = NULL;
1894 ocfs2_meta_unlock(inode, 0);
1895 ret = ocfs2_meta_lock(inode, &bh, 1);
1901 if (ocfs2_should_update_atime(inode, vfsmnt))
1902 ocfs2_update_inode_atime(inode, bh);
1912 void ocfs2_meta_unlock(struct inode *inode,
1915 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1916 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
1917 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1921 mlog(0, "inode %llu drop %s META lock\n",
1922 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1923 ex ? "EXMODE" : "PRMODE");
1925 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
1926 !ocfs2_mount_local(osb))
1927 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1932 int ocfs2_super_lock(struct ocfs2_super *osb,
1936 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1937 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1938 struct buffer_head *bh;
1939 struct ocfs2_slot_info *si = osb->slot_info;
1943 if (ocfs2_is_hard_readonly(osb))
1946 if (ocfs2_mount_local(osb))
1949 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1955 /* The super block lock path is really in the best position to
1956 * know when resources covered by the lock need to be
1957 * refreshed, so we do it here. Of course, making sense of
1958 * everything is up to the caller :) */
1959 status = ocfs2_should_refresh_lock_res(lockres);
1966 status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
1969 ocfs2_update_slot_info(si);
1971 ocfs2_complete_lock_res_refresh(lockres, status);
1981 void ocfs2_super_unlock(struct ocfs2_super *osb,
1984 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1985 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1987 if (!ocfs2_mount_local(osb))
1988 ocfs2_cluster_unlock(osb, lockres, level);
1991 int ocfs2_rename_lock(struct ocfs2_super *osb)
1994 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1996 if (ocfs2_is_hard_readonly(osb))
1999 if (ocfs2_mount_local(osb))
2002 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
2009 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2011 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2013 if (!ocfs2_mount_local(osb))
2014 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
2017 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2020 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2021 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2022 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2026 if (ocfs2_is_hard_readonly(osb))
2029 if (ocfs2_mount_local(osb))
2032 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2039 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2041 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2042 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2043 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2045 if (!ocfs2_mount_local(osb))
2046 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2049 /* Reference counting of the dlm debug structure. We want this because
2050 * open references on the debug inodes can outlive the mount, so
2051 * we can't rely on the ocfs2_super to always exist. */
2052 static void ocfs2_dlm_debug_free(struct kref *kref)
2054 struct ocfs2_dlm_debug *dlm_debug;
2056 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2061 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2064 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2067 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2069 kref_get(&debug->d_refcnt);
2072 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2074 struct ocfs2_dlm_debug *dlm_debug;
2076 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2078 mlog_errno(-ENOMEM);
2082 kref_init(&dlm_debug->d_refcnt);
2083 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2084 dlm_debug->d_locking_state = NULL;
2089 /* Access to this is arbitrated for us via seq_file->sem. */
2090 struct ocfs2_dlm_seq_priv {
2091 struct ocfs2_dlm_debug *p_dlm_debug;
2092 struct ocfs2_lock_res p_iter_res;
2093 struct ocfs2_lock_res p_tmp_res;
2096 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2097 struct ocfs2_dlm_seq_priv *priv)
2099 struct ocfs2_lock_res *iter, *ret = NULL;
2100 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2102 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2104 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2105 /* discover the head of the list */
2106 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2107 mlog(0, "End of list found, %p\n", ret);
2111 /* We track our "dummy" iteration lockres' by a NULL
2113 if (iter->l_ops != NULL) {
2122 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2124 struct ocfs2_dlm_seq_priv *priv = m->private;
2125 struct ocfs2_lock_res *iter;
2127 spin_lock(&ocfs2_dlm_tracking_lock);
2128 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2130 /* Since lockres' have the lifetime of their container
2131 * (which can be inodes, ocfs2_supers, etc) we want to
2132 * copy this out to a temporary lockres while still
2133 * under the spinlock. Obviously after this we can't
2134 * trust any pointers on the copy returned, but that's
2135 * ok as the information we want isn't typically held
2137 priv->p_tmp_res = *iter;
2138 iter = &priv->p_tmp_res;
2140 spin_unlock(&ocfs2_dlm_tracking_lock);
2145 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2149 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2151 struct ocfs2_dlm_seq_priv *priv = m->private;
2152 struct ocfs2_lock_res *iter = v;
2153 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2155 spin_lock(&ocfs2_dlm_tracking_lock);
2156 iter = ocfs2_dlm_next_res(iter, priv);
2157 list_del_init(&dummy->l_debug_list);
2159 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2160 priv->p_tmp_res = *iter;
2161 iter = &priv->p_tmp_res;
2163 spin_unlock(&ocfs2_dlm_tracking_lock);
2168 /* So that debugfs.ocfs2 can determine which format is being used */
2169 #define OCFS2_DLM_DEBUG_STR_VERSION 1
2170 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2174 struct ocfs2_lock_res *lockres = v;
2179 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2181 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2182 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2184 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2186 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2188 seq_printf(m, "%d\t"
2199 lockres->l_unlock_action,
2200 lockres->l_ro_holders,
2201 lockres->l_ex_holders,
2202 lockres->l_requested,
2203 lockres->l_blocking);
2205 /* Dump the raw LVB */
2206 lvb = lockres->l_lksb.lvb;
2207 for(i = 0; i < DLM_LVB_LEN; i++)
2208 seq_printf(m, "0x%x\t", lvb[i]);
2211 seq_printf(m, "\n");
2215 static struct seq_operations ocfs2_dlm_seq_ops = {
2216 .start = ocfs2_dlm_seq_start,
2217 .stop = ocfs2_dlm_seq_stop,
2218 .next = ocfs2_dlm_seq_next,
2219 .show = ocfs2_dlm_seq_show,
2222 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2224 struct seq_file *seq = (struct seq_file *) file->private_data;
2225 struct ocfs2_dlm_seq_priv *priv = seq->private;
2226 struct ocfs2_lock_res *res = &priv->p_iter_res;
2228 ocfs2_remove_lockres_tracking(res);
2229 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2230 return seq_release_private(inode, file);
2233 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2236 struct ocfs2_dlm_seq_priv *priv;
2237 struct seq_file *seq;
2238 struct ocfs2_super *osb;
2240 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2246 osb = inode->i_private;
2247 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2248 priv->p_dlm_debug = osb->osb_dlm_debug;
2249 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2251 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2258 seq = (struct seq_file *) file->private_data;
2259 seq->private = priv;
2261 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2268 static const struct file_operations ocfs2_dlm_debug_fops = {
2269 .open = ocfs2_dlm_debug_open,
2270 .release = ocfs2_dlm_debug_release,
2272 .llseek = seq_lseek,
2275 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2278 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2280 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2282 osb->osb_debug_root,
2284 &ocfs2_dlm_debug_fops);
2285 if (!dlm_debug->d_locking_state) {
2288 "Unable to create locking state debugfs file.\n");
2292 ocfs2_get_dlm_debug(dlm_debug);
2297 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2299 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2302 debugfs_remove(dlm_debug->d_locking_state);
2303 ocfs2_put_dlm_debug(dlm_debug);
2307 int ocfs2_dlm_init(struct ocfs2_super *osb)
2311 struct dlm_ctxt *dlm = NULL;
2315 if (ocfs2_mount_local(osb))
2318 status = ocfs2_dlm_init_debug(osb);
2324 /* launch downconvert thread */
2325 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2326 if (IS_ERR(osb->dc_task)) {
2327 status = PTR_ERR(osb->dc_task);
2328 osb->dc_task = NULL;
2333 /* used by the dlm code to make message headers unique, each
2334 * node in this domain must agree on this. */
2335 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2337 /* for now, uuid == domain */
2338 dlm = dlm_register_domain(osb->uuid_str, dlm_key);
2340 status = PTR_ERR(dlm);
2345 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2348 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2349 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2356 ocfs2_dlm_shutdown_debug(osb);
2358 kthread_stop(osb->dc_task);
2365 void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2369 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2371 ocfs2_drop_osb_locks(osb);
2374 kthread_stop(osb->dc_task);
2375 osb->dc_task = NULL;
2378 ocfs2_lock_res_free(&osb->osb_super_lockres);
2379 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2381 dlm_unregister_domain(osb->dlm);
2384 ocfs2_dlm_shutdown_debug(osb);
2389 static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
2391 struct ocfs2_lock_res *lockres = opaque;
2392 unsigned long flags;
2396 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2397 lockres->l_unlock_action);
2399 spin_lock_irqsave(&lockres->l_lock, flags);
2400 /* We tried to cancel a convert request, but it was already
2401 * granted. All we want to do here is clear our unlock
2402 * state. The wake_up call done at the bottom is redundant
2403 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2404 * hurt anything anyway */
2405 if (status == DLM_CANCELGRANT &&
2406 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2407 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2409 /* We don't clear the busy flag in this case as it
2410 * should have been cleared by the ast which the dlm
2412 goto complete_unlock;
2415 if (status != DLM_NORMAL) {
2416 mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
2417 "unlock_action %d\n", status, lockres->l_name,
2418 lockres->l_unlock_action);
2419 spin_unlock_irqrestore(&lockres->l_lock, flags);
2423 switch(lockres->l_unlock_action) {
2424 case OCFS2_UNLOCK_CANCEL_CONVERT:
2425 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2426 lockres->l_action = OCFS2_AST_INVALID;
2428 case OCFS2_UNLOCK_DROP_LOCK:
2429 lockres->l_level = LKM_IVMODE;
2435 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2437 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2438 spin_unlock_irqrestore(&lockres->l_lock, flags);
2440 wake_up(&lockres->l_event);
2445 static int ocfs2_drop_lock(struct ocfs2_super *osb,
2446 struct ocfs2_lock_res *lockres)
2448 enum dlm_status status;
2449 unsigned long flags;
2452 /* We didn't get anywhere near actually using this lockres. */
2453 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2456 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2457 lkm_flags |= LKM_VALBLK;
2459 spin_lock_irqsave(&lockres->l_lock, flags);
2461 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2462 "lockres %s, flags 0x%lx\n",
2463 lockres->l_name, lockres->l_flags);
2465 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2466 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2467 "%u, unlock_action = %u\n",
2468 lockres->l_name, lockres->l_flags, lockres->l_action,
2469 lockres->l_unlock_action);
2471 spin_unlock_irqrestore(&lockres->l_lock, flags);
2473 /* XXX: Today we just wait on any busy
2474 * locks... Perhaps we need to cancel converts in the
2476 ocfs2_wait_on_busy_lock(lockres);
2478 spin_lock_irqsave(&lockres->l_lock, flags);
2481 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2482 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2483 lockres->l_level == LKM_EXMODE &&
2484 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2485 lockres->l_ops->set_lvb(lockres);
2488 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2489 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2491 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2492 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2494 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2495 spin_unlock_irqrestore(&lockres->l_lock, flags);
2499 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2501 /* make sure we never get here while waiting for an ast to
2503 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2505 /* is this necessary? */
2506 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2507 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2508 spin_unlock_irqrestore(&lockres->l_lock, flags);
2510 mlog(0, "lock %s\n", lockres->l_name);
2512 status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
2513 ocfs2_unlock_ast, lockres);
2514 if (status != DLM_NORMAL) {
2515 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2516 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2517 dlm_print_one_lock(lockres->l_lksb.lockid);
2520 mlog(0, "lock %s, successfull return from dlmunlock\n",
2523 ocfs2_wait_on_busy_lock(lockres);
2529 /* Mark the lockres as being dropped. It will no longer be
2530 * queued if blocking, but we still may have to wait on it
2531 * being dequeued from the downconvert thread before we can consider
2534 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2535 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2538 struct ocfs2_mask_waiter mw;
2539 unsigned long flags;
2541 ocfs2_init_mask_waiter(&mw);
2543 spin_lock_irqsave(&lockres->l_lock, flags);
2544 lockres->l_flags |= OCFS2_LOCK_FREEING;
2545 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2546 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2547 spin_unlock_irqrestore(&lockres->l_lock, flags);
2549 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2551 status = ocfs2_wait_for_mask(&mw);
2555 spin_lock_irqsave(&lockres->l_lock, flags);
2557 spin_unlock_irqrestore(&lockres->l_lock, flags);
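/*
 * Userspace model of the mask-waiter idea used by
 * ocfs2_mark_lockres_freeing() above: set a flag under the lock, then block
 * until another flag clears.  The struct, flag values and the
 * condition-variable wait are illustrative assumptions; the real code uses
 * the lockres spinlock and completions rather than pthreads.
 */
#include <pthread.h>

#define EX_FLAG_QUEUED	0x01
#define EX_FLAG_FREEING	0x02

struct ex_res {
	unsigned long flags;		/* protected by ->lock */
	pthread_mutex_t lock;		/* assumed initialized by the caller */
	pthread_cond_t flags_changed;	/* assumed initialized by the caller */
};

/* Mark the resource as going away, then wait until it is no longer queued. */
static void ex_mark_freeing(struct ex_res *res)
{
	pthread_mutex_lock(&res->lock);
	res->flags |= EX_FLAG_FREEING;
	while (res->flags & EX_FLAG_QUEUED)
		pthread_cond_wait(&res->flags_changed, &res->lock);
	pthread_mutex_unlock(&res->lock);
}

/* Whoever clears EX_FLAG_QUEUED must signal the waiters. */
static void ex_clear_queued(struct ex_res *res)
{
	pthread_mutex_lock(&res->lock);
	res->flags &= ~EX_FLAG_QUEUED;
	pthread_cond_broadcast(&res->flags_changed);
	pthread_mutex_unlock(&res->lock);
}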
2560 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2561 struct ocfs2_lock_res *lockres)
2565 ocfs2_mark_lockres_freeing(lockres);
2566 ret = ocfs2_drop_lock(osb, lockres);
2571 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2573 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2574 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2577 int ocfs2_drop_inode_locks(struct inode *inode)
2583 /* No need to call ocfs2_mark_lockres_freeing here -
2584 * ocfs2_clear_inode has done it for us. */
2586 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2587 &OCFS2_I(inode)->ip_open_lockres);
2593 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2594 &OCFS2_I(inode)->ip_data_lockres);
2597 if (err < 0 && !status)
2600 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2601 &OCFS2_I(inode)->ip_meta_lockres);
2604 if (err < 0 && !status)
2607 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2608 &OCFS2_I(inode)->ip_rw_lockres);
2611 if (err < 0 && !status)
2618 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2621 assert_spin_locked(&lockres->l_lock);
2623 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2625 if (lockres->l_level <= new_level) {
2626 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2627 lockres->l_level, new_level);
2631 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2632 lockres->l_name, new_level, lockres->l_blocking);
2634 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2635 lockres->l_requested = new_level;
2636 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2639 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2640 struct ocfs2_lock_res *lockres,
2644 int ret, dlm_flags = LKM_CONVERT;
2645 enum dlm_status status;
2650 dlm_flags |= LKM_VALBLK;
2652 status = dlmlock(osb->dlm,
2657 OCFS2_LOCK_ID_MAX_LEN - 1,
2660 ocfs2_blocking_ast);
2661 if (status != DLM_NORMAL) {
2662 ocfs2_log_dlm_error("dlmlock", status, lockres);
2664 ocfs2_recover_from_dlm_error(lockres, 1);
2674 /* returns 1 when the caller should unlock and call dlmunlock */
2675 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2676 struct ocfs2_lock_res *lockres)
2678 assert_spin_locked(&lockres->l_lock);
2681 mlog(0, "lock %s\n", lockres->l_name);
2683 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2684 /* If we're already trying to cancel a lock conversion
2685 * then just drop the spinlock and allow the caller to
2686 * requeue this lock. */
2688 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2692 /* were we in a convert when we got the bast fire? */
2693 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2694 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2695 /* set things up for the unlockast to know to just
2696 * clear out the ast_action and unset busy, etc. */
2697 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2699 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2700 "lock %s, invalid flags: 0x%lx\n",
2701 lockres->l_name, lockres->l_flags);
2706 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2707 struct ocfs2_lock_res *lockres)
2710 enum dlm_status status;
2713 mlog(0, "lock %s\n", lockres->l_name);
2716 status = dlmunlock(osb->dlm,
2721 if (status != DLM_NORMAL) {
2722 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2724 ocfs2_recover_from_dlm_error(lockres, 0);
2727 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
2733 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2734 struct ocfs2_lock_res *lockres,
2735 struct ocfs2_unblock_ctl *ctl)
2737 unsigned long flags;
2745 spin_lock_irqsave(&lockres->l_lock, flags);
2747 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2750 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2752 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2753 spin_unlock_irqrestore(&lockres->l_lock, flags);
2755 ret = ocfs2_cancel_convert(osb, lockres);
2762 /* if we're blocking an exclusive and we have *any* holders,
2764 if ((lockres->l_blocking == LKM_EXMODE)
2765 && (lockres->l_ex_holders || lockres->l_ro_holders))
2768 /* If it's a PR we're blocking, then only
2769 * requeue if we've got any EX holders */
2770 if (lockres->l_blocking == LKM_PRMODE &&
2771 lockres->l_ex_holders)
2775 * Can we get a lock in this state if the holder counts are
2776 * zero? The meta data unblock code used to check this.
2778 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2779 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2782 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2784 if (lockres->l_ops->check_downconvert
2785 && !lockres->l_ops->check_downconvert(lockres, new_level))
2788 /* If we get here, then we know that there are no more
2789 * incompatible holders (and anyone asking for an incompatible
2790 * lock is blocked). We can now downconvert the lock */
2791 if (!lockres->l_ops->downconvert_worker)
2794 /* Some lockres types want to do a bit of work before
2795 * downconverting a lock. Allow that here. The worker function
2796 * may sleep, so we save off a copy of what we're blocking as
2797 * it may change while we're not holding the spin lock. */
2798 blocking = lockres->l_blocking;
2799 spin_unlock_irqrestore(&lockres->l_lock, flags);
2801 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2803 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2806 spin_lock_irqsave(&lockres->l_lock, flags);
2807 if (blocking != lockres->l_blocking) {
2808 /* If this changed underneath us, then we can't drop
2816 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2817 if (lockres->l_level == LKM_EXMODE)
2821 * We only set the lvb if the lock has been fully
2822 * refreshed - otherwise we risk setting stale
2823 			 * data. In that case there's no need to actually clear
2824 			 * out the lvb here either, as its value is still valid.
2826 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2827 lockres->l_ops->set_lvb(lockres);
2830 ocfs2_prepare_downconvert(lockres, new_level);
2831 spin_unlock_irqrestore(&lockres->l_lock, flags);
2832 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2838 spin_unlock_irqrestore(&lockres->l_lock, flags);
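/*
 * Standalone sketch of the downconvert-target calculation that
 * ocfs2_highest_compat_lock_level() (not shown in this excerpt) performs:
 * given the mode another node is blocked asking for, pick the highest of
 * our modes that is still compatible with it.  Only the three modes this
 * file uses (NL, PR, EX) are modeled, with the standard VMS-style DLM
 * compatibility table; the ex_* names and values are illustrative only.
 */
#include <stdio.h>

enum ex_mode { EX_NL = 0, EX_PR, EX_EX };	/* null, protected read, exclusive */

/* NL is compatible with everything, PR with PR, EX only with NL. */
static int ex_modes_compat(enum ex_mode a, enum ex_mode b)
{
	if (a == EX_NL || b == EX_NL)
		return 1;
	return a == EX_PR && b == EX_PR;
}

/* Highest mode we can keep while the blocking request is granted. */
static enum ex_mode ex_highest_compat(enum ex_mode blocking)
{
	enum ex_mode m;

	for (m = EX_EX; m > EX_NL; m--)
		if (ex_modes_compat(m, blocking))
			return m;
	return EX_NL;
}

int main(void)
{
	static const char *names[] = { "NL", "PR", "EX" };
	enum ex_mode b;

	for (b = EX_NL; b <= EX_EX; b++)
		printf("blocking=%s -> downconvert to %s\n",
		       names[b], names[ex_highest_compat(b)]);
	return 0;
}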
2845 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2848 struct inode *inode;
2849 struct address_space *mapping;
2851 inode = ocfs2_lock_res_inode(lockres);
2852 mapping = inode->i_mapping;
2855 * We need this before the filemap_fdatawrite() so that it can
2856 * transfer the dirty bit from the PTE to the
2857 * page. Unfortunately this means that even for EX->PR
2858 * downconverts, we'll lose our mappings and have to build
2861 unmap_mapping_range(mapping, 0, 0, 0);
2863 if (filemap_fdatawrite(mapping)) {
2864 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
2865 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2867 sync_mapping_buffers(mapping);
2868 if (blocking == LKM_EXMODE) {
2869 truncate_inode_pages(mapping, 0);
2871 /* We only need to wait on the I/O if we're not also
2872 * truncating pages because truncate_inode_pages waits
2873 * for us above. We don't truncate pages if we're
2874 * blocking anything < EXMODE because we want to keep
2875 * them around in that case. */
2876 filemap_fdatawait(mapping);
2879 return UNBLOCK_CONTINUE;
2882 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
2885 struct inode *inode = ocfs2_lock_res_inode(lockres);
2886 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
2888 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
2889 BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
2894 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
2898 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
2900 struct inode *inode = ocfs2_lock_res_inode(lockres);
2902 __ocfs2_stuff_meta_lvb(inode);
2906 * Does the final reference drop on our dentry lock. Right now this
2907 * happens in the downconvert thread, but we could choose to simplify the
2908 * dlmglue API and push these off to the ocfs2_wq in the future.
2910 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
2911 struct ocfs2_lock_res *lockres)
2913 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2914 ocfs2_dentry_lock_put(osb, dl);
2918 * d_delete() matching dentries before the lock downconvert.
2920 * At this point, any process waiting to destroy the
2921 * dentry_lock due to last ref count is stopped by the
2922 * OCFS2_LOCK_QUEUED flag.
2924 * We have two potential problems
2926 * 1) If we do the last reference drop on our dentry_lock (via dput)
2927 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
2928 * the downconvert to finish. Instead we take an elevated
2929 * reference and push the drop until after we've completed our
2930 * unblock processing.
2932 * 2) There might be another process with a final reference,
2933 * waiting on us to finish processing. If this is the case, we
2934  * detect it and exit out - there are no more dentries anyway.
2936 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
2939 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2940 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
2941 struct dentry *dentry;
2942 unsigned long flags;
2946 * This node is blocking another node from getting a read
2947 * lock. This happens when we've renamed within a
2948 * directory. We've forced the other nodes to d_delete(), but
2949 * we never actually dropped our lock because it's still
2950 * valid. The downconvert code will retain a PR for this node,
2951 * so there's no further work to do.
2953 if (blocking == LKM_PRMODE)
2954 return UNBLOCK_CONTINUE;
2957 * Mark this inode as potentially orphaned. The code in
2958 * ocfs2_delete_inode() will figure out whether it actually
2959 * needs to be freed or not.
2961 spin_lock(&oi->ip_lock);
2962 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
2963 spin_unlock(&oi->ip_lock);
2966 * Yuck. We need to make sure however that the check of
2967 * OCFS2_LOCK_FREEING and the extra reference are atomic with
2968 * respect to a reference decrement or the setting of that
2971 spin_lock_irqsave(&lockres->l_lock, flags);
2972 spin_lock(&dentry_attach_lock);
2973 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
2978 spin_unlock(&dentry_attach_lock);
2979 spin_unlock_irqrestore(&lockres->l_lock, flags);
2981 mlog(0, "extra_ref = %d\n", extra_ref);
2984 * We have a process waiting on us in ocfs2_dentry_iput(),
2985 * which means we can't have any more outstanding
2986 * aliases. There's no need to do any more work.
2989 return UNBLOCK_CONTINUE;
2991 spin_lock(&dentry_attach_lock);
2993 dentry = ocfs2_find_local_alias(dl->dl_inode,
2994 dl->dl_parent_blkno, 1);
2997 spin_unlock(&dentry_attach_lock);
2999 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3000 dentry->d_name.name);
3003 * The following dcache calls may do an
3004 * iput(). Normally we don't want that from the
3005 * downconverting thread, but in this case it's ok
3006 * because the requesting node already has an
3007 * exclusive lock on the inode, so it can't be queued
3008 * for a downconvert.
3013 spin_lock(&dentry_attach_lock);
3015 spin_unlock(&dentry_attach_lock);
3018 * If we are the last holder of this dentry lock, there is no
3019 * reason to downconvert so skip straight to the unlock.
3021 if (dl->dl_count == 1)
3022 return UNBLOCK_STOP_POST;
3024 return UNBLOCK_CONTINUE_POST;
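/*
 * Standalone model of the "elevated reference" idea described in the
 * comment above ocfs2_dentry_convert_worker(): take an extra reference
 * before walking aliases, so that a put done during the walk can never be
 * the final drop, and defer the final put to a post-unlock stage.  The
 * ex_dlock type, the plain mutex-protected counter and the callbacks are
 * illustrative assumptions, not the ocfs2 data structures.
 */
#include <assert.h>
#include <pthread.h>

struct ex_dlock {
	int count;			/* reference count, protected by ->lock */
	pthread_mutex_t lock;		/* assumed initialized by the caller */
};

static void ex_dlock_get(struct ex_dlock *dl)
{
	pthread_mutex_lock(&dl->lock);
	dl->count++;
	pthread_mutex_unlock(&dl->lock);
}

/* Returns 1 when this was the final reference and the caller must free. */
static int ex_dlock_put(struct ex_dlock *dl)
{
	int last;

	pthread_mutex_lock(&dl->lock);
	assert(dl->count > 0);
	last = (--dl->count == 0);
	pthread_mutex_unlock(&dl->lock);
	return last;
}

/* Unblock processing: pin the object first so nothing the walk does can
 * trigger the final free under our feet; the pinning reference is only
 * dropped afterwards, in the "post unlock" step. */
static void ex_unblock(struct ex_dlock *dl,
		       void (*walk_aliases)(struct ex_dlock *),
		       void (*free_dlock)(struct ex_dlock *))
{
	ex_dlock_get(dl);		/* elevated reference */
	walk_aliases(dl);		/* may call ex_dlock_put() internally */
	if (ex_dlock_put(dl))		/* deferred drop: "post unlock" */
		free_dlock(dl);
}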
3027 void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3028 struct ocfs2_lock_res *lockres)
3031 struct ocfs2_unblock_ctl ctl = {0, 0,};
3032 unsigned long flags;
3034 /* Our reference to the lockres in this function can be
3035 * considered valid until we remove the OCFS2_LOCK_QUEUED
3041 BUG_ON(!lockres->l_ops);
3043 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3045 /* Detect whether a lock has been marked as going away while
3046 * the downconvert thread was processing other things. A lock can
3047 * still be marked with OCFS2_LOCK_FREEING after this check,
3048 * but short circuiting here will still save us some
3050 spin_lock_irqsave(&lockres->l_lock, flags);
3051 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3053 spin_unlock_irqrestore(&lockres->l_lock, flags);
3055 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3059 spin_lock_irqsave(&lockres->l_lock, flags);
3061 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3062 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3064 ocfs2_schedule_blocked_lock(osb, lockres);
3066 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3067 ctl.requeue ? "yes" : "no");
3068 spin_unlock_irqrestore(&lockres->l_lock, flags);
3070 if (ctl.unblock_action != UNBLOCK_CONTINUE
3071 && lockres->l_ops->post_unlock)
3072 lockres->l_ops->post_unlock(osb, lockres);
3077 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3078 struct ocfs2_lock_res *lockres)
3082 assert_spin_locked(&lockres->l_lock);
3084 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3085 /* Do not schedule a lock for downconvert when it's on
3086 * the way to destruction - any nodes wanting access
3087 * to the resource will get it soon. */
3088 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3089 lockres->l_name, lockres->l_flags);
3093 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3095 spin_lock(&osb->dc_task_lock);
3096 if (list_empty(&lockres->l_blocked_list)) {
3097 list_add_tail(&lockres->l_blocked_list,
3098 &osb->blocked_lock_list);
3099 osb->blocked_lock_count++;
3101 spin_unlock(&osb->dc_task_lock);
3106 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3108 unsigned long processed;
3109 struct ocfs2_lock_res *lockres;
3113 spin_lock(&osb->dc_task_lock);
3114 /* grab this early so we know to try again if a state change and
3115 	 * wake-up happen part-way through our work */
3116 osb->dc_work_sequence = osb->dc_wake_sequence;
3118 processed = osb->blocked_lock_count;
3120 BUG_ON(list_empty(&osb->blocked_lock_list));
3122 lockres = list_entry(osb->blocked_lock_list.next,
3123 struct ocfs2_lock_res, l_blocked_list);
3124 list_del_init(&lockres->l_blocked_list);
3125 osb->blocked_lock_count--;
3126 spin_unlock(&osb->dc_task_lock);
3131 ocfs2_process_blocked_lock(osb, lockres);
3133 spin_lock(&osb->dc_task_lock);
3135 spin_unlock(&osb->dc_task_lock);
3140 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3144 spin_lock(&osb->dc_task_lock);
3145 if (list_empty(&osb->blocked_lock_list))
3148 spin_unlock(&osb->dc_task_lock);
3152 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
3154 int should_wake = 0;
3156 spin_lock(&osb->dc_task_lock);
3157 if (osb->dc_work_sequence != osb->dc_wake_sequence)
3159 spin_unlock(&osb->dc_task_lock);
3164 int ocfs2_downconvert_thread(void *arg)
3167 struct ocfs2_super *osb = arg;
3169 /* only quit once we've been asked to stop and there is no more
3171 while (!(kthread_should_stop() &&
3172 ocfs2_downconvert_thread_lists_empty(osb))) {
3174 wait_event_interruptible(osb->dc_event,
3175 ocfs2_downconvert_thread_should_wake(osb) ||
3176 kthread_should_stop());
3178 mlog(0, "downconvert_thread: awoken\n");
3180 ocfs2_downconvert_thread_do_work(osb);
3183 osb->dc_task = NULL;
3187 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
3189 spin_lock(&osb->dc_task_lock);
3190 	/* make sure the downconvert thread gets a swipe at whatever changes
3191 	 * the caller may have made to the blocked lock state */
3192 osb->dc_wake_sequence++;
3193 spin_unlock(&osb->dc_task_lock);
3194 wake_up(&osb->dc_event);
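/*
 * Userspace model of the dc_wake_sequence/dc_work_sequence handshake used
 * above: the waker bumps a wake sequence and signals; the worker snapshots
 * the wake sequence into a work sequence before processing and only sleeps
 * while the two match, so a wake-up that races with in-progress work is
 * never lost.  The ex_dc type and the condition-variable wait are
 * illustrative assumptions; the kernel code uses a wait queue.
 */
#include <pthread.h>

struct ex_dc {
	unsigned long wake_seq;		/* bumped by ex_dc_wake() */
	unsigned long work_seq;		/* snapshot taken by the worker */
	pthread_mutex_t lock;		/* assumed initialized by the caller */
	pthread_cond_t event;		/* assumed initialized by the caller */
};

/* Producer side: note that something changed and kick the worker. */
static void ex_dc_wake(struct ex_dc *dc)
{
	pthread_mutex_lock(&dc->lock);
	dc->wake_seq++;
	pthread_cond_signal(&dc->event);
	pthread_mutex_unlock(&dc->lock);
}

/* Worker side: sleep only while no wake-up arrived since our last pass. */
static void ex_dc_wait(struct ex_dc *dc)
{
	pthread_mutex_lock(&dc->lock);
	while (dc->work_seq == dc->wake_seq)
		pthread_cond_wait(&dc->event, &dc->lock);
	/* Snapshot before doing the work: a wake that lands while we are
	 * busy leaves work_seq != wake_seq, so the next call returns
	 * immediately instead of sleeping. */
	dc->work_seq = dc->wake_seq;
	pthread_mutex_unlock(&dc->lock);
}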