[DLM] fix aborted recovery during node removal
author David Teigland <teigland@redhat.com>
Tue, 31 Oct 2006 17:56:01 +0000 (11:56 -0600)
committer Steven Whitehouse <swhiteho@redhat.com>
Thu, 30 Nov 2006 15:35:13 +0000 (10:35 -0500)
Red Hat BZ 211914

With the new cluster infrastructure, dlm recovery for a node removal can
be aborted and restarted for a node addition.  When this happens, the
restarted recovery isn't aware that it's doing recovery for the earlier
removal as well as the addition.  So, it then skips the recovery steps
only required when nodes are removed.  This can result in locks not being
purged for failed/removed nodes.  The fix is to check, at the start of a
new recovery sequence, for removed nodes whose recovery has not yet
completed.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
fs/dlm/member.c
fs/dlm/recoverd.c
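
Before the diffs, a sketch of the failure mode may help.  The following is
not DLM code: it is a minimal userspace toy (all names invented) that
models how a recovery pass handling a node removal can be aborted by a
node-addition event, after which the restarted pass recomputes "neg" from
the current membership alone, finds no negative change, and never runs the
removal-only purge steps.

#include <stdio.h>
#include <stdbool.h>

static int  nodes_gone_pending;  /* removed nodes awaiting recovery  */
static bool locks_purged;

static void recovery_pass(int departed_now, bool aborted)
{
        int neg = departed_now;  /* without the fix, only THIS pass's
                                    departures count as negative      */
        nodes_gone_pending += departed_now;

        if (aborted)             /* superseded by a newer event       */
                return;

        if (neg) {               /* the removal-only "neg" steps      */
                locks_purged = true;
                nodes_gone_pending = 0;
        }
}

int main(void)
{
        recovery_pass(1, true);  /* removal: recovery aborted         */
        recovery_pass(0, false); /* addition: recovery restarted      */
        printf("gone pending=%d, locks purged=%s\n",
               nodes_gone_pending, locks_purged ? "yes" : "no");
        /* prints "gone pending=1, locks purged=no": stale locks left */
        return 0;
}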

diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index a3f7de7f3a8f969b9500875203297f4b8d6872ef..85e2897bd7400fc4155948fc8eb1c81cb1ff8e01 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -186,6 +186,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
        struct dlm_member *memb, *safe;
        int i, error, found, pos = 0, neg = 0, low = -1;
 
+       /* previously removed members that we've not finished removing need to
+          count as a negative change so the "neg" recovery steps will happen */
+
+       list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
+               log_debug(ls, "prev removed member %d", memb->nodeid);
+               neg++;
+       }
+
        /* move departed members from ls_nodes to ls_nodes_gone */
 
        list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
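
This hunk is the core of the fix: before diffing the new configuration
against ls_nodes, any members still sitting on ls_nodes_gone from an
earlier, uncompleted recovery are counted into "neg".  Below is a rough
standalone mirror of that logic, using plain singly linked lists instead
of the kernel's struct list_head; apart from the roles of the two lists
and the log message, everything here is invented for illustration.

#include <stdio.h>
#include <stdbool.h>

struct member { int nodeid; struct member *next; };

struct lockspace {
        struct member *nodes;       /* current members                 */
        struct member *nodes_gone;  /* removed, recovery not completed */
};

static bool in_new_config(int nodeid, const int *cfg, int n)
{
        for (int i = 0; i < n; i++)
                if (cfg[i] == nodeid)
                        return true;
        return false;
}

/* mirrors dlm_recover_members(): the returned "neg" decides whether
   the removal-only recovery steps run */
static int recover_members(struct lockspace *ls, const int *cfg, int n)
{
        int neg = 0;

        /* the fix: members removed by an earlier, aborted recovery
           are still on nodes_gone and must count as negative too */
        for (struct member *m = ls->nodes_gone; m; m = m->next) {
                printf("prev removed member %d\n", m->nodeid);
                neg++;
        }

        /* move members departed in this event to nodes_gone */
        struct member **pp = &ls->nodes;
        while (*pp) {
                struct member *m = *pp;
                if (!in_new_config(m->nodeid, cfg, n)) {
                        *pp = m->next;
                        m->next = ls->nodes_gone;
                        ls->nodes_gone = m;
                        neg++;
                } else {
                        pp = &m->next;
                }
        }
        return neg;
}

int main(void)
{
        struct member n2 = { 2, NULL };
        struct member n1 = { 1, &n2 };
        struct lockspace ls = { &n1, NULL };

        int cfg_removal[]  = { 1 };    /* node 2 leaves            */
        int cfg_addition[] = { 1, 3 }; /* node 3 joins right after */

        /* pass 1 runs recover_members() but is aborted before the
           purge; pass 2 still sees neg > 0 thanks to the pre-count */
        printf("neg=%d\n", recover_members(&ls, cfg_removal, 1));
        printf("neg=%d\n", recover_members(&ls, cfg_addition, 2));
        return 0;
}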
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 362e3eff4dc9afef2e8bd02777ed87591636fcfe..4a1d6023fd9b9dea797b7058267599f172f003d8 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -164,6 +164,13 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
                 */
 
                dlm_recover_rsbs(ls);
+       } else {
+               /*
+                * Other lockspace members may be going through the "neg" steps
+                * while also adding us to the lockspace, in which case they'll
+                * be looking for this status bit during dlm_recover_locks().
+                */
+               dlm_set_recover_status(ls, DLM_RS_LOCKS);
        }
 
        dlm_release_root_list(ls);
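
The recoverd.c hunk handles the other side of the same situation: a node
that has just been added saw no departures (neg == 0) and skips the "neg"
steps, but existing members running those steps will look for its
DLM_RS_LOCKS status bit during dlm_recover_locks(), so it must still
publish that bit.  A toy model of the handshake follows; the shared status
word, the bit value, and both function names are invented (in the kernel
the bits travel in rcom status messages between nodes).

#include <stdio.h>
#include <stdint.h>

#define RS_LOCKS 0x10               /* illustrative bit value only     */

static uint32_t new_member_status;  /* stands in for rcom status replies */

/* the newly added member: neg == 0, so it takes the else branch above
   and only publishes the status bit */
static void new_member_recover(int neg)
{
        if (neg) {
                /* recover masters, locks, rsbs ... (not taken here) */
        } else {
                new_member_status |= RS_LOCKS;  /* the added line */
        }
}

/* an existing member in dlm_recover_locks(): conceptually, it cannot
   finish until every member, including the new one, shows RS_LOCKS */
static void existing_member_wait_locks(void)
{
        while (!(new_member_status & RS_LOCKS))
                ;  /* in reality: re-send status requests and sleep */
        printf("new member reports RS_LOCKS; lock recovery completes\n");
}

int main(void)
{
        new_member_recover(0);
        existing_member_wait_locks();
        return 0;
}

Without the added dlm_set_recover_status() call, the established members
would keep waiting for a status bit the new member never sets.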