/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE     8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

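/*
 * Establish a fresh clientid on the server: SETCLIENTID followed by
 * SETCLIENTID_CONFIRM, then schedule lease renewal.
 */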
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
                        nfs_callback_tcpport, cred);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp, cred);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
        struct rpc_cred *cred = NULL;

        if (clp->cl_machine_cred != NULL)
                cred = get_rpccred(clp->cl_machine_cred);
        return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = clp->cl_machine_cred;
        clp->cl_machine_cred = NULL;
        spin_unlock(&clp->cl_lock);
        if (cred != NULL)
                put_rpccred(cred);
}

struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred = NULL;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
        return cred;
}

static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_renew_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        return cred;
}

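/*
 * Choose a credential for SETCLIENTID: prefer the machine credential
 * if one is set, otherwise fall back to the credential of the first
 * state owner on this client.
 */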
static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        if (cred != NULL)
                goto out;
        pos = rb_first(&clp->cl_state_owners);
        if (pos != NULL) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                cred = get_rpccred(sp->so_cred);
        }
out:
        spin_unlock(&clp->cl_lock);
        return cred;
}

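/*
 * Allocate a unique 64-bit id bounded below by @minval and masked to
 * @maxbits bits, then insert it into the red-black tree @root.  The
 * starting point is randomized to keep the distribution flat; on a
 * collision we walk the tree in id order until we find a free value,
 * wrapping back to @minval if we run off the end of the mask or the
 * tree.
 */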
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
                __u64 minval, int maxbits)
{
        struct rb_node **p, *parent;
        struct nfs_unique_id *pos;
        __u64 mask = ~0ULL;

        if (maxbits < 64)
                mask = (1ULL << maxbits) - 1ULL;

        /* Ensure distribution is more or less flat */
        get_random_bytes(&new->id, sizeof(new->id));
        new->id &= mask;
        if (new->id < minval)
                new->id += minval;
retry:
        p = &root->rb_node;
        parent = NULL;

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);

                if (new->id < pos->id)
                        p = &(*p)->rb_left;
                else if (new->id > pos->id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return;
id_exists:
        for (;;) {
                new->id++;
                if (new->id < minval || (new->id & mask) != new->id) {
                        new->id = minval;
                        break;
                }
                parent = rb_next(parent);
                if (parent == NULL)
                        break;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);
                if (new->id < pos->id)
                        break;
        }
        goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
        rb_erase(&id->rb_node, root);
}

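/*
 * State owners are kept in a per-nfs_client red-black tree, keyed
 * first by server and then by credential.  A successful lookup takes
 * a reference on the matching owner.  The caller must hold
 * clp->cl_lock.
 */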
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp, *res = NULL;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        res = sp;
                        break;
                }
        }
        return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (new->so_server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (new->so_server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (new->so_cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (new->so_cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        return sp;
                }
        }
        nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
        rb_link_node(&new->so_client_node, parent, p);
        rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
        return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node))
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
        nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 *
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node)) {
                struct nfs_client *clp = sp->so_client;

                spin_lock(&clp->cl_lock);
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
                RB_CLEAR_NODE(&sp->so_client_node);
                spin_unlock(&clp->cl_lock);
        }
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *new;

        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(server, cred);
        spin_unlock(&clp->cl_lock);
        if (sp != NULL)
                return sp;
        new = nfs4_alloc_state_owner();
        if (new == NULL)
                return NULL;
        new->so_client = clp;
        new->so_server = server;
        new->so_cred = cred;
        spin_lock(&clp->cl_lock);
        sp = nfs4_insert_state_owner(clp, new);
        spin_unlock(&clp->cl_lock);
        if (sp == new)
                get_rpccred(cred);
        else {
                rpc_destroy_wait_queue(&new->so_sequence.wait);
                kfree(new);
        }
        return sp;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        nfs4_remove_state_owner(clp, sp);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&sp->so_sequence.wait);
        put_rpccred(cred);
        kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        seqlock_init(&state->seqlock);
        return state;
}

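/*
 * Update the open mode of @state, keeping the owner's so_states list
 * ordered so that writable states stay at the head.  The reclaim code
 * relies on this ordering; see nfs4_reclaim_open_state().
 */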
void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
        if (state->state == mode)
                return;
        /* NB! List reordering - see the reclaim code for why.  */
        if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
                if (mode & FMODE_WRITE)
                        list_move(&state->open_states, &state->owner->so_states);
                else
                        list_move_tail(&state->open_states, &state->owner->so_states);
        }
        state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner != owner)
                        continue;
                if (atomic_inc_not_zero(&state->count))
                        return state;
        }
        return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

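/*
 * Find or create the nfs4_state shared by @owner and @inode.  The
 * candidate state is allocated with no locks held, and the lookup is
 * then repeated under owner->so_lock and inode->i_lock, so a racing
 * inserter wins and the spare allocation is freed.
 */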
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
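/*
 * Drop one reference of the given open mode and recompute which share
 * modes are still in use.  Only when a mode has fallen to zero while
 * the server still holds a stateid covering it (the NFS_O_*_STATE
 * bits) do we call nfs4_do_close() to update the server; otherwise we
 * just release our local references.
 */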
static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait)
{
        struct nfs4_state_owner *owner = state->owner;
        int call_close = 0;
        int newstate;

        atomic_inc(&owner->so_count);
        /* Protect against nfs4_find_state() */
        spin_lock(&owner->so_lock);
        switch (mode & (FMODE_READ | FMODE_WRITE)) {
                case FMODE_READ:
                        state->n_rdonly--;
                        break;
                case FMODE_WRITE:
                        state->n_wronly--;
                        break;
                case FMODE_READ|FMODE_WRITE:
                        state->n_rdwr--;
        }
        newstate = FMODE_READ|FMODE_WRITE;
        if (state->n_rdwr == 0) {
                if (state->n_rdonly == 0) {
                        newstate &= ~FMODE_READ;
                        call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (state->n_wronly == 0) {
                        newstate &= ~FMODE_WRITE;
                        call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (newstate == 0)
                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        }
        nfs4_state_set_mode_locked(state, newstate);
        spin_unlock(&owner->so_lock);

        if (!call_close) {
                nfs4_put_open_state(state);
                nfs4_put_state_owner(owner);
        } else
                nfs4_do_close(path, state, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
{
        __nfs4_close(path, state, mode, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode)
{
        __nfs4_close(path, state, mode, 1);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the
 * given lock owner.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs_client *clp = state->owner->so_client;

        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
        spin_lock_init(&lsp->ls_sequence.lock);
        INIT_LIST_HEAD(&lsp->ls_sequence.list);
        lsp->ls_seqid.sequence = &lsp->ls_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        /* Set ls_state up front: nfs4_free_lock_state() dereferences it,
         * and may be called on a lock_state that was never linked into
         * state->lock_states */
        lsp->ls_state = state;
        spin_lock(&clp->cl_lock);
        nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs_client *clp = lsp->ls_state->owner->so_client;

        spin_lock(&clp->cl_lock);
        nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
        kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
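/*
 * Note the optimistic allocation pattern below: the lookup runs under
 * state->state_lock, but a missing lock_state is allocated with the
 * lock dropped and the search is then retried, so a concurrent
 * inserter may win; any unused spare is freed on the way out.
 */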
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        new->ls_state = state;
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        if (new != NULL)
                nfs4_free_lock_state(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

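/*
 * VFS callbacks tying the lifetime of a struct file_lock to its
 * nfs4_lock_state: copying a lock takes an extra reference on the
 * lock state, and releasing the lock's private data drops one.
 */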
static struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
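/*
 * The open stateid is copied under state->seqlock so that a concurrent
 * update cannot hand back a torn value.  If the fl_owner holds an
 * initialized lock state, its lock stateid is used in preference to
 * the open stateid.
 */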
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        int seq;

        do {
                seq = read_seqbegin(&state->seqlock);
                memcpy(dst, &state->stateid, sizeof(*dst));
        } while (read_seqretry(&state->seqlock, seq));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new != NULL) {
                new->sequence = counter;
                INIT_LIST_HEAD(&new->list);
        }
        return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
        if (!list_empty(&seqid->list)) {
                struct rpc_sequence *sequence = seqid->sequence->sequence;

                spin_lock(&sequence->lock);
                list_del(&seqid->list);
                spin_unlock(&sequence->lock);
                rpc_wake_up(&sequence->wait);
        }
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
        switch (status) {
                case 0:
                        break;
                case -NFS4ERR_BAD_SEQID:
                        if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
                                return;
                        printk(KERN_WARNING "NFS: v4 server returned a bad"
                                        " sequence-id error on an"
                                        " unconfirmed sequence %p!\n",
                                        seqid->sequence);
                        /* Fall through: don't bump the seqid for an
                         * unconfirmed sequence either */
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_BADXDR:
                case -NFS4ERR_RESOURCE:
                case -NFS4ERR_NOFILEHANDLE:
                        /* Non-seqid mutating errors */
                        return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        if (status == -NFS4ERR_BAD_SEQID) {
                struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                struct nfs4_state_owner, so_seqid);
                nfs4_drop_state_owner(sp);
        }
        nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        nfs_increment_seqid(status, seqid);
}

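/*
 * Serialize seqid-mutating operations: each nfs_seqid joins the tail
 * of its counter's sequence list, and only the seqid at the head may
 * proceed.  Everyone else puts the RPC task to sleep on the sequence
 * wait queue and gets -EAGAIN; nfs_free_seqid() wakes the queue when
 * the head drops out.
 */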
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        spin_lock(&sequence->lock);
        if (list_empty(&seqid->list))
                list_add_tail(&seqid->list, &sequence->list);
        if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
                goto unlock;
        rpc_sleep_on(&sequence->wait, task, NULL);
        status = -EAGAIN;
unlock:
        spin_unlock(&sequence->lock);
        return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
        smp_mb__before_clear_bit();
        clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs_client *clp)
{
        struct task_struct *task;

        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(reclaimer, clp, "%s-reclaim",
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_ADDR));
        if (!IS_ERR(task))
                return;
        nfs4_clear_recover_bit(clp);
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
        if (!clp)
                return;
        if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
                nfs4_recover_state(clp);
}

static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __func__, status);
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                                break;
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk(KERN_ERR "%s: Lock reclaim failed!\n",
                                                        __func__);
                                }
                                continue;
                        }
                }
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __func__, status);
                        case -ENOENT:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /*
                                 * Open state on this file cannot be recovered
                                 * All we can do is revert to using the zero stateid.
                                 */
                                memset(state->stateid.data, 0,
                                        sizeof(state->stateid.data));
                                /* Mark the file as being 'closed' */
                                state->state = 0;
                                break;
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

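/*
 * Prepare all state for reclaim after the clientid has to be
 * re-established: reset every open-owner and lock-owner sequence id
 * to zero, and clear the NFS_O_*_STATE, NFS_DELEGATED_STATE and
 * NFS_LOCK_INITIALIZED flags so that recovery OPEN and LOCK requests
 * start from scratch.
 */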
static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;

        /* Reset all sequence ids to zero */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                sp->so_seqid.counter = 0;
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
                        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
                        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
                        clear_bit(NFS_O_RDWR_STATE, &state->flags);
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                lock->ls_seqid.counter = 0;
                                lock->ls_seqid.flags = 0;
                                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                        }
                }
                spin_unlock(&sp->so_lock);
        }
}

static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
        struct rb_node *pos;
        int status = 0;

        /* Note: list is protected by exclusive lock on cl->cl_sem */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                status = nfs4_reclaim_open_state(sp, ops);
                if (status < 0)
                        break;
        }
        return status;
}

static int nfs4_check_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        int status = -NFS4ERR_EXPIRED;

        /* Are there any open files on this volume? */
        cred = nfs4_get_renew_cred(clp);
        if (cred != NULL) {
                /* Yes there are: try to renew the old lease */
                status = nfs4_proc_renew(clp, cred);
                put_rpccred(cred);
                return status;
        }

        /* "reboot" to ensure we clear all state on the server */
        clp->cl_boot_time = CURRENT_TIME;
        return status;
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        int status = -ENOENT;

        cred = nfs4_get_setclientid_cred(clp);
        if (cred != NULL) {
                status = nfs4_init_client(clp, cred);
                put_rpccred(cred);
                /* Handle case where the user hasn't set up machine creds */
                if (status == -EACCES && cred == clp->cl_machine_cred) {
                        nfs4_clear_machine_cred(clp);
                        status = -EAGAIN;
                }
        }
        return status;
}

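/*
 * The state recovery thread.  With clp->cl_sem held exclusively:
 * first see whether the old lease can simply be renewed; if not, mark
 * all state for reclaim, re-establish the clientid, and replay open
 * and lock state with the chosen recovery ops (falling back from
 * reboot to network-partition recovery on NFS4ERR_NO_GRACE), before
 * reaping any delegations that could not be reclaimed.
 */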
static int reclaimer(void *ptr)
{
        struct nfs_client *clp = ptr;
        const struct nfs4_state_recovery_ops *ops;
        int status = 0;

        allow_signal(SIGKILL);

        /* Ensure exclusive access to NFSv4 state */
        down_write(&clp->cl_sem);
        while (!list_empty(&clp->cl_superblocks)) {
                ops = &nfs4_network_partition_recovery_ops;
                status = nfs4_check_lease(clp);
                switch (status) {
                        case 0:
                        case -NFS4ERR_CB_PATH_DOWN:
                                goto out;
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_LEASE_MOVED:
                                ops = &nfs4_reboot_recovery_ops;
                }

                /* We're going to have to re-establish a clientid */
                nfs4_state_mark_reclaim(clp);

                status = nfs4_reclaim_lease(clp);
                if (status) {
                        if (status == -EAGAIN)
                                continue;
                        goto out_error;
                }

                /* Mark all delegations for reclaim */
                nfs_delegation_mark_reclaim(clp);
                /* Note: list is protected by exclusive lock on cl->cl_sem */
                status = nfs4_do_reclaim(clp, ops);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_do_reclaim(clp, ops);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                continue;
                        if (status == -NFS4ERR_EXPIRED)
                                continue;
                }
                nfs_delegation_reap_unclaimed(clp);
                break;
        }
out:
        up_write(&clp->cl_sem);
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_clear_recover_bit(clp);
        nfs_put_client(clp);
        module_put_and_exit(0);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
                        " with error %d\n", clp->cl_hostname, -status);
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
        goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */