/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between the client and server implementations. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>

#include <net/ipv6.h>

#define NLMDBG_FACILITY         NLMDBG_HOSTCACHE
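/*
 * Hosts are hashed on their IPv4 address.  The lifetime constants are
 * in jiffies: peers are rebound at most once a minute, cached entries
 * expire after five minutes of disuse, and garbage collection runs at
 * most every two minutes.
 */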
#define NLM_HOST_NRHASH         32
#define NLM_ADDRHASH(addr)      (ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND         (60 * HZ)
#define NLM_HOST_EXPIRE         (300 * HZ)
#define NLM_HOST_COLLECT        (120 * HZ)

static struct hlist_head        nlm_hosts[NLM_HOST_NRHASH];
static unsigned long            next_gc;
static int                      nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);


static void                     nlm_gc_hosts(void);
static struct nsm_handle *      __nsm_find(const struct sockaddr_in *,
                                        const char *, unsigned int, int);
static struct nsm_handle *      nsm_find(const struct sockaddr_in *sin,
                                         const char *hostname,
                                         unsigned int hostname_len);

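/*
 * Format the given socket address as a printable string in @buf.
 * IPv4, IPv6 and IPv4-mapped IPv6 addresses are handled; any other
 * address family is reported as unsupported.
 */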
static void nlm_display_address(const struct sockaddr *sap,
                                char *buf, const size_t len)
{
        const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
        const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;

        switch (sap->sa_family) {
        case AF_UNSPEC:
                snprintf(buf, len, "unspecified");
                break;
        case AF_INET:
                snprintf(buf, len, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
                break;
        case AF_INET6:
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        snprintf(buf, len, NIPQUAD_FMT,
                                 NIPQUAD(sin6->sin6_addr.s6_addr32[3]));
                else
                        snprintf(buf, len, NIP6_FMT, NIP6(sin6->sin6_addr));
                break;
        default:
                snprintf(buf, len, "unsupported address family");
                break;
        }
}

/*
 * Common host lookup routine for server & client
 */
static struct nlm_host *nlm_lookup_host(int server,
                                        const struct sockaddr_in *sin,
                                        int proto, u32 version,
                                        const char *hostname,
                                        unsigned int hostname_len,
                                        const struct sockaddr_in *ssin)
{
        struct hlist_head *chain;
        struct hlist_node *pos;
        struct nlm_host *host;
        struct nsm_handle *nsm = NULL;
        int             hash;

        dprintk("lockd: nlm_lookup_host(proto=%d, vers=%u,"
                        " my role is %s, hostname=%.*s)\n",
                        proto, version, server ? "server" : "client",
                        hostname_len, hostname ? hostname : "<none>");

        hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

        /* Lock hash table */
        mutex_lock(&nlm_host_mutex);

        if (time_after_eq(jiffies, next_gc))
                nlm_gc_hosts();

        /* We may keep several nlm_host objects for a peer, because each
         * nlm_host is identified by
         * (address, protocol, version, server/client)
         * We could probably simplify this a little by putting all those
         * different NLM rpc_clients into one single nlm_host object.
         * This would allow us to have one nlm_host per address.
         */
        chain = &nlm_hosts[hash];
        hlist_for_each_entry(host, pos, chain, h_hash) {
                if (!nlm_cmp_addr(&host->h_addr, sin))
                        continue;

                /* See if we have an NSM handle for this client */
                if (!nsm)
                        nsm = host->h_nsmhandle;

                if (host->h_proto != proto)
                        continue;
                if (host->h_version != version)
                        continue;
                if (host->h_server != server)
                        continue;
                if (!nlm_cmp_addr(&host->h_saddr, ssin))
                        continue;

                /* Move to head of hash chain. */
                hlist_del(&host->h_hash);
                hlist_add_head(&host->h_hash, chain);

                nlm_get_host(host);
                dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
                                host->h_name, host->h_addrbuf);
                goto out;
        }

        /*
         * The host wasn't in our hash table.  If we don't
         * have an NSM handle for it yet, create one.
         */
        if (nsm)
                atomic_inc(&nsm->sm_count);
        else {
                host = NULL;
                nsm = nsm_find(sin, hostname, hostname_len);
                if (!nsm) {
                        dprintk("lockd: nlm_lookup_host failed; "
                                "no nsm handle\n");
                        goto out;
                }
        }

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host) {
                nsm_release(nsm);
                dprintk("lockd: nlm_lookup_host failed; no memory\n");
                goto out;
        }
        host->h_name       = nsm->sm_name;
        host->h_addr       = *sin;
        host->h_addr.sin_port = 0;      /* ouch! */
        host->h_saddr      = *ssin;
        host->h_version    = version;
        host->h_proto      = proto;
        host->h_rpcclnt    = NULL;
        mutex_init(&host->h_mutex);
        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
        host->h_expires    = jiffies + NLM_HOST_EXPIRE;
        atomic_set(&host->h_count, 1);
        init_waitqueue_head(&host->h_gracewait);
        init_rwsem(&host->h_rwsem);
        host->h_state      = 0;                 /* pseudo NSM state */
        host->h_nsmstate   = 0;                 /* real NSM state */
        host->h_nsmhandle  = nsm;
        host->h_server     = server;
        hlist_add_head(&host->h_hash, chain);
        INIT_LIST_HEAD(&host->h_lockowners);
        spin_lock_init(&host->h_lock);
        INIT_LIST_HEAD(&host->h_granted);
        INIT_LIST_HEAD(&host->h_reclaim);

        nrhosts++;

        nlm_display_address((struct sockaddr *)&host->h_addr,
                                host->h_addrbuf, sizeof(host->h_addrbuf));
        nlm_display_address((struct sockaddr *)&host->h_saddr,
                                host->h_saddrbuf, sizeof(host->h_saddrbuf));

        dprintk("lockd: nlm_lookup_host created host %s\n",
                        host->h_name);

out:
        mutex_unlock(&nlm_host_mutex);
        return host;
}

/*
 * Destroy a host
 */
static void
nlm_destroy_host(struct nlm_host *host)
{
        struct rpc_clnt *clnt;

        BUG_ON(!list_empty(&host->h_lockowners));
        BUG_ON(atomic_read(&host->h_count));

        /*
         * Release NSM handle and unmonitor host.
         */
        nsm_unmonitor(host);

        clnt = host->h_rpcclnt;
        if (clnt != NULL)
                rpc_shutdown_client(clnt);
        kfree(host);
}

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *sin,
                                     int proto, u32 version,
                                     const char *hostname,
                                     unsigned int hostname_len)
{
        struct sockaddr_in ssin = {0};

        return nlm_lookup_host(0, sin, proto, version,
                               hostname, hostname_len, &ssin);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
                        const char *hostname, unsigned int hostname_len)
{
        struct sockaddr_in ssin = {0};

        ssin.sin_addr = rqstp->rq_daddr.addr;
        return nlm_lookup_host(1, svc_addr_in(rqstp),
                               rqstp->rq_prot, rqstp->rq_vers,
                               hostname, hostname_len, &ssin);
}

/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
        struct rpc_clnt *clnt;

        dprintk("lockd: nlm_bind_host %s (%s), my addr=%s\n",
                        host->h_name, host->h_addrbuf, host->h_saddrbuf);

        /* Lock host handle */
        mutex_lock(&host->h_mutex);

        /* If we've already created an RPC client, check whether
         * RPC rebind is required
         */
        if ((clnt = host->h_rpcclnt) != NULL) {
                if (time_after_eq(jiffies, host->h_nextrebind)) {
                        rpc_force_rebind(clnt);
                        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
                        dprintk("lockd: next rebind in %lu jiffies\n",
                                        host->h_nextrebind - jiffies);
                }
        } else {
                unsigned long increment = nlmsvc_timeout;
                struct rpc_timeout timeparms = {
                        .to_initval     = increment,
                        .to_increment   = increment,
                        .to_maxval      = increment * 6UL,
                        .to_retries     = 5U,
                };
                struct rpc_create_args args = {
                        .protocol       = host->h_proto,
                        .address        = (struct sockaddr *)&host->h_addr,
                        .addrsize       = sizeof(host->h_addr),
                        .saddress       = (struct sockaddr *)&host->h_saddr,
                        .timeout        = &timeparms,
                        .servername     = host->h_name,
                        .program        = &nlm_program,
                        .version        = host->h_version,
                        .authflavor     = RPC_AUTH_UNIX,
                        .flags          = (RPC_CLNT_CREATE_NOPING |
                                           RPC_CLNT_CREATE_AUTOBIND),
                };

                /*
                 * lockd retries server side blocks automatically so we want
                 * those to be soft RPC calls. Client side calls need to be
                 * hard RPC tasks.
                 */
                if (!host->h_server)
                        args.flags |= RPC_CLNT_CREATE_HARDRTRY;

                clnt = rpc_create(&args);
                if (!IS_ERR(clnt))
                        host->h_rpcclnt = clnt;
                else {
                        printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
                        clnt = NULL;
                }
        }

        mutex_unlock(&host->h_mutex);
        return clnt;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
        dprintk("lockd: rebind host %s\n", host->h_name);
        if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
                rpc_force_rebind(host->h_rpcclnt);
                host->h_nextrebind = jiffies + NLM_HOST_REBIND;
        }
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
        if (host) {
                dprintk("lockd: get host %s\n", host->h_name);
                atomic_inc(&host->h_count);
                host->h_expires = jiffies + NLM_HOST_EXPIRE;
        }
        return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
        if (host != NULL) {
                dprintk("lockd: release host %s\n", host->h_name);
                BUG_ON(atomic_read(&host->h_count) < 0);
                if (atomic_dec_and_test(&host->h_count)) {
                        BUG_ON(!list_empty(&host->h_lockowners));
                        BUG_ON(!list_empty(&host->h_granted));
                        BUG_ON(!list_empty(&host->h_reclaim));
                }
        }
}

/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin,
                                const char *hostname,
                                unsigned int hostname_len,
                                u32 new_state)
{
        struct hlist_head *chain;
        struct hlist_node *pos;
        struct nsm_handle *nsm;
        struct nlm_host *host;

        /* Find the NSM handle for this peer */
        nsm = __nsm_find(sin, hostname, hostname_len, 0);
        if (nsm == NULL) {
                dprintk("lockd: never saw rebooted peer '%.*s' before\n",
                                hostname_len, hostname);
                return;
        }

        dprintk("lockd: nlm_host_rebooted(%.*s, %s)\n",
                        hostname_len, hostname, nsm->sm_addrbuf);

        /* When reclaiming locks on this peer, make sure that
         * we set up a new notification */
        nsm->sm_monitored = 0;

        /* Mark all hosts tied to this NSM state as having rebooted.
         * We run the loop repeatedly, because we drop the host table
         * lock for this.
         * To avoid processing a host several times, we match the nsmstate.
         */
again:  mutex_lock(&nlm_host_mutex);
        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry(host, pos, chain, h_hash) {
                        if (host->h_nsmhandle == nsm
                         && host->h_nsmstate != new_state) {
                                host->h_nsmstate = new_state;
                                host->h_state++;

                                nlm_get_host(host);
                                mutex_unlock(&nlm_host_mutex);

                                if (host->h_server) {
                                        /* We're server for this guy, just ditch
                                         * all the locks he held. */
                                        nlmsvc_free_host_resources(host);
                                } else {
                                        /* He's the server, initiate lock recovery. */
                                        nlmclnt_recovery(host);
                                }

                                nlm_release_host(host);
                                goto again;
                        }
                }
        }

        mutex_unlock(&nlm_host_mutex);
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
        struct hlist_head *chain;
        struct hlist_node *pos;
        struct nlm_host *host;

        dprintk("lockd: shutting down host module\n");
        mutex_lock(&nlm_host_mutex);

        /* First, make all hosts eligible for gc */
        dprintk("lockd: nuking all hosts...\n");
        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry(host, pos, chain, h_hash) {
                        host->h_expires = jiffies - 1;
                        if (host->h_rpcclnt) {
                                rpc_shutdown_client(host->h_rpcclnt);
                                host->h_rpcclnt = NULL;
                        }
                }
        }

        /* Then, perform a garbage collection pass */
        nlm_gc_hosts();
        mutex_unlock(&nlm_host_mutex);

        /* complain if any hosts are left */
        if (nrhosts) {
                printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
                dprintk("lockd: %d hosts left:\n", nrhosts);
                for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                        hlist_for_each_entry(host, pos, chain, h_hash) {
                                dprintk("       %s (cnt %d use %d exp %ld)\n",
                                        host->h_name, atomic_read(&host->h_count),
                                        host->h_inuse, host->h_expires);
                        }
                }
        }
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
        struct hlist_head *chain;
        struct hlist_node *pos, *next;
        struct nlm_host *host;

        dprintk("lockd: host garbage collection\n");
        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry(host, pos, chain, h_hash)
                        host->h_inuse = 0;
        }

        /* Mark all hosts that hold locks, blocks or shares */
        nlmsvc_mark_resources();

        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
                        if (atomic_read(&host->h_count) || host->h_inuse
                         || time_before(jiffies, host->h_expires)) {
                                dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
                                        host->h_name, atomic_read(&host->h_count),
                                        host->h_inuse, host->h_expires);
                                continue;
                        }
                        dprintk("lockd: delete host %s\n", host->h_name);
                        hlist_del_init(&host->h_hash);

                        nlm_destroy_host(host);
                        nrhosts--;
                }
        }

        next_gc = jiffies + NLM_HOST_COLLECT;
}


/*
 * Manage NSM handles
 */
static LIST_HEAD(nsm_handles);
static DEFINE_SPINLOCK(nsm_lock);

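/*
 * Look up an nsm_handle for the given peer.  The lookup matches on
 * hostname when nsm_use_hostnames is set, otherwise on address.  If no
 * handle exists and @create is set, a new one is allocated outside
 * nsm_lock and the search is retried; should another thread have
 * inserted a matching handle in the meantime, the new allocation is
 * freed and the existing handle is reused.
 */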
static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
                const char *hostname, unsigned int hostname_len,
                int create)
{
        struct nsm_handle *nsm = NULL;
        struct nsm_handle *pos;

        if (!sin)
                return NULL;

        if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
                if (printk_ratelimit()) {
                        printk(KERN_WARNING "Invalid hostname \"%.*s\" "
                                            "in NFS lock request\n",
                                hostname_len, hostname);
                }
                return NULL;
        }

retry:
        spin_lock(&nsm_lock);
        list_for_each_entry(pos, &nsm_handles, sm_link) {

                if (hostname && nsm_use_hostnames) {
                        if (strlen(pos->sm_name) != hostname_len
                         || memcmp(pos->sm_name, hostname, hostname_len))
                                continue;
                } else if (!nlm_cmp_addr(&pos->sm_addr, sin))
                        continue;
                atomic_inc(&pos->sm_count);
                kfree(nsm);
                nsm = pos;
                goto found;
        }
        if (nsm) {
                list_add(&nsm->sm_link, &nsm_handles);
                goto found;
        }
        spin_unlock(&nsm_lock);

        if (!create)
                return NULL;

        nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
        if (nsm == NULL)
                return NULL;

        nsm->sm_addr = *sin;
        nsm->sm_name = (char *) (nsm + 1);
        memcpy(nsm->sm_name, hostname, hostname_len);
        nsm->sm_name[hostname_len] = '\0';
        nlm_display_address((struct sockaddr *)&nsm->sm_addr,
                                nsm->sm_addrbuf, sizeof(nsm->sm_addrbuf));
        atomic_set(&nsm->sm_count, 1);
        goto retry;

found:
        spin_unlock(&nsm_lock);
        return nsm;
}

static struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname,
         unsigned int hostname_len)
{
        return __nsm_find(sin, hostname, hostname_len, 1);
}

/*
 * Release an NSM handle
 */
void
nsm_release(struct nsm_handle *nsm)
{
        if (!nsm)
                return;
        if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
                list_del(&nsm->sm_link);
                spin_unlock(&nsm_lock);
                kfree(nsm);
        }
}