/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted;
 *		if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	xpt_ref contains a bias of '1' until SK_DEAD is set.
 *		so when xpt_ref hits zero, we know the transport is dead
 *		and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held, which ensures
 *		no other thread will be using the socket or will try to
 *		set SK_DEAD.
 */
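
/*
 * For example, an illustrative sketch of the rules above (it mirrors
 * what the sk_data_ready callbacks later in this file do; the
 * do_read() step is hypothetical):
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);	(set the flag first...)
 *	svc_sock_enqueue(svsk);			(...then enqueue)
 *
 * and on the consuming side:
 *
 *	clear_bit(SK_DATA, &svsk->sk_flags);
 *	len = do_read(svsk);
 *	if (len > 0)
 *		set_bit(SK_DATA, &svsk->sk_flags);
 */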

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
					  struct sockaddr *, int, int);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif

static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
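
/*
 * Example use (illustrative; svc_sendto() below does exactly this in
 * its dprintk):
 *
 *	char buf[RPC_MAX_ADDRBUFLEN];
 *	dprintk("svc: request from %s\n",
 *		svc_print_addr(rqstp, buf, sizeof(buf)));
 */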

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	/* Handle pending connection */
	if (test_bit(SK_CONN, &svsk->sk_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(SK_CLOSE, &svsk->sk_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!svsk->sk_xprt.xpt_ops->xpo_has_wspace(&svsk->sk_xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, socket %p not enqueued\n", svsk);
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}

/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));

	return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
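
/*
 * Worked example (illustrative, with made-up numbers): a thread picks
 * up a request and reserves rq_reserved = sv_max_mesg (say 17000)
 * bytes in sk_reserved.  Once the reply head holds only 200 bytes and
 * svc_sock_release() calls svc_reserve(rqstp, 0), space becomes
 * 200 + 0 = 200 < 17000, so sk_reserved drops by 16800 and the socket
 * is re-enqueued in case it was blocked on xpo_has_wspace().
 */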

static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_xprt_put(&svsk->sk_xprt);
}

/*
 * External function to wake up a server waiting for data.
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}

union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
				       &rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
}

/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}

/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
			     msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
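
/*
 * Example call (illustrative; this is the sizing policy the recvfrom
 * routines below apply when SK_CHNGBUF is set): room for one
 * max-sized message per server thread, plus a little slack:
 *
 *	svc_sock_setbufsize(svsk->sk_sock,
 *			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
 *			    (serv->sv_nrthreads+3) * serv->sv_max_mesg);
 */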

/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}

static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
		}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
		}
	}
}

/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(SK_DATA, &svsk->sk_flags);
		}
		svc_sock_received(svsk);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len  = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_server;
	unsigned long required;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	if (required*2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}

static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
{
	BUG();
	return NULL;
}

static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_UDP, sa, salen, flags);
}

static struct svc_xprt_ops svc_udp_ops = {
	.xpo_create = svc_udp_create,
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_udp_prep_reply_hdr,
	.xpo_has_wspace = svc_udp_has_wspace,
	.xpo_accept = svc_udp_accept,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};

static void
svc_udp_init(struct svc_sock *svsk)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets become ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Accept a TCP connection
 */
static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return NULL;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return NULL;
	}

	set_bit(SK_CONN, &svsk->sk_flags);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	svc_sock_received(newsvsk);

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return &newsvsk->sk_xprt;

failed:
	sock_release(newsock);
	return NULL;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_xprt_ctxt = NULL;
	rqstp->rq_prot = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	set_bit(SK_CLOSE, &svsk->sk_flags);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		set_bit(SK_CLOSE, &svsk->sk_flags);
	}

	return -EAGAIN;
}

/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
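
/*
 * Illustrative helper (not used by the code in this file): the record
 * marker built inline by svc_tcp_sendto() above, and parsed by
 * svc_tcp_recvfrom(), is a 32-bit big-endian word whose top bit marks
 * the last fragment and whose low 31 bits carry the fragment length.
 */
static inline __be32 svc_tcp_example_marker(unsigned int reclen)
{
	/* 0x80000000 = "last fragment"; low 31 bits = length */
	return htonl(0x80000000 | (reclen & 0x7fffffff));
}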

/*
 * Setup response header. TCP has a 4B record length field.
 */
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];

	/* tcp needs a space for the record length... */
	svc_putnl(resv, 0);
}

static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_server;
	int required;
	int wspace;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	wspace = sk_stream_wspace(svsk->sk_sk);

	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
		return 0;
	if (required * 2 > wspace)
		return 0;

	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}

static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_TCP, sa, salen, flags);
}

static struct svc_xprt_ops svc_tcp_ops = {
	.xpo_create = svc_tcp_create,
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
	.xpo_has_wspace = svc_tcp_has_wspace,
	.xpo_accept = svc_tcp_accept,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}

static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		set_bit(SK_LISTENER, &svsk->sk_flags);
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}

void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}

/*
 * Make sure that we don't have too many active connections.  If we
 * have, something must be dropped.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. An NFS client does one reconnect every 15 seconds; an
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			svc_xprt_get(&svsk->sk_xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_xprt_put(&svsk->sk_xprt);
		}
	}
}
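
/*
 * Worked example (illustrative): with 8 server threads the limit
 * above is (8 + 3) * 20 = 220 temporary sockets; the connection that
 * pushes sv_tmpcnt past that causes the oldest temporary socket to be
 * marked SK_CLOSE and queued for a thread to clean up.
 */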

/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i=0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		dprintk("svc_recv: found SK_CLOSE\n");
		svc_delete_socket(svsk);
	} else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
		struct svc_xprt *newxpt;
		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(svsk->sk_server);
		}
		svc_sock_received(svsk);
	} else {
		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
			rqstp, pool->sp_id, svsk,
			atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
		len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
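
/*
 * Illustrative sketch of how a service thread (nfsd, lockd, ...) is
 * expected to drive this: call svc_recv() in a loop and hand anything
 * it returns to the RPC dispatcher, which sends the reply via
 * svc_send().  Details other than the svc_recv() return values are
 * hypothetical:
 *
 *	for (;;) {
 *		int err = svc_recv(rqstp, timeout);
 *		if (err == -EINTR)
 *			break;			(shutting down)
 *		if (err == -EAGAIN)
 *			continue;		(nothing complete yet)
 *		svc_process(rqstp);		(dispatch; replies for us)
 *	}
 */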

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
		       __FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_xprt.xpt_ref.refcount) > 1
		    || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		svc_xprt_get(&svsk->sk_xprt);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_xprt_put(&svsk->sk_xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
					 struct socket *sock,
					 int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
				    (unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
				  jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	return svsk;
}

int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
		 so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);

/*
 * Create socket for RPC service.
 */
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
					  int protocol,
					  struct sockaddr *sin, int len,
					  int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
		serv->sv_program->pg_name, protocol,
		__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
		       "sockets supported\n");
		return ERR_PTR(-EINVAL);
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return ERR_PTR(error);

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return (struct svc_xprt *)svsk;
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return ERR_PTR(error);
}

/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	dprintk("svc: svc_sock_detach(%p)\n", svsk);

	/* put back the old socket callbacks */
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;
}

/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	dprintk("svc: svc_sock_free(%p)\n", svsk);

	if (svsk->sk_info_authunix != NULL)
		svcauth_unix_info_release(svsk->sk_info_authunix);
	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}

/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;

	svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
		svc_xprt_put(&svsk->sk_xprt);
	}

	spin_unlock_bh(&serv->sv_lock);
}

static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(&svsk->sk_xprt);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_xprt_put(&svsk->sk_xprt);
}

void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Waiting to be processed, but no threads left,
		 * so just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(SK_BUSY, &svsk->sk_flags);
	}
	svc_close_socket(svsk);
}

/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_xprt_put(&dr->svsk->sk_xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_xprt_put(&svsk->sk_xprt);
}

static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return dr->argslen<<2;
}

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}
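
/*
 * Illustrative summary of the deferral lifecycle implemented above:
 *
 *  1. A handler cannot make progress (e.g. a cache upcall is pending),
 *     so the cache code calls rqstp->rq_chandle.defer, i.e. svc_defer(),
 *     which copies the request into a svc_deferred_req and takes a
 *     reference on the transport.
 *  2. When the upcall completes, the cache code calls dr->handle.revisit,
 *     i.e. svc_revisit(), which queues dr on svsk->sk_deferred, sets
 *     SK_DEFERRED and enqueues the socket so a thread wakes up.
 *  3. The next xpo_recvfrom on that socket finds the entry via
 *     svc_deferred_dequeue() and replays it through svc_deferred_recv()
 *     instead of reading from the network.
 */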