Source: linux-2.6-omap-h63xx.git — net/9p/trans_fd.c
Commit: "9p: apply common tagpool handling to trans_fd"
1 /*
2  * linux/fs/9p/trans_fd.c
3  *
4  * Fd transport layer.  Includes deprecated socket layer.
5  *
6  *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
7  *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
8  *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
9  *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License version 2
13  *  as published by the Free Software Foundation.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to:
22  *  Free Software Foundation
23  *  51 Franklin Street, Fifth Floor
24  *  Boston, MA  02111-1301  USA
25  *
26  */
27
28 #include <linux/in.h>
29 #include <linux/module.h>
30 #include <linux/net.h>
31 #include <linux/ipv6.h>
32 #include <linux/kthread.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/un.h>
36 #include <linux/uaccess.h>
37 #include <linux/inet.h>
38 #include <linux/idr.h>
39 #include <linux/file.h>
40 #include <linux/parser.h>
41 #include <net/9p/9p.h>
42 #include <net/9p/client.h>
43 #include <net/9p/transport.h>
44
45 #define P9_PORT 564
46 #define MAX_SOCK_BUF (64*1024)
47 #define ERREQFLUSH      1
48 #define MAXPOLLWADDR    2
49
/**
 * struct p9_fd_opts - per-transport mount options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: TCP port to connect to (trans=tcp); defaults to P9_PORT
 *
 * Filled in by parse_opts() from the mount option string.
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
};
63
/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to (may equal @rd, e.g. for sockets)
 * @conn: connection (mux) state reference
 *
 * Hung off client->trans for the lifetime of the session.
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn *conn;
};
77
78 /*
79   * Option Parsing (code inspired by NFS code)
80   *  - a little lazy - parse all fd-transport options
81   */
82
/* Token ids for the mount-option parser below. */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};
87
/* Pattern table consumed by match_token() in parse_opts(). */
static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_err, NULL},
};
94
/* Bit flags stored in p9_conn->wsched (read/write scheduling state). */
enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
101
/* Per-request flush state, stored in p9_req->flush. */
enum {
	None,		/* request not being flushed */
	Flushing,	/* Tflush sent, awaiting Rflush */
	Flushed,	/* flush completed; original reply (if any) discarded */
};
107
/**
 * struct p9_req - fd mux encoding of an rpc transaction
 * @lock: protects @flush (and the rcall/err check in p9_mux_flush_request)
 * @tag: numeric tag for rpc transaction
 * @tcall: request &p9_fcall structure
 * @rcall: response &p9_fcall structure
 * @err: error state
 * @flush: flush state of this RPC (None/Flushing/Flushed)
 * @req_list: list link onto the mux's req_list or unsent_req_list
 * @m: connection this request was issued on
 * @wqueue: wait queue that client is blocked on for this rpc
 *
 */

struct p9_req {
	spinlock_t lock;
	int tag;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	int err;
	int flush;
	struct list_head req_list;
	struct p9_conn *m;
	wait_queue_head_t wqueue;
};
133
/**
 * struct p9_poll_wait - one registered poll wait-queue entry for a mux
 * @conn: back-pointer to the owning connection
 * @wait: wait queue entry whose wake function is p9_pollwake()
 * @wait_addr: head the entry is queued on; NULL if slot unused,
 *             ERR_PTR(-EIO) if registration failed (see p9_pollwait())
 */
struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};
139
/**
 * struct p9_conn - fd mux connection state information
 * @lock: protects @req_list, @unsent_req_list and the write frame state
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rcall: current response &p9_fcall structure
 * @rpos: read position in current frame
 * @rbuf: current read buffer (points into @rcall's allocation)
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: link onto the global p9_poll_pending_list
 * @poll_wait: array of wait_q entries registered via p9_pollwait()
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: scheduling bit flags (Rworksched/Rpending/Wworksched/Wpending)
 *
 */

struct p9_conn {
	spinlock_t lock; /* protect lock structure */
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};
183
/* Protects p9_poll_pending_list and each conn's poll_pending_link. */
static DEFINE_SPINLOCK(p9_poll_lock);
/* Connections with pending poll events, serviced by the poller task. */
static LIST_HEAD(p9_poll_pending_list);
/* Workqueue that runs the per-connection read/write work items. */
static struct workqueue_struct *p9_mux_wq;
/* Single kernel thread woken by p9_pollwake() to run p9_poll_mux(). */
static struct task_struct *p9_poll_task;
188
189 static void p9_mux_poll_stop(struct p9_conn *m)
190 {
191         unsigned long flags;
192         int i;
193
194         for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
195                 struct p9_poll_wait *pwait = &m->poll_wait[i];
196
197                 if (pwait->wait_addr) {
198                         remove_wait_queue(pwait->wait_addr, &pwait->wait);
199                         pwait->wait_addr = NULL;
200                 }
201         }
202
203         spin_lock_irqsave(&p9_poll_lock, flags);
204         list_del_init(&m->poll_pending_link);
205         spin_unlock_irqrestore(&p9_poll_lock, flags);
206 }
207
208 static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
209 {
210         if (req->tag != P9_NOTAG &&
211             p9_idpool_check(req->tag, m->client->tagpool))
212                 p9_idpool_put(req->tag, m->client->tagpool);
213         kfree(req);
214 }
215
216 static void p9_conn_rpc_cb(struct p9_req *req);
217
/*
 * p9_mux_flush_cb - completion callback for an internally generated Tflush.
 * @freq: the flush request that completed
 *
 * Locates the original request named by the Tflush's oldtag, marks it
 * Flushed and wakes its waiter, then frees the flush request itself
 * (it was created by p9_mux_flush_request and has no external owner).
 */
static void p9_mux_flush_cb(struct p9_req *freq)
{
	int tag;
	struct p9_conn *m = freq->m;
	struct p9_req *req, *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
		freq->tcall, freq->rcall, freq->err,
		freq->tcall->params.tflush.oldtag);

	/* Find and unlink the request this flush was cancelling. */
	spin_lock(&m->lock);
	tag = freq->tcall->params.tflush.oldtag;
	req = NULL;
	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
		if (rreq->tag == tag) {
			req = rreq;
			list_del(&req->req_list);
			break;
		}
	}
	spin_unlock(&m->lock);

	if (req) {
		/* req->lock protects the flush state transition. */
		spin_lock(&req->lock);
		req->flush = Flushed;
		spin_unlock(&req->lock);

		p9_conn_rpc_cb(req);
	}

	kfree(freq->tcall);
	kfree(freq->rcall);
	p9_mux_free_request(m, freq);
}
252
253 static void p9_conn_rpc_cb(struct p9_req *req)
254 {
255         P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);
256
257         if (req->tcall->id == P9_TFLUSH) { /* flush callback */
258                 P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
259                 p9_mux_flush_cb(req);
260         } else {                        /* normal wakeup path */
261                 P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
262                 if (req->flush != None && !req->err)
263                         req->err = -ERESTARTSYS;
264
265                 wake_up(&req->wqueue);
266         }
267 }
268
269 /**
270  * p9_conn_cancel - cancel all pending requests with error
271  * @m: mux data
272  * @err: error code
273  *
274  */
275
276 void p9_conn_cancel(struct p9_conn *m, int err)
277 {
278         struct p9_req *req, *rtmp;
279         LIST_HEAD(cancel_list);
280
281         P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
282         m->err = err;
283         spin_lock(&m->lock);
284         list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
285                 list_move(&req->req_list, &cancel_list);
286         }
287         list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
288                 list_move(&req->req_list, &cancel_list);
289         }
290         spin_unlock(&m->lock);
291
292         list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
293                 list_del(&req->req_list);
294                 if (!req->err)
295                         req->err = err;
296
297                 p9_conn_rpc_cb(req);
298         }
299 }
300
301 static void process_request(struct p9_conn *m, struct p9_req *req)
302 {
303         int ecode;
304         struct p9_str *ename;
305
306         if (!req->err && req->rcall->id == P9_RERROR) {
307                 ecode = req->rcall->params.rerror.errno;
308                 ename = &req->rcall->params.rerror.error;
309
310                 P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
311                                                                 ename->str);
312
313                 if (m->client->dotu)
314                         req->err = -ecode;
315
316                 if (!req->err) {
317                         req->err = p9_errstr2errno(ename->str, ename->len);
318
319                         /* string match failed */
320                         if (!req->err) {
321                                 PRINT_FCALL_ERROR("unknown error", req->rcall);
322                                 req->err = -ESERVERFAULT;
323                         }
324                 }
325         } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
326                 P9_DPRINTK(P9_DEBUG_ERROR,
327                                 "fcall mismatch: expected %d, got %d\n",
328                                 req->tcall->id + 1, req->rcall->id);
329                 if (!req->err)
330                         req->err = -EIO;
331         }
332 }
333
/*
 * p9_fd_poll - poll both underlying files of a connected client.
 * Returns the combined poll mask: POLLOUT comes from the write side,
 * everything else from the read side.  Despite the unsigned return
 * type, negative errnos are returned on failure; callers store the
 * result in a signed int and test for < 0.
 */
static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

	/* Separate write fd (trans=fd): merge its mask, keeping only its
	 * non-read bits and the read fd's non-write bits. */
	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}
365
/**
 * p9_fd_read- read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 * Returns bytes read or a negative errno.  Any failure other than
 * -ERESTARTSYS/-EAGAIN (and EOF) marks the client Disconnected.
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	/* The mux expects non-blocking fds; warn if we might block. */
	if (!(ts->rd->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

	/* NOTE(review): f_pos is passed by value and never advanced —
	 * presumably fine for sockets/pipes; verify for regular files. */
	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}
393
/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 * Reads as much as is available, then repeatedly peels complete 9P
 * frames off the front of the buffer (each frame starts with a 4-byte
 * little-endian size), matches each reply to its request by tag, and
 * completes the request.  Reschedules itself while replies remain
 * outstanding and data is still readable.
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req, *rptr, *rreq;
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);

	/* A failed connection never resumes reading. */
	if (m->err < 0)
		return;

	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	/* Lazily allocate the receive fcall; its data buffer lives in the
	 * same allocation, directly after the struct. */
	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->client->msize,
								GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
	}

	/* Clear Rpending before reading so a new poll event reschedules us. */
	clear_bit(Rpending, &m->wsched);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
						m->client->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
	/* Process every complete frame; the 4-byte size prefix must be in. */
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->client->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		/* Frame not fully received yet; wait for more data. */
		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
		if (err < 0)
			goto error;

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				m->client->dotu);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			/* Trailing bytes belong to the next frame: allocate
			 * a fresh buffer and move them to its front. */
			m->rcall = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
							rcall->id, rcall->tag);

		/* Match the reply tag against an outstanding request.  A
		 * request being flushed stays listed until its Rflush
		 * arrives (p9_mux_flush_cb unlinks it). */
		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				if (req->flush != Flushing)
					list_del(&req->req_list);
				break;
			}
		}
		spin_unlock(&m->lock);

		if (req) {
			req->rcall = rcall;
			process_request(m, req);

			if (req->flush != Flushing)
				p9_conn_rpc_cb(req);
		} else {
			/* No owner: drop the reply.  An unmatched Rflush is
			 * expected when the flushed request already
			 * completed, so only warn for other types. */
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
				  "unexpected response mux %p id %d tag %d\n",
				  m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	/* Reschedule ourselves while replies remain outstanding. */
	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}
538
/**
 * p9_fd_write - write to a socket
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 * Returns bytes written or a negative errno.  Any failure other than
 * -ERESTARTSYS/-EAGAIN marks the client Disconnected.
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	/* The mux expects non-blocking fds; warn if we might block. */
	if (!(ts->wr->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}
572
/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 * If no frame is in flight, dequeues the next unsent request and makes
 * its serialized tcall the current write frame, then writes as much of
 * the frame as the fd accepts.  Reschedules itself while more output is
 * pending and the fd is writable.
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	/* wsize == 0 means no frame in flight: start the next request. */
	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		/* Skip over requests already flushed before being sent.
		 * NOTE(review): if *every* queued request were flushed this
		 * would run list_entry on an empty list — presumably
		 * prevented elsewhere (flushed unsent requests are unlinked
		 * by p9_mux_flush_request); verify. */
		req = list_entry(m->unsent_req_list.next, struct p9_req,
			       req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		spin_unlock(&m->lock);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
								m->wsize);
	/* Clear Wpending before writing so a new poll event reschedules us. */
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		/* Zero-byte write: treat the peer as gone. */
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	/* Reschedule while more requests wait and the fd is writable. */
	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}
653
/*
 * p9_pollwake - wake callback installed on the fds' wait queues.
 * Queues the connection on the global pending list (if not already
 * there) and wakes the poller task via default_wake_function on a
 * dummy wait entry bound to p9_poll_task.
 */
static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;
	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	/* perform the default wake up operation */
	return default_wake_function(&dummy_wait, mode, sync, key);
}
670
671 /**
672  * p9_pollwait - add poll task to the wait queue
673  * @filp: file pointer being polled
674  * @wait_address: wait_q to block on
675  * @p: poll state
676  *
677  * called by files poll operation to add v9fs-poll task to files wait queue
678  */
679
680 static void
681 p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
682 {
683         struct p9_conn *m = container_of(p, struct p9_conn, pt);
684         struct p9_poll_wait *pwait = NULL;
685         int i;
686
687         for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
688                 if (m->poll_wait[i].wait_addr == NULL) {
689                         pwait = &m->poll_wait[i];
690                         break;
691                 }
692         }
693
694         if (!pwait) {
695                 P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
696                 return;
697         }
698
699         if (!wait_address) {
700                 P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
701                 pwait->wait_addr = ERR_PTR(-EIO);
702                 return;
703         }
704
705         pwait->conn = m;
706         pwait->wait_addr = wait_address;
707         init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
708         add_wait_queue(wait_address, &pwait->wait);
709 }
710
711 /**
712  * p9_conn_create - allocate and initialize the per-session mux data
713  * @client: client instance
714  *
715  * Note: Creates the polling task if this is the first session.
716  */
717
718 static struct p9_conn *p9_conn_create(struct p9_client *client)
719 {
720         int i, n;
721         struct p9_conn *m;
722
723         P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
724         m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
725         if (!m)
726                 return ERR_PTR(-ENOMEM);
727
728         spin_lock_init(&m->lock);
729         INIT_LIST_HEAD(&m->mux_list);
730         m->client = client;
731
732         INIT_LIST_HEAD(&m->req_list);
733         INIT_LIST_HEAD(&m->unsent_req_list);
734         INIT_WORK(&m->rq, p9_read_work);
735         INIT_WORK(&m->wq, p9_write_work);
736         INIT_LIST_HEAD(&m->poll_pending_link);
737         init_poll_funcptr(&m->pt, p9_pollwait);
738
739         n = p9_fd_poll(client, &m->pt);
740         if (n & POLLIN) {
741                 P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
742                 set_bit(Rpending, &m->wsched);
743         }
744
745         if (n & POLLOUT) {
746                 P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
747                 set_bit(Wpending, &m->wsched);
748         }
749
750         for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
751                 if (IS_ERR(m->poll_wait[i].wait_addr)) {
752                         p9_mux_poll_stop(m);
753                         kfree(m);
754                         /* return the error code */
755                         return (void *)m->poll_wait[i].wait_addr;
756                 }
757         }
758
759         return m;
760 }
761
762 /**
763  * p9_poll_mux - polls a mux and schedules read or write works if necessary
764  * @m: connection to poll
765  *
766  */
767
768 static void p9_poll_mux(struct p9_conn *m)
769 {
770         int n;
771
772         if (m->err < 0)
773                 return;
774
775         n = p9_fd_poll(m->client, NULL);
776         if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
777                 P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
778                 if (n >= 0)
779                         n = -ECONNRESET;
780                 p9_conn_cancel(m, n);
781         }
782
783         if (n & POLLIN) {
784                 set_bit(Rpending, &m->wsched);
785                 P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
786                 if (!test_and_set_bit(Rworksched, &m->wsched)) {
787                         P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
788                         queue_work(p9_mux_wq, &m->rq);
789                 }
790         }
791
792         if (n & POLLOUT) {
793                 set_bit(Wpending, &m->wsched);
794                 P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
795                 if ((m->wsize || !list_empty(&m->unsent_req_list))
796                     && !test_and_set_bit(Wworksched, &m->wsched)) {
797                         P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
798                         queue_work(p9_mux_wq, &m->wq);
799                 }
800         }
801 }
802
/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
 *
 */

static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
{
	int n;
	struct p9_req *req;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	/* Tversion always uses NOTAG; everything else gets a pool tag.
	 * NOTE(review): any p9_idpool_get failure is reported as -ENOMEM
	 * regardless of the actual cause. */
	n = P9_NOTAG;
	if (tc->id != P9_TVERSION) {
		n = p9_idpool_get(m->client->tagpool);
		if (n < 0) {
			kfree(req);
			return ERR_PTR(-ENOMEM);
		}
	}

	p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

		p9_printfcall(buf, sizeof(buf), tc, m->client->dotu);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

	spin_lock_init(&req->lock);
	req->m = m;
	init_waitqueue_head(&req->wqueue);
	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->flush = None;

	/* Queue for transmission; p9_write_work picks it up. */
	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	/* Kick the write side immediately if the fd is already writable. */
	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}
872
/*
 * p9_mux_flush_request - cancel an in-flight request.
 * @m: mux data
 * @req: request to cancel
 *
 * Returns 0 if the request was completed or removed before a Tflush
 * was needed (the caller need not wait), or 1 if a Tflush was issued
 * and the caller must wait for the flush to complete.
 */
static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
	struct p9_fcall *fc;
	struct p9_req *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
	spin_lock(&req->lock);
	if (req->rcall || req->err) {
		spin_unlock(&req->lock);
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

	req->flush = Flushing;
	spin_unlock(&req->lock);

	spin_lock(&m->lock);
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
			   "mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
			req->flush = Flushed;
			spin_unlock(&m->lock);
			p9_conn_rpc_cb(req);
			return 0;
		}
	}
	spin_unlock(&m->lock);

	/* Already on the wire: issue a Tflush and let p9_mux_flush_cb
	 * complete the original request when the Rflush arrives.
	 * NOTE(review): the p9_send_request() return is unchecked — on
	 * failure fc leaks and the caller waits on a flush that was
	 * never sent; verify against upstream fixes. */
	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
	p9_send_request(m, fc);
	return 1;
}
913
/**
 * p9_fd_rpc- sends 9P request and waits until a response is available.
 *      The function can be interrupted.
 * @client: client instance
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored (may be NULL,
 *      in which case the response is freed here)
 *
 * On interruption (-ERESTARTSYS) the request is flushed via Tflush and
 * the signal-pending state is restored before returning, so the caller
 * can be restarted cleanly.
 */

int
p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
{
	struct p9_trans_fd *p = client->trans;
	struct p9_conn *m = p->conn;
	int err, sigpending;
	unsigned long flags;
	struct p9_req *req;

	if (rc)
		*rc = NULL;

	/* Remember and clear a pending signal so the wait below is not
	 * immediately interrupted; restored via recalc_sigpending(). */
	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

	req = p9_send_request(m, tc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

	err = wait_event_interruptible(req->wqueue, req->rcall != NULL ||
								req->err < 0);
	if (req->err < 0)
		err = req->err;

	/* Interrupted while the connection is still healthy: flush the
	 * request and wait (uninterruptibly, in effect) for the flush. */
	if (err == -ERESTARTSYS && client->status == Connected
							&& m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
				err = wait_event_interruptible(req->wqueue,
					req->rcall || req->err);
			} while (!req->rcall && !req->err &&
					err == -ERESTARTSYS &&
					client->status == Connected && !m->err);

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	/* Hand the reply to the caller or drop it; free the request. */
	if (rc)
		*rc = req->rcall;
	else
		kfree(req->rcall);

	p9_mux_free_request(m, req);
	if (err > 0)
		err = -EIO;

	return err;
}
987
988 /**
989  * parse_options - parse mount options into session structure
990  * @options: options string passed from mount
991  * @opts: transport-specific structure to parse options into
992  *
993  * Returns 0 upon success, -ERRNO upon failure
994  */
995
996 static int parse_opts(char *params, struct p9_fd_opts *opts)
997 {
998         char *p;
999         substring_t args[MAX_OPT_ARGS];
1000         int option;
1001         char *options;
1002         int ret;
1003
1004         opts->port = P9_PORT;
1005         opts->rfd = ~0;
1006         opts->wfd = ~0;
1007
1008         if (!params)
1009                 return 0;
1010
1011         options = kstrdup(params, GFP_KERNEL);
1012         if (!options) {
1013                 P9_DPRINTK(P9_DEBUG_ERROR,
1014                                 "failed to allocate copy of option string\n");
1015                 return -ENOMEM;
1016         }
1017
1018         while ((p = strsep(&options, ",")) != NULL) {
1019                 int token;
1020                 int r;
1021                 if (!*p)
1022                         continue;
1023                 token = match_token(p, tokens, args);
1024                 r = match_int(&args[0], &option);
1025                 if (r < 0) {
1026                         P9_DPRINTK(P9_DEBUG_ERROR,
1027                          "integer field, but no integer?\n");
1028                         ret = r;
1029                         continue;
1030                 }
1031                 switch (token) {
1032                 case Opt_port:
1033                         opts->port = option;
1034                         break;
1035                 case Opt_rfdno:
1036                         opts->rfd = option;
1037                         break;
1038                 case Opt_wfdno:
1039                         opts->wfd = option;
1040                         break;
1041                 default:
1042                         continue;
1043                 }
1044         }
1045         kfree(options);
1046         return 0;
1047 }
1048
1049 static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
1050 {
1051         struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
1052                                            GFP_KERNEL);
1053         if (!ts)
1054                 return -ENOMEM;
1055
1056         ts->rd = fget(rfd);
1057         ts->wr = fget(wfd);
1058         if (!ts->rd || !ts->wr) {
1059                 if (ts->rd)
1060                         fput(ts->rd);
1061                 if (ts->wr)
1062                         fput(ts->wr);
1063                 kfree(ts);
1064                 return -EIO;
1065         }
1066
1067         client->trans = ts;
1068         client->status = Connected;
1069
1070         return 0;
1071 }
1072
/**
 * p9_socket_open - wrap a connected socket as an fd transport
 * @client: client instance
 * @csocket: connected socket to attach
 *
 * Maps the socket to a file descriptor and hands that single descriptor
 * to p9_fd_open() for both reading and writing.  On success the read
 * side is made non-blocking.  On p9_fd_open() failure the socket
 * reference taken by the fd mapping is dropped here via sockfd_put().
 */
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	int fd, ret;

	/* avoid recursing into filesystem I/O during memory reclaim */
	csocket->sk->sk_allocation = GFP_NOIO;
	fd = sock_map_fd(csocket, 0);
	if (fd < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
		return fd;
	}

	ret = p9_fd_open(client, fd, fd);
	if (ret < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
		sockfd_put(csocket);
		return ret;
	}

	/* reads are driven by the poll machinery; never block in them */
	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;

	return 0;
}
1095
/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 * Detaches the mux from the poller, flushes its read/write work items,
 * then cancels outstanding requests with -ECONNRESET before freeing.
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);

	/* stop event sources before tearing down the request state */
	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
	kfree(m);
}
1116
1117 /**
1118  * p9_fd_close - shutdown file descriptor transport
1119  * @client: client instance
1120  *
1121  */
1122
1123 static void p9_fd_close(struct p9_client *client)
1124 {
1125         struct p9_trans_fd *ts;
1126
1127         if (!client)
1128                 return;
1129
1130         ts = client->trans;
1131         if (!ts)
1132                 return;
1133
1134         client->status = Disconnected;
1135
1136         p9_conn_destroy(ts->conn);
1137
1138         if (ts->rd)
1139                 fput(ts->rd);
1140         if (ts->wr)
1141                 fput(ts->wr);
1142
1143         kfree(ts);
1144 }
1145
/*
 * valid_ipaddr4 - sanity-check a dotted-quad IPv4 address string
 * (stolen from NFS - maybe should be made a generic function?)
 *
 * Returns 0 when @buf parses as "a.b.c.d" with every octet in 0..255,
 * -EINVAL otherwise.  Note %d accepts a sign, so negative components
 * (e.g. "-1.2.3.4") must be rejected explicitly; the original "> 255"
 * test let them through.
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] < 0 || in[count] > 255)
			return -EINVAL;
	}
	return 0;
}
1162
1163 static int
1164 p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
1165 {
1166         int err;
1167         struct socket *csocket;
1168         struct sockaddr_in sin_server;
1169         struct p9_fd_opts opts;
1170         struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
1171
1172         err = parse_opts(args, &opts);
1173         if (err < 0)
1174                 return err;
1175
1176         if (valid_ipaddr4(addr) < 0)
1177                 return -EINVAL;
1178
1179         csocket = NULL;
1180
1181         sin_server.sin_family = AF_INET;
1182         sin_server.sin_addr.s_addr = in_aton(addr);
1183         sin_server.sin_port = htons(opts.port);
1184         sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
1185
1186         if (!csocket) {
1187                 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
1188                 err = -EIO;
1189                 goto error;
1190         }
1191
1192         err = csocket->ops->connect(csocket,
1193                                     (struct sockaddr *)&sin_server,
1194                                     sizeof(struct sockaddr_in), 0);
1195         if (err < 0) {
1196                 P9_EPRINTK(KERN_ERR,
1197                         "p9_trans_tcp: problem connecting socket to %s\n",
1198                         addr);
1199                 goto error;
1200         }
1201
1202         err = p9_socket_open(client, csocket);
1203         if (err < 0)
1204                 goto error;
1205
1206         p = (struct p9_trans_fd *) client->trans;
1207         p->conn = p9_conn_create(client);
1208         if (IS_ERR(p->conn)) {
1209                 err = PTR_ERR(p->conn);
1210                 p->conn = NULL;
1211                 goto error;
1212         }
1213
1214         return 0;
1215
1216 error:
1217         if (csocket)
1218                 sock_release(csocket);
1219
1220         kfree(p);
1221
1222         return err;
1223 }
1224
1225 static int
1226 p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
1227 {
1228         int err;
1229         struct socket *csocket;
1230         struct sockaddr_un sun_server;
1231         struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
1232
1233         csocket = NULL;
1234
1235         if (strlen(addr) > UNIX_PATH_MAX) {
1236                 P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
1237                         addr);
1238                 err = -ENAMETOOLONG;
1239                 goto error;
1240         }
1241
1242         sun_server.sun_family = PF_UNIX;
1243         strcpy(sun_server.sun_path, addr);
1244         sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
1245         err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
1246                         sizeof(struct sockaddr_un) - 1, 0);
1247         if (err < 0) {
1248                 P9_EPRINTK(KERN_ERR,
1249                         "p9_trans_unix: problem connecting socket: %s: %d\n",
1250                         addr, err);
1251                 goto error;
1252         }
1253
1254         err = p9_socket_open(client, csocket);
1255         if (err < 0)
1256                 goto error;
1257
1258         p = (struct p9_trans_fd *) client->trans;
1259         p->conn = p9_conn_create(client);
1260         if (IS_ERR(p->conn)) {
1261                 err = PTR_ERR(p->conn);
1262                 p->conn = NULL;
1263                 goto error;
1264         }
1265
1266         return 0;
1267
1268 error:
1269         if (csocket)
1270                 sock_release(csocket);
1271
1272         kfree(p);
1273         return err;
1274 }
1275
1276 static int
1277 p9_fd_create(struct p9_client *client, const char *addr, char *args)
1278 {
1279         int err;
1280         struct p9_fd_opts opts;
1281         struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */
1282
1283         parse_opts(args, &opts);
1284
1285         if (opts.rfd == ~0 || opts.wfd == ~0) {
1286                 printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
1287                 return -ENOPROTOOPT;
1288         }
1289
1290         err = p9_fd_open(client, opts.rfd, opts.wfd);
1291         if (err < 0)
1292                 goto error;
1293
1294         p = (struct p9_trans_fd *) client->trans;
1295         p->conn = p9_conn_create(client);
1296         if (IS_ERR(p->conn)) {
1297                 err = PTR_ERR(p->conn);
1298                 p->conn = NULL;
1299                 goto error;
1300         }
1301
1302         return 0;
1303
1304 error:
1305         kfree(p);
1306         return err;
1307 }
1308
/* "trans=tcp": socket transport over TCP/IPv4 */
static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 1,		/* default transport when none is specified */
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};
1318
/* "trans=unix": socket transport over a unix-domain socket */
static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};
1328
/* "trans=fd": transport over caller-supplied rfdno=/wfdno= descriptors */
static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};
1338
/**
 * p9_poll_proc - poll worker thread
 * @a: thread state and arguments (unused)
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static int p9_poll_proc(void *a)
{
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_MUX, "start %p\n", current);
 repeat:
	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		/* detach first so the conn can be re-queued concurrently */
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		/* poll with the lock dropped; this may schedule rq/wq work */
		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	/* Standard kthread sleep pattern: set TASK_INTERRUPTIBLE before
	 * re-checking the list so a wake-up arriving between the check
	 * and schedule() is not lost.  NOTE(review): the list_empty()
	 * test is done without p9_poll_lock — relies on the enqueue path
	 * waking this task after list_add; confirm against that code. */
	set_current_state(TASK_INTERRUPTIBLE);
	if (list_empty(&p9_poll_pending_list)) {
		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		goto repeat;

	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
	return 0;
}
1381
1382 int p9_trans_fd_init(void)
1383 {
1384         p9_mux_wq = create_workqueue("v9fs");
1385         if (!p9_mux_wq) {
1386                 printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
1387                 return -ENOMEM;
1388         }
1389
1390         p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
1391         if (IS_ERR(p9_poll_task)) {
1392                 destroy_workqueue(p9_mux_wq);
1393                 printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
1394                 return PTR_ERR(p9_poll_task);
1395         }
1396
1397         v9fs_register_trans(&p9_tcp_trans);
1398         v9fs_register_trans(&p9_unix_trans);
1399         v9fs_register_trans(&p9_fd_trans);
1400
1401         return 0;
1402 }
1403
/**
 * p9_trans_fd_exit - unregister transports and stop the poll machinery
 *
 * Stops the poll kthread first so no further mux work is generated,
 * then unregisters the three fd-based transports and destroys the
 * shared workqueue.
 */
void p9_trans_fd_exit(void)
{
	kthread_stop(p9_poll_task);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);

	destroy_workqueue(p9_mux_wq);
}