if (new)
                nfs_free_client(new);
 
-       error = wait_event_interruptible(nfs_client_active_wq,
+       error = wait_event_killable(nfs_client_active_wq,
                                clp->cl_cons_state != NFS_CS_INITING);
        if (error < 0) {
                nfs_put_client(clp);
        if (server->flags & NFS_MOUNT_SOFT)
                server->client->cl_softrtry = 1;
 
-       server->client->cl_intr = 0;
-       if (server->flags & NFS4_MOUNT_INTR)
-               server->client->cl_intr = 1;
-
        return 0;
 }
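
Every hunk in this series follows the same shape: an interruptible sleep becomes a killable one, so only fatal signals (those that will terminate the process anyway, chiefly SIGKILL) can break the wait, and the per-task sigmask juggling disappears. A minimal sketch of the new primitive's semantics, with a hypothetical wait queue and flag:

	/* Sketch only: `frob_wq` and `frob_done` are made up for
	 * illustration.  wait_event_killable() returns 0 once the
	 * condition holds, or -ERESTARTSYS if a fatal signal arrives
	 * first; ordinary signals no longer wake the sleeper at all. */
	error = wait_event_killable(frob_wq, frob_done != 0);
	if (error < 0)
		return error;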
 
 
        if (dreq->iocb)
                goto out;
 
-       result = wait_for_completion_interruptible(&dreq->completion);
+       result = wait_for_completion_killable(&dreq->completion);
 
        if (!result)
                result = dreq->error;
                               unsigned long nr_segs, loff_t pos)
 {
        ssize_t result = 0;
-       sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
-       struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;
 
        dreq = nfs_direct_req_alloc();
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
-       rpc_clnt_sigunmask(clnt, &oldset);
        nfs_direct_req_release(dreq);
 
        return result;
                                size_t count)
 {
        ssize_t result = 0;
-       sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
-       struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;
        size_t wsize = NFS_SERVER(inode)->wsize;
        int sync = 0;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
        if (!result)
                result = nfs_direct_wait(dreq);
-       rpc_clnt_sigunmask(clnt, &oldset);
        nfs_direct_req_release(dreq);
 
        return result;
 
  */
 static int nfs_wait_on_inode(struct inode *inode)
 {
-       struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
-       sigset_t oldmask;
        int error;
 
-       rpc_clnt_sigmask(clnt, &oldmask);
        error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
-                                       nfs_wait_schedule, TASK_INTERRUPTIBLE);
-       rpc_clnt_sigunmask(clnt, &oldmask);
+                                       nfs_wait_schedule, TASK_KILLABLE);
 
        return error;
 }
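
Note that wait_on_bit_lock() is the locking variant: on success it has atomically set NFS_INO_REVALIDATING, so whoever finishes the revalidation must clear the bit and wake the other sleepers. The unlock side looks roughly like this (a sketch of the usual kernel idiom of this era, not part of this patch):

	clear_bit(NFS_INO_REVALIDATING, &nfsi->flags);
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(&nfsi->flags, NFS_INO_REVALIDATING);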
 
                .program        = &mnt_program,
                .version        = version,
                .authflavor     = RPC_AUTH_UNIX,
-               .flags          = RPC_CLNT_CREATE_INTR,
+               .flags          = 0,
        };
        struct rpc_clnt         *mnt_clnt;
        int                     status;
 
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
-       sigset_t oldset;
        int res;
-       rpc_clnt_sigmask(clnt, &oldset);
        do {
                res = rpc_call_sync(clnt, msg, flags);
                if (res != -EJUKEBOX)
                        break;
-               schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
+               schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
                res = -ERESTARTSYS;
-       } while (!signalled());
-       rpc_clnt_sigunmask(clnt, &oldset);
+       } while (!fatal_signal_pending(current));
        return res;
 }
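
-EJUKEBOX is NFSv3's "try again later" error: the server needs time to stage the data (for example from offline storage), so the client sleeps and retries. schedule_timeout_killable() makes that sleep breakable only by fatal signals, returning any unconsumed jiffies if woken early. A sketch of the primitive in isolation:

	/* Sleep roughly one second in TASK_KILLABLE state.  Only a
	 * fatal signal cuts the sleep short, in which case part of
	 * the timeout is returned unconsumed. */
	long remaining = schedule_timeout_killable(HZ);
	if (remaining && fatal_signal_pending(current))
		return -ERESTARTSYS;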
 
 
 
 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 {
-       sigset_t oldset;
        int ret;
 
-       rpc_clnt_sigmask(task->tk_client, &oldset);
        ret = rpc_wait_for_completion_task(task);
-       rpc_clnt_sigunmask(task->tk_client, &oldset);
        return ret;
 }
 
        return 0;
 }
 
-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
 {
-       if (signal_pending(current))
+       if (fatal_signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
 
 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 {
-       sigset_t oldset;
        int res;
 
        might_sleep();
 
        rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
 
-       rpc_clnt_sigmask(clnt, &oldset);
        res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
-                       nfs4_wait_bit_interruptible,
-                       TASK_INTERRUPTIBLE);
-       rpc_clnt_sigunmask(clnt, &oldset);
+                       nfs4_wait_bit_killable, TASK_KILLABLE);
 
        rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
        return res;
 
 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 {
-       sigset_t oldset;
        int res = 0;
 
        might_sleep();
                *timeout = NFS4_POLL_RETRY_MIN;
        if (*timeout > NFS4_POLL_RETRY_MAX)
                *timeout = NFS4_POLL_RETRY_MAX;
-       rpc_clnt_sigmask(clnt, &oldset);
-       if (clnt->cl_intr) {
-               schedule_timeout_interruptible(*timeout);
-               if (signalled())
-                       res = -ERESTARTSYS;
-       } else
-               schedule_timeout_uninterruptible(*timeout);
-       rpc_clnt_sigunmask(clnt, &oldset);
+       schedule_timeout_killable(*timeout);
+       if (fatal_signal_pending(current))
+               res = -ERESTARTSYS;
        *timeout <<= 1;
        return res;
 }
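
nfs4_delay() implements exponential backoff for NFS4ERR_DELAY: each call sleeps for *timeout jiffies, clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX], then doubles the stored timeout for the next round. A caller-side sketch (do_operation() is hypothetical; compare the retry logic in nfs4_handle_exception()):

	long timeout = 0;	/* nfs4_delay() bumps 0 up to the minimum */
	int err;
	do {
		err = do_operation();	/* hypothetical NFSv4 call */
		if (err != -NFS4ERR_DELAY)
			break;
		err = nfs4_delay(server->client, &timeout);	/* sleeps, doubles timeout */
	} while (err == 0);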
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-       schedule_timeout_interruptible(timeout);
+       schedule_timeout_killable(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
 
                                nfs_data.flags &= ~NFS_MOUNT_SOFT;
                                break;
                        case Opt_intr:
-                               nfs_data.flags |= NFS_MOUNT_INTR;
-                               break;
                        case Opt_nointr:
-                               nfs_data.flags &= ~NFS_MOUNT_INTR;
                                break;
                        case Opt_posix:
                                nfs_data.flags |= NFS_MOUNT_POSIX;
 
                if (req != NULL)
                        break;
 
-               if (signalled() && (server->flags & NFS_MOUNT_INTR))
+               if (fatal_signal_pending(current))
                        return ERR_PTR(-ERESTARTSYS);
                yield();
        }
        kref_put(&req->wb_kref, nfs_free_request);
 }
 
-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
 {
        int ret = 0;
 
-       if (signal_pending(current))
+       if (fatal_signal_pending(current))
                ret = -ERESTARTSYS;
        else
                schedule();
  * nfs_wait_on_request - Wait for a request to complete.
  * @req: request to wait upon.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
  * The user is responsible for holding a count on the request.
  */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-       struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-       sigset_t oldmask;
        int ret = 0;
 
        if (!test_bit(PG_BUSY, &req->wb_flags))
                goto out;
-       /*
-        * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-        *       are not interrupted if intr flag is not set
-        */
-       rpc_clnt_sigmask(clnt, &oldmask);
        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-                       nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-       rpc_clnt_sigunmask(clnt, &oldmask);
+                       nfs_wait_bit_killable, TASK_KILLABLE);
 out:
        return ret;
 }
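
The action callback handed to wait_on_bit()/out_of_line_wait_on_bit() has a simple contract in this era's API: return 0 after scheduling away (the bit is then re-checked), or a negative errno to abandon the wait, which the wait_on_bit*() call propagates to its caller. The killable variants above differ from the old interruptible ones only in testing fatal_signal_pending() under TASK_KILLABLE. A caller-side sketch with a hypothetical flag word:

	/* `flags` and MY_BIT are made up for illustration. */
	err = wait_on_bit(&flags, MY_BIT, nfs_wait_bit_killable,
			  TASK_KILLABLE);
	if (err)
		return err;	/* -ERESTARTSYS from the action callback */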
 
  */
 static void nfs_execute_read(struct nfs_read_data *data)
 {
-       struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
-       sigset_t oldset;
-
-       rpc_clnt_sigmask(clnt, &oldset);
        rpc_execute(&data->task);
-       rpc_clnt_sigunmask(clnt, &oldset);
 }
 
 /*
 
                const char *nostr;
        } nfs_info[] = {
                { NFS_MOUNT_SOFT, ",soft", ",hard" },
-               { NFS_MOUNT_INTR, ",intr", ",nointr" },
                { NFS_MOUNT_NOCTO, ",nocto", "" },
                { NFS_MOUNT_NOAC, ",noac", "" },
                { NFS_MOUNT_NONLM, ",nolock", "" },
                        mnt->flags &= ~NFS_MOUNT_SOFT;
                        break;
                case Opt_intr:
-                       mnt->flags |= NFS_MOUNT_INTR;
-                       break;
                case Opt_nointr:
-                       mnt->flags &= ~NFS_MOUNT_INTR;
                        break;
                case Opt_posix:
                        mnt->flags |= NFS_MOUNT_POSIX;
 
 /*
  * Wait for a request to complete.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
  */
 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
 {
 
 static void nfs_execute_write(struct nfs_write_data *data)
 {
-       struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
-       sigset_t oldset;
-
-       rpc_clnt_sigmask(clnt, &oldset);
        rpc_execute(&data->task);
-       rpc_clnt_sigunmask(clnt, &oldset);
 }
 
 /*
 
 
 #define nfs_wait_event(clnt, wq, condition)                            \
 ({                                                                     \
-       int __retval = 0;                                               \
-       if (clnt->cl_intr) {                                            \
-               sigset_t oldmask;                                       \
-               rpc_clnt_sigmask(clnt, &oldmask);                       \
-               __retval = wait_event_interruptible(wq, condition);     \
-               rpc_clnt_sigunmask(clnt, &oldmask);                     \
-       } else                                                          \
-               wait_event(wq, condition);                              \
+       int __retval = wait_event_killable(wq, condition);              \
        __retval;                                                       \
 })
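
The macro body is a GCC statement expression, so its value is the trailing `__retval;`. After the conversion the `clnt` argument is unused, but keeping it means no call site has to change. Hypothetical usage:

	/* Wait queue and condition are made up for illustration. */
	error = nfs_wait_event(clnt, some_wq, atomic_read(&pending) == 0);
	if (error)
		goto out_err;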
 
 
 /* bits in the flags field */
 
 #define NFS_MOUNT_SOFT         0x0001  /* 1 */
-#define NFS_MOUNT_INTR         0x0002  /* 1 */
+#define NFS_MOUNT_INTR         0x0002  /* 1 */ /* now unused, but ABI */
 #define NFS_MOUNT_SECURE       0x0004  /* 1 */
 #define NFS_MOUNT_POSIX                0x0008  /* 1 */
 #define NFS_MOUNT_NOCTO                0x0010  /* 1 */
 
        struct rpc_iostats *    cl_metrics;     /* per-client statistics */
 
        unsigned int            cl_softrtry : 1,/* soft timeouts */
-                               cl_intr     : 1,/* interruptible */
                                cl_discrtry : 1,/* disconnect before retry */
                                cl_autobind : 1;/* use getport() */
 
 
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY       (1UL << 0)
-#define RPC_CLNT_CREATE_INTR           (1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND       (1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT    (1UL << 3)
 #define RPC_CLNT_CREATE_NOPING         (1UL << 4)
 
 #define RPC_TASK_DYNAMIC       0x0080          /* task was kmalloc'ed */
 #define RPC_TASK_KILLED                0x0100          /* task was killed */
 #define RPC_TASK_SOFT          0x0200          /* Use soft timeouts */
-#define RPC_TASK_NOINTR                0x0400          /* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)                ((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)      ((t)->tk_flags & RPC_TASK_SWAPPER)
 #define RPC_ASSASSINATED(t)    ((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t)     ((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t)         ((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)
 
 #define RPC_TASK_RUNNING       0
 #define RPC_TASK_QUEUED                1
 
                return clnt;
 
        if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
-               int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+               int err = rpc_ping(clnt, RPC_TASK_SOFT);
                if (err != 0) {
                        rpc_shutdown_client(clnt);
                        return ERR_PTR(err);
        if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
                clnt->cl_softrtry = 0;
 
-       if (args->flags & RPC_CLNT_CREATE_INTR)
-               clnt->cl_intr = 1;
        if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
                clnt->cl_autobind = 1;
        if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
        clnt->cl_prog     = program->number;
        clnt->cl_vers     = version->number;
        clnt->cl_stats    = program->stats;
-       err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+       err = rpc_ping(clnt, RPC_TASK_SOFT);
        if (err != 0) {
                rpc_shutdown_client(clnt);
                clnt = ERR_PTR(err);
        .rpc_call_done = rpc_default_callback,
 };
 
-/*
- *     Export the signal mask handling for synchronous code that
- *     sleeps on RPC calls
- */
-#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-
-static void rpc_save_sigmask(sigset_t *oldset, int intr)
-{
-       unsigned long   sigallow = sigmask(SIGKILL);
-       sigset_t sigmask;
-
-       /* Block all signals except those listed in sigallow */
-       if (intr)
-               sigallow |= RPC_INTR_SIGNALS;
-       siginitsetinv(&sigmask, sigallow);
-       sigprocmask(SIG_BLOCK, &sigmask, oldset);
-}
-
-static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
-{
-       rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
-}
-
-static inline void rpc_restore_sigmask(sigset_t *oldset)
-{
-       sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-       rpc_save_sigmask(oldset, clnt->cl_intr);
-}
-
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-       rpc_restore_sigmask(oldset);
-}
-
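
All of the machinery removed above existed to emulate, via sigprocmask(), what TASK_KILLABLE now provides per sleep. The new task state is roughly (as added in include/linux/sched.h alongside this series):

	#define TASK_WAKEKILL		128
	#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

that is, the sleeper counts as uninterruptible and is invisible to ordinary signals, but a fatal signal still wakes it.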
 static
 struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
                struct rpc_message *msg,
                void *data)
 {
        struct rpc_task *task, *ret;
-       sigset_t oldset;
 
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
        }
 
-       /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
-       rpc_task_sigmask(task, &oldset);
        if (msg != NULL) {
                rpc_call_setup(task, msg, 0);
                if (task->tk_status != 0) {
        rpc_execute(task);
        ret = task;
 out:
-       rpc_restore_sigmask(&oldset);
        return ret;
 }
 
 
                .program        = &rpcb_program,
                .version        = version,
                .authflavor     = RPC_AUTH_UNIX,
-               .flags          = (RPC_CLNT_CREATE_NOPING |
-                                  RPC_CLNT_CREATE_INTR),
+               .flags          = RPC_CLNT_CREATE_NOPING,
        };
 
        switch (srvaddr->sa_family) {
 
 }
 EXPORT_SYMBOL(rpc_init_wait_queue);
 
-static int rpc_wait_bit_interruptible(void *word)
+static int rpc_wait_bit_killable(void *word)
 {
-       if (signal_pending(current))
+       if (fatal_signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
        if (action == NULL)
-               action = rpc_wait_bit_interruptible;
+               action = rpc_wait_bit_killable;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
-                       action, TASK_INTERRUPTIBLE);
+                       action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL(__rpc_wait_for_completion_task);
 
 
                /* sync task: sleep here */
                dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
-               /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
-                               RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-                               TASK_INTERRUPTIBLE);
+                               RPC_TASK_QUEUED, rpc_wait_bit_killable,
+                               TASK_KILLABLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                kref_get(&clnt->cl_kref);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
-               if (!clnt->cl_intr)
-                       task->tk_flags |= RPC_TASK_NOINTR;
        }
 
        BUG_ON(task->tk_ops == NULL);
 
 EXPORT_SYMBOL(rpc_call_sync);
 EXPORT_SYMBOL(rpc_call_async);
 EXPORT_SYMBOL(rpc_call_setup);
-EXPORT_SYMBOL(rpc_clnt_sigmask);
-EXPORT_SYMBOL(rpc_clnt_sigunmask);
 EXPORT_SYMBOL(rpc_delay);
 EXPORT_SYMBOL(rpc_restart_call);
 EXPORT_SYMBOL(rpc_setbufsize);