 #define RLIMIT_NICE            13      /* max nice prio allowed to raise to
                                           0-39 for nice level 19 .. -20 */
 #define RLIMIT_RTPRIO          14      /* maximum realtime priority */
-
-#define RLIM_NLIMITS           15
+#define RLIMIT_RTTIME          15      /* timeout for RT tasks in us */
+#define RLIM_NLIMITS           16
 
 /*
  * SuS says limits have to be unsigned.
        [RLIMIT_MSGQUEUE]       = {   MQ_BYTES_MAX,   MQ_BYTES_MAX },   \
        [RLIMIT_NICE]           = { 0, 0 },                             \
        [RLIMIT_RTPRIO]         = { 0, 0 },                             \
+       [RLIMIT_RTTIME]         = {  RLIM_INFINITY,  RLIM_INFINITY },   \
 }
 
 #endif /* __KERNEL__ */
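
For reference, a minimal userspace sketch of how a task group would opt in
to the new limit; this assumes RLIMIT_RTTIME is exported to userspace with
the value defined above and that, per the comment, both values are given in
microseconds of RT CPU time (the INIT_RLIMITS default of RLIM_INFINITY
means no watchdog at all):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl = {
                    .rlim_cur = 500000,     /* soft: SIGXCPU after 0.5s */
                    .rlim_max = 2000000,    /* hard: SIGKILL after 2s   */
            };

            /* Like other rlimits, this applies to the whole thread group. */
            if (setrlimit(RLIMIT_RTTIME, &rl)) {
                    perror("setrlimit");
                    return 1;
            }
            return 0;
    }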
 
 struct sched_rt_entity {
        struct list_head run_list;
        unsigned int time_slice;
+       unsigned long timeout;          /* ticks of RT CPU time since the last wakeup */
 };
 
 struct task_struct {
 
 static void check_thread_timers(struct task_struct *tsk,
                                 struct list_head *firing)
 {
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
+       struct signal_struct *const sig = tsk->signal;
 
        maxfire = 20;
        tsk->it_prof_expires = cputime_zero;
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
+
+       /*
+        * Check for the special case thread timers.
+        */
+       if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+               unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+               unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+
+               if (tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the hard limit, we just die.
+                        * No need to calculate anything else now.
+                        */
+                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+                       return;
+               }
+               if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the soft limit, send a SIGXCPU every second.
+                        */
+                       if (sig->rlim[RLIMIT_RTTIME].rlim_cur
+                           < sig->rlim[RLIMIT_RTTIME].rlim_max) {
+                               sig->rlim[RLIMIT_RTTIME].rlim_cur +=
+                                                               USEC_PER_SEC;
+                       }
+                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+               }
+       }
 }
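
Two details in the hunk above are easy to miss. tsk->rt.timeout counts
scheduler ticks (see watchdog() below) while the rlimit is in microseconds,
so both checks convert the limit with DIV_ROUND_UP(limit, USEC_PER_SEC/HZ);
rounding up guarantees a limit is never enforced a tick early. And on each
soft-limit hit, rlim_cur is advanced by a second's worth of microseconds
(while it is still below rlim_max), which is what spaces the SIGXCPUs
roughly one second apart until the hard limit delivers SIGKILL. Worked
numbers, assuming HZ=250:

    /* One tick = USEC_PER_SEC/HZ = 1000000/250 = 4000 us, so a hard
     * limit of 2000000 us becomes
     *
     *         DIV_ROUND_UP(2000000, 4000) = 500 ticks
     *
     * i.e. SIGKILL once rt.timeout exceeds 500 ticks, which is two
     * seconds of uninterrupted RT execution.
     */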
 
 /*
 
        inc_cpu_load(rq, p->se.load.weight);
 
        inc_rt_tasks(p, rq);
+
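+       /* a fresh wakeup opens a new RT CPU burst */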
+       if (wakeup)
+               p->rt.timeout = 0;
 }
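
Because rt.timeout is cleared on every wakeup enqueue, the limit measures
CPU time consumed without sleeping rather than total RT runtime: an RT task
that blocks regularly never accumulates toward it. A hedged userspace
sketch of the pattern this rewards (do_work() is a hypothetical stand-in
for the application's real processing):

    #include <sched.h>
    #include <time.h>

    static void do_work(void)
    {
            /* hypothetical workload: burn a little CPU */
            volatile unsigned long i;
            for (i = 0; i < 100000; i++)
                    ;
    }

    int rt_loop(void)
    {
            struct sched_param sp = { .sched_priority = 10 };
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };

            if (sched_setscheduler(0, SCHED_FIFO, &sp))
                    return -1;

            for (;;) {
                    do_work();
                    /* Blocking dequeues the task; the wakeup re-enqueue
                     * zeroes rt.timeout, so only an unbroken CPU burst
                     * can trip RLIMIT_RTTIME. */
                    nanosleep(&ts, NULL);
            }
    }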
 
 /*
        }
 }
 
+static void watchdog(struct rq *rq, struct task_struct *p)
+{
+       unsigned long soft, hard;
+
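+       /* p->signal is cleared by __exit_signal(); nothing to enforce then */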
+       if (!p->signal)
+               return;
+
+       soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
+       hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+
+       if (soft != RLIM_INFINITY) {
+               unsigned long next;
+
+               p->rt.timeout++;
+               next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+               if (next > p->rt.timeout) {
+                       u64 next_time = p->se.sum_exec_runtime;
+
+                       next_time += next * (NSEC_PER_SEC/HZ);
+                       if (p->it_sched_expires > next_time)
+                               p->it_sched_expires = next_time;
+               } else
+                       p->it_sched_expires = p->se.sum_exec_runtime;
+       }
+}
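
watchdog() runs from task_tick_rt() below, once per tick while an RT task
is on the CPU. Until rt.timeout reaches the (tick-converted) limit it only
pre-arms it_sched_expires near the projected crossing; once the limit is
reached it drops it_sched_expires to the current sum_exec_runtime, so the
posix-cpu-timers path (check_thread_timers() above) fires at the next
timer interrupt and sends SIGXCPU or SIGKILL. Worked numbers, assuming
HZ=1000 and soft = 500000 us:

    /* One tick = 1000 us, so
     *
     *         next = DIV_ROUND_UP(500000, 1000) = 500 ticks
     *
     * rt.timeout < 500:  it_sched_expires is pulled to no later than
     *                    sum_exec_runtime + 500 * (NSEC_PER_SEC/HZ)
     *                                     = sum_exec_runtime + 500000000 ns
     * rt.timeout >= 500: it_sched_expires = sum_exec_runtime, i.e.
     *                    already expired, signal on the next check.
     */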
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
 
+       watchdog(rq, p);
+
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.