[PATCH] seccomp: tsc disable
author     Andrea Arcangeli <andrea@suse.de>
           Mon, 27 Jun 2005 21:36:36 +0000 (14:36 -0700)
committer  Linus Torvalds <torvalds@ppc970.osdl.org>
           Mon, 27 Jun 2005 22:11:44 +0000 (15:11 -0700)
I believe that, at least for seccomp, it's worth turning off the TSC: not
just because of HT, but because of the L2 cache too.  So it's up to you:
either you turn it off completely (which isn't very nice IMHO), or you
apply the patch below, which is what I recommend.

This has been tested successfully on x86-64 against the current cogito
repository (i686 compiles, so I didn't bother testing it ;).  People
selling CPU time through cpushare may appreciate this bit for peace of
mind.

With this applied there's no way to get any timing info anymore
(gettimeofday is of course forbidden).  The seccomp environment is
completely deterministic, so it can't be allowed to obtain timing info.
It has to stay deterministic so that in the future I can enable a
computing mode that runs each task in parallel on several machines, with
server-side transparent checkpointing and verification that the output is
the same from all of the 2 or 3 seller computers for each task, without
the buyer even noticing.  (For now the verification is left to the
buyer's client side and there's no checkpointing, since that would
require more kernel changes to track the dirty bits, but it'll be easy to
extend once the basic mode is finished.)
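As a userspace illustration (a sketch, not part of this commit): once a
task enters strict seccomp via prctl(PR_SET_SECCOMP, 1), the patched
kernel keeps X86_CR4_TSD set while that task runs, so an rdtsc attempt
dies with SIGSEGV instead of returning the counter:

    #include <unistd.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SECCOMP
    #define PR_SET_SECCOMP 22       /* may be missing from older headers */
    #endif

    int main(void)
    {
            unsigned int lo, hi;

            if (prctl(PR_SET_SECCOMP, 1) != 0)
                    return 1;       /* kernel lacks CONFIG_SECCOMP */

            /* With this patch the next insn raises #GP -> SIGSEGV,
             * because cr4.TSD is set whenever this task is scheduled. */
            __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));

            /* Not reached on a patched kernel. */
            write(1, "rdtsc survived\n", 15);
            _exit(0);
    }

(write() and _exit() are among the few syscalls strict seccomp permits,
so the success path would still run on an unpatched kernel.)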

Eliminating a cold-cache read of the mmu_cr4_features global saves one
cacheline during the tlb flush, while making the code per-cpu-safe at the
same time.  Thanks to Mikael Pettersson for noticing that the tlb flush
wasn't per-cpu-safe.

The global tlb flush can run from irq context (an IPI calling
do_flush_tlb_all), but it's transparent to the switch_to code: from the
point of view of the interrupted code the IPI makes no change to the cr4
contents, and since it's now all per-cpu there is no race.  So there's no
need to disable irqs in the switch_to slow path.
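For reference, the read_cr4()/write_cr4() helpers used by the new
disable_tsc() below are the usual single-instruction wrappers from the
arch headers (asm-i386/system.h on i386); a rough sketch of the i386
flavour, not part of this patch:

    static inline unsigned long read_cr4(void)
    {
            unsigned long cr4;
            __asm__ __volatile__("movl %%cr4,%0" : "=r" (cr4));
            return cr4;
    }

    static inline void write_cr4(unsigned long val)
    {
            __asm__ __volatile__("movl %0,%%cr4" : : "r" (val));
    }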

Signed-off-by: Andrea Arcangeli <andrea@cpushare.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/kernel/process.c
arch/x86_64/kernel/process.c
include/asm-i386/tlbflush.h
include/asm-x86_64/tlbflush.h
include/linux/seccomp.h

diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 5f8cfa6b794085b9fb224e229398369bc0f4c64a..ba243a4cc119fd35774bfc26e3fb3587bb31c741 100644
@@ -616,6 +616,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
        tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
 
+/*
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+                              struct task_struct *next_p)
+{
+       struct thread_info *prev, *next;
+
+       /*
+        * gcc should eliminate the ->thread_info dereference if
+        * has_secure_computing returns 0 at compile time (SECCOMP=n).
+        */
+       prev = prev_p->thread_info;
+       next = next_p->thread_info;
+
+       if (has_secure_computing(prev) || has_secure_computing(next)) {
+               /* slow path here */
+               if (has_secure_computing(prev) &&
+                   !has_secure_computing(next)) {
+                       write_cr4(read_cr4() & ~X86_CR4_TSD);
+               } else if (!has_secure_computing(prev) &&
+                          has_secure_computing(next))
+                       write_cr4(read_cr4() | X86_CR4_TSD);
+       }
+}
+
 /*
  *     switch_to(x,yn) should switch tasks from x to y.
  *
@@ -695,6 +722,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
                handle_io_bitmap(next, tss);
 
+       disable_tsc(prev_p, next_p);
+
        return prev_p;
 }
 
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 1d91271796e56725e6a0d51474215830362d2ddc..7577f9d7a75d8373f1a79be83c4e30c63b867f19 100644
@@ -481,6 +481,33 @@ out:
        return err;
 }
 
+/*
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+                              struct task_struct *next_p)
+{
+       struct thread_info *prev, *next;
+
+       /*
+        * gcc should eliminate the ->thread_info dereference if
+        * has_secure_computing returns 0 at compile time (SECCOMP=n).
+        */
+       prev = prev_p->thread_info;
+       next = next_p->thread_info;
+
+       if (has_secure_computing(prev) || has_secure_computing(next)) {
+               /* slow path here */
+               if (has_secure_computing(prev) &&
+                   !has_secure_computing(next)) {
+                       write_cr4(read_cr4() & ~X86_CR4_TSD);
+               } else if (!has_secure_computing(prev) &&
+                          has_secure_computing(next))
+                       write_cr4(read_cr4() | X86_CR4_TSD);
+       }
+}
+
 /*
  * This special macro can be used to load a debugging register
  */
@@ -599,6 +626,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
                }
        }
 
+       disable_tsc(prev_p, next_p);
+
        return prev_p;
 }
 
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index f22fab0cea26486bd323a0432a3c093e44ddec27..ab216e1370efb85fb3b6f73dab1ad588bf1382f7 100644
  */
 #define __flush_tlb_global()                                           \
        do {                                                            \
-               unsigned int tmpreg;                                    \
+               unsigned int tmpreg, cr4, cr4_orig;                     \
                                                                        \
                __asm__ __volatile__(                                   \
-                       "movl %1, %%cr4;  # turn off PGE     \n"        \
+                       "movl %%cr4, %2;  # turn off PGE     \n"        \
+                       "movl %2, %1;                        \n"        \
+                       "andl %3, %1;                        \n"        \
+                       "movl %1, %%cr4;                     \n"        \
                        "movl %%cr3, %0;                     \n"        \
                        "movl %0, %%cr3;  # flush TLB        \n"        \
                        "movl %2, %%cr4;  # turn PGE back on \n"        \
-                       : "=&r" (tmpreg)                                \
-                       : "r" (mmu_cr4_features & ~X86_CR4_PGE),        \
-                         "r" (mmu_cr4_features)                        \
+                       : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+                       : "i" (~X86_CR4_PGE)                            \
                        : "memory");                                    \
        } while (0)
 
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 2e811ac262af78062c2ffb661bfa9eef3815b824..061742382520ecfe4cf416a5be91cba6b8b51f18 100644
  */
 #define __flush_tlb_global()                                           \
        do {                                                            \
-               unsigned long tmpreg;                                   \
+               unsigned long tmpreg, cr4, cr4_orig;                    \
                                                                        \
                __asm__ __volatile__(                                   \
-                       "movq %1, %%cr4;  # turn off PGE     \n"        \
+                       "movq %%cr4, %2;  # turn off PGE     \n"        \
+                       "movq %2, %1;                        \n"        \
+                       "andq %3, %1;                        \n"        \
+                       "movq %1, %%cr4;                     \n"        \
                        "movq %%cr3, %0;  # flush TLB        \n"        \
                        "movq %0, %%cr3;                     \n"        \
                        "movq %2, %%cr4;  # turn PGE back on \n"        \
-                       : "=&r" (tmpreg)                                \
-                       : "r" (mmu_cr4_features & ~X86_CR4_PGE),        \
-                         "r" (mmu_cr4_features)                        \
+                       : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+                       : "i" (~X86_CR4_PGE)                            \
                        : "memory");                                    \
        } while (0)
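In C terms, the rewritten __flush_tlb_global() on both architectures is
now equivalent to the following sketch (cr3 accessors named here by
analogy with the cr4 ones, standing in for the mov cr3 pair in the asm).
The point is that it restores the live, per-cpu cr4 value instead of the
boot-time mmu_cr4_features, so a task's TSD bit survives a global flush:

    unsigned long cr4_orig = read_cr4();    /* live, per-cpu cr4 value */

    write_cr4(cr4_orig & ~X86_CR4_PGE);     /* turn off PGE */
    write_cr3(read_cr3());                  /* reload cr3: flushes the TLB,
                                             * global pages included */
    write_cr4(cr4_orig);                    /* turn PGE back on, TSD intact */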
 
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 3a2702bbb1d67dc177544c294db50677a50015e4..dc89116bb1ca997bef56f6e938f4f6080ee4f878 100644
@@ -19,6 +19,11 @@ static inline void secure_computing(int this_syscall)
                __secure_computing(this_syscall);
 }
 
+static inline int has_secure_computing(struct thread_info *ti)
+{
+       return unlikely(test_ti_thread_flag(ti, TIF_SECCOMP));
+}
+
 #else /* CONFIG_SECCOMP */
 
 #if (__GNUC__ > 2)
@@ -28,6 +33,11 @@ static inline void secure_computing(int this_syscall)
 #endif
 
 #define secure_computing(x) do { } while (0)
+/* static inline to preserve typechecking */
+static inline int has_secure_computing(struct thread_info *ti)
+{
+       return 0;
+}
 
 #endif /* CONFIG_SECCOMP */
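
For context (a simplified sketch, not part of this patch): TIF_SECCOMP,
which has_secure_computing() tests, is set from the PR_SET_SECCOMP prctl
path roughly as below, and is never cleared again, so disable_tsc() sees
it from the next context switch onwards:

    /* inside sys_prctl(), schematically */
    case PR_SET_SECCOMP:
            if (arg2 != 1)
                    return -EINVAL;         /* only strict mode exists */
            current->seccomp.mode = 1;
            set_thread_flag(TIF_SECCOMP);   /* observed by disable_tsc() */
            return 0;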