#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
 #endif
-#ifndef CONFIG_VFP
-       elf_hwcap &= ~HWCAP_VFP;
-#endif
 
        cpu_proc_init();
 }
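
With the #ifndef CONFIG_VFP clearing above removed, HWCAP_VFP is no longer a compile-time statement: it is set only once vfp_init() has actually probed a VFP unit (see the vfp_init() hunk below). Userspace consumes the flag through the ELF auxiliary vector. A minimal check, illustrative only and assuming a modern glibc with getauxval() (which postdates this patch; older userspace read /proc/self/auxv or /proc/cpuinfo directly):

#include <stdio.h>
#include <sys/auxv.h>           /* getauxval(), AT_HWCAP */
#include <asm/hwcap.h>          /* HWCAP_VFP (ARM only) */

int main(void)
{
        /* AT_HWCAP carries the kernel's elf_hwcap word. */
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("VFP %s\n", (hwcap & HWCAP_VFP) ? "present" : "absent");
        return 0;
}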
 
        b       __arm926_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
-       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
        .long   cpu_arm926_name
        .long   arm926_processor_functions
        .long   v4wbi_tlb_fns
 
 #endif
        mcr     p15, 0, r4, c2, c0, 1           @ load TTB1
 #endif /* CONFIG_MMU */
-#ifdef CONFIG_VFP
-       mrc     p15, 0, r0, c1, c0, 2
-       orr     r0, r0, #(0xf << 20)
-       mcr     p15, 0, r0, c1, c0, 2           @ Enable full access to VFP
-#endif
        adr     r5, v6_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0, 0           @ read control register
        b       __v6_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
-       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
        .long   cpu_v6_name
        .long   v6_processor_functions
        .long   v6wbi_tlb_fns
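
The CPACR write deleted from the v6 setup code above is not lost; it moves into vfp_init() as set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)), using the helpers added in the header hunk below. The constants agree with the old hand-coded mask, as this standalone check (illustrative only) shows:

#include <assert.h>

#define CPACC_FULL(n)   (3 << ((n) * 2))

int main(void)
{
        /* cp10 and cp11 each own a two-bit CPACR field; 0b11 grants
         * full access, so enabling both is exactly the 0xf << 20 that
         * the removed assembly OR'ed in. */
        assert((CPACC_FULL(10) | CPACC_FULL(11)) == (0xf << 20));
        return 0;
}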
 
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 }
- 
+
 /*
  * VFP support code initialisation.
  */
 static int __init vfp_init(void)
 {
        unsigned int vfpsid;
+       unsigned int cpu_arch = cpu_architecture();
+       u32 access = 0;
+
+       if (cpu_arch >= CPU_ARCH_ARMv6) {
+               access = get_copro_access();
+
+               /*
+                * Enable full access to VFP (cp10 and cp11)
+                */
+               set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
+       }
 
        /*
         * First check that there is a VFP that we can use.
         */
        printk(KERN_INFO "VFP support v0.3: ");
        if (VFP_arch) {
                printk("not present\n");
+
+               /*
+                * Restore the copro access register.
+                */
+               if (cpu_arch >= CPU_ARCH_ARMv6)
+                       set_copro_access(access);
        } else if (vfpsid & FPSID_NODOUBLE) {
                printk("no double precision support\n");
        } else {
                        (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
                        (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
                        (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
+
                vfp_vector = vfp_support_entry;
 
                thread_register_notifier(&vfp_notifier_block);
+
+               /*
+                * We detected VFP, and the support code is
+                * in place; report VFP support to userspace.
+                */
+               elf_hwcap |= HWCAP_VFP;
        }
        return 0;
 }
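
The init path is a save/modify/probe/restore sequence: grant full cp10/cp11 access, read FPSID, and if nothing answers, put the access register back so stray VFP instructions keep trapping as undefined. Stripped of the VFP specifics, the shape is (a sketch only; probe_hardware() is a hypothetical stand-in for the FPSID read):

u32 saved = get_copro_access();

set_copro_access(saved | CPACC_FULL(10) | CPACC_FULL(11));
if (!probe_hardware()) {
        /* Nothing there: restore the old value so the coprocessor
         * space stays disabled for everyone. */
        set_copro_access(saved);
}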
 
 #define        cpu_is_xscale() 1
 #endif
 
-#define set_cr(x)                                      \
-       __asm__ __volatile__(                           \
-       "mcr    p15, 0, %0, c1, c0, 0   @ set CR"       \
-       : : "r" (x) : "cc")
-
-#define get_cr()                                       \
-       ({                                              \
-       unsigned int __val;                             \
-       __asm__ __volatile__(                           \
-       "mrc    p15, 0, %0, c1, c0, 0   @ get CR"       \
-       : "=r" (__val) : : "cc");                       \
-       __val;                                          \
-       })
+static inline unsigned int get_cr(void)
+{
+       unsigned int val;
+       asm("mrc p15, 0, %0, c1, c0, 0  @ get CR" : "=r" (val) : : "cc");
+       return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+       asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
+         : : "r" (val) : "cc");
+}
+
+#define CPACC_FULL(n)          (3 << ((n) * 2))
+#define CPACC_SVC(n)           (1 << ((n) * 2))
+#define CPACC_DISABLE(n)       (0 << ((n) * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+       unsigned int val;
+       asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+         : "=r" (val) : : "cc");
+       return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+       asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+         : : "r" (val) : "cc");
+}
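
The three CPACC macros encode the architected two-bit access field that CPACR holds per coprocessor: 0b00 denies all access, 0b01 allows privileged access only, 0b11 allows privileged and user access. Because each field is two bits wide, downgrading requires clearing the field before OR-ing the new value in; a sketch (the helper name is hypothetical, not part of this patch):

/* Downgrade cp10/cp11 to privileged-only access (illustrative). */
static inline void vfp_access_svc_only(void)
{
        unsigned int access = get_copro_access();

        access &= ~(CPACC_FULL(10) | CPACC_FULL(11));   /* clear both fields */
        set_copro_access(access | CPACC_SVC(10) | CPACC_SVC(11));
}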
 
 extern unsigned long cr_no_alignment;  /* defined in entry-armv.S */
 extern unsigned long cr_alignment;     /* defined in entry-armv.S */
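
cr_no_alignment and cr_alignment are the boot-time copies of the control register without and with the alignment-fault bit. With get_cr()/set_cr() now proper inline functions, a typical consumer read-modify-writes the register like this (a sketch; the function name is hypothetical, and CR_A is the alignment-fault enable bit, bit 1, defined elsewhere in this header):

static inline void set_alignment_trap(int on)
{
        unsigned int cr = get_cr();

        if (on)
                cr |= CR_A;             /* fault on unaligned accesses */
        else
                cr &= ~CR_A;

        set_cr(cr);
}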