www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branches 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess...
author     Ingo Molnar <mingo@elte.hu>
           Fri, 13 Feb 2009 08:47:32 +0000 (09:47 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Fri, 13 Feb 2009 08:47:32 +0000 (09:47 +0100)
arch/x86/include/asm/page.h
arch/x86/include/asm/paravirt.h
arch/x86/kernel/hpet.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/ptrace.c
arch/x86/mm/ioremap.c
arch/x86/mm/pat.c
include/linux/mm.h
mm/mlock.c

diff --combined arch/x86/include/asm/page.h
index 40226999cbf850e13a086b5d333947f44f581347,e9873a2e86951d10c7a5576e3b908956d0e3e6f8,776579119a009feb1b2854a9ea18a68183d63243..05f2da7f387a87873959722595884ba1ff52b882
@@@@ -57,7 -57,7 -57,6 +57,6 @@@@ typedef struct { pgdval_t pgd; } pgd_t
   typedef struct { pgprotval_t pgprot; } pgprot_t;
   
   extern int page_is_ram(unsigned long pagenr);
-- extern int pagerange_is_ram(unsigned long start, unsigned long end);
   extern int devmem_is_allowed(unsigned long pagenr);
   extern void map_devmem(unsigned long pfn, unsigned long size,
                       pgprot_t vma_prot);
@@@@ -95,11 -95,6 -94,6 +94,11 @@@@ static inline pgdval_t native_pgd_val(p
        return pgd.pgd;
   }
   
 ++static inline pgdval_t pgd_flags(pgd_t pgd)
 ++{
 ++     return native_pgd_val(pgd) & PTE_FLAGS_MASK;
 ++}
 ++
   #if PAGETABLE_LEVELS >= 3
   #if PAGETABLE_LEVELS == 4
   typedef struct { pudval_t pud; } pud_t;
@@@@ -122,11 -117,6 -116,6 +121,11 @@@@ static inline pudval_t native_pud_val(p
   }
   #endif       /* PAGETABLE_LEVELS == 4 */
   
 ++static inline pudval_t pud_flags(pud_t pud)
 ++{
 ++     return native_pud_val(pud) & PTE_FLAGS_MASK;
 ++}
 ++
   typedef struct { pmdval_t pmd; } pmd_t;
   
   static inline pmd_t native_make_pmd(pmdval_t val)
@@@@ -138,7 -128,6 -127,6 +137,7 @@@@ static inline pmdval_t native_pmd_val(p
   {
        return pmd.pmd;
   }
 ++
   #else  /* PAGETABLE_LEVELS == 2 */
   #include <asm-generic/pgtable-nopmd.h>
   
@@@@ -148,11 -137,6 -136,6 +147,11 @@@@ static inline pmdval_t native_pmd_val(p
   }
   #endif       /* PAGETABLE_LEVELS >= 3 */
   
 ++static inline pmdval_t pmd_flags(pmd_t pmd)
 ++{
 ++     return native_pmd_val(pmd) & PTE_FLAGS_MASK;
 ++}
 ++
   static inline pte_t native_make_pte(pteval_t val)
   {
        return (pte_t) { .pte = val };
@@@@ -163,7 -147,7 -146,7 +162,7 @@@@ static inline pteval_t native_pte_val(p
        return pte.pte;
   }
   
 --static inline pteval_t native_pte_flags(pte_t pte)
 ++static inline pteval_t pte_flags(pte_t pte)
   {
        return native_pte_val(pte) & PTE_FLAGS_MASK;
   }
   #endif
   
   #define pte_val(x)   native_pte_val(x)
 --#define pte_flags(x) native_pte_flags(x)
   #define __pte(x)     native_make_pte(x)
   
   #endif       /* CONFIG_PARAVIRT */
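
The page.h hunks above add pgd_flags(), pud_flags() and pmd_flags(), and promote the paravirt-only native_pte_flags() to a plain pte_flags(); each helper simply masks the raw entry with PTE_FLAGS_MASK. A minimal userspace sketch of that PFN/flags split, using simplified DEMO_* constants rather than the real kernel masks:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define DEMO_PAGE_SHIFT      12
#define DEMO_PHYS_MASK       ((1ULL << 52) - 1)        /* assumed max physical bits */
#define DEMO_PTE_PFN_MASK    (DEMO_PHYS_MASK & ~((1ULL << DEMO_PAGE_SHIFT) - 1))
#define DEMO_PTE_FLAGS_MASK  (~DEMO_PTE_PFN_MASK)

int main(void)
{
        pteval_t pte = 0x80000000075ff063ULL;   /* arbitrary example entry */

        /* same idea as pte_flags()/pmd_flags()/...: strip the PFN, keep the flags */
        printf("pfn bits:  %#llx\n", (unsigned long long)(pte & DEMO_PTE_PFN_MASK));
        printf("flag bits: %#llx\n", (unsigned long long)(pte & DEMO_PTE_FLAGS_MASK));
        return 0;
}
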
diff --combined arch/x86/include/asm/paravirt.h
index 1c244b64573feadfc30c5ab49f234a80411d9ab6,ba3e2ff6aedcb1a7c6648acdc9257ad0bb7e3202,a660eceaa2734643145936c1e5d971961f8925ab..b788dfd2048398b41f0b3fcfd84329392b38be49
   #define CLBR_EAX  (1 << 0)
   #define CLBR_ECX  (1 << 1)
   #define CLBR_EDX  (1 << 2)
 ++#define CLBR_EDI  (1 << 3)
   
 --#ifdef CONFIG_X86_64
 --#define CLBR_RSI  (1 << 3)
 --#define CLBR_RDI  (1 << 4)
 ++#ifdef CONFIG_X86_32
 ++/* CLBR_ANY should match all regs platform has. For i386, that's just it */
 ++#define CLBR_ANY  ((1 << 4) - 1)
 ++
 ++#define CLBR_ARG_REGS        (CLBR_EAX | CLBR_EDX | CLBR_ECX)
 ++#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
 ++#define CLBR_SCRATCH (0)
 ++#else
 ++#define CLBR_RAX  CLBR_EAX
 ++#define CLBR_RCX  CLBR_ECX
 ++#define CLBR_RDX  CLBR_EDX
 ++#define CLBR_RDI  CLBR_EDI
 ++#define CLBR_RSI  (1 << 4)
   #define CLBR_R8   (1 << 5)
   #define CLBR_R9   (1 << 6)
   #define CLBR_R10  (1 << 7)
   #define CLBR_R11  (1 << 8)
 ++
   #define CLBR_ANY  ((1 << 9) - 1)
 ++
 ++#define CLBR_ARG_REGS        (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
 ++                      CLBR_RCX | CLBR_R8 | CLBR_R9)
 ++#define CLBR_RET_REG (CLBR_RAX)
 ++#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
 ++
   #include <asm/desc_defs.h>
 --#else
 --/* CLBR_ANY should match all regs platform has. For i386, that's just it */
 --#define CLBR_ANY  ((1 << 3) - 1)
   #endif /* X86_64 */
   
 ++#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
 ++
   #ifndef __ASSEMBLY__
   #include <linux/types.h>
   #include <linux/cpumask.h>
@@@@ -57,14 -40,6 -40,6 +57,14 @@@@ struct tss_struct
   struct mm_struct;
   struct desc_struct;
   
 ++/*
 ++ * Wrapper type for pointers to code which uses the non-standard
 ++ * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 ++ */
 ++struct paravirt_callee_save {
 ++     void *func;
 ++};
 ++
   /* general info */
   struct pv_info {
        unsigned int kernel_rpl;
@@@@ -214,15 -189,11 -189,11 +214,15 @@@@ struct pv_irq_ops 
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
 ++      *
 ++      * NOTE: Callers of these functions expect the callee to preserve
 ++      * more registers than the standard C calling convention requires.
         */
 --     unsigned long (*save_fl)(void);
 --     void (*restore_fl)(unsigned long);
 --     void (*irq_disable)(void);
 --     void (*irq_enable)(void);
 ++     struct paravirt_callee_save save_fl;
 ++     struct paravirt_callee_save restore_fl;
 ++     struct paravirt_callee_save irq_disable;
 ++     struct paravirt_callee_save irq_enable;
 ++
        void (*safe_halt)(void);
        void (*halt)(void);
   
@@@@ -273,8 -244,7 -244,7 +273,8 @@@@ struct pv_mmu_ops 
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
 --     void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
 ++     void (*flush_tlb_others)(const struct cpumask *cpus,
 ++                              struct mm_struct *mm,
                                 unsigned long va);
   
        /* Hooks for allocating and freeing a pagetable top-level */
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);
   
 --     pteval_t (*pte_val)(pte_t);
 --     pteval_t (*pte_flags)(pte_t);
 --     pte_t (*make_pte)(pteval_t pte);
 ++     struct paravirt_callee_save pte_val;
 ++     struct paravirt_callee_save make_pte;
   
 --     pgdval_t (*pgd_val)(pgd_t);
 --     pgd_t (*make_pgd)(pgdval_t pgd);
 ++     struct paravirt_callee_save pgd_val;
 ++     struct paravirt_callee_save make_pgd;
   
   #if PAGETABLE_LEVELS >= 3
   #ifdef CONFIG_X86_PAE
   
        void (*set_pud)(pud_t *pudp, pud_t pudval);
   
 --     pmdval_t (*pmd_val)(pmd_t);
 --     pmd_t (*make_pmd)(pmdval_t pmd);
 ++     struct paravirt_callee_save pmd_val;
 ++     struct paravirt_callee_save make_pmd;
   
   #if PAGETABLE_LEVELS == 4
 --     pudval_t (*pud_val)(pud_t);
 --     pud_t (*make_pud)(pudval_t pud);
 ++     struct paravirt_callee_save pud_val;
 ++     struct paravirt_callee_save make_pud;
   
        void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
   #endif       /* PAGETABLE_LEVELS == 4 */
@@@@ -417,8 -388,6 -388,6 +417,8 @@@@ extern struct pv_lock_ops pv_lock_ops
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
   
   unsigned paravirt_patch_nop(void);
 ++unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 ++unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
   unsigned paravirt_patch_ignore(unsigned len);
   unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
@@@@ -510,45 -479,25 -479,25 +510,45 @@@@ int paravirt_disable_iospace(void)
    * makes sure the incoming and outgoing types are always correct.
    */
   #ifdef CONFIG_X86_32
 --#define PVOP_VCALL_ARGS                      unsigned long __eax, __edx, __ecx
 ++#define PVOP_VCALL_ARGS                              \
 ++     unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
   #define PVOP_CALL_ARGS                       PVOP_VCALL_ARGS
 ++
 ++#define PVOP_CALL_ARG1(x)            "a" ((unsigned long)(x))
 ++#define PVOP_CALL_ARG2(x)            "d" ((unsigned long)(x))
 ++#define PVOP_CALL_ARG3(x)            "c" ((unsigned long)(x))
 ++
   #define PVOP_VCALL_CLOBBERS          "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
   #define PVOP_CALL_CLOBBERS           PVOP_VCALL_CLOBBERS
 ++
 ++#define PVOP_VCALLEE_CLOBBERS                "=a" (__eax), "=d" (__edx)
 ++#define PVOP_CALLEE_CLOBBERS         PVOP_VCALLEE_CLOBBERS
 ++
   #define EXTRA_CLOBBERS
   #define VEXTRA_CLOBBERS
 --#else
 --#define PVOP_VCALL_ARGS              unsigned long __edi, __esi, __edx, __ecx
 ++#else  /* CONFIG_X86_64 */
 ++#define PVOP_VCALL_ARGS                                      \
 ++     unsigned long __edi = __edi, __esi = __esi,     \
 ++             __edx = __edx, __ecx = __ecx
   #define PVOP_CALL_ARGS               PVOP_VCALL_ARGS, __eax
 ++
 ++#define PVOP_CALL_ARG1(x)            "D" ((unsigned long)(x))
 ++#define PVOP_CALL_ARG2(x)            "S" ((unsigned long)(x))
 ++#define PVOP_CALL_ARG3(x)            "d" ((unsigned long)(x))
 ++#define PVOP_CALL_ARG4(x)            "c" ((unsigned long)(x))
 ++
   #define PVOP_VCALL_CLOBBERS  "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
 --
   #define PVOP_CALL_CLOBBERS   PVOP_VCALL_CLOBBERS, "=a" (__eax)
   
 ++#define PVOP_VCALLEE_CLOBBERS        "=a" (__eax)
 ++#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
 ++
   #define EXTRA_CLOBBERS        , "r8", "r9", "r10", "r11"
   #define VEXTRA_CLOBBERS       , "rax", "r8", "r9", "r10", "r11"
 --#endif
 ++#endif       /* CONFIG_X86_32 */
   
   #ifdef CONFIG_PARAVIRT_DEBUG
   #define PVOP_TEST_NULL(op)   BUG_ON(op == NULL)
   #define PVOP_TEST_NULL(op)   ((void)op)
   #endif
   
 --#define __PVOP_CALL(rettype, op, pre, post, ...)                     \
 ++#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,              \
 ++                   pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
 --             PVOP_CALL_ARGS;                                 \
 ++             PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
 --                                  : PVOP_CALL_CLOBBERS               \
 ++                                  : call_clbr                        \
                                     : paravirt_type(op),               \
 --                                    paravirt_clobber(CLBR_ANY),      \
 ++                                    paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
 --                                  : "memory", "cc" EXTRA_CLOBBERS);  \
 ++                                  : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
 --                                  : PVOP_CALL_CLOBBERS               \
 ++                                  : call_clbr                        \
                                     : paravirt_type(op),               \
 --                                    paravirt_clobber(CLBR_ANY),      \
 ++                                    paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
 --                                  : "memory", "cc" EXTRA_CLOBBERS);  \
 ++                                  : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
 --#define __PVOP_VCALL(op, pre, post, ...)                             \
 ++
 ++#define __PVOP_CALL(rettype, op, pre, post, ...)                     \
 ++     ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
 ++                   EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
 ++
 ++#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                       \
 ++     ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
 ++                   PVOP_CALLEE_CLOBBERS, ,                           \
 ++                   pre, post, ##__VA_ARGS__)
 ++
 ++
 ++#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)      \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
 --                          : PVOP_VCALL_CLOBBERS                      \
 ++                          : call_clbr                                \
                             : paravirt_type(op),                       \
 --                            paravirt_clobber(CLBR_ANY),              \
 ++                            paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
 --                          : "memory", "cc" VEXTRA_CLOBBERS);         \
 ++                          : "memory", "cc" extra_clbr);              \
        })
   
 ++#define __PVOP_VCALL(op, pre, post, ...)                             \
 ++     ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
 ++                    VEXTRA_CLOBBERS,                                 \
 ++                    pre, post, ##__VA_ARGS__)
 ++
 ++#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)                      \
 ++     ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
 ++                   PVOP_VCALLEE_CLOBBERS, ,                          \
 ++                   pre, post, ##__VA_ARGS__)
 ++
 ++
 ++
   #define PVOP_CALL0(rettype, op)                                              \
        __PVOP_CALL(rettype, op, "", "")
   #define PVOP_VCALL0(op)                                                      \
        __PVOP_VCALL(op, "", "")
   
 ++#define PVOP_CALLEE0(rettype, op)                                    \
 ++     __PVOP_CALLEESAVE(rettype, op, "", "")
 ++#define PVOP_VCALLEE0(op)                                            \
 ++     __PVOP_VCALLEESAVE(op, "", "")
 ++
 ++
   #define PVOP_CALL1(rettype, op, arg1)                                        \
 --     __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
 ++     __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
   #define PVOP_VCALL1(op, arg1)                                                \
 --     __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
 ++     __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
 ++
 ++#define PVOP_CALLEE1(rettype, op, arg1)                                      \
 ++     __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
 ++#define PVOP_VCALLEE1(op, arg1)                                              \
 ++     __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
 ++
   
   #define PVOP_CALL2(rettype, op, arg1, arg2)                          \
 --     __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
 --     "1" ((unsigned long)(arg2)))
 ++     __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
 ++                 PVOP_CALL_ARG2(arg2))
   #define PVOP_VCALL2(op, arg1, arg2)                                  \
 --     __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
 --     "1" ((unsigned long)(arg2)))
 ++     __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
 ++                  PVOP_CALL_ARG2(arg2))
 ++
 ++#define PVOP_CALLEE2(rettype, op, arg1, arg2)                                \
 ++     __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
 ++                       PVOP_CALL_ARG2(arg2))
 ++#define PVOP_VCALLEE2(op, arg1, arg2)                                        \
 ++     __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
 ++                        PVOP_CALL_ARG2(arg2))
 ++
   
   #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                    \
 --     __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
 --     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
 ++     __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
 ++                 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
   #define PVOP_VCALL3(op, arg1, arg2, arg3)                            \
 --     __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
 --     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
 ++     __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
 ++                  PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
   
   /* This is the only difference in x86_64. We can make it much simpler */
   #ifdef CONFIG_X86_32
   #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                      \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
 --                 "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
 --                 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
 ++                 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
 ++                 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
   #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                              \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
   #else
   #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                      \
 --     __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
 --     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
 --     "3"((unsigned long)(arg4)))
 ++     __PVOP_CALL(rettype, op, "", "",                                \
 ++                 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
 ++                 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
   #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                              \
 --     __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
 --     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
 --     "3"((unsigned long)(arg4)))
 ++     __PVOP_VCALL(op, "", "",                                        \
 ++                  PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
 ++                  PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
   #endif
   
   static inline int paravirt_enabled(void)
@@@@ -1079,11 -984,10 -984,10 +1079,11 @@@@ static inline void __flush_tlb_single(u
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
   }
   
 --static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 ++static inline void flush_tlb_others(const struct cpumask *cpumask,
 ++                                 struct mm_struct *mm,
                                    unsigned long va)
   {
 --     PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 ++     PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
   }
   
   static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@@@ -1155,13 -1059,13 -1059,13 +1155,13 @@@@ static inline pte_t __pte(pteval_t val
        pteval_t ret;
   
        if (sizeof(pteval_t) > sizeof(long))
 --             ret = PVOP_CALL2(pteval_t,
 --                              pv_mmu_ops.make_pte,
 --                              val, (u64)val >> 32);
 ++             ret = PVOP_CALLEE2(pteval_t,
 ++                                pv_mmu_ops.make_pte,
 ++                                val, (u64)val >> 32);
        else
 --             ret = PVOP_CALL1(pteval_t,
 --                              pv_mmu_ops.make_pte,
 --                              val);
 ++             ret = PVOP_CALLEE1(pteval_t,
 ++                                pv_mmu_ops.make_pte,
 ++                                val);
   
        return (pte_t) { .pte = ret };
   }
@@@@ -1171,12 -1075,29 -1075,29 +1171,12 @@@@ static inline pteval_t pte_val(pte_t pt
        pteval_t ret;
   
        if (sizeof(pteval_t) > sizeof(long))
 --             ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
 --                              pte.pte, (u64)pte.pte >> 32);
 --     else
 --             ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
 --                              pte.pte);
 --
 --     return ret;
 --}
 --
 --static inline pteval_t pte_flags(pte_t pte)
 --{
 --     pteval_t ret;
 --
 --     if (sizeof(pteval_t) > sizeof(long))
 --             ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
 --                              pte.pte, (u64)pte.pte >> 32);
 ++             ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
 ++                                pte.pte, (u64)pte.pte >> 32);
        else
 --             ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
 --                              pte.pte);
 ++             ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
 ++                                pte.pte);
   
 --#ifdef CONFIG_PARAVIRT_DEBUG
 --     BUG_ON(ret & PTE_PFN_MASK);
 --#endif
        return ret;
   }
   
@@@@ -1185,11 -1106,11 -1106,11 +1185,11 @@@@ static inline pgd_t __pgd(pgdval_t val
        pgdval_t ret;
   
        if (sizeof(pgdval_t) > sizeof(long))
 --             ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
 --                              val, (u64)val >> 32);
 ++             ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
 ++                                val, (u64)val >> 32);
        else
 --             ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
 --                              val);
 ++             ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
 ++                                val);
   
        return (pgd_t) { ret };
   }
@@@@ -1199,11 -1120,11 -1120,11 +1199,11 @@@@ static inline pgdval_t pgd_val(pgd_t pg
        pgdval_t ret;
   
        if (sizeof(pgdval_t) > sizeof(long))
 --             ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
 --                               pgd.pgd, (u64)pgd.pgd >> 32);
 ++             ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
 ++                                 pgd.pgd, (u64)pgd.pgd >> 32);
        else
 --             ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
 --                               pgd.pgd);
 ++             ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
 ++                                 pgd.pgd);
   
        return ret;
   }
@@@@ -1267,11 -1188,11 -1188,11 +1267,11 @@@@ static inline pmd_t __pmd(pmdval_t val
        pmdval_t ret;
   
        if (sizeof(pmdval_t) > sizeof(long))
 --             ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
 --                              val, (u64)val >> 32);
 ++             ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
 ++                                val, (u64)val >> 32);
        else
 --             ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
 --                              val);
 ++             ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
 ++                                val);
   
        return (pmd_t) { ret };
   }
@@@@ -1281,11 -1202,11 -1202,11 +1281,11 @@@@ static inline pmdval_t pmd_val(pmd_t pm
        pmdval_t ret;
   
        if (sizeof(pmdval_t) > sizeof(long))
 --             ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
 --                               pmd.pmd, (u64)pmd.pmd >> 32);
 ++             ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
 ++                                 pmd.pmd, (u64)pmd.pmd >> 32);
        else
 --             ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
 --                               pmd.pmd);
 ++             ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
 ++                                 pmd.pmd);
   
        return ret;
   }
@@@@ -1307,11 -1228,11 -1228,11 +1307,11 @@@@ static inline pud_t __pud(pudval_t val
        pudval_t ret;
   
        if (sizeof(pudval_t) > sizeof(long))
 --             ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
 --                              val, (u64)val >> 32);
 ++             ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
 ++                                val, (u64)val >> 32);
        else
 --             ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
 --                              val);
 ++             ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
 ++                                val);
   
        return (pud_t) { ret };
   }
@@@@ -1321,11 -1242,11 -1242,11 +1321,11 @@@@ static inline pudval_t pud_val(pud_t pu
        pudval_t ret;
   
        if (sizeof(pudval_t) > sizeof(long))
 --             ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
 --                               pud.pud, (u64)pud.pud >> 32);
 ++             ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
 ++                                 pud.pud, (u64)pud.pud >> 32);
        else
 --             ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
 --                               pud.pud);
 ++             ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
 ++                                 pud.pud);
   
        return ret;
   }
@@@@ -1431,14 -1352,14 -1352,7 +1431,7 @@@@ static inline void arch_leave_lazy_cpu_
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
   }
   
-- static inline void arch_flush_lazy_cpu_mode(void)
-- {
--      if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
--              arch_leave_lazy_cpu_mode();
--              arch_enter_lazy_cpu_mode();
--      }
-- }
-- 
++ void arch_flush_lazy_cpu_mode(void);
   
   #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
   static inline void arch_enter_lazy_mmu_mode(void)
@@@@ -1451,13 -1372,13 -1365,7 +1444,7 @@@@ static inline void arch_leave_lazy_mmu_
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
   }
   
-- static inline void arch_flush_lazy_mmu_mode(void)
-- {
--      if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
--              arch_leave_lazy_mmu_mode();
--              arch_enter_lazy_mmu_mode();
--      }
-- }
++ void arch_flush_lazy_mmu_mode(void);
   
   static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                unsigned long phys, pgprot_t flags)
   }
   
   void _paravirt_nop(void);
 --#define paravirt_nop ((void *)_paravirt_nop)
 ++u32 _paravirt_ident_32(u32);
 ++u64 _paravirt_ident_64(u64);
   
 --void paravirt_use_bytelocks(void);
 ++#define paravirt_nop ((void *)_paravirt_nop)
   
   #ifdef CONFIG_SMP
   
@@@@ -1482,7 -1402,6 -1389,6 +1469,7 @@@@ static inline int __raw_spin_is_contend
   {
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
   }
 ++#define __raw_spin_is_contended      __raw_spin_is_contended
   
   static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
   {
@@@@ -1519,37 -1438,12 -1425,12 +1506,37 @@@@ extern struct paravirt_patch_site __par
        __parainstructions_end[];
   
   #ifdef CONFIG_X86_32
 --#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
 --#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
 ++#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
 ++#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
 ++
 ++/* save and restore all caller-save registers, except return value */
 ++#define PV_SAVE_ALL_CALLER_REGS              "pushl %ecx;"
 ++#define PV_RESTORE_ALL_CALLER_REGS   "popl  %ecx;"
 ++
   #define PV_FLAGS_ARG "0"
   #define PV_EXTRA_CLOBBERS
   #define PV_VEXTRA_CLOBBERS
   #else
 ++/* save and restore all caller-save registers, except return value */
 ++#define PV_SAVE_ALL_CALLER_REGS                                              \
 ++     "push %rcx;"                                                    \
 ++     "push %rdx;"                                                    \
 ++     "push %rsi;"                                                    \
 ++     "push %rdi;"                                                    \
 ++     "push %r8;"                                                     \
 ++     "push %r9;"                                                     \
 ++     "push %r10;"                                                    \
 ++     "push %r11;"
 ++#define PV_RESTORE_ALL_CALLER_REGS                                   \
 ++     "pop %r11;"                                                     \
 ++     "pop %r10;"                                                     \
 ++     "pop %r9;"                                                      \
 ++     "pop %r8;"                                                      \
 ++     "pop %rdi;"                                                     \
 ++     "pop %rsi;"                                                     \
 ++     "pop %rdx;"                                                     \
 ++     "pop %rcx;"
 ++
    /* We save some registers, but not all of them; that would be too much.
     * We clobber all caller-saved registers except the argument register */
   #define PV_SAVE_REGS "pushq %%rdi;"
   #define PV_FLAGS_ARG "D"
   #endif
   
 ++/*
 ++ * Generate a thunk around a function which saves all caller-save
 ++ * registers except for the return value.  This allows C functions to
 ++ * be called from assembler code where fewer than normal registers are
 ++ * available.  It may also help code generation around calls from C
 ++ * code if the common case doesn't use many registers.
 ++ *
 ++ * When a callee is wrapped in a thunk, the caller can assume that all
 ++ * arg regs and all scratch registers are preserved across the
 ++ * call. The return value in rax/eax will not be saved, even for void
 ++ * functions.
 ++ */
 ++#define PV_CALLEE_SAVE_REGS_THUNK(func)                                      \
 ++     extern typeof(func) __raw_callee_save_##func;                   \
 ++     static void *__##func##__ __used = func;                        \
 ++                                                                     \
 ++     asm(".pushsection .text;"                                       \
 ++         "__raw_callee_save_" #func ": "                             \
 ++         PV_SAVE_ALL_CALLER_REGS                                     \
 ++         "call " #func ";"                                           \
 ++         PV_RESTORE_ALL_CALLER_REGS                                  \
 ++         "ret;"                                                      \
 ++         ".popsection")
 ++
 ++/* Get a reference to a callee-save function */
 ++#define PV_CALLEE_SAVE(func)                                         \
 ++     ((struct paravirt_callee_save) { __raw_callee_save_##func })
 ++
 ++/* Promise that "func" already uses the right calling convention */
 ++#define __PV_IS_CALLEE_SAVE(func)                    \
 ++     ((struct paravirt_callee_save) { func })
 ++
   static inline unsigned long __raw_local_save_flags(void)
   {
        unsigned long f;
   
 --     asm volatile(paravirt_alt(PV_SAVE_REGS
 --                               PARAVIRT_CALL
 --                               PV_RESTORE_REGS)
 ++     asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
 --                  : "memory", "cc" PV_VEXTRA_CLOBBERS);
 ++                  : "memory", "cc");
        return f;
   }
   
   static inline void raw_local_irq_restore(unsigned long f)
   {
 --     asm volatile(paravirt_alt(PV_SAVE_REGS
 --                               PARAVIRT_CALL
 --                               PV_RESTORE_REGS)
 ++     asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
 --                  : "memory", "cc" PV_EXTRA_CLOBBERS);
 ++                  : "memory", "cc");
   }
   
   static inline void raw_local_irq_disable(void)
   {
 --     asm volatile(paravirt_alt(PV_SAVE_REGS
 --                               PARAVIRT_CALL
 --                               PV_RESTORE_REGS)
 ++     asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
 --                  : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 ++                  : "memory", "eax", "cc");
   }
   
   static inline void raw_local_irq_enable(void)
   {
 --     asm volatile(paravirt_alt(PV_SAVE_REGS
 --                               PARAVIRT_CALL
 --                               PV_RESTORE_REGS)
 ++     asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
 --                  : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 ++                  : "memory", "eax", "cc");
   }
   
   static inline unsigned long __raw_local_irq_save(void)
        .popsection
   
   
 ++#define COND_PUSH(set, mask, reg)                    \
 ++     .if ((~(set)) & mask); push %reg; .endif
 ++#define COND_POP(set, mask, reg)                     \
 ++     .if ((~(set)) & mask); pop %reg; .endif
 ++
   #ifdef CONFIG_X86_64
 --#define PV_SAVE_REGS                         \
 --     push %rax;                              \
 --     push %rcx;                              \
 --     push %rdx;                              \
 --     push %rsi;                              \
 --     push %rdi;                              \
 --     push %r8;                               \
 --     push %r9;                               \
 --     push %r10;                              \
 --     push %r11
 --#define PV_RESTORE_REGS                              \
 --     pop %r11;                               \
 --     pop %r10;                               \
 --     pop %r9;                                \
 --     pop %r8;                                \
 --     pop %rdi;                               \
 --     pop %rsi;                               \
 --     pop %rdx;                               \
 --     pop %rcx;                               \
 --     pop %rax
 ++
 ++#define PV_SAVE_REGS(set)                    \
 ++     COND_PUSH(set, CLBR_RAX, rax);          \
 ++     COND_PUSH(set, CLBR_RCX, rcx);          \
 ++     COND_PUSH(set, CLBR_RDX, rdx);          \
 ++     COND_PUSH(set, CLBR_RSI, rsi);          \
 ++     COND_PUSH(set, CLBR_RDI, rdi);          \
 ++     COND_PUSH(set, CLBR_R8, r8);            \
 ++     COND_PUSH(set, CLBR_R9, r9);            \
 ++     COND_PUSH(set, CLBR_R10, r10);          \
 ++     COND_PUSH(set, CLBR_R11, r11)
 ++#define PV_RESTORE_REGS(set)                 \
 ++     COND_POP(set, CLBR_R11, r11);           \
 ++     COND_POP(set, CLBR_R10, r10);           \
 ++     COND_POP(set, CLBR_R9, r9);             \
 ++     COND_POP(set, CLBR_R8, r8);             \
 ++     COND_POP(set, CLBR_RDI, rdi);           \
 ++     COND_POP(set, CLBR_RSI, rsi);           \
 ++     COND_POP(set, CLBR_RDX, rdx);           \
 ++     COND_POP(set, CLBR_RCX, rcx);           \
 ++     COND_POP(set, CLBR_RAX, rax)
 ++
   #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
   #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
   #define PARA_INDIRECT(addr)  *addr(%rip)
   #else
 --#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
 --#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
 ++#define PV_SAVE_REGS(set)                    \
 ++     COND_PUSH(set, CLBR_EAX, eax);          \
 ++     COND_PUSH(set, CLBR_EDI, edi);          \
 ++     COND_PUSH(set, CLBR_ECX, ecx);          \
 ++     COND_PUSH(set, CLBR_EDX, edx)
 ++#define PV_RESTORE_REGS(set)                 \
 ++     COND_POP(set, CLBR_EDX, edx);           \
 ++     COND_POP(set, CLBR_ECX, ecx);           \
 ++     COND_POP(set, CLBR_EDI, edi);           \
 ++     COND_POP(set, CLBR_EAX, eax)
 ++
   #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
   #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
   #define PARA_INDIRECT(addr)  *%cs:addr
   
   #define DISABLE_INTERRUPTS(clobbers)                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
 --               PV_SAVE_REGS;                                         \
 ++               PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
 --               PV_RESTORE_REGS;)                     \
 ++               PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
   
   #define ENABLE_INTERRUPTS(clobbers)                                  \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
 --               PV_SAVE_REGS;                                         \
 ++               PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
 --               PV_RESTORE_REGS;)
 ++               PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
   
   #define USERGS_SYSRET32                                                      \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)
   
 ++/*
 ++ * Note: swapgs is very special, and in practice is either going to be
 ++ * implemented with a single "swapgs" instruction or something very
 ++ * special.  Either way, we don't need to save any registers for
 ++ * it.
 ++ */
   #define SWAPGS                                                               \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
 --               PV_SAVE_REGS;                                         \
 --               call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
 --               PV_RESTORE_REGS                                       \
 ++               call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )
   
   #define GET_CR2_INTO_RCX                             \
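
A hypothetical (not compile-tested) sketch of how a paravirt backend is expected to use the callee-save machinery added above; my_save_fl and my_pv_setup are made-up names, while the macros and the pv_irq_ops field are the ones defined in this hunk:

/* Hypothetical backend implementation of save_fl; a real guest would
 * report its virtual interrupt-enable state here. */
unsigned long my_save_fl(void)
{
        return X86_EFLAGS_IF;           /* pretend interrupts are enabled */
}
/* Emits __raw_callee_save_my_save_fl, which preserves every caller-save
 * register (except the return value) around the call to my_save_fl. */
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

static void __init my_pv_setup(void)
{
        /* Install the thunked version; call sites may now assume argument
         * and scratch registers survive the call. */
        pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);

        /* Functions that already honour the stricter convention (such as
         * the identity helpers) can skip the thunk via __PV_IS_CALLEE_SAVE(). */
}
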
diff --combined arch/x86/kernel/hpet.c
index 388254f69a2ad13721f87797d7aa7790126655df,cd759ad90690e72d109aed4309adeb87755977e5,5c8da2c2c185e260c7a0fe88c0588c9626302b45..a00545fe5cdd1dae730fe18031b631bd1f57d8d5
@@@@ -269,6 -269,6 -269,8 +269,8 @@@@ static void hpet_set_mode(enum clock_ev
                now = hpet_readl(HPET_COUNTER);
                cmp = now + (unsigned long) delta;
                cfg = hpet_readl(HPET_Tn_CFG(timer));
++              /* Make sure we use edge triggered interrupts */
++              cfg &= ~HPET_TN_LEVEL;
                cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
                       HPET_TN_SETVAL | HPET_TN_32BIT;
                hpet_writel(cfg, HPET_Tn_CFG(timer));
@@@@ -628,12 -628,11 -630,12 +630,12 @@@@ static int hpet_cpuhp_notify(struct not
   
        switch (action & 0xf) {
        case CPU_ONLINE:
 -              INIT_DELAYED_WORK(&work.work, hpet_work);
 +              INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
                init_completion(&work.complete);
                /* FIXME: add schedule_work_on() */
                schedule_delayed_work_on(cpu, &work.work, 0);
                wait_for_completion(&work.complete);
 +              destroy_timer_on_stack(&work.work.timer);
                break;
        case CPU_DEAD:
                if (hdev) {
@@@@ -897,21 -896,13 -899,13 +899,21 @@@@ static unsigned long hpet_rtc_flags
   static int hpet_prev_update_sec;
   static struct rtc_time hpet_alarm_time;
   static unsigned long hpet_pie_count;
 --static unsigned long hpet_t1_cmp;
 ++static u32 hpet_t1_cmp;
   static unsigned long hpet_default_delta;
   static unsigned long hpet_pie_delta;
   static unsigned long hpet_pie_limit;
   
   static rtc_irq_handler irq_handler;
   
 ++/*
 ++ * Check whether the hpet counter c1 is ahead of c2
 ++ */
 ++static inline int hpet_cnt_ahead(u32 c1, u32 c2)
 ++{
 ++     return (s32)(c2 - c1) < 0;
 ++}
 ++
   /*
     * Registers an IRQ handler.
    */
@@@@ -1083,7 -1074,7 -1077,7 +1085,7 @@@@ static void hpet_rtc_timer_reinit(void
                hpet_t1_cmp += delta;
                hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
                lost_ints++;
 --     } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
 ++     } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
   
        if (lost_ints) {
                if (hpet_rtc_flags & RTC_PIE)
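
hpet_cnt_ahead() introduced above compares the 32-bit comparator and counter through a signed difference, so the test keeps working across counter wraparound, unlike the old (long)(counter - cmp) > 0 check. A standalone sketch (with made-up values) of why the signed trick works:

#include <stdint.h>
#include <stdio.h>

static int cnt_ahead(uint32_t c1, uint32_t c2)
{
        return (int32_t)(c2 - c1) < 0;  /* same trick as hpet_cnt_ahead() */
}

int main(void)
{
        /* Comparator just past the wrap point, counter just before it:
         * c1 is still "ahead" even though it is numerically smaller. */
        uint32_t c1 = 0x00000010, c2 = 0xfffffff0;

        printf("ahead across wrap: %d\n", cnt_ahead(c1, c2));   /* 1 */
        printf("behind normally:   %d\n", cnt_ahead(100, 200)); /* 0 */
        return 0;
}
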
diff --combined arch/x86/kernel/paravirt.c
index cea11c8e30498bf102925f383e33c18608b44dcf,e4c8fb608873797c381f825cc874c5a4e0506e1b,c6520a4e85d4fe3479070d03867bfd65e50dd081..6dc4dca255e42e097455e2f6ec75bf0546682033
@@@@ -44,17 -44,6 -44,6 +44,17 @@@@ void _paravirt_nop(void
   {
   }
   
 ++/* identity function, which can be inlined */
 ++u32 _paravirt_ident_32(u32 x)
 ++{
 ++     return x;
 ++}
 ++
 ++u64 _paravirt_ident_64(u64 x)
 ++{
 ++     return x;
 ++}
 ++
   static void __init default_banner(void)
   {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@@@ -149,16 -138,9 -138,9 +149,16 @@@@ unsigned paravirt_patch_default(u8 type
        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
 --     else if (opfunc == paravirt_nop)
 ++     else if (opfunc == _paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();
 ++
 ++     /* identity functions just return their single argument */
 ++     else if (opfunc == _paravirt_ident_32)
 ++             ret = paravirt_patch_ident_32(insnbuf, len);
 ++     else if (opfunc == _paravirt_ident_64)
 ++             ret = paravirt_patch_ident_64(insnbuf, len);
 ++
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@@@ -286,6 -268,6 -268,32 +286,32 @@@@ enum paravirt_lazy_mode paravirt_get_la
        return __get_cpu_var(paravirt_lazy_mode);
   }
   
++ void arch_flush_lazy_mmu_mode(void)
++ {
++      preempt_disable();
++ 
++      if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
++              WARN_ON(preempt_count() == 1);
++              arch_leave_lazy_mmu_mode();
++              arch_enter_lazy_mmu_mode();
++      }
++ 
++      preempt_enable();
++ }
++ 
++ void arch_flush_lazy_cpu_mode(void)
++ {
++      preempt_disable();
++ 
++      if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
++              WARN_ON(preempt_count() == 1);
++              arch_leave_lazy_cpu_mode();
++              arch_enter_lazy_cpu_mode();
++      }
++ 
++      preempt_enable();
++ }
++ 
   struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
@@@@ -310,10 -292,10 -318,10 +336,10 @@@@ struct pv_time_ops pv_time_ops = 
   
   struct pv_irq_ops pv_irq_ops = {
        .init_IRQ = native_init_IRQ,
 --     .save_fl = native_save_fl,
 --     .restore_fl = native_restore_fl,
 --     .irq_disable = native_irq_disable,
 --     .irq_enable = native_irq_enable,
 ++     .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
 ++     .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
 ++     .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
 ++     .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
   #ifdef CONFIG_X86_64
@@@@ -391,14 -373,6 -399,6 +417,14 @@@@ struct pv_apic_ops pv_apic_ops = 
   #endif
   };
   
 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
 ++/* 32-bit pagetable entries */
 ++#define PTE_IDENT    __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
 ++#else
 ++/* 64-bit pagetable entries */
 ++#define PTE_IDENT    __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
 ++#endif
 ++
   struct pv_mmu_ops pv_mmu_ops = {
   #ifndef CONFIG_X86_64
        .pagetable_setup_start = native_pagetable_setup_start,
        .pmd_clear = native_pmd_clear,
   #endif
        .set_pud = native_set_pud,
 --     .pmd_val = native_pmd_val,
 --     .make_pmd = native_make_pmd,
 ++
 ++     .pmd_val = PTE_IDENT,
 ++     .make_pmd = PTE_IDENT,
   
   #if PAGETABLE_LEVELS == 4
 --     .pud_val = native_pud_val,
 --     .make_pud = native_make_pud,
 ++     .pud_val = PTE_IDENT,
 ++     .make_pud = PTE_IDENT,
 ++
        .set_pgd = native_set_pgd,
   #endif
   #endif /* PAGETABLE_LEVELS >= 3 */
   
 --     .pte_val = native_pte_val,
 --     .pte_flags = native_pte_flags,
 --     .pgd_val = native_pgd_val,
 ++     .pte_val = PTE_IDENT,
 ++     .pgd_val = PTE_IDENT,
   
 --     .make_pte = native_make_pte,
 --     .make_pgd = native_make_pgd,
 ++     .make_pte = PTE_IDENT,
 ++     .make_pgd = PTE_IDENT,
   
        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
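
paravirt_patch_default() above now recognises the two identity helpers by comparing the installed function pointer against _paravirt_ident_32/_paravirt_ident_64, just as it already recognised _paravirt_nop. A simplified, standalone sketch of that pointer-compare dispatch; the patch_* bodies below only print what the real patchers would emit:

#include <stdint.h>
#include <stdio.h>

static uint64_t ident_64(uint64_t x) { return x; }      /* mirrors _paravirt_ident_64 */
static void nop(void) { }                               /* mirrors _paravirt_nop */

/* Stand-in "patchers": in the kernel these emit machine code into insnbuf. */
static unsigned patch_nop(void)      { puts("patched: nop");            return 0; }
static unsigned patch_ident_64(void) { puts("patched: mov %rdi,%rax");  return 3; }
static unsigned patch_call(void)     { puts("patched: call *opfunc");   return 5; }

static unsigned patch_default(void *opfunc)
{
        if (opfunc == (void *)nop)
                return patch_nop();
        else if (opfunc == (void *)ident_64)
                return patch_ident_64();
        return patch_call();
}

int main(void)
{
        patch_default((void *)ident_64);        /* e.g. pte_val/make_pte on native hw */
        patch_default((void *)nop);
        return 0;
}
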
diff --combined arch/x86/kernel/ptrace.c
index 7ec39ab37a2d0c09cf99c78284d540300f0c9115,0a5df5f82fb9579b1be4e824aa499ac561d9ba10,5a4c23d898926149e65b8cb5d826b7e868f4a527..d2f7cd5b2c8378e057152846a98dedde59535086
@@@@ -75,7 -75,10 -75,10 +75,7 @@@@ static inline bool invalid_selector(u1
   static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
   {
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
 --     regno >>= 2;
 --     if (regno > FS)
 --             --regno;
 --     return &regs->bx + regno;
 ++     return &regs->bx + (regno >> 2);
   }
   
   static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
        if (offset != offsetof(struct user_regs_struct, gs))
                retval = *pt_regs_access(task_pt_regs(task), offset);
        else {
 --             retval = task->thread.gs;
                if (task == current)
 --                     savesegment(gs, retval);
 ++                     retval = get_user_gs(task_pt_regs(task));
 ++             else
 ++                     retval = task_user_gs(task);
        }
        return retval;
   }
@@@@ -124,10 -126,13 -126,13 +124,10 @@@@ static int set_segment_reg(struct task_
                break;
   
        case offsetof(struct user_regs_struct, gs):
 --             task->thread.gs = value;
                if (task == current)
 --                     /*
 --                      * The user-mode %gs is not affected by
 --                      * kernel entry, so we must update the CPU.
 --                      */
 --                     loadsegment(gs, value);
 ++                     set_user_gs(task_pt_regs(task), value);
 ++             else
 ++                     task_user_gs(task) = value;
        }
   
        return 0;
@@@@ -805,12 -810,12 -810,16 +805,16 @@@@ static void ptrace_bts_untrace(struct t
   
   static void ptrace_bts_detach(struct task_struct *child)
   {
--      if (unlikely(child->bts)) {
--              ds_release_bts(child->bts);
--              child->bts = NULL;
-- 
--              ptrace_bts_free_buffer(child);
--      }
++      /*
++       * Ptrace_detach() races with ptrace_untrace() in case
++       * the child dies and is reaped by another thread.
++       *
++       * We only do the memory accounting at this point and
++       * leave the buffer deallocation and the bts tracer
++       * release to ptrace_bts_untrace() which will be called
++       * later on with tasklist_lock held.
++       */
++      release_locked_buffer(child->bts_buffer, child->bts_size);
   }
   #else
   static inline void ptrace_bts_fork(struct task_struct *tsk) {}
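
The simplified pt_regs_access() above works because struct pt_regs and the user-visible struct user_regs_struct lay out the general registers in the same order starting at bx, so the byte offset divided by the register size indexes straight into pt_regs. A toy, self-contained sketch of that mapping with mocked structures (the demo_* names are made up; the real 32-bit code shifts by 2 because user slots are 4 bytes wide):

#include <stddef.h>
#include <stdio.h>

/* Mock register layouts; only the shared ordering matters for the demo. */
struct demo_user_regs { unsigned long bx, cx, dx, si, di; };
struct demo_pt_regs   { unsigned long bx, cx, dx, si, di; };

static unsigned long *regs_access(struct demo_pt_regs *regs, unsigned long offset)
{
        /* offset is a byte offset into demo_user_regs; each slot is one word */
        return &regs->bx + offset / sizeof(unsigned long);
}

int main(void)
{
        struct demo_pt_regs regs = { .bx = 1, .cx = 2, .dx = 3, .si = 4, .di = 5 };

        printf("dx = %lu\n",
               *regs_access(&regs, offsetof(struct demo_user_regs, dx)));  /* 3 */
        return 0;
}
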
diff --combined arch/x86/mm/ioremap.c
index 1448bcb7f22f709e38e53d13ada5483f6ce04586,bd85d42819e1eadad218a266cdade7e00bf7659a,f45d5e29a72edf4e15616d48c0d174f7905ac417..433f7bd4648af4206fbc4afb0b2fd6b6dab59b6e
@@@@ -134,25 -134,25 -134,6 +134,6 @@@@ int page_is_ram(unsigned long pagenr
        return 0;
   }
   
-- int pagerange_is_ram(unsigned long start, unsigned long end)
-- {
--      int ram_page = 0, not_rampage = 0;
--      unsigned long page_nr;
-- 
--      for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
--           ++page_nr) {
--              if (page_is_ram(page_nr))
--                      ram_page = 1;
--              else
--                      not_rampage = 1;
-- 
--              if (ram_page == not_rampage)
--                      return -1;
--      }
-- 
--      return ram_page;
-- }
-- 
   /*
    * Fix up the linear direct mapping of the kernel to avoid cache attribute
    * conflicts.
@@@@ -367,7 -367,7 -348,7 +348,7 @@@@ EXPORT_SYMBOL(ioremap_nocache)
    *
    * Must be freed with iounmap.
    */
 --void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 ++void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
   {
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
@@@@ -557,9 -557,34 -538,9 +538,9 @@@@ void __init early_ioremap_init(void
        }
   }
   
 - void __init early_ioremap_clear(void)
 - {
 -      pmd_t *pmd;
 - 
 -      if (early_ioremap_debug)
 -              printk(KERN_INFO "early_ioremap_clear()\n");
 - 
 -      pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
 -      pmd_clear(pmd);
 -      paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
 -      __flush_tlb_all();
 - }
 - 
   void __init early_ioremap_reset(void)
   {
 -      enum fixed_addresses idx;
 -      unsigned long addr, phys;
 -      pte_t *pte;
 - 
        after_paging_init = 1;
 -      for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
 -              addr = fix_to_virt(idx);
 -              pte = early_ioremap_pte(addr);
 -              if (pte_present(*pte)) {
 -                      phys = pte_val(*pte) & PAGE_MASK;
 -                      set_fixmap(idx, phys);
 -              }
 -      }
   }
   
   static void __init __early_set_fixmap(enum fixed_addresses idx,
diff --combined arch/x86/mm/pat.c
index 9127e31c726866d462d0da61d03ceade383ced4e,8b08fb955274148454a2acaa538c78215322e655,aebbf67a79d02326724e91065180675ecb525637..05f9aef6818a14f02c87b7a4e4cf6a0d5d455b71
   #ifdef CONFIG_X86_PAT
   int __read_mostly pat_enabled = 1;
   
 --void __cpuinit pat_disable(char *reason)
 ++void __cpuinit pat_disable(const char *reason)
   {
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
@@@@ -42,11 -42,6 -42,6 +42,11 @@@@ static int __init nopat(char *str
        return 0;
   }
   early_param("nopat", nopat);
 ++#else
 ++static inline void pat_disable(const char *reason)
 ++{
 ++     (void)reason;
 ++}
   #endif
   
   
@@@@ -83,20 -78,16 -78,16 +83,20 @@@@ void pat_init(void
        if (!pat_enabled)
                return;
   
 --     /* Paranoia check. */
 --     if (!cpu_has_pat && boot_pat_state) {
 --             /*
 --              * If this happens we are on a secondary CPU, but
 --              * switched to PAT on the boot CPU. We have no way to
 --              * undo PAT.
 --              */
 --             printk(KERN_ERR "PAT enabled, "
 --                    "but not supported by secondary CPU\n");
 --             BUG();
 ++     if (!cpu_has_pat) {
 ++             if (!boot_pat_state) {
 ++                     pat_disable("PAT not supported by CPU.");
 ++                     return;
 ++             } else {
 ++                     /*
 ++                      * If this happens we are on a secondary CPU, but
 ++                      * switched to PAT on the boot CPU. We have no way to
 ++                      * undo PAT.
 ++                      */
 ++                     printk(KERN_ERR "PAT enabled, "
 ++                            "but not supported by secondary CPU\n");
 ++                     BUG();
 ++             }
        }
   
        /* Set PWT to Write-Combining. All other bits stay the same */
@@@@ -220,6 -211,6 -211,33 +220,33 @@@@ chk_conflict(struct memtype *new, struc
   static struct memtype *cached_entry;
   static u64 cached_start;
   
++ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
++ {
++      int ram_page = 0, not_rampage = 0;
++      unsigned long page_nr;
++ 
++      for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
++           ++page_nr) {
++              /*
++               * For legacy reasons, the physical address range in the legacy ISA
++               * region is tracked as non-RAM. This allows users of
++               * /dev/mem to map portions of the legacy ISA region, even when
++               * some of those portions are listed (or not even listed) with
++               * different e820 types (RAM/reserved/...).
++               */
++              if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
++                  page_is_ram(page_nr))
++                      ram_page = 1;
++              else
++                      not_rampage = 1;
++ 
++              if (ram_page == not_rampage)
++                      return -1;
++      }
++ 
++      return ram_page;
++ }
++ 
   /*
    * For RAM pages, mark the pages as non WB memory type using
    * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@@@ -342,23 -333,11 -360,15 +369,15 @@@@ int reserve_memtype(u64 start, u64 end
                                              req_type & _PAGE_CACHE_MASK);
        }
   
 -      is_range_ram = pagerange_is_ram(start, end);
 +      if (new_type)
 +              *new_type = actual_type;
 + 
-       /*
-        * For legacy reasons, some parts of the physical address range in the
-        * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-        * the e820 tables).  So we will track the memory attributes of this
-        * legacy 1MB region using the linear memtype_list always.
-        */
-       if (end >= ISA_END_ADDRESS) {
-               is_range_ram = pagerange_is_ram(start, end);
-               if (is_range_ram == 1)
-                       return reserve_ram_pages_type(start, end, req_type,
-                                                     new_type);
-               else if (is_range_ram < 0)
-                       return -EINVAL;
-       }
++      is_range_ram = pat_pagerange_is_ram(start, end);
+       if (is_range_ram == 1)
 -              return reserve_ram_pages_type(start, end, req_type, new_type);
++              return reserve_ram_pages_type(start, end, req_type,
++                                            new_type);
+       else if (is_range_ram < 0)
+               return -EINVAL;
   
        new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
        new->end        = end;
        new->type       = actual_type;
   
 -      if (new_type)
 -              *new_type = actual_type;
 - 
        spin_lock(&memtype_lock);
   
        if (cached_entry && start >= cached_start)
@@@@ -455,19 -437,11 -465,11 +474,11 @@@@ int free_memtype(u64 start, u64 end
        if (is_ISA_range(start, end - 1))
                return 0;
   
-       /*
-        * For legacy reasons, some parts of the physical address range in the
-        * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-        * the e820 tables).  So we will track the memory attributes of this
-        * legacy 1MB region using the linear memtype_list always.
-        */
-       if (end >= ISA_END_ADDRESS) {
-               is_range_ram = pagerange_is_ram(start, end);
-               if (is_range_ram == 1)
-                       return free_ram_pages_type(start, end);
-               else if (is_range_ram < 0)
-                       return -EINVAL;
-       }
 -      is_range_ram = pagerange_is_ram(start, end);
++      is_range_ram = pat_pagerange_is_ram(start, end);
+       if (is_range_ram == 1)
+               return free_ram_pages_type(start, end);
+       else if (is_range_ram < 0)
+               return -EINVAL;
   
        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
@@@@ -635,17 -609,17 -637,13 +646,13 @@@@ static int reserve_pfn_range(u64 paddr
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
   
--      is_ram = pagerange_is_ram(paddr, paddr + size);
++      is_ram = pat_pagerange_is_ram(paddr, paddr + size);
   
--      if (is_ram != 0) {
--              /*
--               * For mapping RAM pages, drivers need to call
--               * set_memory_[uc|wc|wb] directly, for reserve and free, before
--               * setting up the PTE.
--               */
--              WARN_ON_ONCE(1);
--              return 0;
--      }
++      /*
++       * reserve_pfn_range() doesn't support RAM pages.
++       */
++      if (is_ram != 0)
++              return -EINVAL;
   
        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
@@@@ -702,7 -676,7 -700,7 +709,7 @@@@ static void free_pfn_range(u64 paddr, u
   {
        int is_ram;
   
--      is_ram = pagerange_is_ram(paddr, paddr + size);
++      is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
   }
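
pat_pagerange_is_ram() above folds the old pagerange_is_ram() into pat.c and returns 1 for an all-RAM range, 0 for an all-non-RAM range, and -1 for a mixed range (with the legacy ISA pages counted as non-RAM). A standalone sketch of that three-way convention, using a fake RAM map instead of the e820-backed page_is_ram():

#include <stdio.h>

/* Fake memory map: pages 16..31 are "RAM", everything else is not. */
static int fake_page_is_ram(unsigned long page_nr)
{
        return page_nr >= 16 && page_nr < 32;
}

/* 1 = all RAM, 0 = no RAM, -1 = mixed (same convention as the kernel helper) */
static int range_is_ram(unsigned long start_pg, unsigned long end_pg)
{
        int ram = 0, not_ram = 0;
        unsigned long pg;

        for (pg = start_pg; pg < end_pg; pg++) {
                if (fake_page_is_ram(pg))
                        ram = 1;
                else
                        not_ram = 1;
                if (ram && not_ram)
                        return -1;
        }
        return ram;
}

int main(void)
{
        printf("%d %d %d\n",
               range_is_ram(16, 32),    /*  1: all RAM     */
               range_is_ram(0, 8),      /*  0: no RAM      */
               range_is_ram(8, 24));    /* -1: mixed range */
        return 0;
}
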
diff --combined include/linux/mm.h
index 323561582c100bf1b18c3a3b6084eda970f89eeb,e8ddc98b8405fdf7c9d32680731e0d5738bb97e1,3d7fb44d7d7ec295886c9805a00da1ef34038ba3..7dc04ff5ab89ab12acc55201875f36f03b0477fa
@@@@ -1129,7 -1129,8 -1129,8 +1129,7 @@@@ extern unsigned long do_mmap_pgoff(stru
        unsigned long flag, unsigned long pgoff);
   extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, unsigned long flags,
 --     unsigned int vm_flags, unsigned long pgoff,
 --     int accountable);
 ++     unsigned int vm_flags, unsigned long pgoff);
   
   static inline unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
@@@@ -1304,5 -1305,5 -1305,6 +1304,6 @@@@ void vmemmap_populate_print_last(void)
   
   extern void *alloc_locked_buffer(size_t size);
   extern void free_locked_buffer(void *buffer, size_t size);
++ extern void release_locked_buffer(void *buffer, size_t size);
   #endif /* __KERNEL__ */
   #endif /* _LINUX_MM_H */
diff --combined mm/mlock.c
index 037161d61b4e72432d9d6bae71507d251b522afc,2904a347e4761169655696120751c371e4d74959,2b57f7e603906aec66aa7f8bdfd0861fd04ccb8f..cbe9e0581b75dcaf06335ccc017a68789d6247d2
@@@@ -294,10 -294,14 -294,10 +294,10 @@@@ static inline int __mlock_posix_error_r
    *
    * return number of pages [> 0] to be removed from locked_vm on success
    * of "special" vmas.
 -  *
 -  * return negative error if vma spanning @start-@range disappears while
 -  * mmap semaphore is dropped.  Unlikely?
    */
   long mlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
   {
 -      struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) / PAGE_SIZE;
        BUG_ON(!(vma->vm_flags & VM_LOCKED));
   
        if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
 -              long error;
 -              downgrade_write(&mm->mmap_sem);
   
 -              error = __mlock_vma_pages_range(vma, start, end, 1);
  -             return __mlock_vma_pages_range(vma, start, end, 1);
 ++             __mlock_vma_pages_range(vma, start, end, 1);
  +
 -              up_read(&mm->mmap_sem);
 -              /* vma can change or disappear */
 -              down_write(&mm->mmap_sem);
 -              vma = find_vma(mm, start);
 -              /* non-NULL vma must contain @start, but need to check @end */
 -              if (!vma ||  end > vma->vm_end)
 -                      return -ENOMEM;
 - 
 -              return 0;       /* hide other errors from mmap(), et al */
 ++             /* Hide errors from mmap() and other callers */
 ++             return 0;
        }
   
        /*
@@@@ -425,14 -438,41 -422,14 +425,14 @@@@ success
        vma->vm_flags = newflags;
   
        if (lock) {
 -              /*
 -               * mmap_sem is currently held for write.  Downgrade the write
 -               * lock to a read lock so that other faults, mmap scans, ...
 -               * while we fault in all pages.
 -               */
 -              downgrade_write(&mm->mmap_sem);
 - 
                ret = __mlock_vma_pages_range(vma, start, end, 1);
   
 -              /*
 -               * Need to reacquire mmap sem in write mode, as our callers
 -               * expect this.  We have no support for atomically upgrading
 -               * a sem to write, so we need to check for ranges while sem
 -               * is unlocked.
 -               */
 -              up_read(&mm->mmap_sem);
 -              /* vma can change or disappear */
 -              down_write(&mm->mmap_sem);
 -              *prev = find_vma(mm, start);
 -              /* non-NULL *prev must contain @start, but need to check @end */
 -              if (!(*prev) || end > (*prev)->vm_end)
 -                      ret = -ENOMEM;
 -              else if (ret > 0) {
 +              if (ret > 0) {
                        mm->locked_vm -= ret;
                        ret = 0;
                } else
                        ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
 -              /*
 -               * TODO:  for unlocking, pages will already be resident, so
 -               * we don't need to wait for allocations/reclaim/pagein, ...
 -               * However, unlocking a very large region can still take a
 -               * while.  Should we downgrade the semaphore for both lock
 -               * AND unlock ?
 -               */
                __mlock_vma_pages_range(vma, start, end, 0);
        }
   
@@@@ -660,7 -700,7 -657,7 +660,7 @@@@ void *alloc_locked_buffer(size_t size
        return buffer;
   }
   
-- void free_locked_buffer(void *buffer, size_t size)
++ void release_locked_buffer(void *buffer, size_t size)
   {
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
   
        current->mm->locked_vm -= pgsz;
   
        up_write(&current->mm->mmap_sem);
++ }
++ 
++ void free_locked_buffer(void *buffer, size_t size)
++ {
++      release_locked_buffer(buffer, size);
   
        kfree(buffer);
   }
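
free_locked_buffer() is now split so that release_locked_buffer() undoes only the locked_vm accounting while the kfree() stays in free_locked_buffer(); ptrace_bts_detach() in this same commit relies on that split to account at detach time and leave the actual freeing to the later untrace path. A standalone sketch of the same decomposition (all names and the counter are stand-ins):

#include <stdio.h>
#include <stdlib.h>

static long locked_pages;               /* stand-in for current->mm->locked_vm */

static void release_buffer(void *buf, size_t pages)
{
        locked_pages -= pages;          /* undo only the accounting */
        (void)buf;
}

static void free_buffer(void *buf, size_t pages)
{
        release_buffer(buf, pages);     /* accounting ... */
        free(buf);                      /* ... plus the actual free */
}

int main(void)
{
        void *a = malloc(4096), *b = malloc(4096);

        locked_pages = 2;
        release_buffer(a, 1);   /* detach-like path: account now, free later */
        free_buffer(b, 1);      /* normal path: both at once */
        free(a);
        printf("locked_pages = %ld\n", locked_pages);   /* 0 */
        return 0;
}
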