From: Ingo Molnar Date: Mon, 6 Oct 2008 14:15:57 +0000 (+0200) Subject: Merge branch 'x86/header-guards' into x86-v28-for-linus-phase1 X-Git-Tag: v2.6.28-rc1~734^2~4 X-Git-Url: http://www.pilppa.org/gitweb/gitweb.cgi?a=commitdiff_plain;h=b8cd9d056bbc5f2630ab1787dbf76f83bbb517c0;hp=-c;p=linux-2.6-omap-h63xx.git Merge branch 'x86/header-guards' into x86-v28-for-linus-phase1 Conflicts: include/asm-x86/dma-mapping.h include/asm-x86/gpio.h include/asm-x86/idle.h include/asm-x86/kvm_host.h include/asm-x86/namei.h include/asm-x86/uaccess.h Signed-off-by: Ingo Molnar --- b8cd9d056bbc5f2630ab1787dbf76f83bbb517c0 diff --combined include/asm-x86/acpi.h index 35d1743b57a,bd76299586b..392e17336be --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_ACPI_H - #define _ASM_X86_ACPI_H + #ifndef ASM_X86__ACPI_H + #define ASM_X86__ACPI_H /* * Copyright (C) 2001 Paul Diefenbaugh @@@ -140,8 -140,6 +140,8 @@@ static inline unsigned int acpi_process boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A) return 1; + else if (boot_cpu_has(X86_FEATURE_AMDC1E)) + return 1; else return max_cstate; } @@@ -175,4 -173,4 +175,4 @@@ static inline void acpi_fake_nodes(cons #define acpi_unlazy_tlb(x) leave_mm(x) - #endif /*__X86_ASM_ACPI_H*/ + #endif /* ASM_X86__ACPI_H */ diff --combined include/asm-x86/amd_iommu_types.h index dcc81206739,e6b4d5b0837..1ffa4e53c98 --- a/include/asm-x86/amd_iommu_types.h +++ b/include/asm-x86/amd_iommu_types.h @@@ -17,8 -17,8 +17,8 @@@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - #ifndef __AMD_IOMMU_TYPES_H__ - #define __AMD_IOMMU_TYPES_H__ + #ifndef ASM_X86__AMD_IOMMU_TYPES_H + #define ASM_X86__AMD_IOMMU_TYPES_H #include #include @@@ -31,6 -31,9 +31,6 @@@ #define ALIAS_TABLE_ENTRY_SIZE 2 #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) -/* helper macros */ -#define LOW_U32(x) ((x) & ((1ULL << 32)-1)) - /* Length of the MMIO region for the AMD IOMMU */ #define MMIO_REGION_LENGTH 0x4000 @@@ -66,9 -69,6 +66,9 @@@ #define MMIO_EVT_TAIL_OFFSET 0x2018 #define MMIO_STATUS_OFFSET 0x2020 +/* MMIO status bits */ +#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 + /* feature control bits */ #define CONTROL_IOMMU_EN 0x00ULL #define CONTROL_HT_TUN_EN 0x01ULL @@@ -89,7 -89,6 +89,7 @@@ #define CMD_INV_IOMMU_PAGES 0x03 #define CMD_COMPL_WAIT_STORE_MASK 0x01 +#define CMD_COMPL_WAIT_INT_MASK 0x02 #define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 #define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 @@@ -100,7 -99,6 +100,7 @@@ #define DEV_ENTRY_TRANSLATION 0x01 #define DEV_ENTRY_IR 0x3d #define DEV_ENTRY_IW 0x3e +#define DEV_ENTRY_NO_PAGE_FAULT 0x62 #define DEV_ENTRY_EX 0x67 #define DEV_ENTRY_SYSMGT1 0x68 #define DEV_ENTRY_SYSMGT2 0x69 @@@ -341,4 -339,4 +341,4 @@@ static inline u16 calc_devid(u8 bus, u return (((u16)bus) << 8) | devfn; } - #endif + #endif /* ASM_X86__AMD_IOMMU_TYPES_H */ diff --combined include/asm-x86/atomic_64.h index 91c7d03e65b,ebbc753af6a..2cb218c4a35 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef __ARCH_X86_64_ATOMIC__ - #define __ARCH_X86_64_ATOMIC__ + #ifndef ASM_X86__ATOMIC_64_H + #define ASM_X86__ATOMIC_64_H #include #include @@@ -228,7 -228,7 +228,7 @@@ static inline void atomic64_add(long i { asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) - : "ir" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter)); } /** @@@ -242,7 -242,7 +242,7 @@@ static inline void atomic64_sub(long i { asm volatile(LOCK_PREFIX "subq %1,%0" : "=m" (v->counter) - : "ir" (i), 
"m" (v->counter)); + : "er" (i), "m" (v->counter)); } /** @@@ -260,7 -260,7 +260,7 @@@ static inline int atomic64_sub_and_test asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" : "=m" (v->counter), "=qm" (c) - : "ir" (i), "m" (v->counter) : "memory"); + : "er" (i), "m" (v->counter) : "memory"); return c; } @@@ -341,7 -341,7 +341,7 @@@ static inline int atomic64_add_negative asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" : "=m" (v->counter), "=qm" (c) - : "ir" (i), "m" (v->counter) : "memory"); + : "er" (i), "m" (v->counter) : "memory"); return c; } @@@ -470,4 -470,4 +470,4 @@@ static inline void atomic_or_long(unsig #define smp_mb__after_atomic_inc() barrier() #include - #endif + #endif /* ASM_X86__ATOMIC_64_H */ diff --combined include/asm-x86/cpufeature.h index cfcfb0a806b,2f3143e4514..250fa0cb144 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@@ -1,8 -1,8 +1,8 @@@ /* * Defines x86 CPU feature bits */ - #ifndef _ASM_X86_CPUFEATURE_H - #define _ASM_X86_CPUFEATURE_H + #ifndef ASM_X86__CPUFEATURE_H + #define ASM_X86__CPUFEATURE_H #include @@@ -72,16 -72,14 +72,16 @@@ #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ +#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ +#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ -#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ +#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ +#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ @@@ -93,7 -91,6 +93,7 @@@ #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ @@@ -192,7 -189,6 +192,7 @@@ extern const char * const x86_power_fla #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) +#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 @@@ -224,4 -220,4 +224,4 @@@ #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ - #endif /* _ASM_X86_CPUFEATURE_H */ + #endif /* ASM_X86__CPUFEATURE_H */ diff --combined include/asm-x86/device.h index 
3c034f48fdb,dbe88554389..1bece04c7d9 --- a/include/asm-x86/device.h +++ b/include/asm-x86/device.h @@@ -1,16 -1,13 +1,16 @@@ - #ifndef _ASM_X86_DEVICE_H - #define _ASM_X86_DEVICE_H + #ifndef ASM_X86__DEVICE_H + #define ASM_X86__DEVICE_H struct dev_archdata { #ifdef CONFIG_ACPI void *acpi_handle; #endif +#ifdef CONFIG_X86_64 +struct dma_mapping_ops *dma_ops; +#endif #ifdef CONFIG_DMAR void *iommu; /* hook for IOMMU specific extension */ #endif }; - #endif /* _ASM_X86_DEVICE_H */ + #endif /* ASM_X86__DEVICE_H */ diff --combined include/asm-x86/dma-mapping.h index ad9cd6d49bf,71b6f7d22e9..5d200e78bd8 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_DMA_MAPPING_H_ - #define _ASM_DMA_MAPPING_H_ + #ifndef ASM_X86__DMA_MAPPING_H + #define ASM_X86__DMA_MAPPING_H /* * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for @@@ -17,8 -17,7 +17,8 @@@ extern int panic_on_overflow extern int force_iommu; struct dma_mapping_ops { - int (*mapping_error)(dma_addr_t dma_addr); + int (*mapping_error)(struct device *dev, + dma_addr_t dma_addr); void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void (*free_coherent)(struct device *dev, size_t size, @@@ -57,32 -56,14 +57,32 @@@ int is_phys; }; -extern const struct dma_mapping_ops *dma_ops; +extern struct dma_mapping_ops *dma_ops; -static inline int dma_mapping_error(dma_addr_t dma_addr) +static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) { - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dma_addr); +#ifdef CONFIG_X86_32 + return dma_ops; +#else + if (unlikely(!dev) || !dev->archdata.dma_ops) + return dma_ops; + else + return dev->archdata.dma_ops; +#endif +} + +/* Make sure we keep the same behaviour */ +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ +#ifdef CONFIG_X86_32 + return 0; +#else + struct dma_mapping_ops *ops = get_dma_ops(dev); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); return (dma_addr == bad_dma_address); +#endif } #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) @@@ -102,53 -83,44 +102,53 @@@ static inline dma_addr_ dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction); + return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); } static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(dev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->unmap_single) - dma_ops->unmap_single(dev, addr, size, direction); + if (ops->unmap_single) + ops->unmap_single(dev, addr, size, direction); } static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - return dma_ops->map_sg(hwdev, sg, nents, direction); + return ops->map_sg(hwdev, sg, nents, direction); } static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->unmap_sg) - dma_ops->unmap_sg(hwdev, sg, nents, direction); + if (ops->unmap_sg) + ops->unmap_sg(hwdev, sg, nents, direction); } 
static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_for_cpu) - dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, - direction); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); flush_write_buffers(); } @@@ -156,11 -128,10 +156,11 @@@ static inline voi dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_for_device) - dma_ops->sync_single_for_device(hwdev, dma_handle, size, - direction); + if (ops->sync_single_for_device) + ops->sync_single_for_device(hwdev, dma_handle, size, direction); flush_write_buffers(); } @@@ -168,12 -139,11 +168,12 @@@ static inline voi dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { - BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_range_for_cpu) - dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, - size, direction); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); + if (ops->sync_single_range_for_cpu) + ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, + size, direction); flush_write_buffers(); } @@@ -182,12 -152,11 +182,12 @@@ dma_sync_single_range_for_device(struc unsigned long offset, size_t size, int direction) { - BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_range_for_device) - dma_ops->sync_single_range_for_device(hwdev, dma_handle, - offset, size, direction); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); + if (ops->sync_single_range_for_device) + ops->sync_single_range_for_device(hwdev, dma_handle, + offset, size, direction); flush_write_buffers(); } @@@ -195,11 -164,9 +195,11 @@@ static inline voi dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_sg_for_cpu) - dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); } @@@ -207,11 -174,9 +207,11 @@@ static inline voi dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_sg_for_device) - dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(hwdev, sg, nelems, direction); flush_write_buffers(); } @@@ -220,11 -185,9 +220,11 @@@ static inline dma_addr_t dma_map_page(s size_t offset, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(dev); + BUG_ON(!valid_dma_direction(direction)); - return dma_ops->map_single(dev, page_to_phys(page)+offset, - size, direction); + return ops->map_single(dev, page_to_phys(page) + offset, + size, direction); } static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, @@@ -249,5 -212,25 +249,5 @@@ static inline int dma_get_cache_alignme #define dma_is_consistent(d, h) (1) -#ifdef CONFIG_X86_32 -# define 
ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -struct dma_coherent_mem { - void *virt_base; - u32 device_base; - int size; - int flags; - unsigned long *bitmap; -}; - -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); - -extern void -dma_release_declared_memory(struct device *dev); - -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); -#endif /* CONFIG_X86_32 */ +#include - #endif + #endif /* ASM_X86__DMA_MAPPING_H */ diff --combined include/asm-x86/efi.h index d4f2b0abe92,69c7b7ab43e..ed2de22e870 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_EFI_H - #define _ASM_X86_EFI_H + #ifndef ASM_X86__EFI_H + #define ASM_X86__EFI_H #ifdef CONFIG_X86_32 @@@ -86,7 -86,7 +86,7 @@@ extern u64 efi_call6(void *fp, u64 arg1 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) -extern void *efi_ioremap(unsigned long addr, unsigned long size); +extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); #endif /* CONFIG_X86_32 */ @@@ -94,4 -94,4 +94,4 @@@ extern void efi_reserve_early(void) extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); - #endif + #endif /* ASM_X86__EFI_H */ diff --combined include/asm-x86/genapic_32.h index 754d635f90f,4904c672e4f..34280f02766 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_GENAPIC_H - #define _ASM_GENAPIC_H 1 + #ifndef ASM_X86__GENAPIC_32_H + #define ASM_X86__GENAPIC_32_H #include @@@ -118,7 -118,6 +118,7 @@@ enum uv_system_type {UV_NONE, UV_LEGACY #define get_uv_system_type() UV_NONE #define is_uv_system() 0 #define uv_wakeup_secondary(a, b) 1 +#define uv_system_init() do {} while (0) - #endif + #endif /* ASM_X86__GENAPIC_32_H */ diff --combined include/asm-x86/genapic_64.h index a47d6312913,381a09d19d5..25097a8cc5e --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_GENAPIC_H - #define _ASM_GENAPIC_H 1 + #ifndef ASM_X86__GENAPIC_64_H + #define ASM_X86__GENAPIC_64_H /* * Copyright 2004 James Cleverdon, IBM. @@@ -42,9 -42,8 +42,9 @@@ extern int is_uv_system(void) extern struct genapic apic_x2apic_uv_x; DECLARE_PER_CPU(int, x2apic_extra_bits); extern void uv_cpu_init(void); +extern void uv_system_init(void); extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); extern void setup_apic_routing(void); - #endif + #endif /* ASM_X86__GENAPIC_64_H */ diff --combined include/asm-x86/geode.h index 2c1cda0b8a8,1ef738e01a0..3f3444be263 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h @@@ -7,8 -7,8 +7,8 @@@ * as published by the Free Software Foundation. 
*/ - #ifndef _ASM_GEODE_H_ - #define _ASM_GEODE_H_ + #ifndef ASM_X86__GEODE_H + #define ASM_X86__GEODE_H #include #include @@@ -50,7 -50,6 +50,7 @@@ extern int geode_get_dev_base(unsigned #define MSR_PIC_YSEL_HIGH 0x51400021 #define MSR_PIC_ZSEL_LOW 0x51400022 #define MSR_PIC_ZSEL_HIGH 0x51400023 +#define MSR_PIC_IRQM_LPC 0x51400025 #define MSR_MFGPT_IRQ 0x51400028 #define MSR_MFGPT_NR 0x51400029 @@@ -238,7 -237,7 +238,7 @@@ static inline u16 geode_mfgpt_read(int } extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); -extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable); +extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable); extern int geode_mfgpt_alloc_timer(int timer, int domain); #define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) @@@ -250,4 -249,4 +250,4 @@@ extern int __init mfgpt_timer_setup(voi static inline int mfgpt_timer_setup(void) { return 0; } #endif - #endif + #endif /* ASM_X86__GEODE_H */ diff --combined include/asm-x86/gpio.h index c4c91b37c10,f9e8f8918a9..497fb980d96 --- a/include/asm-x86/gpio.h +++ b/include/asm-x86/gpio.h @@@ -16,6 -16,10 +16,6 @@@ #ifndef _ASM_I386_GPIO_H #define _ASM_I386_GPIO_H -#ifdef CONFIG_X86_RDC321X -#include -#else /* CONFIG_X86_RDC321X */ - #include #ifdef CONFIG_GPIOLIB @@@ -53,4 -57,6 +53,4 @@@ static inline int irq_to_gpio(unsigned #endif /* CONFIG_GPIOLIB */ - #endif /* _ASM_I386_GPIO_H */ -#endif /* CONFIG_X86_RDC321X */ - + #endif /* ASM_X86__GPIO_H */ diff --combined include/asm-x86/hw_irq.h index edd0b95f14d,3f3c465b915..2af4e36d43d --- a/include/asm-x86/hw_irq.h +++ b/include/asm-x86/hw_irq.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_HW_IRQ_H - #define _ASM_HW_IRQ_H + #ifndef ASM_X86__HW_IRQ_H + #define ASM_X86__HW_IRQ_H /* * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar @@@ -98,18 -98,10 +98,18 @@@ extern void (*const interrupt[NR_IRQS]) #else typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); -extern spinlock_t vector_lock; #endif -extern void setup_vector_irq(int cpu); + +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64) +extern void lock_vector_lock(void); +extern void unlock_vector_lock(void); +extern void __setup_vector_irq(int cpu); +#else +static inline void lock_vector_lock(void) {} +static inline void unlock_vector_lock(void) {} +static inline void __setup_vector_irq(int cpu) {} +#endif #endif /* !ASSEMBLY_ */ - #endif + #endif /* ASM_X86__HW_IRQ_H */ diff --combined include/asm-x86/i387.h index 56d00e31aec,6f60a6b8997..72c3bd27e8e --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@@ -7,13 -7,12 +7,13 @@@ * x86-64 work by Andi Kleen 2002 */ - #ifndef _ASM_X86_I387_H - #define _ASM_X86_I387_H + #ifndef ASM_X86__I387_H + #define ASM_X86__I387_H #include #include #include +#include #include #include #include @@@ -63,6 -62,8 +63,6 @@@ static inline int restore_fpu_checking( #else : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); #endif - if (unlikely(err)) - init_fpu(current); return err; } @@@ -235,37 -236,6 +235,37 @@@ static inline void kernel_fpu_end(void preempt_enable(); } +/* + * Some instructions like VIA's padlock instructions generate a spurious + * DNA fault but don't modify SSE registers. And these instructions + * get used from interrupt context as well.
To prevent these kernel instructions + * in interrupt context from interacting wrongly with other user/kernel fpu usage, we + * should use them only in the context of irq_ts_save/restore() + */ +static inline int irq_ts_save(void) +{ + /* + * If we are in process context, we are ok to take a spurious DNA fault. + * Otherwise, doing clts() in process context requires pre-emption to + * be disabled or some heavy lifting like kernel_fpu_begin() + */ + if (!in_interrupt()) + return 0; + + if (read_cr0() & X86_CR0_TS) { + clts(); + return 1; + } + + return 0; +} + +static inline void irq_ts_restore(int TS_state) +{ + if (TS_state) + stts(); +} + #ifdef CONFIG_X86_64 static inline void save_init_fpu(struct task_struct *tsk) @@@ -336,4 -306,4 +336,4 @@@ static inline unsigned short get_fpu_mx } } - #endif /* _ASM_X86_I387_H */ + #endif /* ASM_X86__I387_H */ diff --combined include/asm-x86/idle.h index cbb64912361,dc9c7944847..baa3f783d27 --- a/include/asm-x86/idle.h +++ b/include/asm-x86/idle.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_64_IDLE_H - #define _ASM_X86_64_IDLE_H 1 + #ifndef ASM_X86__IDLE_H + #define ASM_X86__IDLE_H #define IDLE_START 1 #define IDLE_END 2 @@@ -10,6 -10,4 +10,6 @@@ void idle_notifier_register(struct noti void enter_idle(void); void exit_idle(void); +void c1e_remove_cpu(int cpu); + - #endif + #endif /* ASM_X86__IDLE_H */ diff --combined include/asm-x86/io.h index 0f954dc89cb,1b75a43bb6c..688f8a4085a --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_IO_H - #define _ASM_X86_IO_H + #ifndef ASM_X86__IO_H + #define ASM_X86__IO_H #define ARCH_HAS_IOREMAP_WC @@@ -21,7 -21,7 +21,7 @@@ extern void __iomem *fix_ioremap(unsign #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ -{ type ret; asm volatile("mov" size " %1,%0":"=" reg (ret) \ +{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ :"m" (*(volatile type __force *)addr) barrier); return ret; } #define build_mmio_write(name, size, type, reg, barrier) \ @@@ -29,13 -29,13 +29,13 @@@ static inline void name(type val, volat { asm volatile("mov" size " %0,%1": :reg (val), \ "m" (*(volatile type __force *)addr) barrier); } -build_mmio_read(readb, "b", unsigned char, "q", :"memory") -build_mmio_read(readw, "w", unsigned short, "r", :"memory") -build_mmio_read(readl, "l", unsigned int, "r", :"memory") +build_mmio_read(readb, "b", unsigned char, "=q", :"memory") +build_mmio_read(readw, "w", unsigned short, "=r", :"memory") +build_mmio_read(readl, "l", unsigned int, "=r", :"memory") -build_mmio_read(__readb, "b", unsigned char, "q", ) -build_mmio_read(__readw, "w", unsigned short, "r", ) -build_mmio_read(__readl, "l", unsigned int, "r", ) +build_mmio_read(__readb, "b", unsigned char, "=q", ) +build_mmio_read(__readw, "w", unsigned short, "=r", ) +build_mmio_read(__readl, "l", unsigned int, "=r", ) build_mmio_write(writeb, "b", unsigned char, "q", :"memory") build_mmio_write(writew, "w", unsigned short, "r", :"memory") @@@ -59,8 -59,8 +59,8 @@@ build_mmio_write(__writel, "l", unsigne #define mmiowb() barrier() #ifdef CONFIG_X86_64 -build_mmio_read(readq, "q", unsigned long, "r", :"memory") -build_mmio_read(__readq, "q", unsigned long, "r", ) +build_mmio_read(readq, "q", unsigned long, "=r", :"memory") +build_mmio_read(__readq, "q", unsigned long, "=r", ) build_mmio_write(writeq, "q", unsigned long, "r", :"memory") build_mmio_write(__writeq, "q", unsigned long, "r", ) @@@ -99,4 -99,4 +99,4 @@@ extern void early_iounmap(void
*addr, u extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); - #endif /* _ASM_X86_IO_H */ + #endif /* ASM_X86__IO_H */ diff --combined include/asm-x86/iommu.h index 5f888cc5be4,4f3d212d827..e86f44148c6 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h @@@ -1,14 -1,11 +1,14 @@@ - #ifndef _ASM_X8664_IOMMU_H - #define _ASM_X8664_IOMMU_H 1 + #ifndef ASM_X86__IOMMU_H + #define ASM_X86__IOMMU_H extern void pci_iommu_shutdown(void); extern void no_iommu_init(void); +extern struct dma_mapping_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; +extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len); + #ifdef CONFIG_GART_IOMMU extern int gart_iommu_aperture; extern int gart_iommu_aperture_allowed; @@@ -42,4 -39,4 +42,4 @@@ static inline void gart_iommu_hole_init } #endif - #endif + #endif /* ASM_X86__IOMMU_H */ diff --combined include/asm-x86/irq_vectors.h index a48c7f2dbdc,646d59f5ebf..c5d2d767a1f --- a/include/asm-x86/irq_vectors.h +++ b/include/asm-x86/irq_vectors.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_IRQ_VECTORS_H - #define _ASM_IRQ_VECTORS_H + #ifndef ASM_X86__IRQ_VECTORS_H + #define ASM_X86__IRQ_VECTORS_H #include @@@ -76,7 -76,6 +76,7 @@@ #define CALL_FUNCTION_SINGLE_VECTOR 0xfb #define THERMAL_APIC_VECTOR 0xfa #define THRESHOLD_APIC_VECTOR 0xf9 +#define UV_BAU_MESSAGE 0xf8 #define INVALIDATE_TLB_VECTOR_END 0xf7 #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ @@@ -110,15 -109,7 +110,15 @@@ #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) -#if !defined(CONFIG_X86_VOYAGER) +#ifdef CONFIG_X86_64 +# if NR_CPUS < MAX_IO_APICS +# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) +# else +# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) +# endif +# define NR_IRQ_VECTORS NR_IRQS + +#elif !defined(CONFIG_X86_VOYAGER) # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS) @@@ -179,4 -170,4 +179,4 @@@ #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) - #endif /* _ASM_IRQ_VECTORS_H */ + #endif /* ASM_X86__IRQ_VECTORS_H */ diff --combined include/asm-x86/kexec.h index 4246ab7dc98,262b63ec911..ea09600d612 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _KEXEC_H - #define _KEXEC_H + #ifndef ASM_X86__KEXEC_H + #define ASM_X86__KEXEC_H #ifdef CONFIG_X86_32 # define PA_CONTROL_PAGE 0 @@@ -10,15 -10,14 +10,15 @@@ # define VA_PTE_0 5 # define PA_PTE_1 6 # define VA_PTE_1 7 +# define PA_SWAP_PAGE 8 # ifdef CONFIG_X86_PAE -# define PA_PMD_0 8 -# define VA_PMD_0 9 -# define PA_PMD_1 10 -# define VA_PMD_1 11 -# define PAGES_NR 12 +# define PA_PMD_0 9 +# define VA_PMD_0 10 +# define PA_PMD_1 11 +# define VA_PMD_1 12 +# define PAGES_NR 13 # else -# define PAGES_NR 8 +# define PAGES_NR 9 # endif #else # define PA_CONTROL_PAGE 0 @@@ -41,10 -40,6 +41,10 @@@ # define PAGES_NR 17 #endif +#ifdef CONFIG_X86_32 +# define KEXEC_CONTROL_CODE_MAX_SIZE 2048 +#endif + #ifndef __ASSEMBLY__ #include @@@ -67,7 -62,7 +67,7 @@@ /* Maximum address we can use for the control code buffer */ # define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE -# define KEXEC_CONTROL_CODE_SIZE 4096 +# define KEXEC_CONTROL_PAGE_SIZE 4096 /* The native architecture */ # define KEXEC_ARCH KEXEC_ARCH_386 @@@ -83,7 -78,7 +83,7 @@@ # define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) /* Allocate one page for the pdp and the second for the code */ -# define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) +# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL) /* The 
native architecture */ # define KEXEC_ARCH KEXEC_ARCH_X86_64 @@@ -157,12 -152,11 +157,12 @@@ static inline void crash_setup_regs(str } #ifdef CONFIG_X86_32 -asmlinkage NORET_TYPE void +asmlinkage unsigned long relocate_kernel(unsigned long indirection_page, unsigned long control_page, unsigned long start_address, - unsigned int has_pae) ATTRIB_NORET; + unsigned int has_pae, + unsigned int preserve_context); #else NORET_TYPE void relocate_kernel(unsigned long indirection_page, @@@ -172,4 -166,4 +172,4 @@@ #endif /* __ASSEMBLY__ */ - #endif /* _KEXEC_H */ + #endif /* ASM_X86__KEXEC_H */ diff --combined include/asm-x86/kgdb.h index 94d63db1036,83a7ee228ab..d283863354d --- a/include/asm-x86/kgdb.h +++ b/include/asm-x86/kgdb.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_KGDB_H_ - #define _ASM_KGDB_H_ + #ifndef ASM_X86__KGDB_H + #define ASM_X86__KGDB_H /* * Copyright (C) 2001-2004 Amit S. Kale @@@ -39,13 -39,12 +39,13 @@@ enum regnames GDB_FS, /* 14 */ GDB_GS, /* 15 */ }; +#define NUMREGBYTES ((GDB_GS+1)*4) #else /* ! CONFIG_X86_32 */ -enum regnames { +enum regnames64 { GDB_AX, /* 0 */ - GDB_DX, /* 1 */ + GDB_BX, /* 1 */ GDB_CX, /* 2 */ - GDB_BX, /* 3 */ + GDB_DX, /* 3 */ GDB_SI, /* 4 */ GDB_DI, /* 5 */ GDB_BP, /* 6 */ @@@ -59,15 -58,18 +59,15 @@@ GDB_R14, /* 14 */ GDB_R15, /* 15 */ GDB_PC, /* 16 */ - GDB_PS, /* 17 */ }; -#endif /* CONFIG_X86_32 */ -/* - * Number of bytes of registers: - */ -#ifdef CONFIG_X86_32 -# define NUMREGBYTES 64 -#else -# define NUMREGBYTES ((GDB_PS+1)*8) -#endif +enum regnames32 { + GDB_PS = 34, + GDB_CS, + GDB_SS, +}; +#define NUMREGBYTES ((GDB_SS+1)*4) +#endif /* CONFIG_X86_32 */ static inline void arch_kgdb_breakpoint(void) { @@@ -76,4 -78,4 +76,4 @@@ #define BREAK_INSTR_SIZE 1 #define CACHE_FLUSH_IS_SAFE 1 - #endif /* _ASM_KGDB_H_ */ + #endif /* ASM_X86__KGDB_H */ diff --combined include/asm-x86/kvm_host.h index c2e34c27590,920823d53d6..69794547f51 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@@ -1,4 -1,4 +1,4 @@@ - #/* + /* * Kernel-based Virtual Machine driver for Linux * * This header defines architecture specific interfaces, x86 version @@@ -8,12 -8,11 +8,12 @@@ * */ - #ifndef ASM_KVM_HOST_H - #define ASM_KVM_HOST_H + #ifndef ASM_X86__KVM_HOST_H + #define ASM_X86__KVM_HOST_H #include #include +#include #include #include @@@ -252,7 -251,6 +252,7 @@@ struct kvm_vcpu_arch gfn_t gfn; /* presumed gfn during guest pte update */ pfn_t pfn; /* pfn corresponding to that gfn */ int largepage; + unsigned long mmu_seq; } update_pte; struct i387_fxsave_struct host_fx_image; @@@ -558,7 -556,6 +558,7 @@@ int kvm_fix_hypercall(struct kvm_vcpu * int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); void kvm_enable_tdp(void); +void kvm_disable_tdp(void); int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int complete_pio(struct kvm_vcpu *vcpu); @@@ -722,7 -719,7 +722,7 @@@ asmlinkage void kvm_handle_fault_on_reb #define __kvm_handle_fault_on_reboot(insn) \ "666: " insn "\n\t" \ - ".pushsection .text.fixup, \"ax\" \n" \ + ".pushsection .fixup, \"ax\" \n" \ "667: \n\t" \ KVM_EX_PUSH " $666b \n\t" \ "jmp kvm_handle_fault_on_reboot \n\t" \ @@@ -731,8 -728,4 +731,8 @@@ KVM_EX_ENTRY " 666b, 667b \n\t" \ ".popsection" +#define KVM_ARCH_WANT_MMU_NOTIFIER +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +int kvm_age_hva(struct kvm *kvm, unsigned long hva); + - #endif + #endif /* ASM_X86__KVM_HOST_H */ diff --combined include/asm-x86/mach-summit/mach_apic.h index c47e2ab5c5c,ef77af36cc9..7a66758d701 --- 
a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef __ASM_MACH_APIC_H - #define __ASM_MACH_APIC_H + #ifndef ASM_X86__MACH_SUMMIT__MACH_APIC_H + #define ASM_X86__MACH_SUMMIT__MACH_APIC_H #include @@@ -122,7 -122,7 +122,7 @@@ static inline physid_mask_t ioapic_phys static inline physid_mask_t apicid_to_cpu_present(int apicid) { - return physid_mask_of_physid(0); + return physid_mask_of_physid(apicid); } static inline void setup_portio_remap(void) @@@ -182,4 -182,4 +182,4 @@@ static inline u32 phys_pkg_id(u32 cpuid return hard_smp_processor_id() >> index_msb; } - #endif /* __ASM_MACH_APIC_H */ + #endif /* ASM_X86__MACH_SUMMIT__MACH_APIC_H */ diff --combined include/asm-x86/mce.h index 531eaa58745,6a580f24d4a..036133eaf74 --- a/include/asm-x86/mce.h +++ b/include/asm-x86/mce.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_MCE_H - #define _ASM_X86_MCE_H + #ifndef ASM_X86__MCE_H + #define ASM_X86__MCE_H #ifdef __x86_64__ @@@ -92,7 -92,6 +92,7 @@@ extern int mce_disabled void mce_log(struct mce *m); DECLARE_PER_CPU(struct sys_device, device_mce); +extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); @@@ -127,4 -126,4 +127,4 @@@ extern void restart_mce(void) #endif /* __KERNEL__ */ - #endif + #endif /* ASM_X86__MCE_H */ diff --combined include/asm-x86/mman.h index 90bc4108a4f,b6b41aa1cbc..4ef28e6de38 --- a/include/asm-x86/mman.h +++ b/include/asm-x86/mman.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_MMAN_H - #define _ASM_X86_MMAN_H + #ifndef ASM_X86__MMAN_H + #define ASM_X86__MMAN_H #include @@@ -12,9 -12,8 +12,9 @@@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ - #endif /* _ASM_X86_MMAN_H */ + #endif /* ASM_X86__MMAN_H */ diff --combined include/asm-x86/mmconfig.h index e293ab81e85,8689f1e7bc0..fb79b1cf5d0 --- a/include/asm-x86/mmconfig.h +++ b/include/asm-x86/mmconfig.h @@@ -1,12 -1,12 +1,12 @@@ - #ifndef _ASM_MMCONFIG_H - #define _ASM_MMCONFIG_H + #ifndef ASM_X86__MMCONFIG_H + #define ASM_X86__MMCONFIG_H #ifdef CONFIG_PCI_MMCONFIG extern void __cpuinit fam10h_check_enable_mmcfg(void); -extern void __init check_enable_amd_mmconf_dmi(void); +extern void __cpuinit check_enable_amd_mmconf_dmi(void); #else static inline void fam10h_check_enable_mmcfg(void) { } static inline void check_enable_amd_mmconf_dmi(void) { } #endif - #endif + #endif /* ASM_X86__MMCONFIG_H */ diff --combined include/asm-x86/mmzone_32.h index 5862e646065,b98590fdc9e..121b65d61d8 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@@ -3,8 -3,8 +3,8 @@@ * */ - #ifndef _ASM_MMZONE_H_ - #define _ASM_MMZONE_H_ + #ifndef ASM_X86__MMZONE_32_H + #define ASM_X86__MMZONE_32_H #include @@@ -97,16 -97,10 +97,16 @@@ static inline int pfn_valid(int pfn reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags)) #define alloc_bootmem(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ + __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), 
SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \ + __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ @@@ -131,4 -125,4 +131,4 @@@ }) #endif /* CONFIG_NEED_MULTIPLE_NODES */ - #endif /* _ASM_MMZONE_H_ */ + #endif /* ASM_X86__MMZONE_32_H */ diff --combined include/asm-x86/msr.h index 2362cfda1fb,032992035bd..eee83f783f6 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef __ASM_X86_MSR_H_ - #define __ASM_X86_MSR_H_ + #ifndef ASM_X86__MSR_H + #define ASM_X86__MSR_H #include @@@ -52,14 -52,14 +52,14 @@@ static inline unsigned long long native { DECLARE_ARGS(val, low, high); - asm volatile("2: rdmsr ; xor %0,%0\n" + asm volatile("2: rdmsr ; xor %[err],%[err]\n" "1:\n\t" ".section .fixup,\"ax\"\n\t" - "3: mov %3,%0 ; jmp 1b\n\t" + "3: mov %[fault],%[err] ; jmp 1b\n\t" ".previous\n\t" _ASM_EXTABLE(2b, 3b) - : "=r" (*err), EAX_EDX_RET(val, low, high) - : "c" (msr), "i" (-EFAULT)); + : [err] "=r" (*err), EAX_EDX_RET(val, low, high) + : "c" (msr), [fault] "i" (-EFAULT)); return EAX_EDX_VAL(val, low, high); } @@@ -73,15 -73,15 +73,15 @@@ static inline int native_write_msr_safe unsigned low, unsigned high) { int err; - asm volatile("2: wrmsr ; xor %0,%0\n" + asm volatile("2: wrmsr ; xor %[err],%[err]\n" "1:\n\t" ".section .fixup,\"ax\"\n\t" - "3: mov %4,%0 ; jmp 1b\n\t" + "3: mov %[fault],%[err] ; jmp 1b\n\t" ".previous\n\t" _ASM_EXTABLE(2b, 3b) - : "=a" (err) + : [err] "=a" (err) : "c" (msr), "0" (low), "d" (high), - "i" (-EFAULT) + [fault] "i" (-EFAULT) : "memory"); return err; } @@@ -192,20 -192,19 +192,20 @@@ do #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) #ifdef CONFIG_SMP -void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); -void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); +int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); +int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); - int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); #else /* CONFIG_SMP */ -static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) +static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) { rdmsr(msr_no, *l, *h); + return 0; } -static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) +static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { wrmsr(msr_no, l, h); + return 0; } static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) @@@ -221,4 -220,4 +221,4 @@@ static inline int wrmsr_safe_on_cpu(uns #endif /* __KERNEL__ */ - #endif + #endif /* ASM_X86__MSR_H */ diff --combined include/asm-x86/percpu.h index f643a3a92da,0afc8324807..e10a1d0678c --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_PERCPU_H_ - #define _ASM_X86_PERCPU_H_ + #ifndef ASM_X86__PERCPU_H + #define ASM_X86__PERCPU_H #ifdef CONFIG_X86_64 #include @@@ -182,7 -182,7 +182,7 @@@ do { DEFINE_PER_CPU(_type, _name) = _initvalue; \ __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ { [0 ... 
NR_CPUS-1] = _initvalue }; \ - __typeof__(_type) *_name##_early_ptr = _name##_early_map + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ EXPORT_PER_CPU_SYMBOL(_name) @@@ -215,4 -215,4 +215,4 @@@ #endif /* !CONFIG_SMP */ - #endif /* _ASM_X86_PERCPU_H_ */ + #endif /* ASM_X86__PERCPU_H */ diff --combined include/asm-x86/pgtable.h index 04caa2f544d,3ca03f902e0..70fcb2adbb7 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_X86_PGTABLE_H - #define _ASM_X86_PGTABLE_H + #ifndef ASM_X86__PGTABLE_H + #define ASM_X86__PGTABLE_H #define FIRST_USER_ADDRESS 0 @@@ -18,7 -18,6 +18,7 @@@ #define _PAGE_BIT_UNUSED2 10 #define _PAGE_BIT_UNUSED3 11 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) @@@ -35,8 -34,6 +35,8 @@@ #define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) +#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) +#define __HAVE_ARCH_PTE_SPECIAL #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) @@@ -57,7 -54,7 +57,7 @@@ /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ - _PAGE_ACCESSED | _PAGE_DIRTY) + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) #define _PAGE_CACHE_WB (0) @@@ -183,7 -180,7 +183,7 @@@ static inline int pte_exec(pte_t pte static inline int pte_special(pte_t pte) { - return 0; + return pte_val(pte) & _PAGE_SPECIAL; } static inline int pmd_large(pmd_t pte) @@@ -249,7 -246,7 +249,7 @@@ static inline pte_t pte_clrglobal(pte_ static inline pte_t pte_mkspecial(pte_t pte) { - return pte; + return __pte(pte_val(pte) | _PAGE_SPECIAL); } extern pteval_t __supported_pte_mask; @@@ -521,4 -518,4 +521,4 @@@ static inline void clone_pgd_range(pgd_ #include #endif /* __ASSEMBLY__ */ - #endif /* _ASM_X86_PGTABLE_H */ + #endif /* ASM_X86__PGTABLE_H */ diff --combined include/asm-x86/pgtable_64.h index 549144d03d9,609c24975c6..e3dcf7a08a0 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _X86_64_PGTABLE_H - #define _X86_64_PGTABLE_H + #ifndef ASM_X86__PGTABLE_64_H + #define ASM_X86__PGTABLE_64_H #include #ifndef __ASSEMBLY__ @@@ -151,7 -151,7 +151,7 @@@ static inline void native_pgd_clear(pgd #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) #define VMEMMAP_START _AC(0xffffe20000000000, UL) #define MODULES_VADDR _AC(0xffffffffa0000000, UL) -#define MODULES_END _AC(0xfffffffffff00000, UL) +#define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) #ifndef __ASSEMBLY__ @@@ -284,4 -284,4 +284,4 @@@ extern void cleanup_highmap(void) #define __HAVE_ARCH_PTE_SAME #endif /* !__ASSEMBLY__ */ - #endif /* _X86_64_PGTABLE_H */ + #endif /* ASM_X86__PGTABLE_64_H */ diff --combined include/asm-x86/processor.h index 4df3e2f6fb5,d60b4d81feb..24cc5261af0 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef __ASM_X86_PROCESSOR_H - #define __ASM_X86_PROCESSOR_H + #ifndef ASM_X86__PROCESSOR_H + #define ASM_X86__PROCESSOR_H #include @@@ -728,29 -728,6 +728,29 @@@ extern unsigned long 
boot_option_idle_ extern unsigned long idle_halt; extern unsigned long idle_nomwait; +/* + * on systems with caches, caches must be flushed as the absolute + * last instruction before going into a suspended halt. Otherwise, + * dirty data can linger in the cache and become stale on resume, + * leading to strange errors. + * + * perform a variety of operations to guarantee that the compiler + * will not reorder instructions. wbinvd itself is serializing + * so the processor will not reorder. + * + * Systems without cache can just go into halt. + */ +static inline void wbinvd_halt(void) +{ + mb(); + /* check for clflush to determine if wbinvd is legal */ + if (cpu_has_clflush) + asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); + else + while (1) + halt(); +} + extern void enable_sep_cpu(void); extern int sysenter_setup(void); @@@ -943,4 -920,4 +943,4 @@@ extern void start_thread(struct pt_reg extern int get_tsc_mode(unsigned long adr); extern int set_tsc_mode(unsigned int val); - #endif + #endif /* ASM_X86__PROCESSOR_H */ diff --combined include/asm-x86/required-features.h index 5c2ff4bc298,d6822e099c5..a01c4e37633 --- a/include/asm-x86/required-features.h +++ b/include/asm-x86/required-features.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_REQUIRED_FEATURES_H - #define _ASM_REQUIRED_FEATURES_H 1 + #ifndef ASM_X86__REQUIRED_FEATURES_H + #define ASM_X86__REQUIRED_FEATURES_H /* Define minimum CPUID feature set for kernel These bits are checked really early to actually display a visible error message before the @@@ -41,12 -41,6 +41,12 @@@ # define NEED_3DNOW 0 #endif +#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) +# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) +#else +# define NEED_NOPL 0 +#endif + #ifdef CONFIG_X86_64 #define NEED_PSE 0 #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) @@@ -73,10 -67,10 +73,10 @@@ #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) #define REQUIRED_MASK2 0 -#define REQUIRED_MASK3 0 +#define REQUIRED_MASK3 (NEED_NOPL) #define REQUIRED_MASK4 0 #define REQUIRED_MASK5 0 #define REQUIRED_MASK6 0 #define REQUIRED_MASK7 0 - #endif + #endif /* ASM_X86__REQUIRED_FEATURES_H */ diff --combined include/asm-x86/spinlock.h index e39c790dbfd,cbe01086ba6..5d08fa280fd --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _X86_SPINLOCK_H_ - #define _X86_SPINLOCK_H_ + #ifndef ASM_X86__SPINLOCK_H + #define ASM_X86__SPINLOCK_H #include #include @@@ -65,7 -65,7 +65,7 @@@ static inline int __ticket_spin_is_cont { int tmp = ACCESS_ONCE(lock->slock); - return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; + return (((tmp >> 8) - tmp) & 0xff) > 1; } static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) @@@ -127,7 -127,7 +127,7 @@@ static inline int __ticket_spin_is_cont { int tmp = ACCESS_ONCE(lock->slock); - return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; + return (((tmp >> 16) - tmp) & 0xffff) > 1; } static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) @@@ -366,4 -366,4 +366,4 @@@ static inline void __raw_write_unlock(r #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() - #endif + #endif /* ASM_X86__SPINLOCK_H */ diff --combined include/asm-x86/swiotlb.h index 2730b351afc,9486c400a71..1e20adbcad4 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_SWIOTLB_H - #define _ASM_SWIOTLB_H 1 + #ifndef ASM_X86__SWIOTLB_H + #define ASM_X86__SWIOTLB_H #include @@@ -35,7 -35,7 +35,7 @@@ extern int swiotlb_map_sg(struct devic int
nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); -extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); +extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); extern void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); @@@ -55,4 -55,4 +55,4 @@@ static inline void pci_swiotlb_init(voi static inline void dma_mark_clean(void *addr, size_t size) {} - #endif /* _ASM_SWIOTLB_H */ + #endif /* ASM_X86__SWIOTLB_H */ diff --combined include/asm-x86/uaccess.h index 5f702d1d521,1838f3959a5..48ebc0ad40e --- a/include/asm-x86/uaccess.h +++ b/include/asm-x86/uaccess.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef _ASM_UACCES_H_ - #define _ASM_UACCES_H_ + #ifndef ASM_X86__UACCESS_H + #define ASM_X86__UACCESS_H /* * User space memory access functions */ @@@ -450,5 -450,4 +450,5 @@@ extern struct movsl_mask # include "uaccess_64.h" #endif - #endif + #endif /* ASM_X86__UACCESS_H */ + diff --combined include/asm-x86/uaccess_64.h index 45806d60bcb,5cfd2951c9e..c96c1f5d07a --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h @@@ -1,5 -1,5 +1,5 @@@ - #ifndef __X86_64_UACCESS_H - #define __X86_64_UACCESS_H + #ifndef ASM_X86__UACCESS_64_H + #define ASM_X86__UACCESS_64_H /* * User space memory access functions @@@ -7,7 -7,6 +7,7 @@@ #include #include #include +#include #include /* @@@ -199,4 -198,4 +199,4 @@@ static inline int __copy_from_user_inat unsigned long copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); - #endif /* __X86_64_UACCESS_H */ + #endif /* ASM_X86__UACCESS_64_H */ diff --combined include/asm-x86/uv/uv_bau.h index 610b6b308e9,0950239acaf..77153fb18f5 --- a/include/asm-x86/uv/uv_bau.h +++ b/include/asm-x86/uv/uv_bau.h @@@ -8,8 -8,8 +8,8 @@@ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. */ - #ifndef __ASM_X86_UV_BAU__ - #define __ASM_X86_UV_BAU__ + #ifndef ASM_X86__UV__UV_BAU_H + #define ASM_X86__UV__UV_BAU_H #include #define BITSPERBYTE 8 @@@ -40,6 -40,11 +40,6 @@@ #define UV_ACTIVATION_DESCRIPTOR_SIZE 32 #define UV_DISTRIBUTION_SIZE 256 #define UV_SW_ACK_NPENDING 8 -#define UV_BAU_MESSAGE 200 -/* - * Messaging irq; see irq_64.h and include/asm-x86/hw_irq_64.h - * To be dynamically allocated in the future - */ #define UV_NET_ENDPOINT_INTD 0x38 #define UV_DESC_BASE_PNODE_SHIFT 49 #define UV_PAYLOADQ_PNODE_SHIFT 49 @@@ -329,4 -334,4 +329,4 @@@ extern int uv_flush_tlb_others(cpumask_ extern void uv_bau_message_intr1(void); extern void uv_bau_timeout_intr1(void); - #endif /* __ASM_X86_UV_BAU__ */ + #endif /* ASM_X86__UV__UV_BAU_H */ diff --combined include/asm-x86/xen/hypervisor.h index 04ee0610014,06c350452c5..0ef3a88b869 --- a/include/asm-x86/xen/hypervisor.h +++ b/include/asm-x86/xen/hypervisor.h @@@ -30,11 -30,12 +30,11 @@@ * IN THE SOFTWARE. */ - #ifndef __HYPERVISOR_H__ - #define __HYPERVISOR_H__ + #ifndef ASM_X86__XEN__HYPERVISOR_H + #define ASM_X86__XEN__HYPERVISOR_H #include #include -#include #include #include @@@ -69,4 -70,4 +69,4 @@@ u64 jiffies_to_st(unsigned long jiffies #define is_running_on_xen() (xen_start_info ? 1 : 0) - #endif /* __HYPERVISOR_H__ */ + #endif /* ASM_X86__XEN__HYPERVISOR_H */
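The guard renames merged above all follow one mechanical pattern: the guard symbol is derived from the header's path under include/, with "__" standing in for the directory separator, and the closing #endif gains a matching comment. A minimal sketch of the before/after shape, using a hypothetical include/asm-x86/example.h (not a file touched by this merge):

/* before: ad-hoc guard names (_ASM_X86_EXAMPLE_H, __X86_EXAMPLE_H, ...) */
#ifndef _ASM_X86_EXAMPLE_H
#define _ASM_X86_EXAMPLE_H
/* declarations ... */
#endif

/* after: guard derived from the path include/asm-x86/example.h */
#ifndef ASM_X86__EXAMPLE_H
#define ASM_X86__EXAMPLE_H
/* declarations ... */
#endif /* ASM_X86__EXAMPLE_H */

Deriving the symbol from the path keeps guards unique across the tree (see the subdirectory cases above, e.g. ASM_X86__MACH_SUMMIT__MACH_APIC_H and ASM_X86__UV__UV_BAU_H) and makes the convention easy to check by script.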