]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branches 'x86/apic', 'x86/cleanups', 'x86/cpufeature', 'x86/crashdump', 'x86...
authorIngo Molnar <mingo@elte.hu>
Tue, 23 Dec 2008 15:27:23 +0000 (16:27 +0100)
committerIngo Molnar <mingo@elte.hu>
Tue, 23 Dec 2008 15:27:23 +0000 (16:27 +0100)
284 files changed:
Documentation/arm/mem_alignment
Documentation/feature-removal-schedule.txt
Documentation/filesystems/proc.txt
Documentation/kernel-parameters.txt
Documentation/nmi_watchdog.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/usb/gadget_serial.txt
Documentation/usb/proc_usb_info.txt
Documentation/usb/usbmon.txt
Documentation/x86/boot.txt
Documentation/x86/pat.txt
Documentation/x86/x86_64/boot-options.txt
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
arch/arm/common/sa1111.c
arch/arm/kernel/armksyms.c
arch/arm/kernel/traps.c
arch/arm/mach-pxa/include/mach/reset.h
arch/arm/mm/fault.c
arch/avr32/boards/favr-32/flash.c
arch/avr32/boards/favr-32/setup.c
arch/avr32/boot/images/Makefile
arch/avr32/configs/atstk1006_defconfig
arch/avr32/mach-at32ap/at32ap700x.c
arch/ia64/hp/sim/Kconfig
arch/powerpc/kernel/misc_32.S
arch/powerpc/lib/rheap.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/cell/axon_msi.c
arch/sh/Kconfig
arch/sparc/include/asm/ptrace_32.h
arch/sparc/include/asm/ptrace_64.h
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/boot/video-vga.c
arch/x86/boot/video.c
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/bigsmp/apic.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/byteorder.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/emergency-restart.h
arch/x86/include/asm/es7000/apic.h
arch/x86/include/asm/es7000/wakecpu.h
arch/x86/include/asm/genapic_32.h
arch/x86/include/asm/genapic_64.h
arch/x86/include/asm/hypervisor.h [new file with mode: 0644]
arch/x86/include/asm/ia32.h
arch/x86/include/asm/idle.h
arch/x86/include/asm/io.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_regs_32.h
arch/x86/include/asm/kexec.h
arch/x86/include/asm/mach-default/mach_apic.h
arch/x86/include/asm/mach-default/mach_wakecpu.h
arch/x86/include/asm/mach-default/smpboot_hooks.h
arch/x86/include/asm/mach-generic/mach_apic.h
arch/x86/include/asm/mach-generic/mach_wakecpu.h [new file with mode: 0644]
arch/x86/include/asm/mmu_context_32.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/numaq/wakecpu.h
arch/x86/include/asm/pci.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/prctl.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/reboot.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/sigframe.h [new file with mode: 0644]
arch/x86/include/asm/signal.h
arch/x86/include/asm/sparsemem.h
arch/x86/include/asm/syscalls.h
arch/x86/include/asm/system.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/trampoline.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/tsc.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uv/bios.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/asm/vmi.h
arch/x86/include/asm/vmware.h [new file with mode: 0644]
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/asm/xen/hypervisor.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/apic.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/bios_uv.c
arch/x86/kernel/check.c [new file with mode: 0644]
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/hypervisor.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/vmware.c [new file with mode: 0644]
arch/x86/kernel/crash.c
arch/x86/kernel/ds.c
arch/x86/kernel/dumpstack.c [new file with mode: 0644]
arch/x86/kernel/dumpstack.h [new file with mode: 0644]
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/e820.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/es7000_32.c
arch/x86/kernel/genapic_64.c
arch/x86/kernel/genx2apic_uv_x.c
arch/x86/kernel/head.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/init_task.c
arch/x86/kernel/io_apic.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/microcode_intel.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/nmi.c
arch/x86/kernel/numaq_32.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/process.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/relocate_kernel_32.S
arch/x86/kernel/setup.c
arch/x86/kernel/sigframe.h [deleted file]
arch/x86/kernel/signal.c [moved from arch/x86/kernel/signal_32.c with 73% similarity]
arch/x86/kernel/signal_64.c [deleted file]
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/time_64.c
arch/x86/kernel/tlb_32.c
arch/x86/kernel/tlb_uv.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kernel/vmi_32.c
arch/x86/kernel/vsyscall_64.c
arch/x86/mach-generic/bigsmp.c
arch/x86/mach-generic/default.c
arch/x86/mach-generic/es7000.c
arch/x86/mach-generic/probe.c
arch/x86/mach-generic/summit.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/pat.c
arch/x86/pci/common.c
arch/x86/pci/direct.c
arch/x86/pci/pci.h
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/multicalls.c
arch/x86/xen/setup.c
crypto/async_tx/async_xor.c
drivers/acpi/pci_irq.c
drivers/acpi/toshiba_acpi.c
drivers/ata/libata-core.c
drivers/ata/pata_hpt366.c
drivers/block/cciss.c
drivers/char/xilinx_hwicap/buffer_icap.c
drivers/char/xilinx_hwicap/buffer_icap.h
drivers/char/xilinx_hwicap/fifo_icap.c
drivers/char/xilinx_hwicap/fifo_icap.h
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/char/xilinx_hwicap/xilinx_hwicap.h
drivers/dma/dmaengine.c
drivers/dma/ioat_dma.c
drivers/dma/iop-adma.c
drivers/dma/mv_xor.c
drivers/firmware/dmi_scan.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/ieee1394/nodemgr.c
drivers/md/bitmap.c
drivers/message/fusion/mptscsih.c
drivers/misc/sgi-gru/gruprocfs.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xp_main.c
drivers/misc/sgi-xp/xp_sn2.c
drivers/misc/sgi-xp/xp_uv.c
drivers/misc/sgi-xp/xpc.h
drivers/misc/sgi-xp/xpc_sn2.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/net/bnx2.c
drivers/net/e1000e/ich8lan.c
drivers/net/enc28j60.c
drivers/net/jme.h
drivers/net/phy/mdio_bus.c
drivers/net/starfire.c
drivers/net/sungem.c
drivers/net/tlan.c
drivers/pci/hotplug/acpiphp.h
drivers/pci/hotplug/acpiphp_core.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/ibmphp_core.c
drivers/pci/hotplug/pciehp_core.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/quirks.c
drivers/pcmcia/bfin_cf_pcmcia.c
drivers/scsi/aacraid/linit.c
drivers/scsi/ibmvscsi/ibmvstgt.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_lib.c
drivers/sh/maple/maple.c
drivers/staging/Kconfig
drivers/usb/class/usbtmc.c
drivers/usb/core/driver.c
drivers/usb/gadget/f_rndis.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/unusual_devs.h
drivers/xen/balloon.c
drivers/xen/features.c
drivers/xen/grant-table.c
fs/9p/fid.c
fs/9p/v9fs.c
fs/9p/vfs_dentry.c
fs/9p/vfs_inode.c
fs/cifs/cifssmb.c
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/xattr.c
include/asm-generic/bug.h
include/asm-generic/pgtable.h
include/linux/dmi.h
include/linux/kexec.h
include/linux/mm.h
include/linux/netdevice.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/smp.h
include/linux/usb/ch9.h
include/net/irda/irda_device.h
include/xen/interface/event_channel.h
kernel/cgroup.c
kernel/posix-timers.c
kernel/sched_clock.c
lib/bug.c
lib/dynamic_printk.c
mm/memory.c
mm/migrate.c
mm/slob.c
mm/swapfile.c
net/core/netpoll.c
net/ipv4/netfilter/nf_nat_rule.c
net/ipv4/tcp_vegas.c
net/ipv6/ndisc.c
net/netlabel/netlabel_unlabeled.c
net/phonet/pep-gprs.c
net/sched/sch_netem.c
sound/pci/hda/patch_sigmatel.c
sound/soc/omap/omap-pcm.c

index d145ccca169a37aa3ff42b06c734d9068525d9d7..c7c7a114c78c795c702f8543c57c7558466a9a4b 100644 (file)
@@ -24,7 +24,7 @@ real bad - it changes the behaviour of all unaligned instructions in user
 space, and might cause programs to fail unexpectedly.
 
 To change the alignment trap behavior, simply echo a number into
-/proc/sys/debug/alignment.  The number is made up from various bits:
+/proc/cpu/alignment.  The number is made up from various bits:
 
 bit            behavior when set
 ---            -----------------
index c28a2ac88f9d9ed1457ab0c04324e3cf20fe986f..1a8af7354e799843721c3fb7ac40ab2df9352671 100644 (file)
@@ -244,18 +244,6 @@ Who:       Michael Buesch <mb@bu3sch.de>
 
 ---------------------------
 
-What:  init_mm export
-When:  2.6.26
-Why:   Not used in-tree. The current out-of-tree users used it to
-       work around problems in the CPA code which should be resolved
-       by now. One usecase was described to provide verification code
-       of the CPA operation. That's a good idea in general, but such
-       code / infrastructure should be in the kernel and not in some
-       out-of-tree driver.
-Who:   Thomas Gleixner <tglx@linutronix.de>
-
-----------------------------
-
 What:  usedac i386 kernel parameter
 When:  2.6.27
 Why:   replaced by allowdac and no dac combination
index bb1b0dd3bfcbcc919bea2fbdf706607608f915c5..71df353e367c9e8c3f2833fe7114c875a6d49d3b 100644 (file)
@@ -1339,10 +1339,13 @@ nmi_watchdog
 
 Enables/Disables the NMI watchdog on x86 systems.  When the value is non-zero
 the NMI watchdog is enabled and will continuously test all online cpus to
-determine whether or not they are still functioning properly.
+determine whether or not they are still functioning properly. Currently,
+passing "nmi_watchdog=" parameter at boot time is required for this function
+to work.
 
-Because the NMI watchdog shares registers with oprofile, by disabling the NMI
-watchdog, oprofile may have more registers to utilize.
+If LAPIC NMI watchdog method is in use (nmi_watchdog=2 kernel parameter), the
+NMI watchdog shares registers with oprofile. By disabling the NMI watchdog,
+oprofile may have more registers to utilize.
 
 msgmni
 ------
index e0f346d201edb70fae654c55f6be842c4465a5ff..d5418d52891027c6ef25277b253b98965be94451 100644 (file)
@@ -1393,7 +1393,20 @@ and is between 256 and 4096 characters. It is defined in the file
                        when a NMI is triggered.
                        Format: [state][,regs][,debounce][,die]
 
-       nmi_watchdog=   [KNL,BUGS=X86-32] Debugging features for SMP kernels
+       nmi_watchdog=   [KNL,BUGS=X86-32,X86-64] Debugging features for SMP kernels
+                       Format: [panic,][num]
+                       Valid num: 0,1,2
+                       0 - turn nmi_watchdog off
+                       1 - use the IO-APIC timer for the NMI watchdog
+                       2 - use the local APIC for the NMI watchdog using
+                       a performance counter. Note: This will use one performance
+                       counter and the local APIC's performance vector.
+                       When panic is specified panic when an NMI watchdog timeout occurs.
+                       This is useful when you use a panic=... timeout and need the box
+                       quickly up again.
+                       Instead of 1 and 2 it is possible to use the following
+                       symbolic names: lapic and ioapic
+                       Example: nmi_watchdog=2 or nmi_watchdog=panic,lapic
 
        no387           [BUGS=X86-32] Tells the kernel to use the 387 maths
                        emulation library even if a 387 maths coprocessor
@@ -1626,6 +1639,17 @@ and is between 256 and 4096 characters. It is defined in the file
                nomsi           [MSI] If the PCI_MSI kernel config parameter is
                                enabled, this kernel boot option can be used to
                                disable the use of MSI interrupts system-wide.
+               noioapicquirk   [APIC] Disable all boot interrupt quirks.
+                               Safety option to keep boot IRQs enabled. This
+                               should never be necessary.
+               ioapicreroute   [APIC] Enable rerouting of boot IRQs to the
+                               primary IO-APIC for bridges that cannot disable
+                               boot IRQs. This fixes a source of spurious IRQs
+                               when the system masks IRQs.
+               noioapicreroute [APIC] Disable workaround that uses the
+                               boot IRQ equivalent of an IRQ that connects to
+                               a chipset where boot IRQs cannot be disabled.
+                               The opposite of ioapicreroute.
                biosirq         [X86-32] Use PCI BIOS calls to get the interrupt
                                routing table. These calls are known to be buggy
                                on several machines and they hang the machine
@@ -2255,6 +2279,13 @@ and is between 256 and 4096 characters. It is defined in the file
                        Format:
                        <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
 
+       tsc=            Disable clocksource-must-verify flag for TSC.
+                       Format: <string>
+                       [x86] reliable: mark tsc clocksource as reliable, this
+                       disables clocksource verification at runtime.
+                       Used to enable high-resolution timer mode on older
+                       hardware, and in virtualized environment.
+
        turbografx.map[2|3]=    [HW,JOY]
                        TurboGraFX parallel port interface
                        Format:
index 90aa4531cb67343b03b396610aa0db77c1d1e0ba..bf9f80a982829ae06062e9cc118762335936ad4a 100644 (file)
@@ -69,6 +69,11 @@ to the overall system performance.
 On x86 nmi_watchdog is disabled by default so you have to enable it with
 a boot time parameter.
 
+It's possible to disable the NMI watchdog in run-time by writing "0" to
+/proc/sys/kernel/nmi_watchdog. Writing "1" to the same file will re-enable
+the NMI watchdog. Notice that you still need to use "nmi_watchdog=" parameter
+at boot time.
+
 NOTE: In kernels prior to 2.4.2-ac18 the NMI-oopser is enabled unconditionally
 on x86 SMP boxes.
 
index 3cd2ad958176c9e4f4969de00597ea55633d0c2c..394d7d378dc74daf6899621d2bca0c37cdd598cf 100644 (file)
@@ -1063,6 +1063,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 
        STAC9227/9228/9229/927x
          ref           Reference board
+         ref-no-jd     Reference board without HP/Mic jack detection
          3stack        D965 3stack
          5stack        D965 5stack + SPDIF
          dell-3stack   Dell Dimension E520
@@ -1076,6 +1077,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 
        STAC92HD73*
          ref           Reference board
+         no-jd         BIOS setup but without jack-detection
          dell-m6-amic  Dell desktops/laptops with analog mics
          dell-m6-dmic  Dell desktops/laptops with digital mics
          dell-m6       Dell desktops/laptops with both type of mics
index 9b22bd14c3481789ac126c34842002ef7093a72a..eac7df94d8e3b7a9367e47f0c74e690502969be7 100644 (file)
@@ -114,11 +114,11 @@ modules.
 Then you must load the gadget serial driver.  To load it as an
 ACM device (recommended for interoperability), do this:
 
-  modprobe g_serial use_acm=1
+  modprobe g_serial
 
 To load it as a vendor specific bulk in/out device, do this:
 
-  modprobe g_serial
+  modprobe g_serial use_acm=0
 
 This will also automatically load the underlying gadget peripheral
 controller driver.  This must be done each time you reboot the gadget
index 077e9032d0cda17b08be1e8f85b496f37cef6b85..fafcd47232600a1a80351fcd5dfc79492e6ce8e9 100644 (file)
@@ -49,8 +49,10 @@ it and 002/048 sometime later.
 
 These files can be read as binary data.  The binary data consists
 of first the device descriptor, then the descriptors for each
-configuration of the device.  That information is also shown in
-text form by the /proc/bus/usb/devices file, described later.
+configuration of the device.  Multi-byte fields in the device and
+configuration descriptors, but not other descriptors, are converted
+to host endianness by the kernel.  This information is also shown
+in text form by the /proc/bus/usb/devices file, described later.
 
 These files may also be used to write user-level drivers for the USB
 devices.  You would open the /proc/bus/usb/BBB/DDD file read/write,
index 2917ce4ffdc4f5df267bdc9b5f239a9679c2e6d4..270481906dc80d1b25a0c29f607ef55e83ae16be 100644 (file)
@@ -34,11 +34,12 @@ if usbmon is built into the kernel.
 Verify that bus sockets are present.
 
 # ls /sys/kernel/debug/usbmon
-0s  0t  0u  1s  1t  1u  2s  2t  2u  3s  3t  3u  4s  4t  4u
+0s  0u  1s  1t  1u  2s  2t  2u  3s  3t  3u  4s  4t  4u
 #
 
-Now you can choose to either use the sockets numbered '0' (to capture packets on
-all buses), and skip to step #3, or find the bus used by your device with step #2.
+Now you can choose to either use the socket '0u' (to capture packets on all
+buses), and skip to step #3, or find the bus used by your device with step #2.
+This allows to filter away annoying devices that talk continuously.
 
 2. Find which bus connects to the desired device
 
@@ -99,8 +100,9 @@ on the event type, but there is a set of words, common for all types.
 
 Here is the list of words, from left to right:
 
-- URB Tag. This is used to identify URBs is normally a kernel mode address
- of the URB structure in hexadecimal.
+- URB Tag. This is used to identify URBs, and is normally an in-kernel address
+  of the URB structure in hexadecimal, but can be a sequence number or any
+  other unique string, within reason.
 
 - Timestamp in microseconds, a decimal number. The timestamp's resolution
   depends on available clock, and so it can be much worse than a microsecond
index 83c0033ee9e01d26866494b58115a1da9b2c429a..fcdc62b3c3d8d8d1b826c04ce70a3bf9ea0b2a9b 100644 (file)
@@ -349,7 +349,7 @@ Protocol:   2.00+
        3  SYSLINUX
        4  EtherBoot
        5  ELILO
-       7  GRuB
+       7  GRUB
        8  U-BOOT
        9  Xen
        A  Gujin
@@ -537,8 +537,8 @@ Type:               read
 Offset/size:   0x248/4
 Protocol:      2.08+
 
-  If non-zero then this field contains the offset from the end of the
-  real-mode code to the payload.
+  If non-zero then this field contains the offset from the beginning
+  of the protected-mode code to the payload.
 
   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
index c93ff5f4c0ddc0e979daa1ad0e950c8c1c6e31c1..cf08c9fff3cdaa62b2217f8e4526d9a7cc88fbec 100644 (file)
@@ -80,6 +80,30 @@ pci proc               |    --    |    --      |       WC         |
                        |          |            |                  |
 -------------------------------------------------------------------
 
+Advanced APIs for drivers
+-------------------------
+A. Exporting pages to users with remap_pfn_range, io_remap_pfn_range,
+vm_insert_pfn
+
+Drivers wanting to export some pages to userspace do it by using mmap
+interface and a combination of
+1) pgprot_noncached()
+2) io_remap_pfn_range() or remap_pfn_range() or vm_insert_pfn()
+
+With PAT support, a new API pgprot_writecombine is being added. So, drivers can
+continue to use the above sequence, with either pgprot_noncached() or
+pgprot_writecombine() in step 1, followed by step 2.
+
+In addition, step 2 internally tracks the region as UC or WC in memtype
+list in order to ensure no conflicting mapping.
+
+Note that this set of APIs only works with IO (non RAM) regions. If driver
+wants to export a RAM region, it has to do set_memory_uc() or set_memory_wc()
+as step 0 above and also track the usage of those pages and use set_memory_wb()
+before the page is freed to free pool.
+
+
+
 Notes:
 
 -- in the above table mean "Not suggested usage for the API". Some of the --'s
index f6d561a1a9b2d3d022baf17bb05138485fb32130..34c13040a718f6febbccadc1f32bc24f4a060664 100644 (file)
@@ -79,17 +79,6 @@ Timing
   Report when timer interrupts are lost because some code turned off
   interrupts for too long.
 
-  nmi_watchdog=NUMBER[,panic]
-  NUMBER can be:
-  0 don't use an NMI watchdog
-  1 use the IO-APIC timer for the NMI watchdog
-  2 use the local APIC for the NMI watchdog using a performance counter. Note
-  This will use one performance counter and the local APIC's performance
-  vector.
-  When panic is specified panic when an NMI watchdog timeout occurs.
-  This is useful when you use a panic=... timeout and need the box
-  quickly up again.
-
   nohpet
   Don't use the HPET timer.
 
index efce7509736911434e299d0fe63e344c70d7b1e3..29b52b14d0b43950fa79f3948b1fbe4789396fbf 100644 (file)
@@ -6,7 +6,7 @@ Virtual memory map with 4 level page tables:
 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
 hole caused by [48:63] sign extension
 ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole
-ffff810000000000 - ffffc0ffffffffff (=46 bits) direct mapping of all phys. memory
+ffff880000000000 - ffffc0ffffffffff (=57 TB) direct mapping of all phys. memory
 ffffc10000000000 - ffffc1ffffffffff (=40 bits) hole
 ffffc20000000000 - ffffe1ffffffffff (=45 bits) vmalloc/ioremap space
 ffffe20000000000 - ffffe2ffffffffff (=40 bits) virtual memory map (1TB)
index 24741de12a39b8d680974f385770842e0b376bf0..c42a567e010c7db73ef761370179e346867aaf3e 100644 (file)
@@ -1527,10 +1527,10 @@ W:      http://ebtables.sourceforge.net/
 S:     Maintained
 
 ECRYPT FILE SYSTEM
-P:     Mike Halcrow, Phillip Hellewell
-M:     mhalcrow@us.ibm.com, phillip@hellewell.homeip.net
-L:     ecryptfs-devel@lists.sourceforge.net
-W:     http://ecryptfs.sourceforge.net/
+P:     Tyler Hicks, Dustin Kirkland
+M:     tyhicks@linux.vnet.ibm.com, kirkland@canonical.com
+L:     ecryptfs-devel@lists.launchpad.net
+W:     https://launchpad.net/ecryptfs
 S:     Supported
 
 EDAC-CORE
@@ -2191,9 +2191,9 @@ S:        Supported
 
 INOTIFY
 P:     John McCutchan
-M:     ttb@tentacle.dhs.org
+M:     john@johnmccutchan.com
 P:     Robert Love
-M:     rml@novell.com
+M:     rlove@rlove.org
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
index 6c2f51b70454578b8c2210e8069b2e1388d722f8..4c8d79710b84b9c6d58b36cd2269e3c5622765f0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 28
-EXTRAVERSION = -rc8
+EXTRAVERSION = -rc9
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
index 47ccec95f3e867ce5141f7e0db7bdb7f2e57667d..ef12794c3c68f7f2d6be4ec3dcda643540ed091d 100644 (file)
@@ -630,7 +630,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
                return -ENOMEM;
 
        sachip->clk = clk_get(me, "SA1111_CLK");
-       if (!sachip->clk) {
+       if (IS_ERR(sachip->clk)) {
                ret = PTR_ERR(sachip->clk);
                goto err_free;
        }
index c74f766ffc124406e56ab76918db8bdf4c1e30ec..23af3c972c9a7d202c0a71a340e29067d4ac8d74 100644 (file)
@@ -115,6 +115,8 @@ EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 
 #ifdef CONFIG_MMU
+EXPORT_SYMBOL(copy_page);
+
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
@@ -181,8 +183,6 @@ EXPORT_SYMBOL(_find_first_bit_be);
 EXPORT_SYMBOL(_find_next_bit_be);
 #endif
 
-EXPORT_SYMBOL(copy_page);
-
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(mcount);
 #endif
index 57e6874d0b809a21a27afe21d436fa8dca9046e9..79abc4ddc0cf602cf8ae77728fefd8d2080ea826 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
 #include <linux/delay.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
 
index 7b8842cfa5fce30753914f538a0d125d2d1a349d..31e6a7b6ad80d27f055b9ce49da0b7e036e06755 100644 (file)
@@ -12,9 +12,8 @@ extern void clear_reset_status(unsigned int mask);
 
 /**
  * init_gpio_reset() - register GPIO as reset generator
- *
- * @gpio - gpio nr
- * @output - set gpio as out/low instead of input during normal work
+ * @gpio: gpio nr
+ * @output: set gpio as out/low instead of input during normal work
  */
 extern int init_gpio_reset(int gpio, int output);
 
index 2df8d9facf5741c060bb9f028c93b7c3bbe33a6e..22c9530e91e2fd8568fa78e1e5aec95ed7a72a24 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/mm.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
index 5f139b7cb5f7c0aeb22d7b9b2cdba3b3682702f1..604bbd5e41d9b6c6c98dd7ceec835a345e1b22fd 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 
-#include <asm/arch/smc.h>
+#include <mach/smc.h>
 
 static struct smc_timing flash_timing __initdata = {
        .ncs_read_setup         = 0,
index 7538f3d2b9e0742a4a5647cc37a5b1fa7f6e611d..1ee4faf0742dba057a8dd6db0eb589e9758d5936 100644 (file)
 
 #include <asm/setup.h>
 
-#include <asm/arch/at32ap700x.h>
-#include <asm/arch/init.h>
-#include <asm/arch/board.h>
-#include <asm/arch/portmux.h>
+#include <mach/at32ap700x.h>
+#include <mach/init.h>
+#include <mach/board.h>
+#include <mach/portmux.h>
 
 /* Oscillator frequencies. These are board-specific */
 unsigned long at32_board_osc_rates[3] = {
index 219720a47bf93772c77d8a57787fe46812bbb006..1848bf0d7f62642568356ded4833102e91724216 100644 (file)
@@ -10,7 +10,7 @@ MKIMAGE               := $(srctree)/scripts/mkuboot.sh
 
 extra-y                := vmlinux.bin vmlinux.gz
 
-OBJCOPYFLAGS_vmlinux.bin := -O binary
+OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note.gnu.build-id
 $(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
 
index 8b6e54c9946aa835c1afb055350b8c3d166f839c..6c45a3b77aa3a877dc327ae0b978c7b42cae2d07 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc1
-# Tue Aug  5 15:40:26 2008
+# Linux kernel version: 2.6.28-rc8
+# Thu Dec 18 11:22:23 2008
 #
 CONFIG_AVR32=y
 CONFIG_GENERIC_GPIO=y
@@ -67,6 +67,7 @@ CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLUB_DEBUG=y
 # CONFIG_SLAB is not set
@@ -77,15 +78,8 @@ CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_HAVE_OPROFILE=y
 CONFIG_KPROBES=y
-# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
-# CONFIG_HAVE_IOREMAP_PROT is not set
 CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-# CONFIG_USE_GENERIC_SMP_HELPERS is not set
 CONFIG_HAVE_CLK=y
-CONFIG_PROC_PAGE_MONITOR=y
 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -118,6 +112,7 @@ CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
 
 #
 # System Type and features
@@ -134,6 +129,8 @@ CONFIG_CPU_AT32AP700X=y
 CONFIG_CPU_AT32AP7000=y
 CONFIG_BOARD_ATSTK1000=y
 # CONFIG_BOARD_ATNGW100 is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MIMC200 is not set
 # CONFIG_BOARD_ATSTK1002 is not set
 # CONFIG_BOARD_ATSTK1003 is not set
 # CONFIG_BOARD_ATSTK1004 is not set
@@ -171,14 +168,14 @@ CONFIG_FLATMEM_MANUAL=y
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_NR_QUICK=2
 CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
 # CONFIG_OWNERSHIP_TRACE is not set
 CONFIG_NMI_DEBUGGING=y
 # CONFIG_HZ_100 is not set
@@ -186,7 +183,7 @@ CONFIG_HZ_250=y
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
 CONFIG_CMDLINE=""
 
 #
@@ -228,6 +225,8 @@ CONFIG_CPU_FREQ_AT32AP=y
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 CONFIG_NET=y
 
@@ -299,6 +298,7 @@ CONFIG_IPV6_TUNNEL=m
 # CONFIG_ATM is not set
 CONFIG_STP=m
 CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 CONFIG_LLC=m
@@ -321,14 +321,8 @@ CONFIG_LLC=m
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_PHONET is not set
+# CONFIG_WIRELESS is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
 
@@ -359,6 +353,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
 # User Modules And Translation Layers
 #
 CONFIG_MTD_CHAR=y
+CONFIG_HAVE_MTD_OTP=y
 CONFIG_MTD_BLKDEVS=y
 CONFIG_MTD_BLOCK=y
 # CONFIG_FTL is not set
@@ -407,6 +402,8 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
 # Self-contained MTD device drivers
 #
 CONFIG_MTD_DATAFLASH=m
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+CONFIG_MTD_DATAFLASH_OTP=y
 CONFIG_MTD_M25P80=m
 CONFIG_M25PXX_USE_FAST_READ=y
 # CONFIG_MTD_SLRAM is not set
@@ -464,9 +461,10 @@ CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_TCB_CLKSRC=y
 CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
 CONFIG_ATMEL_SSC=m
 # CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_C2PORT is not set
 
 #
 # SCSI device support
@@ -548,6 +546,9 @@ CONFIG_MACB=y
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 # CONFIG_B44 is not set
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
@@ -653,6 +654,7 @@ CONFIG_UNIX98_PTYS=y
 CONFIG_I2C=m
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
 CONFIG_I2C_ALGOBIT=m
 
 #
@@ -716,6 +718,10 @@ CONFIG_GPIOLIB=y
 # CONFIG_DEBUG_GPIO is not set
 CONFIG_GPIO_SYSFS=y
 
+#
+# Memory mapped GPIO expanders:
+#
+
 #
 # I2C GPIO expanders:
 #
@@ -745,11 +751,11 @@ CONFIG_WATCHDOG=y
 #
 # CONFIG_SOFT_WATCHDOG is not set
 CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
@@ -758,6 +764,10 @@ CONFIG_SSB_POSSIBLE=y
 # CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -783,6 +793,7 @@ CONFIG_SSB_POSSIBLE=y
 CONFIG_FB=y
 # CONFIG_FIRMWARE_EDID is not set
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
@@ -804,10 +815,13 @@ CONFIG_FB_CFB_IMAGEBLIT=y
 # CONFIG_FB_S1D13XXX is not set
 CONFIG_FB_ATMEL=y
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_LTV350QV=y
 # CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
 # CONFIG_LCD_VGG2432A4 is not set
 # CONFIG_LCD_PLATFORM is not set
 # CONFIG_BACKLIGHT_CLASS_DEVICE is not set
@@ -818,6 +832,7 @@ CONFIG_LCD_LTV350QV=y
 # CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_LOGO is not set
 CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -848,28 +863,32 @@ CONFIG_USB_SUPPORT=y
 # CONFIG_USB_ARCH_HAS_EHCI is not set
 # CONFIG_USB_OTG_WHITELIST is not set
 # CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
 #
 CONFIG_USB_GADGET=y
 # CONFIG_USB_GADGET_DEBUG is not set
 # CONFIG_USB_GADGET_DEBUG_FILES is not set
 # CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
 CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
 CONFIG_USB_GADGET_ATMEL_USBA=y
 CONFIG_USB_ATMEL_USBA=y
 # CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
 # CONFIG_USB_GADGET_LH7A40X is not set
 # CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
 # CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
 # CONFIG_USB_GADGET_DUMMY_HCD is not set
 CONFIG_USB_GADGET_DUALSPEED=y
 CONFIG_USB_ZERO=m
@@ -887,7 +906,7 @@ CONFIG_MMC=y
 # CONFIG_MMC_UNSAFE_RESUME is not set
 
 #
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
 #
 CONFIG_MMC_BLOCK=y
 CONFIG_MMC_BLOCK_BOUNCE=y
@@ -895,10 +914,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y
 # CONFIG_MMC_TEST is not set
 
 #
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
 #
 # CONFIG_MMC_SDHCI is not set
 CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
 CONFIG_MMC_SPI=m
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -918,6 +938,7 @@ CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 # CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
@@ -950,25 +971,31 @@ CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
 
 #
 # SPI RTC drivers
 #
 # CONFIG_RTC_DRV_M41T94 is not set
 # CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
 # CONFIG_RTC_DRV_R9701 is not set
 # CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
 # CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
 # CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
@@ -989,6 +1016,8 @@ CONFIG_DMA_ENGINE=y
 # CONFIG_NET_DMA is not set
 CONFIG_DMATEST=m
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_STAGING_EXCLUDE_BUILD=y
 
 #
 # File systems
@@ -998,12 +1027,17 @@ CONFIG_EXT2_FS=m
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=m
 # CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+CONFIG_EXT4_FS=m
+CONFIG_EXT4DEV_COMPAT=y
+# CONFIG_EXT4_FS_XATTR is not set
 CONFIG_JBD=m
 # CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=m
+# CONFIG_JBD2_DEBUG is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_DNOTIFY is not set
@@ -1036,6 +1070,7 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
@@ -1054,7 +1089,8 @@ CONFIG_TMPFS=y
 # CONFIG_EFS_FS is not set
 CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_FS_DEBUG=0
-# CONFIG_JFFS2_FS_WRITEBUFFER is not set
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
 # CONFIG_JFFS2_SUMMARY is not set
 # CONFIG_JFFS2_FS_XATTR is not set
 # CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
@@ -1088,6 +1124,7 @@ CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -1185,10 +1222,21 @@ CONFIG_DEBUG_BUGVERBOSE=y
 CONFIG_FRAME_POINTER=y
 # CONFIG_BOOT_PRINTK_DELAY is not set
 # CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_KPROBES_SANITY_TEST is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_LKDTM is not set
 # CONFIG_FAULT_INJECTION is not set
+
+#
+# Tracers
+#
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
 
 #
@@ -1196,17 +1244,26 @@ CONFIG_FRAME_POINTER=y
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 CONFIG_CRYPTO=y
 
 #
 # Crypto core or helper
 #
+CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=y
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_CRYPTD is not set
@@ -1257,7 +1314,7 @@ CONFIG_CRYPTO_SHA1=m
 #
 # Ciphers
 #
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
 # CONFIG_CRYPTO_ANUBIS is not set
 # CONFIG_CRYPTO_ARC4 is not set
 # CONFIG_CRYPTO_BLOWFISH is not set
@@ -1278,14 +1335,17 @@ CONFIG_CRYPTO_DES=m
 #
 CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
 # CONFIG_CRYPTO_HW is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
-# CONFIG_GENERIC_FIND_NEXT_BIT is not set
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=m
index 0c6e02f80a31ce6cd562aa2147d0144a3e89b913..066252eebf614e98ffb96c1c58705cbb9e35840b 100644 (file)
@@ -967,28 +967,28 @@ static inline void configure_usart0_pins(void)
 {
        u32 pin_mask = (1 << 8) | (1 << 9); /* RXD & TXD */
 
-       select_peripheral(PIOA, pin_mask, PERIPH_B, 0);
+       select_peripheral(PIOA, pin_mask, PERIPH_B, AT32_GPIOF_PULLUP);
 }
 
 static inline void configure_usart1_pins(void)
 {
        u32 pin_mask = (1 << 17) | (1 << 18); /* RXD & TXD */
 
-       select_peripheral(PIOA, pin_mask, PERIPH_A, 0);
+       select_peripheral(PIOA, pin_mask, PERIPH_A, AT32_GPIOF_PULLUP);
 }
 
 static inline void configure_usart2_pins(void)
 {
        u32 pin_mask = (1 << 26) | (1 << 27); /* RXD & TXD */
 
-       select_peripheral(PIOB, pin_mask, PERIPH_B, 0);
+       select_peripheral(PIOB, pin_mask, PERIPH_B, AT32_GPIOF_PULLUP);
 }
 
 static inline void configure_usart3_pins(void)
 {
        u32 pin_mask = (1 << 18) | (1 << 17); /* RXD & TXD */
 
-       select_peripheral(PIOB, pin_mask, PERIPH_B, 0);
+       select_peripheral(PIOB, pin_mask, PERIPH_B, AT32_GPIOF_PULLUP);
 }
 
 static struct platform_device *__initdata at32_usarts[4];
index f92306bbedb848eb146ece3afde7729abdddac13..8d513a8c5266b4db31ef1cb6fd6eb75777c7dda3 100644 (file)
@@ -4,6 +4,7 @@ menu "HP Simulator drivers"
 
 config HP_SIMETH
        bool "Simulated Ethernet "
+       depends on NET
 
 config HP_SIMSERIAL
        bool "Simulated serial driver support"
index bdc8b0e860e5e1b25cc3dafe23455ec1a738812b..5c33bc14bd9fe161a75d7c12bc6809e77cba5a93 100644 (file)
@@ -479,17 +479,20 @@ _GLOBAL(_tlbil_pid)
  * (no broadcast)
  */
 _GLOBAL(_tlbil_va)
+       mfmsr   r10
+       wrteei  0
        slwi    r4,r4,16
        mtspr   SPRN_MAS6,r4            /* assume AS=0 for now */
        tlbsx   0,r3
        mfspr   r4,SPRN_MAS1            /* check valid */
        andis.  r3,r4,MAS1_VALID@h
-       beqlr
+       beq     1f
        rlwinm  r4,r4,0,1,31
        mtspr   SPRN_MAS1,r4
        tlbwe
        msync
        isync
+1:     wrtee   r10
        blr
 #endif /* CONFIG_FSL_BOOKE */
 
index 29b2941cada0b67ac5be0248c2a1321179bf19a6..45907c1dae66da343344b5e1cfe9c682afc409ff 100644 (file)
@@ -556,6 +556,7 @@ unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, co
                be = blk->start + blk->size;
                if (s >= bs && e <= be)
                        break;
+               blk = NULL;
        }
 
        if (blk == NULL)
index 7bbf4e4ed4306b3a395906b6307808df326fb7cf..f0c3b88d50fa5a5f98847bd1914be1881885af9d 100644 (file)
@@ -507,6 +507,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 {
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
+
+       if (!mmu_huge_psizes[mmu_psize])
+               return -EINVAL;
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
 }
 
index a8397bbad3d4d54a5cd50ae9ee201118c767d1d4..cf81049e1e51a0ce98d0ebdf9b7a74d5901de211 100644 (file)
@@ -901,10 +901,17 @@ static void mark_reserved_regions_for_nid(int nid)
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - (start_pfn << PAGE_SHIFT);
-                       dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-                               reserve_size, node_ar.nid);
-                       reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-                                               reserve_size, BOOTMEM_DEFAULT);
+                       /*
+                        * Only worry about *this* node, others may not
+                        * yet have valid NODE_DATA().
+                        */
+                       if (node_ar.nid == nid) {
+                               dbg("reserve_bootmem %lx %lx nid=%d\n",
+                                       physbase, reserve_size, node_ar.nid);
+                               reserve_bootmem_node(NODE_DATA(node_ar.nid),
+                                               physbase, reserve_size,
+                                               BOOTMEM_DEFAULT);
+                       }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
@@ -929,7 +936,6 @@ static void mark_reserved_regions_for_nid(int nid)
 void __init do_init_bootmem(void)
 {
        int nid;
-       unsigned int i;
 
        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
index 442cf36aa172920b355710e65a68624d43293d33..0ce45c2b42f8eb67a1ea5156fcf476bdb3f3924a 100644 (file)
@@ -413,6 +413,9 @@ static int axon_msi_probe(struct of_device *device,
                        MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
                        MSIC_CTRL_FIFO_SIZE);
 
+       msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
+                               & MSIC_FIFO_SIZE_MASK;
+
        device->dev.platform_data = msic;
 
        ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
index 80119b3398e7f2acb1f2a1091c247cb8193e947e..5c9cbfc14c4d5ad9ad06bf18f3a4b2f523eebd0b 100644 (file)
@@ -55,6 +55,8 @@ config GENERIC_HARDIRQS
 
 config GENERIC_HARDIRQS_NO__DO_IRQ
        def_bool y
+       depends on SUPERH32 && (!SH_DREAMCAST && !SH_SH4202_MICRODEV && \
+                               !SH_7751_SYSTEMH && !HD64461)
 
 config GENERIC_IRQ_PROBE
        def_bool y
index d409c4f21a5cdea420e75fef24a2ceec1cb0e834..4cef450167dd6ae697e7e429078979f2277f92fd 100644 (file)
@@ -62,6 +62,8 @@ struct sparc_stackf {
 
 #ifdef __KERNEL__
 
+#include <asm/system.h>
+
 static inline bool pt_regs_is_syscall(struct pt_regs *regs)
 {
        return (regs->psr & PSR_SYSCALL);
@@ -72,6 +74,14 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
        return (regs->psr &= ~PSR_SYSCALL);
 }
 
+#define arch_ptrace_stop_needed(exit_code, info) \
+({     flush_user_windows(); \
+       current_thread_info()->w_saved != 0;    \
+})
+
+#define arch_ptrace_stop(exit_code, info) \
+       synchronize_user_stack()
+
 #define user_mode(regs) (!((regs)->psr & PSR_PS))
 #define instruction_pointer(regs) ((regs)->pc)
 #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
index 84e969f06afe318d986a9890d130cbf26759b704..cd6fbfc2043533636dd16fec2918cf0a07124be1 100644 (file)
@@ -114,6 +114,7 @@ struct sparc_trapf {
 #ifdef __KERNEL__
 
 #include <linux/threads.h>
+#include <asm/system.h>
 
 static inline int pt_regs_trap_type(struct pt_regs *regs)
 {
@@ -130,6 +131,14 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
        return (regs->tstate &= ~TSTATE_SYSCALL);
 }
 
+#define arch_ptrace_stop_needed(exit_code, info) \
+({     flush_user_windows(); \
+       get_thread_wsaved() != 0; \
+})
+
+#define arch_ptrace_stop(exit_code, info) \
+       synchronize_user_stack()
+
 struct global_reg_snapshot {
        unsigned long           tstate;
        unsigned long           tpc;
index ac22bb7719f730e6b8d12b305ec9e08952a513a4..1cbec0269d394163a9245c021b282e454db3531c 100644 (file)
@@ -19,6 +19,8 @@ config X86_64
 config X86
        def_bool y
        select HAVE_AOUT if X86_32
+       select HAVE_READQ
+       select HAVE_WRITEQ
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_IDE
        select HAVE_OPROFILE
@@ -87,6 +89,10 @@ config GENERIC_IOMAP
 config GENERIC_BUG
        def_bool y
        depends on BUG
+       select GENERIC_BUG_RELATIVE_POINTERS if X86_64
+
+config GENERIC_BUG_RELATIVE_POINTERS
+       bool
 
 config GENERIC_HWEIGHT
        def_bool y
@@ -242,21 +248,13 @@ config X86_FIND_SMP_CONFIG
        def_bool y
        depends on X86_MPPARSE || X86_VOYAGER
 
-if ACPI
 config X86_MPPARSE
-       def_bool y
-       bool "Enable MPS table"
+       bool "Enable MPS table" if ACPI
+       default y
        depends on X86_LOCAL_APIC
        help
          For old smp systems that do not have proper acpi support. Newer systems
          (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
-endif
-
-if !ACPI
-config X86_MPPARSE
-       def_bool y
-       depends on X86_LOCAL_APIC
-endif
 
 choice
        prompt "Subarchitecture Type"
@@ -465,10 +463,6 @@ config X86_CYCLONE_TIMER
        def_bool y
        depends on X86_GENERICARCH
 
-config ES7000_CLUSTERED_APIC
-       def_bool y
-       depends on SMP && X86_ES7000 && MPENTIUMIII
-
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
@@ -660,6 +654,30 @@ config X86_VISWS_APIC
        def_bool y
        depends on X86_32 && X86_VISWS
 
+config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
+       bool "Reroute for broken boot IRQs"
+       default n
+       depends on X86_IO_APIC
+       help
+         This option enables a workaround that fixes a source of
+         spurious interrupts. This is recommended when threaded
+         interrupt handling is used on systems where the generation of
+         superfluous "boot interrupts" cannot be disabled.
+
+         Some chipsets generate a legacy INTx "boot IRQ" when the IRQ
+         entry in the chipset's IO-APIC is masked (as, e.g. the RT
+         kernel does during interrupt handling). On chipsets where this
+         boot IRQ generation cannot be disabled, this workaround keeps
+         the original IRQ line masked so that only the equivalent "boot
+         IRQ" is delivered to the CPUs. The workaround also tells the
+         kernel to set up the IRQ handler on the boot IRQ line. In this
+         way only one interrupt is delivered to the kernel. Otherwise
+         the spurious second interrupt may cause the kernel to bring
+         down (vital) interrupt lines.
+
+         Only affects "broken" chipsets. Interrupt sharing may be
+         increased on these systems.
+
 config X86_MCE
        bool "Machine Check Exception"
        depends on !X86_VOYAGER
@@ -956,24 +974,37 @@ config X86_PAE
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool X86_64 || X86_PAE
 
+config DIRECT_GBPAGES
+       bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+       default y
+       depends on X86_64
+       help
+         Allow the kernel linear mapping to use 1GB pages on CPUs that
+         support it. This can improve the kernel's performance a tiny bit by
+         reducing TLB pressure. If in doubt, say "Y".
+
 # Common NUMA Features
 config NUMA
-       bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
+       bool "Numa Memory Allocation and Scheduler Support"
        depends on SMP
        depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
        default n if X86_PC
        default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
        help
          Enable NUMA (Non Uniform Memory Access) support.
+
          The kernel will try to allocate memory used by a CPU on the
          local memory controller of the CPU and add some more
          NUMA awareness to the kernel.
 
-         For 32-bit this is currently highly experimental and should be only
-         used for kernel development. It might also cause boot failures.
-         For 64-bit this is recommended on all multiprocessor Opteron systems.
-         If the system is EM64T, you should say N unless your system is
-         EM64T NUMA.
+         For 64-bit this is recommended if the system is Intel Core i7
+         (or later), AMD Opteron, or EM64T NUMA.
+
+         For 32-bit this is only needed on (rare) 32-bit-only platforms
+         that support NUMA topologies, such as NUMAQ / Summit, or if you
+         boot a 32-bit kernel on a 64-bit NUMA platform.
+
+         Otherwise, you should say N.
 
 comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
        depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
@@ -1493,6 +1524,10 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
        def_bool y
        depends on X86_64 || (X86_32 && HIGHMEM)
 
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+       def_bool y
+       depends on MEMORY_HOTPLUG
+
 config HAVE_ARCH_EARLY_PFN_TO_NID
        def_bool X86_64
        depends on NUMA
@@ -1632,13 +1667,6 @@ config APM_ALLOW_INTS
          many of the newer IBM Thinkpads.  If you experience hangs when you
          suspend, try setting this to Y.  Otherwise, say N.
 
-config APM_REAL_MODE_POWER_OFF
-       bool "Use real mode APM BIOS call to power off"
-       help
-         Use real mode APM BIOS calls to switch off the computer. This is
-         a work-around for a number of buggy BIOSes. Switch this option on if
-         your computer crashes instead of powering off properly.
-
 endif # APM
 
 source "arch/x86/kernel/cpu/cpufreq/Kconfig"
index 2a3dfbd5e677b548e5e75f43da69e934b90a7eff..4ee768660f75098e465b1fb29fe79e384ce87cd1 100644 (file)
@@ -114,18 +114,6 @@ config DEBUG_RODATA
          data. This is recommended so that we can catch kernel bugs sooner.
          If in doubt, say "Y".
 
-config DIRECT_GBPAGES
-       bool "Enable gbpages-mapped kernel pagetables"
-       depends on DEBUG_KERNEL && EXPERIMENTAL && X86_64
-       help
-         Enable gigabyte pages support (if the CPU supports it). This can
-         improve the kernel's performance a tiny bit by reducing TLB
-         pressure.
-
-         This is experimental code.
-
-         If in doubt, say "N".
-
 config DEBUG_RODATA_TEST
        bool "Testcase for the DEBUG_RODATA feature"
        depends on DEBUG_RODATA
@@ -307,10 +295,10 @@ config OPTIMIZE_INLINING
          developers have marked 'inline'. Doing so takes away freedom from gcc to
          do what it thinks is best, which is desirable for the gcc 3.x series of
          compilers. The gcc 4.x series have a rewritten inlining algorithm and
-         disabling this option will generate a smaller kernel there. Hopefully
-         this algorithm is so good that allowing gcc4 to make the decision can
-         become the default in the future, until then this option is there to
-         test gcc for this.
+         enabling this option will generate a smaller kernel there. Hopefully
+         this algorithm is so good that allowing gcc 4.x and above to make the
+         decision will become the default in the future. Until then this option
+         is there to test gcc for this.
 
          If unsure, say N.
 
index b939cb476decf7d4d3089f6210c4fe3ecfb5f5be..5d4742ed4aa21dd83d968798a1c7ff92447907a1 100644 (file)
@@ -34,7 +34,7 @@ static struct mode_info cga_modes[] = {
        { VIDEO_80x25,  80, 25, 0 },
 };
 
-__videocard video_vga;
+static __videocard video_vga;
 
 /* Set basic 80x25 mode */
 static u8 vga_set_basic_mode(void)
@@ -259,7 +259,7 @@ static int vga_probe(void)
        return mode_count[adapter];
 }
 
-__videocard video_vga = {
+static __videocard video_vga = {
        .card_name      = "VGA",
        .probe          = vga_probe,
        .set_mode       = vga_set_mode,
index 83598b23093aa31398db2d1c9a5f89eb30733357..3bef2c1febe936bd01f1b8d7d6b7d1eb5b1f9bb6 100644 (file)
@@ -226,7 +226,7 @@ static unsigned int mode_menu(void)
 
 #ifdef CONFIG_VIDEO_RETAIN
 /* Save screen content to the heap */
-struct saved_screen {
+static struct saved_screen {
        int x, y;
        int curx, cury;
        u16 *data;
index 13b8c86ae98570f67375b505e9d106374e24b574..b30a08ed8eb48b8f64f9c79cc5e62f11d63ec669 100644 (file)
@@ -77,7 +77,7 @@ CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_AUDIT_TREE=y
 # CONFIG_IKCONFIG is not set
-CONFIG_LOG_BUF_SHIFT=17
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
 # CONFIG_CGROUP_DEBUG is not set
 CONFIG_CGROUP_NS=y
@@ -298,7 +298,7 @@ CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
 # CONFIG_KEXEC_JUMP is not set
 CONFIG_PHYSICAL_START=0x1000000
-CONFIG_RELOCATABLE=y
+# CONFIG_RELOCATABLE is not set
 CONFIG_PHYSICAL_ALIGN=0x200000
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_COMPAT_VDSO is not set
index f0a03d7a7d63f2f2c32f7daea043a0935021bba3..0e7dbc0a3e460e2934887c8c0d744c0515f66c6d 100644 (file)
@@ -77,7 +77,7 @@ CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_AUDIT_TREE=y
 # CONFIG_IKCONFIG is not set
-CONFIG_LOG_BUF_SHIFT=17
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
 # CONFIG_CGROUP_DEBUG is not set
 CONFIG_CGROUP_NS=y
@@ -298,7 +298,7 @@ CONFIG_SCHED_HRTICK=y
 CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
 CONFIG_PHYSICAL_START=0x1000000
-CONFIG_RELOCATABLE=y
+# CONFIG_RELOCATABLE is not set
 CONFIG_PHYSICAL_ALIGN=0x200000
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_COMPAT_VDSO is not set
index 4bc02b23674b5dfaaf91e813fb2be320dc57cd23..b195f85526e35813e08e93b2553587a37896af2f 100644 (file)
@@ -32,6 +32,8 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 
+#include <asm/sigframe.h>
+
 #define DEBUG_SIG 0
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -41,7 +43,6 @@
                         X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
                         X86_EFLAGS_CF)
 
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
@@ -173,47 +174,28 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
 /*
  * Do a signal return; undo the signal stack.
  */
+#define COPY(x)                        {               \
+       err |= __get_user(regs->x, &sc->x);     \
+}
 
-struct sigframe
-{
-       u32 pretcode;
-       int sig;
-       struct sigcontext_ia32 sc;
-       struct _fpstate_ia32 fpstate_unused; /* look at kernel/sigframe.h */
-       unsigned int extramask[_COMPAT_NSIG_WORDS-1];
-       char retcode[8];
-       /* fp state follows here */
-};
-
-struct rt_sigframe
-{
-       u32 pretcode;
-       int sig;
-       u32 pinfo;
-       u32 puc;
-       compat_siginfo_t info;
-       struct ucontext_ia32 uc;
-       char retcode[8];
-       /* fp state follows here */
-};
-
-#define COPY(x)                {               \
-       unsigned int reg;               \
-       err |= __get_user(reg, &sc->x); \
-       regs->x = reg;                  \
+#define COPY_SEG_CPL3(seg)     {                       \
+               unsigned short tmp;                     \
+               err |= __get_user(tmp, &sc->seg);       \
+               regs->seg = tmp | 3;                    \
 }
 
-#define RELOAD_SEG(seg,mask)                                           \
-       { unsigned int cur;                                             \
-         unsigned short pre;                                           \
-         err |= __get_user(pre, &sc->seg);                             \
-         savesegment(seg, cur);                                        \
-         pre |= mask;                                                  \
-         if (pre != cur) loadsegment(seg, pre); }
+#define RELOAD_SEG(seg)                {               \
+       unsigned int cur, pre;                  \
+       err |= __get_user(pre, &sc->seg);       \
+       savesegment(seg, cur);                  \
+       pre |= 3;                               \
+       if (pre != cur)                         \
+               loadsegment(seg, pre);          \
+}
 
 static int ia32_restore_sigcontext(struct pt_regs *regs,
                                   struct sigcontext_ia32 __user *sc,
-                                  unsigned int *peax)
+                                  unsigned int *pax)
 {
        unsigned int tmpflags, gs, oldgs, err = 0;
        void __user *buf;
@@ -240,18 +222,16 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
        if (gs != oldgs)
                load_gs_index(gs);
 
-       RELOAD_SEG(fs, 3);
-       RELOAD_SEG(ds, 3);
-       RELOAD_SEG(es, 3);
+       RELOAD_SEG(fs);
+       RELOAD_SEG(ds);
+       RELOAD_SEG(es);
 
        COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
        COPY(dx); COPY(cx); COPY(ip);
        /* Don't touch extended registers */
 
-       err |= __get_user(regs->cs, &sc->cs);
-       regs->cs |= 3;
-       err |= __get_user(regs->ss, &sc->ss);
-       regs->ss |= 3;
+       COPY_SEG_CPL3(cs);
+       COPY_SEG_CPL3(ss);
 
        err |= __get_user(tmpflags, &sc->flags);
        regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -262,15 +242,13 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
        buf = compat_ptr(tmp);
        err |= restore_i387_xstate_ia32(buf);
 
-       err |= __get_user(tmp, &sc->ax);
-       *peax = tmp;
-
+       err |= __get_user(*pax, &sc->ax);
        return err;
 }
 
 asmlinkage long sys32_sigreturn(struct pt_regs *regs)
 {
-       struct sigframe __user *frame = (struct sigframe __user *)(regs->sp-8);
+       struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
        sigset_t set;
        unsigned int ax;
 
@@ -300,12 +278,12 @@ badframe:
 
 asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
 {
-       struct rt_sigframe __user *frame;
+       struct rt_sigframe_ia32 __user *frame;
        sigset_t set;
        unsigned int ax;
        struct pt_regs tregs;
 
-       frame = (struct rt_sigframe __user *)(regs->sp - 4);
+       frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);
 
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
@@ -359,20 +337,15 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
        err |= __put_user(regs->dx, &sc->dx);
        err |= __put_user(regs->cx, &sc->cx);
        err |= __put_user(regs->ax, &sc->ax);
-       err |= __put_user(regs->cs, &sc->cs);
-       err |= __put_user(regs->ss, &sc->ss);
        err |= __put_user(current->thread.trap_no, &sc->trapno);
        err |= __put_user(current->thread.error_code, &sc->err);
        err |= __put_user(regs->ip, &sc->ip);
+       err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
        err |= __put_user(regs->flags, &sc->flags);
        err |= __put_user(regs->sp, &sc->sp_at_signal);
+       err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
 
-       tmp = save_i387_xstate_ia32(fpstate);
-       if (tmp < 0)
-               err = -EFAULT;
-       else
-               err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
-                                       &sc->fpstate);
+       err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
 
        /* non-iBCS2 extensions.. */
        err |= __put_user(mask, &sc->oldmask);
@@ -400,7 +373,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
        }
 
        /* This is the legacy signal stack switching. */
-       else if ((regs->ss & 0xffff) != __USER_DS &&
+       else if ((regs->ss & 0xffff) != __USER32_DS &&
                !(ka->sa.sa_flags & SA_RESTORER) &&
                 ka->sa.sa_restorer)
                sp = (unsigned long) ka->sa.sa_restorer;
@@ -408,6 +381,8 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
        if (used_math()) {
                sp = sp - sig_xstate_ia32_size;
                *fpstate = (struct _fpstate_ia32 *) sp;
+               if (save_i387_xstate_ia32(*fpstate) < 0)
+                       return (void __user *) -1L;
        }
 
        sp -= frame_size;
@@ -420,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 int ia32_setup_frame(int sig, struct k_sigaction *ka,
                     compat_sigset_t *set, struct pt_regs *regs)
 {
-       struct sigframe __user *frame;
+       struct sigframe_ia32 __user *frame;
        void __user *restorer;
        int err = 0;
        void __user *fpstate = NULL;
@@ -430,12 +405,10 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
                u16 poplmovl;
                u32 val;
                u16 int80;
-               u16 pad;
        } __attribute__((packed)) code = {
                0xb858,          /* popl %eax ; movl $...,%eax */
                __NR_ia32_sigreturn,
                0x80cd,         /* int $0x80 */
-               0,
        };
 
        frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
@@ -471,7 +444,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
         * These are actually not used anymore, but left because some
         * gdb versions depend on them as a marker.
         */
-       err |= __copy_to_user(frame->retcode, &code, 8);
+       err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
        if (err)
                return -EFAULT;
 
@@ -501,7 +474,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                        compat_sigset_t *set, struct pt_regs *regs)
 {
-       struct rt_sigframe __user *frame;
+       struct rt_sigframe_ia32 __user *frame;
        void __user *restorer;
        int err = 0;
        void __user *fpstate = NULL;
@@ -511,8 +484,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                u8 movl;
                u32 val;
                u16 int80;
-               u16 pad;
-               u8  pad2;
+               u8  pad;
        } __attribute__((packed)) code = {
                0xb8,
                __NR_ia32_rt_sigreturn,
@@ -559,7 +531,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         * Not actually used anymore, but left because some gdb
         * versions need it.
         */
-       err |= __copy_to_user(frame->retcode, &code, 8);
+       err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
        if (err)
                return -EFAULT;
 
@@ -572,11 +544,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->dx = (unsigned long) &frame->info;
        regs->cx = (unsigned long) &frame->uc;
 
-       /* Make -mregparm=3 work */
-       regs->ax = sig;
-       regs->dx = (unsigned long) &frame->info;
-       regs->cx = (unsigned long) &frame->uc;
-
        loadsegment(ds, __USER32_DS);
        loadsegment(es, __USER32_DS);
 
index 3b1510b4fc5741290d5660bb4c30f34bc70f6371..25caa0738af5f9d99399d3c2f36405bc62d7115b 100644 (file)
@@ -193,6 +193,7 @@ extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
 static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok         1
 static inline void init_apic_mappings(void) { }
+static inline void disable_local_APIC(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
index 1d9543b9d35824c259a1110005866be3d1f54a4e..ce547f24a1cd30da5da6a9fc088d4a94cae2f7c8 100644 (file)
@@ -24,8 +24,6 @@ static inline cpumask_t target_cpus(void)
 #define INT_DELIVERY_MODE      (dest_Fixed)
 #define INT_DEST_MODE          (0)    /* phys delivery to target proc */
 #define NO_BALANCE_IRQ         (0)
-#define WAKE_SECONDARY_VIA_INIT
-
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
 {
index 360010322711037f80040829acceed8de6a15f42..9fa9dcdf344baeb25ccfa79db6222b5aae470ba5 100644 (file)
@@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  */
 static inline void change_bit(int nr, volatile unsigned long *addr)
 {
-       asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
+       if (IS_IMMEDIATE(nr)) {
+               asm volatile(LOCK_PREFIX "xorb %1,%0"
+                       : CONST_MASK_ADDR(nr, addr)
+                       : "iq" ((u8)CONST_MASK(nr)));
+       } else {
+               asm volatile(LOCK_PREFIX "btc %1,%0"
+                       : BITOP_ADDR(addr)
+                       : "Ir" (nr));
+       }
 }
 
 /**
index 3def2065fcea73ba085fcb31ee5063e2cfc57adb..d9cf1cd156d24b5cebe5a8ed41292b6300e8c9c2 100644 (file)
@@ -9,7 +9,7 @@
 #ifdef CONFIG_X86_32
 # define __BUG_C0      "2:\t.long 1b, %c0\n"
 #else
-# define __BUG_C0      "2:\t.quad 1b, %c0\n"
+# define __BUG_C0      "2:\t.long 1b - 2b, %c0 - 2b\n"
 #endif
 
 #define BUG()                                                  \
index e02ae2d89acf13936aad26502e485aa6ddae0b65..f110ad417df3e0121333cbb06564a492ef8be2c2 100644 (file)
@@ -4,26 +4,33 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
-#ifdef __GNUC__
+#define __LITTLE_ENDIAN
 
-#ifdef __i386__
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
 {
-#ifdef CONFIG_X86_BSWAP
-       asm("bswap %0" : "=r" (x) : "0" (x));
-#else
+#ifdef __i386__
+# ifdef CONFIG_X86_BSWAP
+       asm("bswap %0" : "=r" (val) : "0" (val));
+# else
        asm("xchgb %b0,%h0\n\t" /* swap lower bytes     */
            "rorl $16,%0\n\t"   /* swap words           */
            "xchgb %b0,%h0"     /* swap higher bytes    */
-           : "=q" (x)
-           : "0" (x));
+           : "=q" (val)
+           : "0" (val));
+# endif
+
+#else /* __i386__ */
+       asm("bswapl %0"
+           : "=r" (val)
+           : "0" (val));
 #endif
-       return x;
+       return val;
 }
+#define __arch_swab32 __arch_swab32
 
-static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
 {
+#ifdef __i386__
        union {
                struct {
                        __u32 a;
@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
                __u64 u;
        } v;
        v.u = val;
-#ifdef CONFIG_X86_BSWAP
+# ifdef CONFIG_X86_BSWAP
        asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
            : "=r" (v.s.a), "=r" (v.s.b)
            : "0" (v.s.a), "1" (v.s.b));
-#else
-       v.s.a = ___arch__swab32(v.s.a);
-       v.s.b = ___arch__swab32(v.s.b);
+# else
+       v.s.a = __arch_swab32(v.s.a);
+       v.s.b = __arch_swab32(v.s.b);
        asm("xchgl %0,%1"
            : "=r" (v.s.a), "=r" (v.s.b)
            : "0" (v.s.a), "1" (v.s.b));
-#endif
+# endif
        return v.u;
-}
-
 #else /* __i386__ */
-
-static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
        asm("bswapq %0"
-           : "=r" (x)
-           : "0" (x));
-       return x;
-}
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-       asm("bswapl %0"
-           : "=r" (x)
-           : "0" (x));
-       return x;
-}
-
+           : "=r" (val)
+           : "0" (val));
+       return val;
 #endif
+}
+#define __arch_swab64 __arch_swab64
 
-/* Do not define swab16.  Gcc is smart enough to recognize "C" version and
-   convert it into rotation or exhange.  */
-
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#define __BYTEORDER_HAS_U64__
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/little_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _ASM_X86_BYTEORDER_H */
index cfdf8c2c5c313a0de8639577cb12a3679292637c..ea408dcba5135a04d35514e784ce7837ad90e285 100644 (file)
@@ -80,7 +80,6 @@
 #define X86_FEATURE_UP         (3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_NOPL       (3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS       (3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS                (3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32  (3*32+14) /* "" syscall in ia32 userspace */
@@ -92,6 +91,8 @@
 #define X86_FEATURE_NOPL       (3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E     (3*32+21) /* AMD C1E detected */
 #define X86_FEATURE_XTOPOLOGY  (3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC        (3*32+24) /* TSC does not stop in C states */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3       (4*32+ 0) /* "pni" SSE-3 */
 #define X86_FEATURE_XSAVE      (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE    (4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX                (4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE     (5*32+ 2) /* "rng" RNG present (xstore) */
@@ -237,6 +239,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2         boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic         boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave          boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg                1
index 94826cf874557acedd7afe248daa588a0c6383dd..cc70c1c78ca47545bb7b927f878d3cb02275f142 100644 (file)
@@ -8,7 +8,9 @@ enum reboot_type {
        BOOT_BIOS = 'b',
 #endif
        BOOT_ACPI = 'a',
-       BOOT_EFI = 'e'
+       BOOT_EFI = 'e',
+       BOOT_CF9 = 'p',
+       BOOT_CF9_COND = 'q',
 };
 
 extern enum reboot_type reboot_type;
index 380f0b4f17edfcf5c1e1440baed7231e07cdb684..e24ef876915f8833cd996c1a879b1b3ecaa54826 100644 (file)
@@ -9,31 +9,27 @@ static inline int apic_id_registered(void)
                return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline cpumask_t target_cpus_cluster(void)
 {
-#if defined CONFIG_ES7000_CLUSTERED_APIC
        return CPU_MASK_ALL;
-#else
+}
+
+static inline cpumask_t target_cpus(void)
+{
        return cpumask_of_cpu(smp_processor_id());
-#endif
 }
 
-#if defined CONFIG_ES7000_CLUSTERED_APIC
-#define APIC_DFR_VALUE         (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE      (dest_LowestPrio)
-#define INT_DEST_MODE          (1)    /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ         (1)
-#undef  WAKE_SECONDARY_VIA_INIT
-#define WAKE_SECONDARY_VIA_MIP
-#else
+#define APIC_DFR_VALUE_CLUSTER         (APIC_DFR_CLUSTER)
+#define INT_DELIVERY_MODE_CLUSTER      (dest_LowestPrio)
+#define INT_DEST_MODE_CLUSTER          (1) /* logical delivery broadcast to all procs */
+#define NO_BALANCE_IRQ_CLUSTER         (1)
+
 #define APIC_DFR_VALUE         (APIC_DFR_FLAT)
 #define INT_DELIVERY_MODE      (dest_Fixed)
 #define INT_DEST_MODE          (0)    /* phys delivery to target procs */
 #define NO_BALANCE_IRQ         (0)
 #undef  APIC_DEST_LOGICAL
 #define APIC_DEST_LOGICAL      0x0
-#define WAKE_SECONDARY_VIA_INIT
-#endif
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
 {
@@ -60,6 +56,16 @@ static inline unsigned long calculate_ldr(int cpu)
  * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
  * document number 292116).  So here it goes...
  */
+static inline void init_apic_ldr_cluster(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
 static inline void init_apic_ldr(void)
 {
        unsigned long val;
@@ -70,10 +76,6 @@ static inline void init_apic_ldr(void)
        apic_write(APIC_LDR, val);
 }
 
-#ifndef CONFIG_X86_GENERICARCH
-extern void enable_apic_mode(void);
-#endif
-
 extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
@@ -144,7 +146,7 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
        return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 {
        int num_bits_set;
        int cpus_found = 0;
@@ -154,11 +156,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
        num_bits_set = cpus_weight(cpumask);
        /* Return id to all */
        if (num_bits_set == NR_CPUS)
-#if defined CONFIG_ES7000_CLUSTERED_APIC
                return 0xFF;
-#else
-               return cpu_to_logical_apicid(0);
-#endif
        /*
         * The cpus in the mask must all be on the apic cluster.  If are not
         * on the same apicid cluster return default value of TARGET_CPUS.
@@ -171,11 +169,40 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
                        if (apicid_cluster(apicid) !=
                                        apicid_cluster(new_apicid)){
                                printk ("%s: Not a valid mask!\n", __func__);
-#if defined CONFIG_ES7000_CLUSTERED_APIC
                                return 0xFF;
-#else
+                       }
+                       apicid = new_apicid;
+                       cpus_found++;
+               }
+               cpu++;
+       }
+       return apicid;
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+       int num_bits_set;
+       int cpus_found = 0;
+       int cpu;
+       int apicid;
+
+       num_bits_set = cpus_weight(cpumask);
+       /* Return id to all */
+       if (num_bits_set == NR_CPUS)
+               return cpu_to_logical_apicid(0);
+       /*
+        * The cpus in the mask must all be on the apic cluster.  If are not
+        * on the same apicid cluster return default value of TARGET_CPUS.
+        */
+       cpu = first_cpu(cpumask);
+       apicid = cpu_to_logical_apicid(cpu);
+       while (cpus_found < num_bits_set) {
+               if (cpu_isset(cpu, cpumask)) {
+                       int new_apicid = cpu_to_logical_apicid(cpu);
+                       if (apicid_cluster(apicid) !=
+                                       apicid_cluster(new_apicid)){
+                               printk ("%s: Not a valid mask!\n", __func__);
                                return cpu_to_logical_apicid(0);
-#endif
                        }
                        apicid = new_apicid;
                        cpus_found++;
index 3984934619136119dfd7d95045d5930ab415e917..78f0daaee436333d9162013c62324dfe66bb1b9b 100644 (file)
@@ -1,36 +1,12 @@
 #ifndef __ASM_ES7000_WAKECPU_H
 #define __ASM_ES7000_WAKECPU_H
 
-/*
- * This file copes with machines that wakeup secondary CPUs by the
- * INIT, INIT, STARTUP sequence.
- */
-
-#ifdef CONFIG_ES7000_CLUSTERED_APIC
-#define WAKE_SECONDARY_VIA_MIP
-#else
-#define WAKE_SECONDARY_VIA_INIT
-#endif
-
-#ifdef WAKE_SECONDARY_VIA_MIP
-extern int es7000_start_cpu(int cpu, unsigned long eip);
-static inline int
-wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
-{
-       int boot_error = 0;
-       boot_error = es7000_start_cpu(phys_apicid, start_eip);
-       return boot_error;
-}
-#endif
-
-#define TRAMPOLINE_LOW phys_to_virt(0x467)
-#define TRAMPOLINE_HIGH phys_to_virt(0x469)
-
-#define boot_cpu_apicid boot_cpu_physical_apicid
+#define TRAMPOLINE_PHYS_LOW    0x467
+#define TRAMPOLINE_PHYS_HIGH   0x469
 
 static inline void wait_for_init_deassert(atomic_t *deassert)
 {
-#ifdef WAKE_SECONDARY_VIA_INIT
+#ifndef CONFIG_ES7000_CLUSTERED_APIC
        while (!atomic_read(deassert))
                cpu_relax();
 #endif
@@ -50,9 +26,12 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }
 
-#define inquire_remote_apic(apicid) do {               \
-               if (apic_verbosity >= APIC_DEBUG)       \
-                       __inquire_remote_apic(apicid);  \
-       } while (0)
+extern void __inquire_remote_apic(int apicid);
+
+static inline void inquire_remote_apic(int apicid)
+{
+       if (apic_verbosity >= APIC_DEBUG)
+               __inquire_remote_apic(apicid);
+}
 
 #endif /* __ASM_MACH_WAKECPU_H */
index 5cbd4fcc06fd20425ce21ead1386c47d92d0b152..0ac17d33a8c7114a015ba2bb4971e3b5e381a6da 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_X86_GENAPIC_32_H
 
 #include <asm/mpspec.h>
+#include <asm/atomic.h>
 
 /*
  * Generic APIC driver interface.
@@ -65,6 +66,14 @@ struct genapic {
        void (*send_IPI_allbutself)(int vector);
        void (*send_IPI_all)(int vector);
 #endif
+       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
+       int trampoline_phys_low;
+       int trampoline_phys_high;
+       void (*wait_for_init_deassert)(atomic_t *deassert);
+       void (*smp_callin_clear_local_apic)(void);
+       void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
+       void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
+       void (*inquire_remote_apic)(int apicid);
 };
 
 #define APICFUNC(x) .x = x,
@@ -105,16 +114,24 @@ struct genapic {
        APICFUNC(get_apic_id)                           \
        .apic_id_mask = APIC_ID_MASK,                   \
        APICFUNC(cpu_mask_to_apicid)                    \
-       APICFUNC(vector_allocation_domain)                      \
+       APICFUNC(vector_allocation_domain)              \
        APICFUNC(acpi_madt_oem_check)                   \
        IPIFUNC(send_IPI_mask)                          \
        IPIFUNC(send_IPI_allbutself)                    \
        IPIFUNC(send_IPI_all)                           \
        APICFUNC(enable_apic_mode)                      \
        APICFUNC(phys_pkg_id)                           \
+       .trampoline_phys_low = TRAMPOLINE_PHYS_LOW,             \
+       .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,           \
+       APICFUNC(wait_for_init_deassert)                \
+       APICFUNC(smp_callin_clear_local_apic)           \
+       APICFUNC(store_NMI_vector)                      \
+       APICFUNC(restore_NMI_vector)                    \
+       APICFUNC(inquire_remote_apic)                   \
 }
 
 extern struct genapic *genapic;
+extern void es7000_update_genapic_to_cluster(void);
 
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define get_uv_system_type()           UV_NONE
index 13c4e96199ea17d1e52430dac24a85f4291f50d7..2cae011668b7e09a1397e09b7fc9241f63b48d1d 100644 (file)
@@ -32,6 +32,8 @@ struct genapic {
        unsigned int (*get_apic_id)(unsigned long x);
        unsigned long (*set_apic_id)(unsigned int id);
        unsigned long apic_id_mask;
+       /* wakeup_secondary_cpu */
+       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
 };
 
 extern struct genapic *genapic;
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
new file mode 100644 (file)
index 0000000..369f5c5
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2008, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef ASM_X86__HYPERVISOR_H
+#define ASM_X86__HYPERVISOR_H
+
+extern unsigned long get_hypervisor_tsc_freq(void);
+extern void init_hypervisor(struct cpuinfo_x86 *c);
+
+#endif
index 97989c0e534c7a8af039805dc5a04ad11ed021a5..50ca486fd88c9468fe52b4a9fb357f192fbd6ab0 100644 (file)
@@ -129,24 +129,6 @@ typedef struct compat_siginfo {
        } _sifields;
 } compat_siginfo_t;
 
-struct sigframe32 {
-       u32 pretcode;
-       int sig;
-       struct sigcontext_ia32 sc;
-       struct _fpstate_ia32 fpstate;
-       unsigned int extramask[_COMPAT_NSIG_WORDS-1];
-};
-
-struct rt_sigframe32 {
-       u32 pretcode;
-       int sig;
-       u32 pinfo;
-       u32 puc;
-       compat_siginfo_t info;
-       struct ucontext_ia32 uc;
-       struct _fpstate_ia32 fpstate;
-};
-
 struct ustat32 {
        __u32                   f_tfree;
        compat_ino_t            f_tinode;
index 44c89c3a23e9a1f9507536b4d84f0f6b3d225c61..38d87379e270596635ff569242c54ff71ad3ce69 100644 (file)
@@ -8,8 +8,13 @@ struct notifier_block;
 void idle_notifier_register(struct notifier_block *n);
 void idle_notifier_unregister(struct notifier_block *n);
 
+#ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
+#else /* !CONFIG_X86_64 */
+static inline void enter_idle(void) { }
+static inline void exit_idle(void) { }
+#endif /* CONFIG_X86_64 */
 
 void c1e_remove_cpu(int cpu);
 
index ac2abc88cd95fa3e9049acc0440d65d59cd47ac1..33513b9a67f3e69dad897cc01a33b1b78849ae0a 100644 (file)
@@ -4,6 +4,7 @@
 #define ARCH_HAS_IOREMAP_WC
 
 #include <linux/compiler.h>
+#include <asm-generic/int-ll64.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -45,20 +46,40 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
 #define mmiowb() barrier()
 
 #ifdef CONFIG_X86_64
+
 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
-build_mmio_read(__readq, "q", unsigned long, "=r", )
 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
-build_mmio_write(__writeq, "q", unsigned long, "r", )
 
-#define readq_relaxed(a) __readq(a)
-#define __raw_readq __readq
-#define __raw_writeq writeq
+#else
+
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+       const volatile u32 __iomem *p = addr;
+       u32 low, high;
+
+       low = readl(p);
+       high = readl(p + 1);
+
+       return low + ((u64)high << 32);
+}
+
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+       writel(val, addr);
+       writel(val >> 32, addr+4);
+}
 
-/* Let people know we have them */
-#define readq readq
-#define writeq writeq
 #endif
 
+#define readq_relaxed(a)       readq(a)
+
+#define __raw_readq(a)         readq(a)
+#define __raw_writeq(val, addr)        writeq(val, addr)
+
+/* Let people know that we have them */
+#define readq                  readq
+#define writeq                 writeq
+
 extern int iommu_bio_merge;
 
 #ifdef CONFIG_X86_32
index 6afd9933a7dd96d96822a8ecb058cc2528ed09ac..e475e009ae5d4e60dc0e828ad96373ef91f2799a 100644 (file)
@@ -156,11 +156,21 @@ extern int sis_apic_bug;
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
+/* 1 if "noapic" boot option passed */
+extern int noioapicquirk;
+
+/* -1 if "noapic" boot option passed */
+extern int noioapicreroute;
+
 /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
 extern int timer_through_8259;
 
 static inline void disable_ioapic_setup(void)
 {
+#ifdef CONFIG_PCI
+       noioapicquirk = 1;
+       noioapicreroute = -1;
+#endif
        skip_ioapic_setup = 1;
 }
 
index bae0eda954864aa62e90b6ec438b82c8785129dc..28e409fc73f3df33e4c6b2cbde5e99e0433e0fa1 100644 (file)
@@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq)
 # endif
 #endif
 
-#ifdef CONFIG_IRQBALANCE
-extern int irqbalance_disable(char *str);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(cpumask_t map);
index af2f02d27fc7a51a8e0a5c8d4470b0f9bd90330e..86afd7473457e4cff97b120cfcde5516125a1366 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <asm/percpu.h>
 
+#define ARCH_HAS_OWN_IRQ_REGS
+
 DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
index a1f22771a15a2d07a88d9e071b17ebb6860631ef..c61d8b2ab8b9d8ac00e8c514ee26028fe601eb86 100644 (file)
@@ -5,21 +5,8 @@
 # define PA_CONTROL_PAGE       0
 # define VA_CONTROL_PAGE       1
 # define PA_PGD                        2
-# define VA_PGD                        3
-# define PA_PTE_0              4
-# define VA_PTE_0              5
-# define PA_PTE_1              6
-# define VA_PTE_1              7
-# define PA_SWAP_PAGE          8
-# ifdef CONFIG_X86_PAE
-#  define PA_PMD_0             9
-#  define VA_PMD_0             10
-#  define PA_PMD_1             11
-#  define VA_PMD_1             12
-#  define PAGES_NR             13
-# else
-#  define PAGES_NR             9
-# endif
+# define PA_SWAP_PAGE          3
+# define PAGES_NR              4
 #else
 # define PA_CONTROL_PAGE       0
 # define VA_CONTROL_PAGE       1
@@ -170,6 +157,20 @@ relocate_kernel(unsigned long indirection_page,
                unsigned long start_address) ATTRIB_NORET;
 #endif
 
+#ifdef CONFIG_X86_32
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+       pgd_t *pgd;
+#ifdef CONFIG_X86_PAE
+       pmd_t *pmd0;
+       pmd_t *pmd1;
+#endif
+       pte_t *pte0;
+       pte_t *pte1;
+};
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_KEXEC_H */
index ff3a6c236c00c940d3cbc94d0895399fd25828f8..6cb3a467e0673c031c6e39b675481466a0c7531a 100644 (file)
@@ -32,11 +32,13 @@ static inline cpumask_t target_cpus(void)
 #define vector_allocation_domain    (genapic->vector_allocation_domain)
 #define read_apic_id()  (GET_APIC_ID(apic_read(APIC_ID)))
 #define send_IPI_self (genapic->send_IPI_self)
+#define wakeup_secondary_cpu (genapic->wakeup_cpu)
 extern void setup_apic_routing(void);
 #else
 #define INT_DELIVERY_MODE dest_LowestPrio
 #define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
 #define TARGET_CPUS (target_cpus())
+#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
 /*
  * Set up the logical destination ID.
  *
index 9d80db91e9926f040445ebffeb44cac66ce8f4d3..ceb01366014629ba1c8220dff2a95bb313a2bf30 100644 (file)
@@ -1,17 +1,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
 #define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
 
-/* 
- * This file copes with machines that wakeup secondary CPUs by the
- * INIT, INIT, STARTUP sequence.
- */
-
-#define WAKE_SECONDARY_VIA_INIT
-
-#define TRAMPOLINE_LOW phys_to_virt(0x467)
-#define TRAMPOLINE_HIGH phys_to_virt(0x469)
-
-#define boot_cpu_apicid boot_cpu_physical_apicid
+#define TRAMPOLINE_PHYS_LOW (0x467)
+#define TRAMPOLINE_PHYS_HIGH (0x469)
 
 static inline void wait_for_init_deassert(atomic_t *deassert)
 {
@@ -33,9 +24,12 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }
 
-#define inquire_remote_apic(apicid) do {               \
-               if (apic_verbosity >= APIC_DEBUG)       \
-                       __inquire_remote_apic(apicid);  \
-       } while (0)
+extern void __inquire_remote_apic(int apicid);
+
+static inline void inquire_remote_apic(int apicid)
+{
+       if (apic_verbosity >= APIC_DEBUG)
+               __inquire_remote_apic(apicid);
+}
 
 #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
index dbab36d64d48f2b15aa6be7dea09e9feb91f869f..23bf52103b8905e662e3b9e5491b89d5b8c8c933 100644 (file)
@@ -13,9 +13,11 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        pr_debug("1.\n");
-       *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+                                                                start_eip >> 4;
        pr_debug("2.\n");
-       *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+                                                        start_eip & 0xf;
        pr_debug("3.\n");
 }
 
@@ -32,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
         */
        CMOS_WRITE(0, 0xf);
 
-       *((volatile long *) phys_to_virt(0x467)) = 0;
+       *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
index 5180bd7478fbc3d2cfc533f22e707a8099201e23..e430f47df667255e26bab25ecb123702426b6e63 100644 (file)
@@ -27,6 +27,7 @@
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
+#define wakeup_secondary_cpu (genapic->wakeup_cpu)
 
 extern void generic_bigsmp_probe(void);
 
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
new file mode 100644 (file)
index 0000000..1ab16b1
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
+#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
+
+#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
+#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
+#define wait_for_init_deassert (genapic->wait_for_init_deassert)
+#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
+#define store_NMI_vector (genapic->store_NMI_vector)
+#define restore_NMI_vector (genapic->restore_NMI_vector)
+#define inquire_remote_apic (genapic->inquire_remote_apic)
+
+#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
index 8e10015781fb2d6794aa0cfabae334956b365c88..7e98ce1d2c0e5ad8b2aca1c1e5d733cd839d9cc8 100644 (file)
@@ -4,9 +4,8 @@
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-       unsigned cpu = smp_processor_id();
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
+               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -20,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev,
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-               per_cpu(cpu_tlbstate, cpu).active_mm = next;
+               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+               x86_write_percpu(cpu_tlbstate.active_mm, next);
 #endif
                cpu_set(cpu, next->cpu_vm_mask);
 
@@ -36,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev,
        }
 #ifdef CONFIG_SMP
        else {
-               per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-               BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
 
                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
index e38859d577a117e1cc1ae2cf75e278c595cfb369..cb58643947b963dcc276cbf81ab1cf92b6dd149a 100644 (file)
@@ -85,7 +85,9 @@
 /* AMD64 MSRs. Not complete. See the architecture manual for a more
    complete list. */
 
+#define MSR_AMD64_PATCH_LEVEL          0x0000008b
 #define MSR_AMD64_NB_CFG               0xc001001f
+#define MSR_AMD64_PATCH_LOADER         0xc0010020
 #define MSR_AMD64_IBSFETCHCTL          0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD                0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD       0xc0011032
index 42f639b991b44d02e8b2d391755631b32e3f9781..4640ddd58fb9599ac0c604d8f6be4e15b8233fe2 100644 (file)
@@ -22,10 +22,10 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
 }
 
 /*
- * i386 calling convention returns 64-bit value in edx:eax, while
- * x86_64 returns at rax. Also, the "A" constraint does not really
- * mean rdx:rax in x86_64, so we need specialized behaviour for each
- * architecture
+ * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
+ * constraint has different meanings. For i386, "A" means exactly
+ * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
+ * it means rax *or* rdx.
  */
 #ifdef CONFIG_X86_64
 #define DECLARE_ARGS(val, low, high)   unsigned low, high
index c577bda5b1c52c768047d1e664e0040356b57176..6f499df8eddbfd4246132bd570b87bfde68b4e00 100644 (file)
@@ -3,12 +3,8 @@
 
 /* This file copes with machines that wakeup secondary CPUs by NMIs */
 
-#define WAKE_SECONDARY_VIA_NMI
-
-#define TRAMPOLINE_LOW phys_to_virt(0x8)
-#define TRAMPOLINE_HIGH phys_to_virt(0xa)
-
-#define boot_cpu_apicid boot_cpu_logical_apicid
+#define TRAMPOLINE_PHYS_LOW (0x8)
+#define TRAMPOLINE_PHYS_HIGH (0xa)
 
 /* We don't do anything here because we use NMI's to boot instead */
 static inline void wait_for_init_deassert(atomic_t *deassert)
@@ -27,17 +23,23 @@ static inline void smp_callin_clear_local_apic(void)
 static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
 {
        printk("Storing NMI vector\n");
-       *high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
-       *low = *((volatile unsigned short *) TRAMPOLINE_LOW);
+       *high =
+         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
+       *low =
+         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
 }
 
 static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
        printk("Restoring NMI vector\n");
-       *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high;
-       *((volatile unsigned short *) TRAMPOLINE_LOW) = *low;
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+                                                                *high;
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+                                                                *low;
 }
 
-#define inquire_remote_apic(apicid) {}
+static inline void inquire_remote_apic(int apicid)
+{
+}
 
 #endif /* __ASM_NUMAQ_WAKECPU_H */
index 875b38edf19383f0e1176b9a73dede0f49f42c38..647781298e7ef7cbc98b14ad75f2552b0092507f 100644 (file)
@@ -19,6 +19,8 @@ struct pci_sysdata {
 };
 
 extern int pci_routeirq;
+extern int noioapicquirk;
+extern int noioapicreroute;
 
 /* scan a bus after allocating a pci_sysdata for it */
 extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
index b17edfd23628953139fa58778fa7ae2d66222222..e0d199fe1d83896578421d79aac509ebd843fdf3 100644 (file)
@@ -56,23 +56,55 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define pte_none(x)            (!(x).pte_low)
 
 /*
- * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
- * into this range:
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
+ * split up the 29 bits of offset into this range:
  */
 #define PTE_FILE_MAX_BITS      29
+#define PTE_FILE_SHIFT1                (_PAGE_BIT_PRESENT + 1)
+#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
+#define PTE_FILE_SHIFT2                (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3                (_PAGE_BIT_PROTNONE + 1)
+#else
+#define PTE_FILE_SHIFT2                (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT3                (_PAGE_BIT_FILE + 1)
+#endif
+#define PTE_FILE_BITS1         (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2         (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
 
 #define pte_to_pgoff(pte)                                              \
-       ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))
+       ((((pte).pte_low >> PTE_FILE_SHIFT1)                            \
+         & ((1U << PTE_FILE_BITS1) - 1))                               \
+        + ((((pte).pte_low >> PTE_FILE_SHIFT2)                         \
+            & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1)         \
+        + (((pte).pte_low >> PTE_FILE_SHIFT3)                          \
+           << (PTE_FILE_BITS1 + PTE_FILE_BITS2)))
 
 #define pgoff_to_pte(off)                                              \
-       ((pte_t) { .pte_low = (((off) & 0x1f) << 1) +                   \
-                       (((off) >> 5) << 8) + _PAGE_FILE })
+       ((pte_t) { .pte_low =                                           \
+        (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)    \
+        + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1))  \
+           << PTE_FILE_SHIFT2)                                         \
+        + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))                \
+           << PTE_FILE_SHIFT3)                                         \
+        + _PAGE_FILE })
 
 /* Encode and de-code a swap entry */
-#define __swp_type(x)                  (((x).val >> 1) & 0x1f)
-#define __swp_offset(x)                        ((x).val >> 8)
-#define __swp_entry(type, offset)                              \
-       ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
+#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+#else
+#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
+#endif
+
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(x)                  (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
+                                        & ((1U << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)                        ((x).val >> SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset)      ((swp_entry_t) { \
+                                        ((type) << (_PAGE_BIT_PRESENT + 1)) \
+                                        | ((offset) << SWP_OFFSET_SHIFT) })
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)          ((pte_t) { .pte = (x).val })
 
index 52597aeadfff029e0b45aa57ee5030a15e774289..447da43cddb360a6f92a2ce98896599359546e6d 100644 (file)
@@ -166,6 +166,7 @@ static inline int pte_none(pte_t pte)
 #define PTE_FILE_MAX_BITS       32
 
 /* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
 #define __swp_type(x)                  (((x).val) & 0x1f)
 #define __swp_offset(x)                        ((x).val >> 5)
 #define __swp_entry(type, offset)      ((swp_entry_t){(type) | (offset) << 5})
index c012f3b11671574150720dcaee023780cd79d905..83e69f4a37f03bafb39869169d6286210c7c95df 100644 (file)
@@ -10,7 +10,6 @@
 #define _PAGE_BIT_PCD          4       /* page cache disabled */
 #define _PAGE_BIT_ACCESSED     5       /* was accessed (raised by CPU) */
 #define _PAGE_BIT_DIRTY                6       /* was written to (raised by CPU) */
-#define _PAGE_BIT_FILE         6
 #define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page */
 #define _PAGE_BIT_PAT          7       /* on 4KB pages */
 #define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
 #define _PAGE_BIT_CPA_TEST     _PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
 
+/* If _PAGE_BIT_PRESENT is clear, we use these: */
+/* - if the user mapped it with PROT_NONE; pte_present gives true */
+#define _PAGE_BIT_PROTNONE     _PAGE_BIT_GLOBAL
+/* - set: nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_BIT_FILE         _PAGE_BIT_DIRTY
+
 #define _PAGE_PRESENT  (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
 #define _PAGE_RW       (_AT(pteval_t, 1) << _PAGE_BIT_RW)
 #define _PAGE_USER     (_AT(pteval_t, 1) << _PAGE_BIT_USER)
 #define _PAGE_NX       (_AT(pteval_t, 0))
 #endif
 
-/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE     _PAGE_DIRTY     /* nonlinear file mapping,
-                                        * saved PTE; unset:swap */
-#define _PAGE_PROTNONE _PAGE_PSE       /* if the user mapped it with PROT_NONE;
-                                          pte_present gives true */
+#define _PAGE_FILE     (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
 #define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
                         _PAGE_ACCESSED | _PAGE_DIRTY)
 #define PGD_IDENT_ATTR  0x001          /* PRESENT (no other attributes) */
 #endif
 
+/*
+ * Macro to mark a page protection value as UC-
+ */
+#define pgprot_noncached(prot)                                 \
+       ((boot_cpu_data.x86 > 3)                                \
+        ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
+        : (prot))
+
 #ifndef __ASSEMBLY__
 
+#define pgprot_writecombine    pgprot_writecombine
+extern pgprot_t pgprot_writecombine(pgprot_t prot);
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -329,6 +342,9 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
 #ifndef __ASSEMBLY__
+/* Indicate that x86 has its own track and untrack pfn vma functions */
+#define __HAVE_PFNMAP_TRACKING
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
index f9d5889b336be5eb5d9f575d22ac512be7d63f7e..72b020deb46b9352a5365410b2011e55b4c56021 100644 (file)
@@ -100,15 +100,6 @@ extern unsigned long pg0[];
 # include <asm/pgtable-2level.h>
 #endif
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- * On processors which do not support it, this is a no-op.
- */
-#define pgprot_noncached(prot)                                 \
-       ((boot_cpu_data.x86 > 3)                                \
-        ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \
-        : (prot))
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
index 545a0e042bb2d44268d7a240e97b2c14309568b9..ba09289accaa0322c70fb580a40f83677a900c9f 100644 (file)
@@ -146,7 +146,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
 #define PGDIR_MASK     (~(PGDIR_SIZE - 1))
 
 
-#define MAXMEM          _AC(0x00003fffffffffff, UL)
+#define MAXMEM          _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 #define VMALLOC_START    _AC(0xffffc20000000000, UL)
 #define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
 #define VMEMMAP_START   _AC(0xffffe20000000000, UL)
@@ -176,12 +176,6 @@ static inline int pmd_bad(pmd_t pmd)
 
 #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)                                 \
-       (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -250,10 +244,22 @@ static inline int pud_large(pud_t pte)
 extern int direct_gbpages;
 
 /* Encode and de-code a swap entry */
-#define __swp_type(x)                  (((x).val >> 1) & 0x3f)
-#define __swp_offset(x)                        ((x).val >> 8)
-#define __swp_entry(type, offset)      ((swp_entry_t) { ((type) << 1) | \
-                                                        ((offset) << 8) })
+#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
+#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+#else
+#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
+#endif
+
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(x)                  (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
+                                        & ((1U << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)                        ((x).val >> SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset)      ((swp_entry_t) { \
+                                        ((type) << (_PAGE_BIT_PRESENT + 1)) \
+                                        | ((offset) << SWP_OFFSET_SHIFT) })
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)          ((pte_t) { .pte = (x).val })
 
index fe681147a4f78fcd5cc0235cff57616af81e3f38..a8894647dd9aba3fd5139eb2277e39a10d17cacb 100644 (file)
@@ -6,5 +6,8 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004
 
+#ifdef CONFIG_X86_64
+extern long sys_arch_prctl(int, unsigned long);
+#endif /* CONFIG_X86_64 */
 
 #endif /* _ASM_X86_PRCTL_H */
index 5ca01e3832699b0378c36d79a4d17361a3d27b88..a570eafa4755e6571b8e7cacc3c0746019efbf5e 100644 (file)
@@ -110,6 +110,7 @@ struct cpuinfo_x86 {
        /* Index into per_cpu list: */
        u16                     cpu_index;
 #endif
+       unsigned int            x86_hyper_vendor;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL       0
@@ -123,6 +124,9 @@ struct cpuinfo_x86 {
 
 #define X86_VENDOR_UNKNOWN     0xff
 
+#define X86_HYPER_VENDOR_NONE  0
+#define X86_HYPER_VENDOR_VMWARE 1
+
 /*
  * capabilities of CPUs
  */
index df7710354f851dc957c497b30a77f4c6468c01df..562d4fd31ba89abdfba10d1069621c19608ac983 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_REBOOT_H
 #define _ASM_X86_REBOOT_H
 
+#include <linux/kdebug.h>
+
 struct pt_regs;
 
 struct machine_ops {
@@ -18,4 +20,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs);
 void native_machine_shutdown(void);
 void machine_real_restart(const unsigned char *code, int length);
 
+typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+
 #endif /* _ASM_X86_REBOOT_H */
index f12d37237465df8b0a181d37685b212c0dd70cd1..4fcd53fd5f43c06c26a8e057fa4f65b5603cf89b 100644 (file)
@@ -8,6 +8,10 @@
 /* Interrupt control for vSMPowered x86_64 systems */
 void vsmp_init(void);
 
+
+void setup_bios_corruption_check(void);
+
+
 #ifdef CONFIG_X86_VISWS
 extern void visws_early_detect(void);
 extern int is_visws_box(void);
@@ -16,6 +20,8 @@ static inline void visws_early_detect(void) { }
 static inline int is_visws_box(void) { return 0; }
 #endif
 
+extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
 /*
  * Any setup quirks to be performed?
  */
@@ -39,6 +45,7 @@ struct x86_quirks {
        void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
                                     unsigned short oemsize);
        int (*setup_ioapic_ids)(void);
+       int (*update_genapic)(void);
 };
 
 extern struct x86_quirks *x86_quirks;
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
new file mode 100644 (file)
index 0000000..4e0fe26
--- /dev/null
@@ -0,0 +1,70 @@
+#ifndef _ASM_X86_SIGFRAME_H
+#define _ASM_X86_SIGFRAME_H
+
+#include <asm/sigcontext.h>
+#include <asm/siginfo.h>
+#include <asm/ucontext.h>
+
+#ifdef CONFIG_X86_32
+#define sigframe_ia32          sigframe
+#define rt_sigframe_ia32       rt_sigframe
+#define sigcontext_ia32                sigcontext
+#define _fpstate_ia32          _fpstate
+#define ucontext_ia32          ucontext
+#else /* !CONFIG_X86_32 */
+
+#ifdef CONFIG_IA32_EMULATION
+#include <asm/ia32.h>
+#endif /* CONFIG_IA32_EMULATION */
+
+#endif /* CONFIG_X86_32 */
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+struct sigframe_ia32 {
+       u32 pretcode;
+       int sig;
+       struct sigcontext_ia32 sc;
+       /*
+        * fpstate is unused. fpstate is moved/allocated after
+        * retcode[] below. This movement allows to have the FP state and the
+        * future state extensions (xsave) stay together.
+        * And at the same time retaining the unused fpstate, prevents changing
+        * the offset of extramask[] in the sigframe and thus prevent any
+        * legacy application accessing/modifying it.
+        */
+       struct _fpstate_ia32 fpstate_unused;
+#ifdef CONFIG_IA32_EMULATION
+       unsigned int extramask[_COMPAT_NSIG_WORDS-1];
+#else /* !CONFIG_IA32_EMULATION */
+       unsigned long extramask[_NSIG_WORDS-1];
+#endif /* CONFIG_IA32_EMULATION */
+       char retcode[8];
+       /* fp state follows here */
+};
+
+struct rt_sigframe_ia32 {
+       u32 pretcode;
+       int sig;
+       u32 pinfo;
+       u32 puc;
+#ifdef CONFIG_IA32_EMULATION
+       compat_siginfo_t info;
+#else /* !CONFIG_IA32_EMULATION */
+       struct siginfo info;
+#endif /* CONFIG_IA32_EMULATION */
+       struct ucontext_ia32 uc;
+       char retcode[8];
+       /* fp state follows here */
+};
+#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
+
+#ifdef CONFIG_X86_64
+struct rt_sigframe {
+       char __user *pretcode;
+       struct ucontext uc;
+       struct siginfo info;
+       /* fp state follows here */
+};
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_SIGFRAME_H */
index 96ac44f275da40f24a73b5180b9e4c240d5fc8ba..7761a5d554bb2eb567024375b5bb3c083991946f 100644 (file)
@@ -121,6 +121,10 @@ typedef unsigned long sigset_t;
 
 #ifndef __ASSEMBLY__
 
+# ifdef __KERNEL__
+extern void do_notify_resume(struct pt_regs *, void *, __u32);
+# endif /* __KERNEL__ */
+
 #ifdef __i386__
 # ifdef __KERNEL__
 struct old_sigaction {
@@ -141,8 +145,6 @@ struct k_sigaction {
        struct sigaction sa;
 };
 
-extern void do_notify_resume(struct pt_regs *, void *, __u32);
-
 # else /* __KERNEL__ */
 /* Here we must cater to libcs that poke about in kernel headers.  */
 
index be44f7dab395ad87492e0021cbf7adb99cadb1d2..e3cc3c063ec5e77683c088fd80906de90fbf9bd2 100644 (file)
@@ -27,7 +27,7 @@
 #else /* CONFIG_X86_32 */
 # define SECTION_SIZE_BITS     27 /* matt - 128 is convenient right now */
 # define MAX_PHYSADDR_BITS     44
-# define MAX_PHYSMEM_BITS      44
+# define MAX_PHYSMEM_BITS      44 /* Can be max 45 bits */
 #endif
 
 #endif /* CONFIG_SPARSEMEM */
index 87803da440100d50222d86cf7a1ac9cf00ed29ca..9c6797c3e56c1363e9f0376c7dea3f8733b81fe6 100644 (file)
 /* kernel/ioport.c */
 asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
 
+/* kernel/ldt.c */
+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+
+/* kernel/tls.c */
+asmlinkage int sys_set_thread_area(struct user_desc __user *);
+asmlinkage int sys_get_thread_area(struct user_desc __user *);
+
 /* X86_32 only */
 #ifdef CONFIG_X86_32
 /* kernel/process_32.c */
@@ -33,14 +40,11 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
                             struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(unsigned long);
+asmlinkage int sys_rt_sigreturn(struct pt_regs);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
 
-/* kernel/ldt.c */
-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
-
 /* kernel/sys_i386_32.c */
 asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
                          unsigned long, unsigned long, unsigned long);
@@ -54,10 +58,6 @@ asmlinkage int sys_uname(struct old_utsname __user *);
 struct oldold_utsname;
 asmlinkage int sys_olduname(struct oldold_utsname __user *);
 
-/* kernel/tls.c */
-asmlinkage int sys_set_thread_area(struct user_desc __user *);
-asmlinkage int sys_get_thread_area(struct user_desc __user *);
-
 /* kernel/vm86_32.c */
 asmlinkage int sys_vm86old(struct pt_regs);
 asmlinkage int sys_vm86(struct pt_regs);
index 2ed3f0f44ff73519a149b37c9bd658a26361c8de..8e626ea33a1a64d1565c10a0784200ef92ec9a31 100644 (file)
 # define AT_VECTOR_SIZE_ARCH 1
 #endif
 
-#ifdef CONFIG_X86_32
-
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);
 
+#ifdef CONFIG_X86_32
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
@@ -314,6 +314,8 @@ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
 
+void stop_this_cpu(void *dummy);
+
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
index e44d379faad2b891245ef9cc2332dcaf8f8491c5..8dbc57390d2573c495afbd03ddf33d4dac390a42 100644 (file)
@@ -24,7 +24,7 @@ struct exec_domain;
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
        struct exec_domain      *exec_domain;   /* execution domain */
-       unsigned long           flags;          /* low level flags */
+       __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
        int                     preempt_count;  /* 0 => preemptable,
index fa0d79facdbc4b0175db51082d1afcaad3f4a060..780ba0ab94f908da84b44d71f1740d47ac2677de 100644 (file)
@@ -3,6 +3,7 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_X86_TRAMPOLINE
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -13,8 +14,14 @@ extern unsigned char *trampoline_base;
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
 
+#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 #define TRAMPOLINE_BASE 0x6000
+
 extern unsigned long setup_trampoline(void);
+extern void __init reserve_trampoline_memory(void);
+#else
+static inline void reserve_trampoline_memory(void) {};
+#endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
 
index 45dee286e45cda98bc1fe20e79c30ccded41ff5d..2ee0a3bceedf4738cc0e65b1cda67110399550f1 100644 (file)
@@ -46,6 +46,10 @@ dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
 dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
 dotraplinkage void do_segment_not_present(struct pt_regs *, long);
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
+#ifdef CONFIG_X86_64
+dotraplinkage void do_double_fault(struct pt_regs *, long);
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *);
+#endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
 dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
@@ -72,10 +76,13 @@ static inline int get_si_code(unsigned long condition)
 extern int panic_on_unrecovered_nmi;
 extern int kstack_depth_to_print;
 
-#ifdef CONFIG_X86_32
 void math_error(void __user *);
-unsigned long patch_espfix_desc(unsigned long, unsigned long);
 asmlinkage void math_emulate(long);
+#ifdef CONFIG_X86_32
+unsigned long patch_espfix_desc(unsigned long, unsigned long);
+#else
+asmlinkage void smp_thermal_interrupt(void);
+asmlinkage void mce_threshold_interrupt(void);
 #endif
 
 #endif /* _ASM_X86_TRAPS_H */
index 9cd83a8e40d59a3dae2978e23887d81311370ae3..38ae163cc91b00bf029d5ed5e29cd8ba96bd461b 100644 (file)
@@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
-       cycles_t cycles;
-
        /*
         * We only do VDSOs on TSC capable CPUs, so this shouldnt
         * access boot_cpu_data (which is not VDSO-safe):
@@ -44,11 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
        if (!cpu_has_tsc)
                return 0;
 #endif
-       rdtsc_barrier();
-       cycles = (cycles_t)__native_read_tsc();
-       rdtsc_barrier();
-
-       return cycles;
+       return (cycles_t)__native_read_tsc();
 }
 
 extern void tsc_init(void);
index 35c54921b2e434cdd95bbd223077f2b1155d0f1f..580c3ee6c58c4d0479dbce9ea491c78049255e04 100644 (file)
@@ -350,14 +350,14 @@ do {                                                                      \
 
 #define __put_user_nocheck(x, ptr, size)                       \
 ({                                                             \
-       long __pu_err;                                          \
+       int __pu_err;                                           \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __pu_err;                                               \
 })
 
 #define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
-       long __gu_err;                                                  \
+       int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
index d931d3b7e6f71b394d87f5766835dd8ef892dfec..7ed17ff502b9edd76afb96fe682609a4df1afffe 100644 (file)
 enum uv_bios_cmd {
        UV_BIOS_COMMON,
        UV_BIOS_GET_SN_INFO,
-       UV_BIOS_FREQ_BASE
+       UV_BIOS_FREQ_BASE,
+       UV_BIOS_WATCHLIST_ALLOC,
+       UV_BIOS_WATCHLIST_FREE,
+       UV_BIOS_MEMPROTECT,
+       UV_BIOS_GET_PARTITION_ADDR
 };
 
 /*
  * Status values returned from a BIOS call.
  */
 enum {
+       BIOS_STATUS_MORE_PASSES         =  1,
        BIOS_STATUS_SUCCESS             =  0,
        BIOS_STATUS_UNIMPLEMENTED       = -ENOSYS,
        BIOS_STATUS_EINVAL              = -EINVAL,
@@ -71,6 +76,21 @@ union partition_info_u {
        };
 };
 
+union uv_watchlist_u {
+       u64     val;
+       struct {
+               u64     blade   : 16,
+                       size    : 32,
+                       filler  : 16;
+       };
+};
+
+enum uv_memprotect {
+       UV_MEMPROT_RESTRICT_ACCESS,
+       UV_MEMPROT_ALLOW_AMO,
+       UV_MEMPROT_ALLOW_RW
+};
+
 /*
  * bios calls have 6 parameters
  */
@@ -80,14 +100,20 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
 extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
 extern s64 uv_bios_freq_base(u64, u64 *);
+extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+                                       unsigned long *);
+extern int uv_bios_mq_watchlist_free(int, int);
+extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
+extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
 
 extern void uv_bios_init(void);
 
+extern unsigned long sn_rtc_cycles_per_second;
 extern int uv_type;
 extern long sn_partition_id;
-extern long uv_coherency_id;
-extern long uv_region_size;
-#define partition_coherence_id()       (uv_coherency_id)
+extern long sn_coherency_id;
+extern long sn_region_size;
+#define partition_coherence_id()       (sn_coherency_id)
 
 extern struct kobject *sgi_uv_kobj;    /* /sys/firmware/sgi_uv */
 
index 7a5782610b2bcb0af69d99be174d0815a6722663..777327ef05c19f0887c0b9b089a6f71135395e94 100644 (file)
  */
 #define UV_MAX_NASID_VALUE     (UV_MAX_NUMALINK_NODES * 2)
 
+struct uv_scir_s {
+       struct timer_list timer;
+       unsigned long   offset;
+       unsigned long   last;
+       unsigned long   idle_on;
+       unsigned long   idle_off;
+       unsigned char   state;
+       unsigned char   enabled;
+};
+
 /*
  * The following defines attributes of the HUB chip. These attributes are
  * frequently referenced and are kept in the per-cpu data areas of each cpu.
  * They are kept together in a struct to minimize cache misses.
  */
 struct uv_hub_info_s {
-       unsigned long   global_mmr_base;
-       unsigned long   gpa_mask;
-       unsigned long   gnode_upper;
-       unsigned long   lowmem_remap_top;
-       unsigned long   lowmem_remap_base;
-       unsigned short  pnode;
-       unsigned short  pnode_mask;
-       unsigned short  coherency_domain_number;
-       unsigned short  numa_blade_id;
-       unsigned char   blade_processor_id;
-       unsigned char   m_val;
-       unsigned char   n_val;
+       unsigned long           global_mmr_base;
+       unsigned long           gpa_mask;
+       unsigned long           gnode_upper;
+       unsigned long           lowmem_remap_top;
+       unsigned long           lowmem_remap_base;
+       unsigned short          pnode;
+       unsigned short          pnode_mask;
+       unsigned short          coherency_domain_number;
+       unsigned short          numa_blade_id;
+       unsigned char           blade_processor_id;
+       unsigned char           m_val;
+       unsigned char           n_val;
+       struct uv_scir_s        scir;
 };
+
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define uv_hub_info            (&__get_cpu_var(__uv_hub_info))
 #define uv_cpu_hub_info(cpu)   (&per_cpu(__uv_hub_info, cpu))
@@ -163,6 +175,30 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 
 #define UV_APIC_PNODE_SHIFT    6
 
+/* Local Bus from cpu's perspective */
+#define LOCAL_BUS_BASE         0x1c00000
+#define LOCAL_BUS_SIZE         (4 * 1024 * 1024)
+
+/*
+ * System Controller Interface Reg
+ *
+ * Note there are NO leds on a UV system.  This register is only
+ * used by the system controller to monitor system-wide operation.
+ * There are 64 regs per node.  With Nahelem cpus (2 cores per node,
+ * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
+ * a node.
+ *
+ * The window is located at top of ACPI MMR space
+ */
+#define SCIR_WINDOW_COUNT      64
+#define SCIR_LOCAL_MMR_BASE    (LOCAL_BUS_BASE + \
+                                LOCAL_BUS_SIZE - \
+                                SCIR_WINDOW_COUNT)
+
+#define SCIR_CPU_HEARTBEAT     0x01    /* timer interrupt */
+#define SCIR_CPU_ACTIVITY      0x02    /* not idle */
+#define SCIR_CPU_HB_INTERVAL   (HZ)    /* once per second */
+
 /*
  * Macros for converting between kernel virtual addresses, socket local physical
  * addresses, and UV global physical addresses.
@@ -174,7 +210,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 {
        if (paddr < uv_hub_info->lowmem_remap_top)
-               paddr += uv_hub_info->lowmem_remap_base;
+               paddr |= uv_hub_info->lowmem_remap_base;
        return paddr | uv_hub_info->gnode_upper;
 }
 
@@ -182,19 +218,7 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 /* socket virtual --> UV global physical address */
 static inline unsigned long uv_gpa(void *v)
 {
-       return __pa(v) | uv_hub_info->gnode_upper;
-}
-
-/* socket virtual --> UV global physical address */
-static inline void *uv_vgpa(void *v)
-{
-       return (void *)uv_gpa(v);
-}
-
-/* UV global physical address --> socket virtual */
-static inline void *uv_va(unsigned long gpa)
-{
-       return __va(gpa & uv_hub_info->gpa_mask);
+       return uv_soc_phys_ram_to_gpa(__pa(v));
 }
 
 /* pnode, offset --> socket virtual */
@@ -277,6 +301,16 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
        *uv_local_mmr_address(offset) = val;
 }
 
+static inline unsigned char uv_read_local_mmr8(unsigned long offset)
+{
+       return *((unsigned char *)uv_local_mmr_address(offset));
+}
+
+static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
+{
+       *((unsigned char *)uv_local_mmr_address(offset)) = val;
+}
+
 /*
  * Structures and definitions for converting between cpu, node, pnode, and blade
  * numbers.
@@ -351,5 +385,20 @@ static inline int uv_num_possible_blades(void)
        return uv_possible_blades;
 }
 
-#endif /* _ASM_X86_UV_UV_HUB_H */
+/* Update SCIR state */
+static inline void uv_set_scir_bits(unsigned char value)
+{
+       if (uv_hub_info->scir.state != value) {
+               uv_hub_info->scir.state = value;
+               uv_write_local_mmr8(uv_hub_info->scir.offset, value);
+       }
+}
+static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
+{
+       if (uv_cpu_hub_info(cpu)->scir.state != value) {
+               uv_cpu_hub_info(cpu)->scir.state = value;
+               uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
+       }
+}
 
+#endif /* _ASM_X86_UV_UV_HUB_H */
index b7c0dea119fed8f8ea62e520ecf1e504e65e05f0..61e08c0a290785c5527d9faf607c0c2ab2e384e2 100644 (file)
@@ -223,9 +223,15 @@ struct pci_header {
 } __attribute__((packed));
 
 /* Function prototypes for bootstrapping */
+#ifdef CONFIG_VMI
 extern void vmi_init(void);
+extern void vmi_activate(void);
 extern void vmi_bringup(void);
-extern void vmi_apply_boot_page_allocations(void);
+#else
+static inline void vmi_init(void) {}
+static inline void vmi_activate(void) {}
+static inline void vmi_bringup(void) {}
+#endif
 
 /* State needed to start an application processor in an SMP system. */
 struct vmi_ap_state {
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
new file mode 100644 (file)
index 0000000..c11b7e1
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2008, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef ASM_X86__VMWARE_H
+#define ASM_X86__VMWARE_H
+
+extern unsigned long vmware_get_tsc_khz(void);
+extern int vmware_platform(void);
+extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
+
+#endif
index 3f6000d95fe21f38dae31ff2dbfb8945703d91f4..5e79ca694326cdeb85886d371565ad506fe6f0af 100644 (file)
 #ifndef _ASM_X86_XEN_HYPERCALL_H
 #define _ASM_X86_XEN_HYPERCALL_H
 
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
index a38d25ac87d2abdbfb6e077f304210aef43d78f8..81fbd735aec47e3edbed57a5d83eb142f3c3571e 100644 (file)
 #ifndef _ASM_X86_XEN_HYPERVISOR_H
 #define _ASM_X86_XEN_HYPERVISOR_H
 
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/version.h>
-
-#include <asm/ptrace.h>
-#include <asm/page.h>
-#include <asm/desc.h>
-#if defined(__i386__)
-#  ifdef CONFIG_X86_PAE
-#   include <asm-generic/pgtable-nopud.h>
-#  else
-#   include <asm-generic/pgtable-nopmd.h>
-#  endif
-#endif
-#include <asm/xen/hypercall.h>
-
 /* arch/i386/kernel/setup.c */
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
-/* arch/i386/mach-xen/evtchn.c */
-/* Force a proper event-channel callback from Xen. */
-extern void force_evtchn_callback(void);
-
-/* Turn jiffies into Xen system time. */
-u64 jiffies_to_st(unsigned long jiffies);
-
-
-#define MULTI_UVMFLAGS_INDEX 3
-#define MULTI_UVMDOMID_INDEX 4
-
 enum xen_domain_type {
        XEN_NATIVE,
        XEN_PV_DOMAIN,
@@ -74,9 +45,15 @@ enum xen_domain_type {
 
 extern enum xen_domain_type xen_domain_type;
 
+#ifdef CONFIG_XEN
 #define xen_domain()           (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()                (xen_domain_type == XEN_PV_DOMAIN)
+#else
+#define xen_domain()           (0)
+#endif
+
+#define xen_pv_domain()                (xen_domain() && xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()       (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN)
+
 #define xen_initial_domain()   (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
-#define xen_hvm_domain()       (xen_domain_type == XEN_HVM_DOMAIN)
 
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
index bc628998a1b9dc896a416d6ea0ef5293463c8a24..7ef617ef1df3ca38c1d29286a4742e770982dc67 100644 (file)
@@ -1,11 +1,16 @@
 #ifndef _ASM_X86_XEN_PAGE_H
 #define _ASM_X86_XEN_PAGE_H
 
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
 #include <linux/pfn.h>
 
 #include <asm/uaccess.h>
+#include <asm/page.h>
 #include <asm/pgtable.h>
 
+#include <xen/interface/xen.h>
 #include <xen/features.h>
 
 /* Xen machine address */
index b62a7667828eb77574128a8ca82b1f54cee28238..1f208aaee7803fa1e799bacccd729888451fed6e 100644 (file)
@@ -12,6 +12,7 @@ CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
 endif
 
 #
@@ -23,9 +24,9 @@ CFLAGS_vsyscall_64.o  := $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o          := $(nostackp)
 CFLAGS_tsc.o           := $(nostackp)
 
-obj-y                  := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
+obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y                  += time_$(BITS).o ioport.o ldt.o
+obj-y                  += time_$(BITS).o ioport.o ldt.o dumpstack.o
 obj-y                  += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS)        += visws_quirks.o
 obj-$(CONFIG_X86_32)   += probe_roms_32.o
@@ -105,6 +106,8 @@ microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
 microcode-$(CONFIG_MICROCODE_AMD)      += microcode_amd.o
 obj-$(CONFIG_MICROCODE)                        += microcode.o
 
+obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
index 4c51a2f8fd315900071de65e6ee3bee43ea2718f..65d0b72777ea099a99ced450bec404220af6e2e2 100644 (file)
@@ -1360,6 +1360,17 @@ static void __init acpi_process_madt(void)
                        disable_acpi();
                }
        }
+
+       /*
+        * ACPI supports both logical (e.g. Hyper-Threading) and physical
+        * processors, where MPS only supports physical.
+        */
+       if (acpi_lapic && acpi_ioapic)
+               printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
+                      "information\n");
+       else if (acpi_lapic)
+               printk(KERN_INFO "Using ACPI for processor (LAPIC) "
+                      "configuration information\n");
 #endif
        return;
 }
index a7b6dec6fc3f4afc72f57651b1523156d415431b..0a60d60ed036b264e6d4c05174dfc3b464574852 100644 (file)
@@ -235,8 +235,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
        status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
        writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-       if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
-               printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+       if (unlikely(i == EXIT_LOOP_COUNT))
+               panic("AMD IOMMU: Completion wait loop failed\n");
+
 out:
        spin_unlock_irqrestore(&iommu->lock, flags);
 
index 30ae2701b3df1b8976400d5996ed72a4a98baeea..c6cc22815d35f232dd21fae7d2718a3c161b373c 100644 (file)
@@ -427,6 +427,10 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                        &entry, sizeof(entry));
 
+       /* set head and tail to zero manually */
+       writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 
        return cmd_buf;
@@ -1074,7 +1078,8 @@ int __init amd_iommu_init(void)
                goto free;
 
        /* IOMMU rlookup table - find the IOMMU for a specific device */
-       amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
+       amd_iommu_rlookup_table = (void *)__get_free_pages(
+                       GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;
index 16f94879b52578aba3e689d7a8adcccf59ab8ecb..20c6e12c0475534c17c5b72c1eed4225288e16fa 100644 (file)
@@ -441,6 +441,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
                v = apic_read(APIC_LVTT);
                v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
                apic_write(APIC_LVTT, v);
+               apic_write(APIC_TMICT, 0xffffffff);
                break;
        case CLOCK_EVT_MODE_RESUME:
                /* Nothing to do here */
@@ -559,13 +560,13 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
        } else {
                res = (((u64)deltapm) *  mult) >> 22;
                do_div(res, 1000000);
-               printk(KERN_WARNING "APIC calibration not consistent "
+               pr_warning("APIC calibration not consistent "
                        "with PM Timer: %ldms instead of 100ms\n",
                        (long)res);
                /* Correct the lapic counter value */
                res = (((u64)(*delta)) * pm_100ms);
                do_div(res, deltapm);
-               printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
+               pr_info("APIC delta adjusted to PM-Timer: "
                        "%lu (%ld)\n", (unsigned long)res, *delta);
                *delta = (long)res;
        }
@@ -645,8 +646,7 @@ static int __init calibrate_APIC_clock(void)
         */
        if (calibration_result < (1000000 / HZ)) {
                local_irq_enable();
-               printk(KERN_WARNING
-                      "APIC frequency too slow, disabling apic timer\n");
+               pr_warning("APIC frequency too slow, disabling apic timer\n");
                return -1;
        }
 
@@ -672,13 +672,9 @@ static int __init calibrate_APIC_clock(void)
                while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
                        cpu_relax();
 
-               local_irq_disable();
-
                /* Stop the lapic timer */
                lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
 
-               local_irq_enable();
-
                /* Jiffies delta */
                deltaj = lapic_cal_j2 - lapic_cal_j1;
                apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
@@ -692,8 +688,7 @@ static int __init calibrate_APIC_clock(void)
                local_irq_enable();
 
        if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
-               printk(KERN_WARNING
-                      "APIC timer disabled due to verification failure.\n");
+               pr_warning("APIC timer disabled due to verification failure.\n");
                        return -1;
        }
 
@@ -714,7 +709,7 @@ void __init setup_boot_APIC_clock(void)
         * broadcast mechanism is used. On UP systems simply ignore it.
         */
        if (disable_apic_timer) {
-               printk(KERN_INFO "Disabling APIC timer\n");
+               pr_info("Disabling APIC timer\n");
                /* No broadcast on UP ! */
                if (num_possible_cpus() > 1) {
                        lapic_clockevent.mult = 1;
@@ -741,7 +736,7 @@ void __init setup_boot_APIC_clock(void)
        if (nmi_watchdog != NMI_IO_APIC)
                lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
        else
-               printk(KERN_WARNING "APIC timer registered as dummy,"
+               pr_warning("APIC timer registered as dummy,"
                        " due to nmi_watchdog=%d!\n", nmi_watchdog);
 
        /* Setup the lapic or request the broadcast */
@@ -773,8 +768,7 @@ static void local_apic_timer_interrupt(void)
         * spurious.
         */
        if (!evt->event_handler) {
-               printk(KERN_WARNING
-                      "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+               pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
                /* Switch it off */
                lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
                return;
@@ -814,9 +808,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
         * Besides, if we don't timer interrupts ignore the global
         * interrupt lock, which is the WrongThing (tm) to do.
         */
-#ifdef CONFIG_X86_64
        exit_idle();
-#endif
        irq_enter();
        local_apic_timer_interrupt();
        irq_exit();
@@ -1093,7 +1085,7 @@ static void __cpuinit lapic_setup_esr(void)
        unsigned int oldvalue, value, maxlvt;
 
        if (!lapic_is_integrated()) {
-               printk(KERN_INFO "No ESR for 82489DX.\n");
+               pr_info("No ESR for 82489DX.\n");
                return;
        }
 
@@ -1104,7 +1096,7 @@ static void __cpuinit lapic_setup_esr(void)
                 * ESR disabled - we can't do anything useful with the
                 * errors anyway - mbligh
                 */
-               printk(KERN_INFO "Leaving ESR disabled.\n");
+               pr_info("Leaving ESR disabled.\n");
                return;
        }
 
@@ -1298,7 +1290,7 @@ void check_x2apic(void)
        rdmsr(MSR_IA32_APICBASE, msr, msr2);
 
        if (msr & X2APIC_ENABLE) {
-               printk("x2apic enabled by BIOS, switching to x2apic ops\n");
+               pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
                x2apic_preenabled = x2apic = 1;
                apic_ops = &x2apic_ops;
        }
@@ -1310,7 +1302,7 @@ void enable_x2apic(void)
 
        rdmsr(MSR_IA32_APICBASE, msr, msr2);
        if (!(msr & X2APIC_ENABLE)) {
-               printk("Enabling x2apic\n");
+               pr_info("Enabling x2apic\n");
                wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
        }
 }
@@ -1325,9 +1317,8 @@ void __init enable_IR_x2apic(void)
                return;
 
        if (!x2apic_preenabled && disable_x2apic) {
-               printk(KERN_INFO
-                      "Skipped enabling x2apic and Interrupt-remapping "
-                      "because of nox2apic\n");
+               pr_info("Skipped enabling x2apic and Interrupt-remapping "
+                       "because of nox2apic\n");
                return;
        }
 
@@ -1335,22 +1326,19 @@ void __init enable_IR_x2apic(void)
                panic("Bios already enabled x2apic, can't enforce nox2apic");
 
        if (!x2apic_preenabled && skip_ioapic_setup) {
-               printk(KERN_INFO
-                      "Skipped enabling x2apic and Interrupt-remapping "
-                      "because of skipping io-apic setup\n");
+               pr_info("Skipped enabling x2apic and Interrupt-remapping "
+                       "because of skipping io-apic setup\n");
                return;
        }
 
        ret = dmar_table_init();
        if (ret) {
-               printk(KERN_INFO
-                      "dmar_table_init() failed with %d:\n", ret);
+               pr_info("dmar_table_init() failed with %d:\n", ret);
 
                if (x2apic_preenabled)
                        panic("x2apic enabled by bios. But IR enabling failed");
                else
-                       printk(KERN_INFO
-                              "Not enabling x2apic,Intr-remapping\n");
+                       pr_info("Not enabling x2apic,Intr-remapping\n");
                return;
        }
 
@@ -1359,7 +1347,7 @@ void __init enable_IR_x2apic(void)
 
        ret = save_mask_IO_APIC_setup();
        if (ret) {
-               printk(KERN_INFO "Saving IO-APIC state failed: %d\n", ret);
+               pr_info("Saving IO-APIC state failed: %d\n", ret);
                goto end;
        }
 
@@ -1394,14 +1382,11 @@ end:
 
        if (!ret) {
                if (!x2apic_preenabled)
-                       printk(KERN_INFO
-                              "Enabled x2apic and interrupt-remapping\n");
+                       pr_info("Enabled x2apic and interrupt-remapping\n");
                else
-                       printk(KERN_INFO
-                              "Enabled Interrupt-remapping\n");
+                       pr_info("Enabled Interrupt-remapping\n");
        } else
-               printk(KERN_ERR
-                      "Failed to enable Interrupt-remapping and x2apic\n");
+               pr_err("Failed to enable Interrupt-remapping and x2apic\n");
 #else
        if (!cpu_has_x2apic)
                return;
@@ -1410,8 +1395,8 @@ end:
                panic("x2apic enabled prior OS handover,"
                      " enable CONFIG_INTR_REMAP");
 
-       printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
-              " and x2apic\n");
+       pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
+               " and x2apic\n");
 #endif
 
        return;
@@ -1428,7 +1413,7 @@ end:
 static int __init detect_init_APIC(void)
 {
        if (!cpu_has_apic) {
-               printk(KERN_INFO "No local APIC present\n");
+               pr_info("No local APIC present\n");
                return -1;
        }
 
@@ -1469,8 +1454,8 @@ static int __init detect_init_APIC(void)
                 * "lapic" specified.
                 */
                if (!force_enable_local_apic) {
-                       printk(KERN_INFO "Local APIC disabled by BIOS -- "
-                              "you can enable it with \"lapic\"\n");
+                       pr_info("Local APIC disabled by BIOS -- "
+                               "you can enable it with \"lapic\"\n");
                        return -1;
                }
                /*
@@ -1480,8 +1465,7 @@ static int __init detect_init_APIC(void)
                 */
                rdmsr(MSR_IA32_APICBASE, l, h);
                if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-                       printk(KERN_INFO
-                              "Local APIC disabled by BIOS -- reenabling.\n");
+                       pr_info("Local APIC disabled by BIOS -- reenabling.\n");
                        l &= ~MSR_IA32_APICBASE_BASE;
                        l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
                        wrmsr(MSR_IA32_APICBASE, l, h);
@@ -1494,7 +1478,7 @@ static int __init detect_init_APIC(void)
         */
        features = cpuid_edx(1);
        if (!(features & (1 << X86_FEATURE_APIC))) {
-               printk(KERN_WARNING "Could not enable APIC!\n");
+               pr_warning("Could not enable APIC!\n");
                return -1;
        }
        set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
@@ -1505,14 +1489,14 @@ static int __init detect_init_APIC(void)
        if (l & MSR_IA32_APICBASE_ENABLE)
                mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
 
-       printk(KERN_INFO "Found and enabled local APIC!\n");
+       pr_info("Found and enabled local APIC!\n");
 
        apic_pm_activate();
 
        return 0;
 
 no_apic:
-       printk(KERN_INFO "No local APIC present or hardware disabled\n");
+       pr_info("No local APIC present or hardware disabled\n");
        return -1;
 }
 #endif
@@ -1588,12 +1572,12 @@ int __init APIC_init_uniprocessor(void)
 {
 #ifdef CONFIG_X86_64
        if (disable_apic) {
-               printk(KERN_INFO "Apic disabled\n");
+               pr_info("Apic disabled\n");
                return -1;
        }
        if (!cpu_has_apic) {
                disable_apic = 1;
-               printk(KERN_INFO "Apic disabled by BIOS\n");
+               pr_info("Apic disabled by BIOS\n");
                return -1;
        }
 #else
@@ -1605,8 +1589,8 @@ int __init APIC_init_uniprocessor(void)
         */
        if (!cpu_has_apic &&
            APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
-               printk(KERN_ERR "BIOS bug, local APIC 0x%x not detected!...\n",
-                      boot_cpu_physical_apicid);
+               pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
+                       boot_cpu_physical_apicid);
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
                return -1;
        }
@@ -1682,9 +1666,7 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 {
        u32 v;
 
-#ifdef CONFIG_X86_64
        exit_idle();
-#endif
        irq_enter();
        /*
         * Check if this really is a spurious interrupt and ACK it
@@ -1699,8 +1681,8 @@ void smp_spurious_interrupt(struct pt_regs *regs)
        add_pda(irq_spurious_count, 1);
 #else
        /* see sw-dev-man vol 3, chapter 7.4.13.5 */
-       printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
-              "should never happen.\n", smp_processor_id());
+       pr_info("spurious APIC interrupt on CPU#%d, "
+               "should never happen.\n", smp_processor_id());
        __get_cpu_var(irq_stat).irq_spurious_count++;
 #endif
        irq_exit();
@@ -1713,9 +1695,7 @@ void smp_error_interrupt(struct pt_regs *regs)
 {
        u32 v, v1;
 
-#ifdef CONFIG_X86_64
        exit_idle();
-#endif
        irq_enter();
        /* First tickle the hardware, only then report what went on. -- REW */
        v = apic_read(APIC_ESR);
@@ -1724,17 +1704,18 @@ void smp_error_interrupt(struct pt_regs *regs)
        ack_APIC_irq();
        atomic_inc(&irq_err_count);
 
-       /* Here is what the APIC error bits mean:
-          0: Send CS error
-          1: Receive CS error
-          2: Send accept error
-          3: Receive accept error
-          4: Reserved
-          5: Send illegal vector
-          6: Received illegal vector
-          7: Illegal register address
-       */
-       printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
+       /*
+        * Here is what the APIC error bits mean:
+        * 0: Send CS error
+        * 1: Receive CS error
+        * 2: Send accept error
+        * 3: Receive accept error
+        * 4: Reserved
+        * 5: Send illegal vector
+        * 6: Received illegal vector
+        * 7: Illegal register address
+        */
+       pr_debug("APIC error on CPU%d: %02x(%02x)\n",
                smp_processor_id(), v , v1);
        irq_exit();
 }
@@ -1838,15 +1819,15 @@ void __cpuinit generic_processor_info(int apicid, int version)
         * Validate version
         */
        if (version == 0x0) {
-               printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
-                               "fixing up to 0x10. (tell your hw vendor)\n",
-                               version);
+               pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
+                       "fixing up to 0x10. (tell your hw vendor)\n",
+                       version);
                version = 0x10;
        }
        apic_version[apicid] = version;
 
        if (num_processors >= NR_CPUS) {
-               printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
+               pr_warning("WARNING: NR_CPUS limit of %i reached."
                        "  Processor ignored.\n", NR_CPUS);
                return;
        }
@@ -2209,7 +2190,7 @@ static int __init apic_set_verbosity(char *arg)
        else if (strcmp("verbose", arg) == 0)
                apic_verbosity = APIC_VERBOSE;
        else {
-               printk(KERN_WARNING "APIC Verbosity level %s not recognised"
+               pr_warning("APIC Verbosity level %s not recognised"
                        " use apic=verbose or apic=debug\n", arg);
                return -EINVAL;
        }
index 5145a6e72bbbf7f6a4e93d85aed20c769f0883dc..3a26525a3f31a03f3490f7e17be537a6fb3e7c7e 100644 (file)
@@ -391,11 +391,7 @@ static int power_off;
 #else
 static int power_off = 1;
 #endif
-#ifdef CONFIG_APM_REAL_MODE_POWER_OFF
-static int realmode_power_off = 1;
-#else
 static int realmode_power_off;
-#endif
 #ifdef CONFIG_APM_ALLOW_INTS
 static int allow_ints = 1;
 #else
index 6649d09ad88f8830df49427c7d8763196eb0a0f6..ee4df08feee6b0a64b62ca639d39cffbdd0cc87a 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/suspend.h>
 #include <linux/kbuild.h>
 #include <asm/ucontext.h>
-#include "sigframe.h"
+#include <asm/sigframe.h>
 #include <asm/pgtable.h>
 #include <asm/fixmap.h>
 #include <asm/processor.h>
index 7fcf63d22f8b28c4457b6f8e7fb2eae54b071b1f..1d41d3f1edbcaa3d39450561d25b8ef0fb801e38 100644 (file)
@@ -20,6 +20,8 @@
 
 #include <xen/interface/xen.h>
 
+#include <asm/sigframe.h>
+
 #define __NO_STUBS 1
 #undef __SYSCALL
 #undef _ASM_X86_UNISTD_64_H
@@ -87,7 +89,7 @@ int main(void)
        BLANK();
 #undef ENTRY
        DEFINE(IA32_RT_SIGFRAME_sigcontext,
-              offsetof (struct rt_sigframe32, uc.uc_mcontext));
+              offsetof (struct rt_sigframe_ia32, uc.uc_mcontext));
        BLANK();
 #endif
        DEFINE(pbe_address, offsetof(struct pbe, address));
index f0dfe6f17e7eabbe41f90a8afd75c54766081fa2..2a0a2a3cac263a87edc38f12894185749a1b9dd5 100644 (file)
@@ -69,10 +69,10 @@ s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
 
 long sn_partition_id;
 EXPORT_SYMBOL_GPL(sn_partition_id);
-long uv_coherency_id;
-EXPORT_SYMBOL_GPL(uv_coherency_id);
-long uv_region_size;
-EXPORT_SYMBOL_GPL(uv_region_size);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL_GPL(sn_region_size);
 int uv_type;
 
 
@@ -100,6 +100,56 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
        return ret;
 }
 
+int
+uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+                          unsigned long *intr_mmr_offset)
+{
+       union uv_watchlist_u size_blade;
+       u64 watchlist;
+       s64 ret;
+
+       size_blade.size = mq_size;
+       size_blade.blade = blade;
+
+       /*
+        * bios returns watchlist number or negative error number.
+        */
+       ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
+                       size_blade.val, (u64)intr_mmr_offset,
+                       (u64)&watchlist, 0);
+       if (ret < BIOS_STATUS_SUCCESS)
+               return ret;
+
+       return watchlist;
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
+
+int
+uv_bios_mq_watchlist_free(int blade, int watchlist_num)
+{
+       return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
+                               blade, watchlist_num, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
+
+s64
+uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
+{
+       return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
+                                       perms, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
+
+s64
+uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+       s64 ret;
+
+       ret = uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
+                                       (u64)addr, buf, (u64)len, 0);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);
 
 s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
 {
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
new file mode 100644 (file)
index 0000000..2ac0ab7
--- /dev/null
@@ -0,0 +1,161 @@
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <asm/e820.h>
+#include <asm/proto.h>
+
+/*
+ * Some BIOSes seem to corrupt the low 64k of memory during events
+ * like suspend/resume and unplugging an HDMI cable.  Reserve all
+ * remaining free memory in that area and fill it with a distinct
+ * pattern.
+ */
+#define MAX_SCAN_AREAS 8
+
+static int __read_mostly memory_corruption_check = -1;
+
+static unsigned __read_mostly corruption_check_size = 64*1024;
+static unsigned __read_mostly corruption_check_period = 60; /* seconds */
+
+static struct e820entry scan_areas[MAX_SCAN_AREAS];
+static int num_scan_areas;
+
+
+static __init int set_corruption_check(char *arg)
+{
+       char *end;
+
+       memory_corruption_check = simple_strtol(arg, &end, 10);
+
+       return (*end == 0) ? 0 : -EINVAL;
+}
+early_param("memory_corruption_check", set_corruption_check);
+
+static __init int set_corruption_check_period(char *arg)
+{
+       char *end;
+
+       corruption_check_period = simple_strtoul(arg, &end, 10);
+
+       return (*end == 0) ? 0 : -EINVAL;
+}
+early_param("memory_corruption_check_period", set_corruption_check_period);
+
+static __init int set_corruption_check_size(char *arg)
+{
+       char *end;
+       unsigned size;
+
+       size = memparse(arg, &end);
+
+       if (*end == '\0')
+               corruption_check_size = size;
+
+       return (size == corruption_check_size) ? 0 : -EINVAL;
+}
+early_param("memory_corruption_check_size", set_corruption_check_size);
+
+
+void __init setup_bios_corruption_check(void)
+{
+       u64 addr = PAGE_SIZE;   /* assume first page is reserved anyway */
+
+       if (memory_corruption_check == -1) {
+               memory_corruption_check =
+#ifdef CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
+                       1
+#else
+                       0
+#endif
+                       ;
+       }
+
+       if (corruption_check_size == 0)
+               memory_corruption_check = 0;
+
+       if (!memory_corruption_check)
+               return;
+
+       corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
+
+       while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
+               u64 size;
+               addr = find_e820_area_size(addr, &size, PAGE_SIZE);
+
+               if (addr == 0)
+                       break;
+
+               if ((addr + size) > corruption_check_size)
+                       size = corruption_check_size - addr;
+
+               if (size == 0)
+                       break;
+
+               e820_update_range(addr, size, E820_RAM, E820_RESERVED);
+               scan_areas[num_scan_areas].addr = addr;
+               scan_areas[num_scan_areas].size = size;
+               num_scan_areas++;
+
+               /* Assume we've already mapped this early memory */
+               memset(__va(addr), 0, size);
+
+               addr += size;
+       }
+
+       printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
+              num_scan_areas);
+       update_e820();
+}
+
+
+void check_for_bios_corruption(void)
+{
+       int i;
+       int corruption = 0;
+
+       if (!memory_corruption_check)
+               return;
+
+       for (i = 0; i < num_scan_areas; i++) {
+               unsigned long *addr = __va(scan_areas[i].addr);
+               unsigned long size = scan_areas[i].size;
+
+               for (; size; addr++, size -= sizeof(unsigned long)) {
+                       if (!*addr)
+                               continue;
+                       printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n",
+                              addr, __pa(addr), *addr);
+                       corruption = 1;
+                       *addr = 0;
+               }
+       }
+
+       WARN_ONCE(corruption, KERN_ERR "Memory corruption detected in low memory\n");
+}
+
+static void check_corruption(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(bios_check_work, check_corruption);
+
+static void check_corruption(struct work_struct *dummy)
+{
+       check_for_bios_corruption();
+       schedule_delayed_work(&bios_check_work,
+               round_jiffies_relative(corruption_check_period*HZ)); 
+}
+
+static int start_periodic_check_for_corruption(void)
+{
+       if (!memory_corruption_check || corruption_check_period == 0)
+               return 0;
+
+       printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
+              corruption_check_period);
+
+       /* First time we run the checks right away */
+       schedule_delayed_work(&bios_check_work, 0);
+       return 0;
+}
+
+module_init(start_periodic_check_for_corruption);
+
index 82ec6075c057b695cf6904a28a58fb8c60c37d7a..a5c04e88777e84fcabeb46c67c4fb4941a6261f4 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-y                  := intel_cacheinfo.o addon_cpuid_features.o
 obj-y                  += proc.o capflags.o powerflags.o common.o
+obj-y                  += vmware.o hypervisor.o
 
 obj-$(CONFIG_X86_32)   += bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)   += bugs_64.o
index b9c9ea0217a9b1fd4581f27fed692aaa6fe167ee..42e0853030cbe43275cc596c3f5896388dd30cae 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/proto.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/hypervisor.h>
 
 #include "cpu.h"
 
@@ -703,6 +704,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
        detect_ht(c);
 #endif
 
+       init_hypervisor(c);
        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
@@ -862,7 +864,7 @@ EXPORT_SYMBOL(_cpu_pda);
 
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
 
 void __cpuinit pda_init(int cpu)
 {
@@ -903,8 +905,8 @@ void __cpuinit pda_init(int cpu)
        }
 }
 
-char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-                          DEBUG_STKSZ] __page_aligned_bss;
+static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
+                                 DEBUG_STKSZ] __page_aligned_bss;
 
 extern asmlinkage void ignore_sysret(void);
 
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
new file mode 100644 (file)
index 0000000..fb5b86a
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Common hypervisor code
+ *
+ * Copyright (C) 2008, VMware, Inc.
+ * Author : Alok N Kataria <akataria@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/vmware.h>
+#include <asm/hypervisor.h>
+
+static inline void __cpuinit
+detect_hypervisor_vendor(struct cpuinfo_x86 *c)
+{
+       if (vmware_platform()) {
+               c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
+       } else {
+               c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
+       }
+}
+
+unsigned long get_hypervisor_tsc_freq(void)
+{
+       if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
+               return vmware_get_tsc_khz();
+       return 0;
+}
+
+static inline void __cpuinit
+hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
+{
+       if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) {
+               vmware_set_feature_bits(c);
+               return;
+       }
+}
+
+void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
+{
+       detect_hypervisor_vendor(c);
+       hypervisor_set_feature_bits(c);
+}
index cce0b6118d550e015a34c5e1b0f9bc60de1f8b3d..816f27f289b10a416847a141b20b1ed9540a530b 100644 (file)
@@ -307,12 +307,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
+#endif
 
        if (cpu_has_bts)
                ptrace_bts_init_intel(c);
 
-#endif
-
        detect_extended_topology(c);
        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
index 3f46afbb1cf1946f52212ce96942429a9885cb04..68b5d8681cbb54597edf9021721745dbab19cb5c 100644 (file)
@@ -644,20 +644,17 @@ static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
        return show_shared_cpu_map_func(leaf, 1, buf);
 }
 
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
-       switch(this_leaf->eax.split.type) {
-           case CACHE_TYPE_DATA:
+static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+{
+       switch (this_leaf->eax.split.type) {
+       case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
-               break;
-           case CACHE_TYPE_INST:
+       case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
-               break;
-           case CACHE_TYPE_UNIFIED:
+       case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
-               break;
-           default:
+       default:
                return sprintf(buf, "Unknown\n");
-               break;
        }
 }
 
index 4b031a4ac8562d3ed69f14f0d750dfe8519a057e..1c838032fd3732fde9bff776cbb63a2821115d60 100644 (file)
@@ -510,12 +510,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-       static cpumask_t mce_cpus = CPU_MASK_NONE;
-
        mce_cpu_quirks(c);
 
        if (mce_dont_init ||
-           cpu_test_and_set(smp_processor_id(), mce_cpus) ||
            !mce_available(c))
                return;
 
index c78c04821ea18a58266b812fef480aa0b5ec0fbb..1159e269e596bc05d9b82d3ed633a434c67d1649 100644 (file)
@@ -803,6 +803,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
 }
 
 static struct res_range __initdata range[RANGE_NUM];
+static int __initdata nr_range;
 
 #ifdef CONFIG_MTRR_SANITIZER
 
@@ -1206,39 +1207,43 @@ struct mtrr_cleanup_result {
 #define PSHIFT         (PAGE_SHIFT - 10)
 
 static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
-static struct res_range __initdata range_new[RANGE_NUM];
 static unsigned long __initdata min_loss_pfn[RANGE_NUM];
 
-static int __init mtrr_cleanup(unsigned address_bits)
+static void __init print_out_mtrr_range_state(void)
 {
-       unsigned long extra_remove_base, extra_remove_size;
-       unsigned long base, size, def, dummy;
-       mtrr_type type;
-       int nr_range, nr_range_new;
-       u64 chunk_size, gran_size;
-       unsigned long range_sums, range_sums_new;
-       int index_good;
-       int num_reg_good;
        int i;
+       char start_factor = 'K', size_factor = 'K';
+       unsigned long start_base, size_base;
+       mtrr_type type;
 
-       /* extra one for all 0 */
-       int num[MTRR_NUM_TYPES + 1];
+       for (i = 0; i < num_var_ranges; i++) {
 
-       if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
-               return 0;
-       rdmsr(MTRRdefType_MSR, def, dummy);
-       def &= 0xff;
-       if (def != MTRR_TYPE_UNCACHABLE)
-               return 0;
+               size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
+               if (!size_base)
+                       continue;
 
-       /* get it and store it aside */
-       memset(range_state, 0, sizeof(range_state));
-       for (i = 0; i < num_var_ranges; i++) {
-               mtrr_if->get(i, &base, &size, &type);
-               range_state[i].base_pfn = base;
-               range_state[i].size_pfn = size;
-               range_state[i].type = type;
+               size_base = to_size_factor(size_base, &size_factor),
+               start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
+               start_base = to_size_factor(start_base, &start_factor),
+               type = range_state[i].type;
+
+               printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+                       i, start_base, start_factor,
+                       size_base, size_factor,
+                       (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
+                           ((type == MTRR_TYPE_WRPROT) ? "WP" :
+                            ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
+                       );
        }
+}
+
+static int __init mtrr_need_cleanup(void)
+{
+       int i;
+       mtrr_type type;
+       unsigned long size;
+       /* extra one for all 0 */
+       int num[MTRR_NUM_TYPES + 1];
 
        /* check entries number */
        memset(num, 0, sizeof(num));
@@ -1263,29 +1268,133 @@ static int __init mtrr_cleanup(unsigned address_bits)
                num_var_ranges - num[MTRR_NUM_TYPES])
                return 0;
 
-       /* print original var MTRRs at first, for debugging: */
-       printk(KERN_DEBUG "original variable MTRRs\n");
-       for (i = 0; i < num_var_ranges; i++) {
-               char start_factor = 'K', size_factor = 'K';
-               unsigned long start_base, size_base;
+       return 1;
+}
 
-               size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
-               if (!size_base)
-                       continue;
+static unsigned long __initdata range_sums;
+static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
+                                        unsigned long extra_remove_base,
+                                        unsigned long extra_remove_size,
+                                        int i)
+{
+       int num_reg;
+       static struct res_range range_new[RANGE_NUM];
+       static int nr_range_new;
+       unsigned long range_sums_new;
+
+       /* convert ranges to var ranges state */
+       num_reg = x86_setup_var_mtrrs(range, nr_range,
+                                               chunk_size, gran_size);
+
+       /* we got new setting in range_state, check it */
+       memset(range_new, 0, sizeof(range_new));
+       nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
+                               extra_remove_base, extra_remove_size);
+       range_sums_new = sum_ranges(range_new, nr_range_new);
+
+       result[i].chunk_sizek = chunk_size >> 10;
+       result[i].gran_sizek = gran_size >> 10;
+       result[i].num_reg = num_reg;
+       if (range_sums < range_sums_new) {
+               result[i].lose_cover_sizek =
+                       (range_sums_new - range_sums) << PSHIFT;
+               result[i].bad = 1;
+       } else
+               result[i].lose_cover_sizek =
+                       (range_sums - range_sums_new) << PSHIFT;
 
-               size_base = to_size_factor(size_base, &size_factor),
-               start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
-               start_base = to_size_factor(start_base, &start_factor),
-               type = range_state[i].type;
+       /* double check it */
+       if (!result[i].bad && !result[i].lose_cover_sizek) {
+               if (nr_range_new != nr_range ||
+                       memcmp(range, range_new, sizeof(range)))
+                               result[i].bad = 1;
+       }
 
-               printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
-                       i, start_base, start_factor,
-                       size_base, size_factor,
-                       (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
-                           ((type == MTRR_TYPE_WRPROT) ? "WP" :
-                            ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
-                       );
+       if (!result[i].bad && (range_sums - range_sums_new <
+                               min_loss_pfn[num_reg])) {
+               min_loss_pfn[num_reg] =
+                       range_sums - range_sums_new;
        }
+}
+
+static void __init mtrr_print_out_one_result(int i)
+{
+       char gran_factor, chunk_factor, lose_factor;
+       unsigned long gran_base, chunk_base, lose_base;
+
+       gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
+       chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
+       lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
+       printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+                       result[i].bad ? "*BAD*" : " ",
+                       gran_base, gran_factor, chunk_base, chunk_factor);
+       printk(KERN_CONT "num_reg: %d  \tlose cover RAM: %s%ld%c\n",
+                       result[i].num_reg, result[i].bad ? "-" : "",
+                       lose_base, lose_factor);
+}
+
+static int __init mtrr_search_optimal_index(void)
+{
+       int i;
+       int num_reg_good;
+       int index_good;
+
+       if (nr_mtrr_spare_reg >= num_var_ranges)
+               nr_mtrr_spare_reg = num_var_ranges - 1;
+       num_reg_good = -1;
+       for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
+               if (!min_loss_pfn[i])
+                       num_reg_good = i;
+       }
+
+       index_good = -1;
+       if (num_reg_good != -1) {
+               for (i = 0; i < NUM_RESULT; i++) {
+                       if (!result[i].bad &&
+                           result[i].num_reg == num_reg_good &&
+                           !result[i].lose_cover_sizek) {
+                               index_good = i;
+                               break;
+                       }
+               }
+       }
+
+       return index_good;
+}
+
+
+static int __init mtrr_cleanup(unsigned address_bits)
+{
+       unsigned long extra_remove_base, extra_remove_size;
+       unsigned long base, size, def, dummy;
+       mtrr_type type;
+       u64 chunk_size, gran_size;
+       int index_good;
+       int i;
+
+       if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
+               return 0;
+       rdmsr(MTRRdefType_MSR, def, dummy);
+       def &= 0xff;
+       if (def != MTRR_TYPE_UNCACHABLE)
+               return 0;
+
+       /* get it and store it aside */
+       memset(range_state, 0, sizeof(range_state));
+       for (i = 0; i < num_var_ranges; i++) {
+               mtrr_if->get(i, &base, &size, &type);
+               range_state[i].base_pfn = base;
+               range_state[i].size_pfn = size;
+               range_state[i].type = type;
+       }
+
+       /* check if we need to handle it and can handle it */
+       if (!mtrr_need_cleanup())
+               return 0;
+
+       /* print original var MTRRs at first, for debugging: */
+       printk(KERN_DEBUG "original variable MTRRs\n");
+       print_out_mtrr_range_state();
 
        memset(range, 0, sizeof(range));
        extra_remove_size = 0;
@@ -1309,176 +1418,64 @@ static int __init mtrr_cleanup(unsigned address_bits)
               range_sums >> (20 - PAGE_SHIFT));
 
        if (mtrr_chunk_size && mtrr_gran_size) {
-               int num_reg;
-               char gran_factor, chunk_factor, lose_factor;
-               unsigned long gran_base, chunk_base, lose_base;
-
-               debug_print++;
-               /* convert ranges to var ranges state */
-               num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
-                                             mtrr_gran_size);
+               i = 0;
+               mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
+                                     extra_remove_base, extra_remove_size, i);
 
-               /* we got new setting in range_state, check it */
-               memset(range_new, 0, sizeof(range_new));
-               nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
-                                                     extra_remove_base,
-                                                     extra_remove_size);
-               range_sums_new = sum_ranges(range_new, nr_range_new);
+               mtrr_print_out_one_result(i);
 
-               i = 0;
-               result[i].chunk_sizek = mtrr_chunk_size >> 10;
-               result[i].gran_sizek = mtrr_gran_size >> 10;
-               result[i].num_reg = num_reg;
-               if (range_sums < range_sums_new) {
-                       result[i].lose_cover_sizek =
-                               (range_sums_new - range_sums) << PSHIFT;
-                       result[i].bad = 1;
-               } else
-                       result[i].lose_cover_sizek =
-                               (range_sums - range_sums_new) << PSHIFT;
-
-               gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-               chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-               lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-               printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
-                        result[i].bad?"*BAD*":" ",
-                        gran_base, gran_factor, chunk_base, chunk_factor);
-               printk(KERN_CONT "num_reg: %d  \tlose cover RAM: %s%ld%c\n",
-                        result[i].num_reg, result[i].bad?"-":"",
-                        lose_base, lose_factor);
                if (!result[i].bad) {
                        set_var_mtrr_all(address_bits);
                        return 1;
                }
                printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
                       "will find optimal one\n");
-               debug_print--;
-               memset(result, 0, sizeof(result[0]));
        }
 
        i = 0;
        memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
        memset(result, 0, sizeof(result));
        for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
-               char gran_factor;
-               unsigned long gran_base;
-
-               if (debug_print)
-                       gran_base = to_size_factor(gran_size >> 10, &gran_factor);
 
                for (chunk_size = gran_size; chunk_size < (1ULL<<32);
                     chunk_size <<= 1) {
-                       int num_reg;
 
-                       if (debug_print) {
-                               char chunk_factor;
-                               unsigned long chunk_base;
-
-                               chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
-                               printk(KERN_INFO "\n");
-                               printk(KERN_INFO "gran_size: %ld%c   chunk_size: %ld%c \n",
-                                      gran_base, gran_factor, chunk_base, chunk_factor);
-                       }
                        if (i >= NUM_RESULT)
                                continue;
 
-                       /* convert ranges to var ranges state */
-                       num_reg = x86_setup_var_mtrrs(range, nr_range,
-                                                        chunk_size, gran_size);
-
-                       /* we got new setting in range_state, check it */
-                       memset(range_new, 0, sizeof(range_new));
-                       nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
-                                        extra_remove_base, extra_remove_size);
-                       range_sums_new = sum_ranges(range_new, nr_range_new);
-
-                       result[i].chunk_sizek = chunk_size >> 10;
-                       result[i].gran_sizek = gran_size >> 10;
-                       result[i].num_reg = num_reg;
-                       if (range_sums < range_sums_new) {
-                               result[i].lose_cover_sizek =
-                                       (range_sums_new - range_sums) << PSHIFT;
-                               result[i].bad = 1;
-                       } else
-                               result[i].lose_cover_sizek =
-                                       (range_sums - range_sums_new) << PSHIFT;
-
-                       /* double check it */
-                       if (!result[i].bad && !result[i].lose_cover_sizek) {
-                               if (nr_range_new != nr_range ||
-                                       memcmp(range, range_new, sizeof(range)))
-                                               result[i].bad = 1;
+                       mtrr_calc_range_state(chunk_size, gran_size,
+                                     extra_remove_base, extra_remove_size, i);
+                       if (debug_print) {
+                               mtrr_print_out_one_result(i);
+                               printk(KERN_INFO "\n");
                        }
 
-                       if (!result[i].bad && (range_sums - range_sums_new <
-                                              min_loss_pfn[num_reg])) {
-                               min_loss_pfn[num_reg] =
-                                       range_sums - range_sums_new;
-                       }
                        i++;
                }
        }
 
-       /* print out all */
-       for (i = 0; i < NUM_RESULT; i++) {
-               char gran_factor, chunk_factor, lose_factor;
-               unsigned long gran_base, chunk_base, lose_base;
-
-               gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-               chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-               lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-               printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
-                        result[i].bad?"*BAD*":" ",
-                        gran_base, gran_factor, chunk_base, chunk_factor);
-               printk(KERN_CONT "num_reg: %d  \tlose cover RAM: %s%ld%c\n",
-                        result[i].num_reg, result[i].bad?"-":"",
-                        lose_base, lose_factor);
-       }
-
        /* try to find the optimal index */
-       if (nr_mtrr_spare_reg >= num_var_ranges)
-               nr_mtrr_spare_reg = num_var_ranges - 1;
-       num_reg_good = -1;
-       for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
-               if (!min_loss_pfn[i])
-                       num_reg_good = i;
-       }
-
-       index_good = -1;
-       if (num_reg_good != -1) {
-               for (i = 0; i < NUM_RESULT; i++) {
-                       if (!result[i].bad &&
-                           result[i].num_reg == num_reg_good &&
-                           !result[i].lose_cover_sizek) {
-                               index_good = i;
-                               break;
-                       }
-               }
-       }
+       index_good = mtrr_search_optimal_index();
 
        if (index_good != -1) {
-               char gran_factor, chunk_factor, lose_factor;
-               unsigned long gran_base, chunk_base, lose_base;
-
                printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
                i = index_good;
-               gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-               chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-               lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-               printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
-                        gran_base, gran_factor, chunk_base, chunk_factor);
-               printk(KERN_CONT "num_reg: %d  \tlose RAM: %ld%c\n",
-                        result[i].num_reg, lose_base, lose_factor);
+               mtrr_print_out_one_result(i);
+
                /* convert ranges to var ranges state */
                chunk_size = result[i].chunk_sizek;
                chunk_size <<= 10;
                gran_size = result[i].gran_sizek;
                gran_size <<= 10;
-               debug_print++;
                x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
-               debug_print--;
                set_var_mtrr_all(address_bits);
+               printk(KERN_DEBUG "New variable MTRRs\n");
+               print_out_mtrr_range_state();
                return 1;
+       } else {
+               /* print out all */
+               for (i = 0; i < NUM_RESULT; i++)
+                       mtrr_print_out_one_result(i);
        }
 
        printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
@@ -1562,7 +1559,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 {
        unsigned long i, base, size, highest_pfn = 0, def, dummy;
        mtrr_type type;
-       int nr_range;
        u64 total_trim_size;
 
        /* extra one for all 0 */
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
new file mode 100644 (file)
index 0000000..284c399
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * VMware Detection code.
+ *
+ * Copyright (C) 2008, VMware, Inc.
+ * Author : Alok N Kataria <akataria@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/dmi.h>
+#include <asm/div64.h>
+#include <asm/vmware.h>
+
+#define CPUID_VMWARE_INFO_LEAF 0x40000000
+#define VMWARE_HYPERVISOR_MAGIC        0x564D5868
+#define VMWARE_HYPERVISOR_PORT 0x5658
+
+#define VMWARE_PORT_CMD_GETVERSION     10
+#define VMWARE_PORT_CMD_GETHZ          45
+
+#define VMWARE_PORT(cmd, eax, ebx, ecx, edx)                           \
+       __asm__("inl (%%dx)" :                                          \
+                       "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :    \
+                       "0"(VMWARE_HYPERVISOR_MAGIC),                   \
+                       "1"(VMWARE_PORT_CMD_##cmd),                     \
+                       "2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) :    \
+                       "memory");
+
+static inline int __vmware_platform(void)
+{
+       uint32_t eax, ebx, ecx, edx;
+       VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);
+       return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
+}
+
+static unsigned long __vmware_get_tsc_khz(void)
+{
+        uint64_t tsc_hz;
+        uint32_t eax, ebx, ecx, edx;
+
+        VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+
+        if (ebx == UINT_MAX)
+                return 0;
+        tsc_hz = eax | (((uint64_t)ebx) << 32);
+        do_div(tsc_hz, 1000);
+        BUG_ON(tsc_hz >> 32);
+        return tsc_hz;
+}
+
+/*
+ * While checking the dmi string information, just checking the product
+ * serial key should be enough, as this will always have a VMware
+ * specific string when running under VMware hypervisor.
+ */
+int vmware_platform(void)
+{
+       if (cpu_has_hypervisor) {
+               unsigned int eax, ebx, ecx, edx;
+               char hyper_vendor_id[13];
+
+               cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx);
+               memcpy(hyper_vendor_id + 0, &ebx, 4);
+               memcpy(hyper_vendor_id + 4, &ecx, 4);
+               memcpy(hyper_vendor_id + 8, &edx, 4);
+               hyper_vendor_id[12] = '\0';
+               if (!strcmp(hyper_vendor_id, "VMwareVMware"))
+                       return 1;
+       } else if (dmi_available && dmi_name_in_serial("VMware") &&
+                  __vmware_platform())
+               return 1;
+
+       return 0;
+}
+
+unsigned long vmware_get_tsc_khz(void)
+{
+       BUG_ON(!vmware_platform());
+       return __vmware_get_tsc_khz();
+}
+
+/*
+ * VMware hypervisor takes care of exporting a reliable TSC to the guest.
+ * Still, due to timing difference when running on virtual cpus, the TSC can
+ * be marked as unstable in some cases. For example, the TSC sync check at
+ * bootup can fail due to a marginal offset between vcpus' TSCs (though the
+ * TSCs do not drift from each other).  Also, the ACPI PM timer clocksource
+ * is not suitable as a watchdog when running on a hypervisor because the
+ * kernel may miss a wrap of the counter if the vcpu is descheduled for a
+ * long time. To skip these checks at runtime we set these capability bits,
+ * so that the kernel could just trust the hypervisor with providing a
+ * reliable virtual TSC that is suitable for timekeeping.
+ */
+void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c)
+{
+       set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+       set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+}
index 2685538179097a1e8e073ebf07cd49e898ef4f6e..d84a852e4cd76f4983bc617d674c7e59c0b3632d 100644 (file)
 
 #include <mach_ipi.h>
 
-/* This keeps a track of which one is crashing cpu. */
-static int crashing_cpu;
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
-static atomic_t waiting_for_crash_ipi;
 
-static int crash_nmi_callback(struct notifier_block *self,
-                       unsigned long val, void *data)
+static void kdump_nmi_callback(int cpu, struct die_args *args)
 {
        struct pt_regs *regs;
 #ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;
 #endif
-       int cpu;
 
-       if (val != DIE_NMI_IPI)
-               return NOTIFY_OK;
-
-       regs = ((struct die_args *)data)->regs;
-       cpu = raw_smp_processor_id();
-
-       /* Don't do anything if this handler is invoked on crashing cpu.
-        * Otherwise, system will completely hang. Crashing cpu can get
-        * an NMI if system was initially booted with nmi_watchdog parameter.
-        */
-       if (cpu == crashing_cpu)
-               return NOTIFY_STOP;
-       local_irq_disable();
+       regs = args->regs;
 
 #ifdef CONFIG_X86_32
        if (!user_mode_vm(regs)) {
@@ -65,54 +48,19 @@ static int crash_nmi_callback(struct notifier_block *self,
        }
 #endif
        crash_save_cpu(regs, cpu);
-       disable_local_APIC();
-       atomic_dec(&waiting_for_crash_ipi);
-       /* Assume hlt works */
-       halt();
-       for (;;)
-               cpu_relax();
-
-       return 1;
-}
 
-static void smp_send_nmi_allbutself(void)
-{
-       cpumask_t mask = cpu_online_map;
-       cpu_clear(safe_smp_processor_id(), mask);
-       if (!cpus_empty(mask))
-               send_IPI_mask(mask, NMI_VECTOR);
+       disable_local_APIC();
 }
 
-static struct notifier_block crash_nmi_nb = {
-       .notifier_call = crash_nmi_callback,
-};
-
-static void nmi_shootdown_cpus(void)
+static void kdump_nmi_shootdown_cpus(void)
 {
-       unsigned long msecs;
-
-       atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-       /* Would it be better to replace the trap vector here? */
-       if (register_die_notifier(&crash_nmi_nb))
-               return;         /* return what? */
-       /* Ensure the new callback function is set before sending
-        * out the NMI
-        */
-       wmb();
+       nmi_shootdown_cpus(kdump_nmi_callback);
 
-       smp_send_nmi_allbutself();
-
-       msecs = 1000; /* Wait at most a second for the other cpus to stop */
-       while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-               mdelay(1);
-               msecs--;
-       }
-
-       /* Leave the nmi callback set */
        disable_local_APIC();
 }
+
 #else
-static void nmi_shootdown_cpus(void)
+static void kdump_nmi_shootdown_cpus(void)
 {
        /* There are no cpus to shootdown */
 }
@@ -131,9 +79,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
        /* The kernel is broken so disable interrupts */
        local_irq_disable();
 
-       /* Make a note of crashing cpu. Will be used in NMI callback.*/
-       crashing_cpu = safe_smp_processor_id();
-       nmi_shootdown_cpus();
+       kdump_nmi_shootdown_cpus();
        lapic_shutdown();
 #if defined(CONFIG_X86_IO_APIC)
        disable_IO_APIC();
index a2d1176c38ee0d59b3e47925a79d8aebdff73d20..d6938d9351cfefa4afaaf5b78eb3b584111709e5 100644 (file)
@@ -847,17 +847,16 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
+               case 0 ... 0xC:
+                       /* sorry, don't know about them */
+                       break;
                case 0xD:
                case 0xE: /* Pentium M */
                        ds_configure(&ds_cfg_var);
                        break;
-               case 0xF: /* Core2 */
-               case 0x1C: /* Atom */
+               default: /* Core2, Atom, ... */
                        ds_configure(&ds_cfg_64);
                        break;
-               default:
-                       /* sorry, don't know about them */
-                       break;
                }
                break;
        case 0xF:
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
new file mode 100644 (file)
index 0000000..5962176
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/kexec.h>
+#include <linux/bug.h>
+#include <linux/nmi.h>
+#include <linux/sysfs.h>
+
+#include <asm/stacktrace.h>
+
+#include "dumpstack.h"
+
+int panic_on_unrecovered_nmi;
+unsigned int code_bytes = 64;
+int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
+static int die_counter;
+
+void printk_address(unsigned long address, int reliable)
+{
+       printk(" [<%p>] %s%pS\n", (void *) address,
+                       reliable ? "" : "? ", (void *) address);
+}
+
+/*
+ * x86-64 can have up to three kernel stacks:
+ * process stack
+ * interrupt stack
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+static inline int valid_stack_ptr(struct thread_info *tinfo,
+                       void *p, unsigned int size, void *end)
+{
+       void *t = tinfo;
+       if (end) {
+               if (p < end && p >= (end-THREAD_SIZE))
+                       return 1;
+               else
+                       return 0;
+       }
+       return p > t && p < t + THREAD_SIZE - size;
+}
+
+unsigned long
+print_context_stack(struct thread_info *tinfo,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data,
+               unsigned long *end)
+{
+       struct stack_frame *frame = (struct stack_frame *)bp;
+
+       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+               unsigned long addr;
+
+               addr = *stack;
+               if (__kernel_text_address(addr)) {
+                       if ((unsigned long) stack == bp + sizeof(long)) {
+                               ops->address(data, addr, 1);
+                               frame = frame->next_frame;
+                               bp = (unsigned long) frame;
+                       } else {
+                               ops->address(data, addr, bp == 0);
+                       }
+               }
+               stack++;
+       }
+       return bp;
+}
+
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+       printk(data);
+       print_symbol(msg, symbol);
+       printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+       printk("%s%s\n", (char *)data, msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+       printk("%s <%s> ", (char *)data, name);
+       return 0;
+}
+
+/*
+ * Print one address/symbol entries per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+       touch_nmi_watchdog();
+       printk(data);
+       printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+       .warning = print_trace_warning,
+       .warning_symbol = print_trace_warning_symbol,
+       .stack = print_trace_stack,
+       .address = print_trace_address,
+};
+
+void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp, char *log_lvl)
+{
+       printk("%sCall Trace:\n", log_lvl);
+       dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
+}
+
+void show_trace(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp)
+{
+       show_trace_log_lvl(task, regs, stack, bp, "");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+       show_stack_log_lvl(task, NULL, sp, 0, "");
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+       unsigned long bp = 0;
+       unsigned long stack;
+
+#ifdef CONFIG_FRAME_POINTER
+       if (!bp)
+               get_bp(bp);
+#endif
+
+       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+               current->pid, current->comm, print_tainted(),
+               init_utsname()->release,
+               (int)strcspn(init_utsname()->version, " "),
+               init_utsname()->version);
+       show_trace(NULL, NULL, &stack, bp);
+}
+EXPORT_SYMBOL(dump_stack);
+
+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
+
+unsigned __kprobes long oops_begin(void)
+{
+       int cpu;
+       unsigned long flags;
+
+       oops_enter();
+
+       /* racy, but better than risking deadlock. */
+       raw_local_irq_save(flags);
+       cpu = smp_processor_id();
+       if (!__raw_spin_trylock(&die_lock)) {
+               if (cpu == die_owner)
+                       /* nested oops. should stop eventually */;
+               else
+                       __raw_spin_lock(&die_lock);
+       }
+       die_nest_count++;
+       die_owner = cpu;
+       console_verbose();
+       bust_spinlocks(1);
+       return flags;
+}
+
+void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+{
+       if (regs && kexec_should_crash(current))
+               crash_kexec(regs);
+
+       bust_spinlocks(0);
+       die_owner = -1;
+       add_taint(TAINT_DIE);
+       die_nest_count--;
+       if (!die_nest_count)
+               /* Nest count reaches zero, release the lock. */
+               __raw_spin_unlock(&die_lock);
+       raw_local_irq_restore(flags);
+       oops_exit();
+
+       if (!signr)
+               return;
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
+       do_exit(signr);
+}
+
+int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+{
+#ifdef CONFIG_X86_32
+       unsigned short ss;
+       unsigned long sp;
+#endif
+       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+       printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+       printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       printk("DEBUG_PAGEALLOC");
+#endif
+       printk("\n");
+       sysfs_printk_last_file();
+       if (notify_die(DIE_OOPS, str, regs, err,
+                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+               return 1;
+
+       show_registers(regs);
+#ifdef CONFIG_X86_32
+       sp = (unsigned long) (&regs->sp);
+       savesegment(ss, ss);
+       if (user_mode(regs)) {
+               sp = regs->sp;
+               ss = regs->ss & 0xffff;
+       }
+       printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
+       print_symbol("%s", regs->ip);
+       printk(" SS:ESP %04x:%08lx\n", ss, sp);
+#else
+       /* Executive summary in case the oops scrolled away */
+       printk(KERN_ALERT "RIP ");
+       printk_address(regs->ip, 1);
+       printk(" RSP <%016lx>\n", regs->sp);
+#endif
+       return 0;
+}
+
+/*
+ * This is gone through when something in the kernel has done something bad
+ * and is about to be terminated:
+ */
+void die(const char *str, struct pt_regs *regs, long err)
+{
+       unsigned long flags = oops_begin();
+       int sig = SIGSEGV;
+
+       if (!user_mode_vm(regs))
+               report_bug(regs->ip, regs);
+
+       if (__die(str, regs, err))
+               sig = 0;
+       oops_end(flags, regs, sig);
+}
+
+void notrace __kprobes
+die_nmi(char *str, struct pt_regs *regs, int do_panic)
+{
+       unsigned long flags;
+
+       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
+               return;
+
+       /*
+        * We are in trouble anyway, lets at least try
+        * to get a message out.
+        */
+       flags = oops_begin();
+       printk(KERN_EMERG "%s", str);
+       printk(" on CPU%d, ip %08lx, registers:\n",
+               smp_processor_id(), regs->ip);
+       show_registers(regs);
+       oops_end(flags, regs, 0);
+       if (do_panic || panic_on_oops)
+               panic("Non maskable interrupt");
+       nmi_exit();
+       local_irq_enable();
+       do_exit(SIGBUS);
+}
+
+static int __init oops_setup(char *s)
+{
+       if (!s)
+               return -EINVAL;
+       if (!strcmp(s, "panic"))
+               panic_on_oops = 1;
+       return 0;
+}
+early_param("oops", oops_setup);
+
+static int __init kstack_setup(char *s)
+{
+       if (!s)
+               return -EINVAL;
+       kstack_depth_to_print = simple_strtoul(s, NULL, 0);
+       return 0;
+}
+early_param("kstack", kstack_setup);
+
+static int __init code_bytes_setup(char *s)
+{
+       code_bytes = simple_strtoul(s, NULL, 0);
+       if (code_bytes > 8192)
+               code_bytes = 8192;
+
+       return 1;
+}
+__setup("code_bytes=", code_bytes_setup);
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
new file mode 100644 (file)
index 0000000..3119a80
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
+
+#ifndef DUMPSTACK_H
+#define DUMPSTACK_H
+
+#ifdef CONFIG_X86_32
+#define STACKSLOTS_PER_LINE 8
+#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
+#else
+#define STACKSLOTS_PER_LINE 4
+#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
+#endif
+
+extern unsigned long
+print_context_stack(struct thread_info *tinfo,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data,
+               unsigned long *end);
+
+extern void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp, char *log_lvl);
+
+extern void
+show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *sp, unsigned long bp, char *log_lvl);
+
+extern unsigned int code_bytes;
+extern int kstack_depth_to_print;
+
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+       struct stack_frame *next_frame;
+       unsigned long return_address;
+};
+#endif
index b3614752197b6c6e3c0cf53c7b718dd0167fbdea..7b031b106ec8f65cb40ee4042eb8341c356e4a09 100644 (file)
 
 #include <asm/stacktrace.h>
 
-#define STACKSLOTS_PER_LINE 8
-#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
-
-int panic_on_unrecovered_nmi;
-int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
-static unsigned int code_bytes = 64;
-static int die_counter;
-
-void printk_address(unsigned long address, int reliable)
-{
-       printk(" [<%p>] %s%pS\n", (void *) address,
-                       reliable ? "" : "? ", (void *) address);
-}
-
-static inline int valid_stack_ptr(struct thread_info *tinfo,
-                       void *p, unsigned int size, void *end)
-{
-       void *t = tinfo;
-       if (end) {
-               if (p < end && p >= (end-THREAD_SIZE))
-                       return 1;
-               else
-                       return 0;
-       }
-       return p > t && p < t + THREAD_SIZE - size;
-}
-
-/* The form of the top of the frame on the stack */
-struct stack_frame {
-       struct stack_frame *next_frame;
-       unsigned long return_address;
-};
-
-static inline unsigned long
-print_context_stack(struct thread_info *tinfo,
-               unsigned long *stack, unsigned long bp,
-               const struct stacktrace_ops *ops, void *data,
-               unsigned long *end)
-{
-       struct stack_frame *frame = (struct stack_frame *)bp;
-
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
-               unsigned long addr;
-
-               addr = *stack;
-               if (__kernel_text_address(addr)) {
-                       if ((unsigned long) stack == bp + sizeof(long)) {
-                               ops->address(data, addr, 1);
-                               frame = frame->next_frame;
-                               bp = (unsigned long) frame;
-                       } else {
-                               ops->address(data, addr, bp == 0);
-                       }
-               }
-               stack++;
-       }
-       return bp;
-}
+#include "dumpstack.h"
 
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
@@ -119,57 +62,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 }
 EXPORT_SYMBOL(dump_trace);
 
-static void
-print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-       printk(data);
-       print_symbol(msg, symbol);
-       printk("\n");
-}
-
-static void print_trace_warning(void *data, char *msg)
-{
-       printk("%s%s\n", (char *)data, msg);
-}
-
-static int print_trace_stack(void *data, char *name)
-{
-       printk("%s <%s> ", (char *)data, name);
-       return 0;
-}
-
-/*
- * Print one address/symbol entries per line.
- */
-static void print_trace_address(void *data, unsigned long addr, int reliable)
-{
-       touch_nmi_watchdog();
-       printk(data);
-       printk_address(addr, reliable);
-}
-
-static const struct stacktrace_ops print_trace_ops = {
-       .warning = print_trace_warning,
-       .warning_symbol = print_trace_warning_symbol,
-       .stack = print_trace_stack,
-       .address = print_trace_address,
-};
-
-static void
-show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp, char *log_lvl)
-{
-       printk("%sCall Trace:\n", log_lvl);
-       dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-}
-
-void show_trace(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp)
-{
-       show_trace_log_lvl(task, regs, stack, bp, "");
-}
-
-static void
+void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *sp, unsigned long bp, char *log_lvl)
 {
@@ -196,33 +89,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
-       show_stack_log_lvl(task, NULL, sp, 0, "");
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
-       unsigned long bp = 0;
-       unsigned long stack;
-
-#ifdef CONFIG_FRAME_POINTER
-       if (!bp)
-               get_bp(bp);
-#endif
-
-       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-               current->pid, current->comm, print_tainted(),
-               init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       show_trace(NULL, NULL, &stack, bp);
-}
-
-EXPORT_SYMBOL(dump_stack);
 
 void show_registers(struct pt_regs *regs)
 {
@@ -283,167 +149,3 @@ int is_valid_bugaddr(unsigned long ip)
        return ud2 == 0x0b0f;
 }
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
-static int die_owner = -1;
-static unsigned int die_nest_count;
-
-unsigned __kprobes long oops_begin(void)
-{
-       unsigned long flags;
-
-       oops_enter();
-
-       if (die_owner != raw_smp_processor_id()) {
-               console_verbose();
-               raw_local_irq_save(flags);
-               __raw_spin_lock(&die_lock);
-               die_owner = smp_processor_id();
-               die_nest_count = 0;
-               bust_spinlocks(1);
-       } else {
-               raw_local_irq_save(flags);
-       }
-       die_nest_count++;
-       return flags;
-}
-
-void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
-{
-       bust_spinlocks(0);
-       die_owner = -1;
-       add_taint(TAINT_DIE);
-       __raw_spin_unlock(&die_lock);
-       raw_local_irq_restore(flags);
-
-       if (!regs)
-               return;
-
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-       if (panic_on_oops)
-               panic("Fatal exception");
-       oops_exit();
-       do_exit(signr);
-}
-
-int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-{
-       unsigned short ss;
-       unsigned long sp;
-
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
-       printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC");
-#endif
-       printk("\n");
-       sysfs_printk_last_file();
-       if (notify_die(DIE_OOPS, str, regs, err,
-                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
-               return 1;
-
-       show_registers(regs);
-       /* Executive summary in case the oops scrolled away */
-       sp = (unsigned long) (&regs->sp);
-       savesegment(ss, ss);
-       if (user_mode(regs)) {
-               sp = regs->sp;
-               ss = regs->ss & 0xffff;
-       }
-       printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
-       print_symbol("%s", regs->ip);
-       printk(" SS:ESP %04x:%08lx\n", ss, sp);
-       return 0;
-}
-
-/*
- * This is gone through when something in the kernel has done something bad
- * and is about to be terminated:
- */
-void die(const char *str, struct pt_regs *regs, long err)
-{
-       unsigned long flags = oops_begin();
-
-       if (die_nest_count < 3) {
-               report_bug(regs->ip, regs);
-
-               if (__die(str, regs, err))
-                       regs = NULL;
-       } else {
-               printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
-       }
-
-       oops_end(flags, regs, SIGSEGV);
-}
-
-static DEFINE_SPINLOCK(nmi_print_lock);
-
-void notrace __kprobes
-die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
-       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
-               return;
-
-       spin_lock(&nmi_print_lock);
-       /*
-       * We are in trouble anyway, lets at least try
-       * to get a message out:
-       */
-       bust_spinlocks(1);
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->ip);
-       show_registers(regs);
-       if (do_panic)
-               panic("Non maskable interrupt");
-       console_silent();
-       spin_unlock(&nmi_print_lock);
-
-       /*
-        * If we are in kernel we are probably nested up pretty bad
-        * and might aswell get out now while we still can:
-        */
-       if (!user_mode_vm(regs)) {
-               current->thread.trap_no = 2;
-               crash_kexec(regs);
-       }
-
-       bust_spinlocks(0);
-       do_exit(SIGSEGV);
-}
-
-static int __init oops_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       if (!strcmp(s, "panic"))
-               panic_on_oops = 1;
-       return 0;
-}
-early_param("oops", oops_setup);
-
-static int __init kstack_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-       return 0;
-}
-early_param("kstack", kstack_setup);
-
-static int __init code_bytes_setup(char *s)
-{
-       code_bytes = simple_strtoul(s, NULL, 0);
-       if (code_bytes > 8192)
-               code_bytes = 8192;
-
-       return 1;
-}
-__setup("code_bytes=", code_bytes_setup);
index 96a5db7da8a747e5192f410c60fe9494e8d8e8d1..33ff10287a5d7b29162068c4d4f6d2eb141724db 100644 (file)
 
 #include <asm/stacktrace.h>
 
-#define STACKSLOTS_PER_LINE 4
-#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
-
-int panic_on_unrecovered_nmi;
-int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
-static unsigned int code_bytes = 64;
-static int die_counter;
-
-void printk_address(unsigned long address, int reliable)
-{
-       printk(" [<%p>] %s%pS\n", (void *) address,
-                       reliable ? "" : "? ", (void *) address);
-}
+#include "dumpstack.h"
 
 static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                        unsigned *usedp, char **idp)
@@ -113,51 +101,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
-                       void *p, unsigned int size, void *end)
-{
-       void *t = tinfo;
-       if (end) {
-               if (p < end && p >= (end-THREAD_SIZE))
-                       return 1;
-               else
-                       return 0;
-       }
-       return p > t && p < t + THREAD_SIZE - size;
-}
-
-/* The form of the top of the frame on the stack */
-struct stack_frame {
-       struct stack_frame *next_frame;
-       unsigned long return_address;
-};
-
-static inline unsigned long
-print_context_stack(struct thread_info *tinfo,
-               unsigned long *stack, unsigned long bp,
-               const struct stacktrace_ops *ops, void *data,
-               unsigned long *end)
-{
-       struct stack_frame *frame = (struct stack_frame *)bp;
-
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
-               unsigned long addr;
-
-               addr = *stack;
-               if (__kernel_text_address(addr)) {
-                       if ((unsigned long) stack == bp + sizeof(long)) {
-                               ops->address(data, addr, 1);
-                               frame = frame->next_frame;
-                               bp = (unsigned long) frame;
-                       } else {
-                               ops->address(data, addr, bp == 0);
-                       }
-               }
-               stack++;
-       }
-       return bp;
-}
-
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
@@ -248,57 +191,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 }
 EXPORT_SYMBOL(dump_trace);
 
-static void
-print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-       printk(data);
-       print_symbol(msg, symbol);
-       printk("\n");
-}
-
-static void print_trace_warning(void *data, char *msg)
-{
-       printk("%s%s\n", (char *)data, msg);
-}
-
-static int print_trace_stack(void *data, char *name)
-{
-       printk("%s <%s> ", (char *)data, name);
-       return 0;
-}
-
-/*
- * Print one address/symbol entries per line.
- */
-static void print_trace_address(void *data, unsigned long addr, int reliable)
-{
-       touch_nmi_watchdog();
-       printk(data);
-       printk_address(addr, reliable);
-}
-
-static const struct stacktrace_ops print_trace_ops = {
-       .warning = print_trace_warning,
-       .warning_symbol = print_trace_warning_symbol,
-       .stack = print_trace_stack,
-       .address = print_trace_address,
-};
-
-static void
-show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp, char *log_lvl)
-{
-       printk("%sCall Trace:\n", log_lvl);
-       dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-}
-
-void show_trace(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp)
-{
-       show_trace_log_lvl(task, regs, stack, bp, "");
-}
-
-static void
+void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *sp, unsigned long bp, char *log_lvl)
 {
@@ -342,33 +235,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
-       show_stack_log_lvl(task, NULL, sp, 0, "");
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
-       unsigned long bp = 0;
-       unsigned long stack;
-
-#ifdef CONFIG_FRAME_POINTER
-       if (!bp)
-               get_bp(bp);
-#endif
-
-       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-               current->pid, current->comm, print_tainted(),
-               init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       show_trace(NULL, NULL, &stack, bp);
-}
-EXPORT_SYMBOL(dump_stack);
-
 void show_registers(struct pt_regs *regs)
 {
        int i;
@@ -429,147 +295,3 @@ int is_valid_bugaddr(unsigned long ip)
        return ud2 == 0x0b0f;
 }
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
-static int die_owner = -1;
-static unsigned int die_nest_count;
-
-unsigned __kprobes long oops_begin(void)
-{
-       int cpu;
-       unsigned long flags;
-
-       oops_enter();
-
-       /* racy, but better than risking deadlock. */
-       raw_local_irq_save(flags);
-       cpu = smp_processor_id();
-       if (!__raw_spin_trylock(&die_lock)) {
-               if (cpu == die_owner)
-                       /* nested oops. should stop eventually */;
-               else
-                       __raw_spin_lock(&die_lock);
-       }
-       die_nest_count++;
-       die_owner = cpu;
-       console_verbose();
-       bust_spinlocks(1);
-       return flags;
-}
-
-void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
-{
-       die_owner = -1;
-       bust_spinlocks(0);
-       die_nest_count--;
-       if (!die_nest_count)
-               /* Nest count reaches zero, release the lock. */
-               __raw_spin_unlock(&die_lock);
-       raw_local_irq_restore(flags);
-       if (!regs) {
-               oops_exit();
-               return;
-       }
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-       if (panic_on_oops)
-               panic("Fatal exception");
-       oops_exit();
-       do_exit(signr);
-}
-
-int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-{
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
-       printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC");
-#endif
-       printk("\n");
-       sysfs_printk_last_file();
-       if (notify_die(DIE_OOPS, str, regs, err,
-                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
-               return 1;
-
-       show_registers(regs);
-       add_taint(TAINT_DIE);
-       /* Executive summary in case the oops scrolled away */
-       printk(KERN_ALERT "RIP ");
-       printk_address(regs->ip, 1);
-       printk(" RSP <%016lx>\n", regs->sp);
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-       return 0;
-}
-
-void die(const char *str, struct pt_regs *regs, long err)
-{
-       unsigned long flags = oops_begin();
-
-       if (!user_mode(regs))
-               report_bug(regs->ip, regs);
-
-       if (__die(str, regs, err))
-               regs = NULL;
-       oops_end(flags, regs, SIGSEGV);
-}
-
-notrace __kprobes void
-die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
-       unsigned long flags;
-
-       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
-               return;
-
-       flags = oops_begin();
-       /*
-        * We are in trouble anyway, lets at least try
-        * to get a message out.
-        */
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->ip);
-       show_registers(regs);
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-       if (do_panic || panic_on_oops)
-               panic("Non maskable interrupt");
-       oops_end(flags, NULL, SIGBUS);
-       nmi_exit();
-       local_irq_enable();
-       do_exit(SIGBUS);
-}
-
-static int __init oops_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       if (!strcmp(s, "panic"))
-               panic_on_oops = 1;
-       return 0;
-}
-early_param("oops", oops_setup);
-
-static int __init kstack_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-       return 0;
-}
-early_param("kstack", kstack_setup);
-
-static int __init code_bytes_setup(char *s)
-{
-       code_bytes = simple_strtoul(s, NULL, 0);
-       if (code_bytes > 8192)
-               code_bytes = 8192;
-
-       return 1;
-}
-__setup("code_bytes=", code_bytes_setup);
index 7aafeb5263efbbb8de9e7aad35680d2d882a0882..65a13943e0982524877768164cf8cc0cd63688f4 100644 (file)
@@ -677,22 +677,6 @@ struct early_res {
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
        { 0, PAGE_SIZE, "BIOS data page" },     /* BIOS data page */
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_TRAMPOLINE)
-       { TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
-#endif
-#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
-       /*
-        * But first pinch a few for the stack/trampoline stuff
-        * FIXME: Don't need the extra page at 4K, but need to fix
-        * trampoline before removing it. (see the GDT stuff)
-        */
-       { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE" },
-       /*
-        * Has to be in very low memory so we can execute
-        * real-mode AP code.
-        */
-       { TRAMPOLINE_BASE, TRAMPOLINE_BASE + PAGE_SIZE, "TRAMPOLINE" },
-#endif
        {}
 };
 
index 34ad997d3834dc773b71ae4f27e18828447d2853..23b138e31e9c2fe229c42050ab2c5e864cac4710 100644 (file)
@@ -875,49 +875,6 @@ static struct console early_dbgp_console = {
 };
 #endif
 
-/* Console interface to a host file on AMD's SimNow! */
-
-static int simnow_fd;
-
-enum {
-       MAGIC1 = 0xBACCD00A,
-       MAGIC2 = 0xCA110000,
-       XOPEN = 5,
-       XWRITE = 4,
-};
-
-static noinline long simnow(long cmd, long a, long b, long c)
-{
-       long ret;
-
-       asm volatile("cpuid" :
-                    "=a" (ret) :
-                    "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
-       return ret;
-}
-
-static void __init simnow_init(char *str)
-{
-       char *fn = "klog";
-
-       if (*str == '=')
-               fn = ++str;
-       /* error ignored */
-       simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
-}
-
-static void simnow_write(struct console *con, const char *s, unsigned n)
-{
-       simnow(XWRITE, simnow_fd, (unsigned long)s, n);
-}
-
-static struct console simnow_console = {
-       .name =         "simnow",
-       .write =        simnow_write,
-       .flags =        CON_PRINTBUFFER,
-       .index =        -1,
-};
-
 /* Direct interface for emergencies */
 static struct console *early_console = &early_vga_console;
 static int __initdata early_console_initialized;
@@ -960,10 +917,6 @@ static int __init setup_early_printk(char *buf)
                max_ypos = boot_params.screen_info.orig_video_lines;
                current_ypos = boot_params.screen_info.orig_y;
                early_console = &early_vga_console;
-       } else if (!strncmp(buf, "simnow", 6)) {
-               simnow_init(buf + 6);
-               early_console = &simnow_console;
-               keep_early = 1;
 #ifdef CONFIG_EARLY_PRINTK_DBGP
        } else if (!strncmp(buf, "dbgp", 4)) {
                if (early_dbgp_init(buf+4) < 0)
index 28b597ef9ca16b7992c333f10eae252ea695475c..f6402c4ba10dae5241604da1cd2614e6113966d0 100644 (file)
@@ -1051,6 +1051,7 @@ ENTRY(kernel_thread_helper)
        push %eax
        CFI_ADJUST_CFA_OFFSET 4
        call do_exit
+       ud2                     # padding for call trace
        CFI_ENDPROC
 ENDPROC(kernel_thread_helper)
 
index b86f332c96a66596f0fe076d7c0eda78ea05dbe5..42571baaca3257d29c42aa1de5581308edbf1ade 100644 (file)
  *
  * NOTE: This code handles signal-recognition, which happens every time
  * after an interrupt and after each system call.
- * 
- * Normal syscalls and interrupts don't save a full stack frame, this is 
+ *
+ * Normal syscalls and interrupts don't save a full stack frame, this is
  * only done for syscall tracing, signals or fork/exec et.al.
- * 
- * A note on terminology:       
- * - top of stack: Architecture defined interrupt frame from SS to RIP 
- * at the top of the kernel process stack.     
+ *
+ * A note on terminology:
+ * - top of stack: Architecture defined interrupt frame from SS to RIP
+ * at the top of the kernel process stack.
  * - partial stack frame: partially saved registers upto R11.
- * - full stack frame: Like partial stack frame, but all register saved. 
+ * - full stack frame: Like partial stack frame, but all register saved.
  *
  * Some macro usage:
  * - CFI macros are used to generate dwarf2 unwind information for better
@@ -142,7 +142,7 @@ END(mcount)
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
-#endif 
+#endif
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
@@ -161,14 +161,14 @@ ENTRY(native_usergs_sysret64)
 .endm
 
 /*
- * C code is not supposed to know about undefined top of stack. Every time 
- * a C function with an pt_regs argument is called from the SYSCALL based 
+ * C code is not supposed to know about undefined top of stack. Every time
+ * a C function with an pt_regs argument is called from the SYSCALL based
  * fast path FIXUP_TOP_OF_STACK is needed.
  * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
  * manipulation.
- */            
-               
-       /* %rsp:at FRAMEEND */ 
+ */
+
+       /* %rsp:at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp
        movq    %gs:pda_oldrsp,\tmp
        movq    \tmp,RSP(%rsp)
@@ -244,8 +244,8 @@ ENTRY(native_usergs_sysret64)
        .endm
 /*
  * A newly forked process directly context switches into this.
- */    
-/* rdi:        prev */ 
+ */
+/* rdi:        prev */
 ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        push kernel_eflags(%rip)
@@ -255,8 +255,9 @@ ENTRY(ret_from_fork)
        call schedule_tail
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
+       CFI_REMEMBER_STATE
        jnz rff_trace
-rff_action:    
+rff_action:
        RESTORE_REST
        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je   int_ret_from_sys_call
@@ -264,10 +265,11 @@ rff_action:
        jnz  int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call
+       CFI_RESTORE_STATE
 rff_trace:
        movq %rsp,%rdi
        call syscall_trace_leave
-       GET_THREAD_INFO(%rcx)   
+       GET_THREAD_INFO(%rcx)
        jmp rff_action
        CFI_ENDPROC
 END(ret_from_fork)
@@ -278,20 +280,20 @@ END(ret_from_fork)
  * SYSCALL does not save anything on the stack and does not change the
  * stack pointer.
  */
-               
+
 /*
- * Register setup:     
+ * Register setup:
  * rax  system call number
  * rdi  arg0
- * rcx  return address for syscall/sysret, C arg3 
+ * rcx  return address for syscall/sysret, C arg3
  * rsi  arg1
- * rdx  arg2   
+ * rdx  arg2
  * r10  arg3   (--> moved to rcx for C)
  * r8   arg4
  * r9   arg5
  * r11  eflags for syscall/sysret, temporary for C
- * r12-r15,rbp,rbx saved by C code, not touched.               
- * 
+ * r12-r15,rbp,rbx saved by C code, not touched.
+ *
  * Interrupts are off on entry.
  * Only called from user space.
  *
@@ -301,7 +303,7 @@ END(ret_from_fork)
  * When user can change the frames always force IRET. That is because
  * it deals with uncanonical addresses better. SYSRET has trouble
  * with them due to bugs in both AMD and Intel CPUs.
- */                                    
+ */
 
 ENTRY(system_call)
        CFI_STARTPROC   simple
@@ -317,7 +319,7 @@ ENTRY(system_call)
         */
 ENTRY(system_call_after_swapgs)
 
-       movq    %rsp,%gs:pda_oldrsp 
+       movq    %rsp,%gs:pda_oldrsp
        movq    %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
@@ -325,7 +327,7 @@ ENTRY(system_call_after_swapgs)
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_ARGS 8,1
-       movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
+       movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq  %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
@@ -339,19 +341,19 @@ system_call_fastpath:
        movq %rax,RAX-ARGOFFSET(%rsp)
 /*
  * Syscall return path ending with SYSRET (fast path)
- * Has incomplete stack frame and undefined top of stack. 
- */            
+ * Has incomplete stack frame and undefined top of stack.
+ */
 ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
-sysret_check:          
+sysret_check:
        LOCKDEP_SYS_EXIT
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
-       jnz  sysret_careful 
+       jnz  sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
@@ -366,7 +368,7 @@ sysret_check:
 
        CFI_RESTORE_STATE
        /* Handle reschedules */
-       /* edx: work, edi: workmask */  
+       /* edx: work, edi: workmask */
 sysret_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
@@ -379,7 +381,7 @@ sysret_careful:
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check
 
-       /* Handle a signal */ 
+       /* Handle a signal */
 sysret_signal:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
@@ -398,7 +400,7 @@ sysret_signal:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
-       
+
 badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call
@@ -437,7 +439,7 @@ sysret_audit:
 #endif /* CONFIG_AUDITSYSCALL */
 
        /* Do syscall tracing */
-tracesys:                       
+tracesys:
 #ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
        jz auditsys
@@ -460,8 +462,8 @@ tracesys:
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */
-               
-/* 
+
+/*
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
@@ -505,18 +507,18 @@ int_very_careful:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
-       /* Check for syscall exit trace */      
+       /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
-       leaq 8(%rsp),%rdi       # &ptregs -> arg1       
+       leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest
-       
+
 int_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f
@@ -531,11 +533,11 @@ int_restore_rest:
        jmp int_with_check
        CFI_ENDPROC
 END(system_call)
-               
-/* 
+
+/*
  * Certain special system calls that need to save a complete full stack frame.
- */                                                            
-       
+ */
+
        .macro PTREGSCALL label,func,arg
        .globl \label
 \label:
@@ -572,7 +574,7 @@ ENTRY(ptregscall_common)
        ret
        CFI_ENDPROC
 END(ptregscall_common)
-       
+
 ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
@@ -588,11 +590,11 @@ ENTRY(stub_execve)
        jmp int_ret_from_sys_call
        CFI_ENDPROC
 END(stub_execve)
-       
+
 /*
  * sigreturn is special because it needs to restore all registers on return.
  * This cannot be done with SYSRET, so use the IRET return path instead.
- */                
+ */
 ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
@@ -685,12 +687,12 @@ exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel
-       
+
        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
-        */             
+        */
 retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
 retint_check:
@@ -763,20 +765,20 @@ retint_careful:
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET   8
        call  schedule
-       popq %rdi               
+       popq %rdi
        CFI_ADJUST_CFA_OFFSET   -8
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp retint_check
-       
+
 retint_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz    retint_swapgs
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
-       movq $-1,ORIG_RAX(%rsp)                         
+       movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
@@ -798,14 +800,14 @@ ENTRY(retint_kernel)
        jnc  retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
-#endif 
+#endif
 
        CFI_ENDPROC
 END(common_interrupt)
-       
+
 /*
  * APIC interrupts.
- */            
+ */
        .macro apicinterrupt num,func
        INTR_FRAME
        pushq $~(\num)
@@ -823,14 +825,14 @@ ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
 END(threshold_interrupt)
 
-#ifdef CONFIG_SMP      
+#ifdef CONFIG_SMP
 ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
 END(reschedule_interrupt)
 
        .macro INVALIDATE_ENTRY num
 ENTRY(invalidate_interrupt\num)
-       apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt 
+       apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
 END(invalidate_interrupt\num)
        .endm
 
@@ -869,22 +871,22 @@ END(error_interrupt)
 ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
 END(spurious_interrupt)
-                               
+
 /*
  * Exception entry points.
- */            
+ */
        .macro zeroentry sym
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
-       pushq $0        /* push error code/oldrax */ 
+       pushq $0        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
-       pushq %rax      /* push real oldrax to the rdi slot */ 
+       pushq %rax      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rax,0
        leaq  \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
-       .endm   
+       .endm
 
        .macro errorentry sym
        XCPT_FRAME
@@ -998,13 +1000,13 @@ paranoid_schedule\trace:
 
 /*
  * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.  
- */                                            
+ * and the exception handler in %rax.
+ */
 KPROBE_ENTRY(error_entry)
        _frame RDI
        CFI_REL_OFFSET rax,0
        /* rdi slot contains rax, oldrax contains error code */
-       cld     
+       cld
        subq  $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET   (14*8)
        movq %rsi,13*8(%rsp)
@@ -1015,7 +1017,7 @@ KPROBE_ENTRY(error_entry)
        CFI_REL_OFFSET  rdx,RDX
        movq %rcx,11*8(%rsp)
        CFI_REL_OFFSET  rcx,RCX
-       movq %rsi,10*8(%rsp)    /* store rax */ 
+       movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET  rax,RAX
        movq %r8, 9*8(%rsp)
        CFI_REL_OFFSET  r8,R8
@@ -1025,29 +1027,29 @@ KPROBE_ENTRY(error_entry)
        CFI_REL_OFFSET  r10,R10
        movq %r11,6*8(%rsp)
        CFI_REL_OFFSET  r11,R11
-       movq %rbx,5*8(%rsp) 
+       movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET  rbx,RBX
-       movq %rbp,4*8(%rsp) 
+       movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET  rbp,RBP
-       movq %r12,3*8(%rsp) 
+       movq %r12,3*8(%rsp)
        CFI_REL_OFFSET  r12,R12
-       movq %r13,2*8(%rsp) 
+       movq %r13,2*8(%rsp)
        CFI_REL_OFFSET  r13,R13
-       movq %r14,1*8(%rsp) 
+       movq %r14,1*8(%rsp)
        CFI_REL_OFFSET  r14,R14
-       movq %r15,(%rsp) 
+       movq %r15,(%rsp)
        CFI_REL_OFFSET  r15,R15
-       xorl %ebx,%ebx  
+       xorl %ebx,%ebx
        testl $3,CS(%rsp)
        je  error_kernelspace
-error_swapgs:  
+error_swapgs:
        SWAPGS
 error_sti:
        TRACE_IRQS_OFF
-       movq %rdi,RDI(%rsp)     
+       movq %rdi,RDI(%rsp)
        CFI_REL_OFFSET  rdi,RDI
        movq %rsp,%rdi
-       movq ORIG_RAX(%rsp),%rsi        /* get error code */ 
+       movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
@@ -1056,7 +1058,7 @@ error_exit:
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       GET_THREAD_INFO(%rcx)   
+       GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne  retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
@@ -1072,7 +1074,7 @@ error_kernelspace:
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
-          B stepping K8s sometimes report an truncated RIP for IRET 
+          B stepping K8s sometimes report an truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq irq_return(%rip),%rcx
        cmpq %rcx,RIP(%rsp)
@@ -1084,17 +1086,17 @@ error_kernelspace:
         je   error_swapgs
        jmp  error_sti
 KPROBE_END(error_entry)
-       
+
        /* Reload gs selector with exception handling */
-       /* edi:  new selector */ 
+       /* edi:  new selector */
 ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
         SWAPGS
-gs_change:     
-        movl %edi,%gs   
+gs_change:
+        movl %edi,%gs
 2:     mfence          /* workaround */
        SWAPGS
         popf
@@ -1102,20 +1104,20 @@ gs_change:
         ret
        CFI_ENDPROC
 ENDPROC(native_load_gs_index)
-       
+
         .section __ex_table,"a"
         .align 8
         .quad gs_change,bad_gs
         .previous
         .section .fixup,"ax"
        /* running with kernelgs */
-bad_gs: 
+bad_gs:
        SWAPGS                  /* switch back to user gs */
        xorl %eax,%eax
         movl %eax,%gs
         jmp  2b
-        .previous       
-       
+        .previous
+
 /*
  * Create a kernel thread.
  *
@@ -1138,7 +1140,7 @@ ENTRY(kernel_thread)
 
        xorl %r8d,%r8d
        xorl %r9d,%r9d
-       
+
        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
@@ -1149,14 +1151,14 @@ ENTRY(kernel_thread)
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not to reschedule the child before returning, this avoids the need
         * of hacks for example to fork off the per-CPU idle tasks.
-         * [Hopefully no generic code relies on the reschedule -AK]    
+         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
 ENDPROC(kernel_thread)
-       
+
 child_rip:
        pushq $0                # fake return address
        CFI_STARTPROC
@@ -1170,6 +1172,7 @@ child_rip:
        # exit
        mov %eax, %edi
        call do_exit
+       ud2                     # padding for call trace
        CFI_ENDPROC
 ENDPROC(child_rip)
 
@@ -1191,10 +1194,10 @@ ENDPROC(child_rip)
 ENTRY(kernel_execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
-       SAVE_ALL        
+       SAVE_ALL
        movq %rsp,%rcx
        call sys_execve
-       movq %rax, RAX(%rsp)    
+       movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
@@ -1213,7 +1216,7 @@ ENTRY(coprocessor_error)
 END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
-       zeroentry do_simd_coprocessor_error     
+       zeroentry do_simd_coprocessor_error
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
@@ -1225,12 +1228,12 @@ KPROBE_ENTRY(debug)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $0
-       CFI_ADJUST_CFA_OFFSET 8         
+       CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
 KPROBE_END(debug)
 
-       /* runs on exception stack */   
+       /* runs on exception stack */
 KPROBE_ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
@@ -1264,7 +1267,7 @@ ENTRY(bounds)
 END(bounds)
 
 ENTRY(invalid_op)
-       zeroentry do_invalid_op 
+       zeroentry do_invalid_op
 END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
@@ -1319,7 +1322,7 @@ ENTRY(machine_check)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $0
-       CFI_ADJUST_CFA_OFFSET 8 
+       CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp paranoid_exit1
        CFI_ENDPROC
index 0aa2c443d600c64dcf5b4ac440c18bb4f3d0e9d9..53699c931ad41ebdaee8594cdb17f4464fcc986f 100644 (file)
 #include <asm/io.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
+#include <asm/atomic.h>
 #include <asm/apicdef.h>
 #include <mach_mpparse.h>
+#include <asm/genapic.h>
+#include <asm/setup.h>
 
 /*
  * ES7000 chipsets
@@ -161,6 +164,43 @@ es7000_rename_gsi(int ioapic, int gsi)
        return gsi;
 }
 
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+{
+       unsigned long vect = 0, psaival = 0;
+
+       if (psai == NULL)
+               return -1;
+
+       vect = ((unsigned long)__pa(eip)/0x1000) << 16;
+       psaival = (0x1000000 | vect | cpu);
+
+       while (*psai & 0x1000000)
+               ;
+
+       *psai = psaival;
+
+       return 0;
+}
+
+static void noop_wait_for_deassert(atomic_t *deassert_not_used)
+{
+}
+
+static int __init es7000_update_genapic(void)
+{
+       genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+
+       /* MPENTIUMIII */
+       if (boot_cpu_data.x86 == 6 &&
+           (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
+               es7000_update_genapic_to_cluster();
+               genapic->wait_for_init_deassert = noop_wait_for_deassert;
+               genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+       }
+
+       return 0;
+}
+
 void __init
 setup_unisys(void)
 {
@@ -176,6 +216,8 @@ setup_unisys(void)
        else
                es7000_plat = ES7000_CLASSIC;
        ioapic_renumber_irq = es7000_rename_gsi;
+
+       x86_quirks->update_genapic = es7000_update_genapic;
 }
 
 /*
@@ -317,26 +359,6 @@ es7000_mip_write(struct mip_reg *mip_reg)
        return status;
 }
 
-int
-es7000_start_cpu(int cpu, unsigned long eip)
-{
-       unsigned long vect = 0, psaival = 0;
-
-       if (psai == NULL)
-               return -1;
-
-       vect = ((unsigned long)__pa(eip)/0x1000) << 16;
-       psaival = (0x1000000 | vect | cpu);
-
-       while (*psai & 0x1000000)
-                ;
-
-       *psai = psaival;
-
-       return 0;
-
-}
-
 void __init
 es7000_sw_apic(void)
 {
index 6c9bfc9e1e95cbc5f8aa2273bd06f9eb75c2ca11..2bced78b0b8e85d6317c729c9554919cac2eb67b 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/smp.h>
 #include <asm/ipi.h>
 #include <asm/genapic.h>
+#include <asm/setup.h>
 
 extern struct genapic apic_flat;
 extern struct genapic apic_physflat;
@@ -53,6 +54,9 @@ void __init setup_apic_routing(void)
                        genapic = &apic_physflat;
                printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
        }
+
+       if (x86_quirks->update_genapic)
+               x86_quirks->update_genapic();
 }
 
 /* Same for both flat and physical. */
index 2c7dbdb98278316a6fd7228e9a7b2eaf76f58562..dece17289731b951c4189bd5f66db0a919f91197 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/threads.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
@@ -17,6 +18,9 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <asm/current.h>
 #include <asm/smp.h>
 #include <asm/ipi.h>
 #include <asm/genapic.h>
@@ -355,6 +359,103 @@ static __init void uv_rtc_init(void)
                sn_rtc_cycles_per_second = ticks_per_sec;
 }
 
+/*
+ * percpu heartbeat timer
+ */
+static void uv_heartbeat(unsigned long ignored)
+{
+       struct timer_list *timer = &uv_hub_info->scir.timer;
+       unsigned char bits = uv_hub_info->scir.state;
+
+       /* flip heartbeat bit */
+       bits ^= SCIR_CPU_HEARTBEAT;
+
+       /* is this cpu idle? */
+       if (idle_cpu(raw_smp_processor_id()))
+               bits &= ~SCIR_CPU_ACTIVITY;
+       else
+               bits |= SCIR_CPU_ACTIVITY;
+
+       /* update system controller interface reg */
+       uv_set_scir_bits(bits);
+
+       /* enable next timer period */
+       mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+}
+
+static void __cpuinit uv_heartbeat_enable(int cpu)
+{
+       if (!uv_cpu_hub_info(cpu)->scir.enabled) {
+               struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
+
+               uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
+               setup_timer(timer, uv_heartbeat, cpu);
+               timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
+               add_timer_on(timer, cpu);
+               uv_cpu_hub_info(cpu)->scir.enabled = 1;
+       }
+
+       /* check boot cpu */
+       if (!uv_cpu_hub_info(0)->scir.enabled)
+               uv_heartbeat_enable(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cpuinit uv_heartbeat_disable(int cpu)
+{
+       if (uv_cpu_hub_info(cpu)->scir.enabled) {
+               uv_cpu_hub_info(cpu)->scir.enabled = 0;
+               del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
+       }
+       uv_set_cpu_scir_bits(cpu, 0xff);
+}
+
+/*
+ * cpu hotplug notifier
+ */
+static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
+                                      unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+               uv_heartbeat_enable(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               uv_heartbeat_disable(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+       hotcpu_notifier(uv_scir_cpu_notify, 0);
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+}
+
+static __init int uv_init_heartbeat(void)
+{
+       int cpu;
+
+       if (is_uv_system())
+               for_each_online_cpu(cpu)
+                       uv_heartbeat_enable(cpu);
+       return 0;
+}
+
+late_initcall(uv_init_heartbeat);
+
+#endif /* !CONFIG_HOTPLUG_CPU */
+
 /*
  * Called on each cpu to initialize the per_cpu UV data area.
  *     ZZZ hotplug not supported yet
@@ -428,7 +529,7 @@ void __init uv_system_init(void)
 
        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
-                           &uv_coherency_id, &uv_region_size);
+                           &sn_coherency_id, &sn_region_size);
        uv_rtc_init();
 
        for_each_present_cpu(cpu) {
@@ -439,8 +540,7 @@ void __init uv_system_init(void)
                uv_blade_info[blade].nr_possible_cpus++;
 
                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
-               uv_cpu_hub_info(cpu)->lowmem_remap_top =
-                                       lowmem_redir_base + lowmem_redir_size;
+               uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = m_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
@@ -450,7 +550,8 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-               uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
+               uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+               uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
                max_pnode = max(pnode, max_pnode);
@@ -467,4 +568,6 @@ void __init uv_system_init(void)
        map_mmioh_high(max_pnode);
 
        uv_cpu_init();
+       uv_scir_register_cpu_notifier();
+       proc_mkdir("sgi_uv", NULL);
 }
index 1dcb0f13897e9b6d4773111e1bc8300713efe0e8..3e66bd364a9db3cf8f3fb624f96eeb7075364d78 100644 (file)
@@ -35,7 +35,6 @@ void __init reserve_ebda_region(void)
 
        /* start of EBDA area */
        ebda_addr = get_bios_ebda();
-       printk(KERN_INFO "BIOS EBDA/lowmem at: %08x/%08x\n", ebda_addr, lowmem);
 
        /* Fixup: bios puts an EBDA in the top 64K segment */
        /* of conventional memory, but does not adjust lowmem. */
index fa1d25dd83e3fa18cc2a445841c61e1e1b8b571c..ac108d1fe182a44abcd0da75f18aae5fde32e1c0 100644 (file)
 #include <asm/sections.h>
 #include <asm/e820.h>
 #include <asm/bios_ebda.h>
+#include <asm/trampoline.h>
 
 void __init i386_start_kernel(void)
 {
+       reserve_trampoline_memory();
+
        reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
index d16084f90649181e6e944e104118f7ec5de6a0e3..388e05a5fc17ec05dc68fc881c169608390538b5 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/kdebug.h>
 #include <asm/e820.h>
 #include <asm/bios_ebda.h>
+#include <asm/trampoline.h>
 
 /* boot cpu pda */
 static struct x8664_pda _boot_cpu_pda __read_mostly;
@@ -120,6 +121,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
 {
        copy_bootdata(__va(real_mode_data));
 
+       reserve_trampoline_memory();
+
        reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
index 067d8de913f612d45018090a1fae4e5cc920462f..3f0a3edf0a573a2f5f2d1051b32285ebffcdbb36 100644 (file)
@@ -33,7 +33,9 @@
  * HPET address is set in acpi/boot.c, when an ACPI entry exists
  */
 unsigned long                          hpet_address;
-unsigned long                          hpet_num_timers;
+#ifdef CONFIG_PCI_MSI
+static unsigned long                   hpet_num_timers;
+#endif
 static void __iomem                    *hpet_virt_address;
 
 struct hpet_dev {
index a4f93b4120c155c20889947942d7bdb5c286908d..d39918076bb48a4b70e5b911736f84ec817fd420 100644 (file)
@@ -14,7 +14,6 @@ static struct fs_struct init_fs = INIT_FS;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_UNUSED_SYMBOL(init_mm); /* will be removed in 2.6.26 */
 
 /*
  * Initial thread structure.
index 9043251210fba4dc32ea7db009a1db6196216453..679e7bbbbcd6b07230a2e7fdbd2ebe38e04f0563 100644 (file)
@@ -2216,10 +2216,9 @@ static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 asmlinkage void smp_irq_move_cleanup_interrupt(void)
 {
        unsigned vector, me;
+
        ack_APIC_irq();
-#ifdef CONFIG_X86_64
        exit_idle();
-#endif
        irq_enter();
 
        me = smp_processor_id();
index 60eb84eb77a0a34232be8aafbe8a7d1e040f454f..1d3d0e71b0440847e16d4c381eaef5dbe68bb082 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/idle.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
 /*
  * Probabilistic stack overflow check:
  *
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);
-       static unsigned long warned = -60*HZ;
-
-       if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
-           regs->sp <  curbase + sizeof(struct thread_info) + 128 &&
-           time_after(jiffies, warned + 60*HZ)) {
-               printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-                      current->comm, curbase, regs->sp);
-               show_stack(NULL,NULL);
-               warned = jiffies;
-       }
-}
+
+       WARN_ONCE(regs->sp >= curbase &&
+                 regs->sp <= curbase + THREAD_SIZE &&
+                 regs->sp <  curbase + sizeof(struct thread_info) +
+                                       sizeof(struct pt_regs) + 128,
+
+                 "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
+                       current->comm, curbase, regs->sp);
 #endif
+}
 
 /*
  * do_IRQ handles all normal device IRQ's (the special
@@ -60,9 +58,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
        irq_enter();
        irq = __get_cpu_var(vector_irq)[vector];
 
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
        stack_overflow_check(regs);
-#endif
 
        desc = irq_to_desc(irq);
        if (likely(desc))
index 7a385746509a2a625991447e8554a20b4d992c54..37f420018a41c806bf57048d50163d0d28c640aa 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/numa.h>
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
+#include <linux/gfp.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <asm/cacheflush.h>
 
-#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
-static u32 kexec_pgd[1024] PAGE_ALIGNED;
-#ifdef CONFIG_X86_PAE
-static u32 kexec_pmd0[1024] PAGE_ALIGNED;
-static u32 kexec_pmd1[1024] PAGE_ALIGNED;
-#endif
-static u32 kexec_pte0[1024] PAGE_ALIGNED;
-static u32 kexec_pte1[1024] PAGE_ALIGNED;
-
 static void set_idt(void *newidt, __u16 limit)
 {
        struct desc_ptr curidt;
@@ -76,6 +68,76 @@ static void load_segments(void)
 #undef __STR
 }
 
+static void machine_kexec_free_page_tables(struct kimage *image)
+{
+       free_page((unsigned long)image->arch.pgd);
+#ifdef CONFIG_X86_PAE
+       free_page((unsigned long)image->arch.pmd0);
+       free_page((unsigned long)image->arch.pmd1);
+#endif
+       free_page((unsigned long)image->arch.pte0);
+       free_page((unsigned long)image->arch.pte1);
+}
+
+static int machine_kexec_alloc_page_tables(struct kimage *image)
+{
+       image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+#ifdef CONFIG_X86_PAE
+       image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+       image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+#endif
+       image->arch.pte0 = (pte_t *)get_zeroed_page(GFP_KERNEL);
+       image->arch.pte1 = (pte_t *)get_zeroed_page(GFP_KERNEL);
+       if (!image->arch.pgd ||
+#ifdef CONFIG_X86_PAE
+           !image->arch.pmd0 || !image->arch.pmd1 ||
+#endif
+           !image->arch.pte0 || !image->arch.pte1) {
+               machine_kexec_free_page_tables(image);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void machine_kexec_page_table_set_one(
+       pgd_t *pgd, pmd_t *pmd, pte_t *pte,
+       unsigned long vaddr, unsigned long paddr)
+{
+       pud_t *pud;
+
+       pgd += pgd_index(vaddr);
+#ifdef CONFIG_X86_PAE
+       if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+               set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
+#endif
+       pud = pud_offset(pgd, vaddr);
+       pmd = pmd_offset(pud, vaddr);
+       if (!(pmd_val(*pmd) & _PAGE_PRESENT))
+               set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+       pte = pte_offset_kernel(pmd, vaddr);
+       set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+}
+
+static void machine_kexec_prepare_page_tables(struct kimage *image)
+{
+       void *control_page;
+       pmd_t *pmd = 0;
+
+       control_page = page_address(image->control_code_page);
+#ifdef CONFIG_X86_PAE
+       pmd = image->arch.pmd0;
+#endif
+       machine_kexec_page_table_set_one(
+               image->arch.pgd, pmd, image->arch.pte0,
+               (unsigned long)control_page, __pa(control_page));
+#ifdef CONFIG_X86_PAE
+       pmd = image->arch.pmd1;
+#endif
+       machine_kexec_page_table_set_one(
+               image->arch.pgd, pmd, image->arch.pte1,
+               __pa(control_page), __pa(control_page));
+}
+
 /*
  * A architecture hook called to validate the
  * proposed image and prepare the control pages
@@ -87,12 +149,20 @@ static void load_segments(void)
  * reboot code buffer to allow us to avoid allocations
  * later.
  *
- * Make control page executable.
+ * - Make control page executable.
+ * - Allocate page tables
+ * - Setup page tables
  */
 int machine_kexec_prepare(struct kimage *image)
 {
+       int error;
+
        if (nx_enabled)
                set_pages_x(image->control_code_page, 1);
+       error = machine_kexec_alloc_page_tables(image);
+       if (error)
+               return error;
+       machine_kexec_prepare_page_tables(image);
        return 0;
 }
 
@@ -104,6 +174,7 @@ void machine_kexec_cleanup(struct kimage *image)
 {
        if (nx_enabled)
                set_pages_nx(image->control_code_page, 1);
+       machine_kexec_free_page_tables(image);
 }
 
 /*
@@ -150,18 +221,7 @@ void machine_kexec(struct kimage *image)
        relocate_kernel_ptr = control_page;
        page_list[PA_CONTROL_PAGE] = __pa(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
-       page_list[PA_PGD] = __pa(kexec_pgd);
-       page_list[VA_PGD] = (unsigned long)kexec_pgd;
-#ifdef CONFIG_X86_PAE
-       page_list[PA_PMD_0] = __pa(kexec_pmd0);
-       page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
-       page_list[PA_PMD_1] = __pa(kexec_pmd1);
-       page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
-#endif
-       page_list[PA_PTE_0] = __pa(kexec_pte0);
-       page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
-       page_list[PA_PTE_1] = __pa(kexec_pte1);
-       page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
+       page_list[PA_PGD] = __pa(image->arch.pgd);
 
        if (image->type == KEXEC_TYPE_DEFAULT)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
index 5f8e5d75a254621799bff0138430fd2344b918a7..c25fdb38229212cb5e5ad79f527770bb9063153e 100644 (file)
@@ -10,7 +10,7 @@
  *  This driver allows to upgrade microcode on AMD
  *  family 0x10 and 0x11 processors.
  *
- *  Licensed unter the terms of the GNU General Public
+ *  Licensed under the terms of the GNU General Public
  *  License version 2. See file COPYING for details.
 */
 
@@ -32,9 +32,9 @@
 #include <linux/platform_device.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
+#include <linux/uaccess.h>
 
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
 
@@ -47,43 +47,38 @@ MODULE_LICENSE("GPL v2");
 #define UCODE_UCODE_TYPE           0x00000001
 
 struct equiv_cpu_entry {
-       unsigned int installed_cpu;
-       unsigned int fixed_errata_mask;
-       unsigned int fixed_errata_compare;
-       unsigned int equiv_cpu;
-};
+       u32     installed_cpu;
+       u32     fixed_errata_mask;
+       u32     fixed_errata_compare;
+       u16     equiv_cpu;
+       u16     res;
+} __attribute__((packed));
 
 struct microcode_header_amd {
-       unsigned int  data_code;
-       unsigned int  patch_id;
-       unsigned char mc_patch_data_id[2];
-       unsigned char mc_patch_data_len;
-       unsigned char init_flag;
-       unsigned int  mc_patch_data_checksum;
-       unsigned int  nb_dev_id;
-       unsigned int  sb_dev_id;
-       unsigned char processor_rev_id[2];
-       unsigned char nb_rev_id;
-       unsigned char sb_rev_id;
-       unsigned char bios_api_rev;
-       unsigned char reserved1[3];
-       unsigned int  match_reg[8];
-};
+       u32     data_code;
+       u32     patch_id;
+       u16     mc_patch_data_id;
+       u8      mc_patch_data_len;
+       u8      init_flag;
+       u32     mc_patch_data_checksum;
+       u32     nb_dev_id;
+       u32     sb_dev_id;
+       u16     processor_rev_id;
+       u8      nb_rev_id;
+       u8      sb_rev_id;
+       u8      bios_api_rev;
+       u8      reserved1[3];
+       u32     match_reg[8];
+} __attribute__((packed));
 
 struct microcode_amd {
        struct microcode_header_amd hdr;
        unsigned int mpb[0];
 };
 
-#define UCODE_MAX_SIZE          (2048)
-#define DEFAULT_UCODE_DATASIZE (896)
-#define MC_HEADER_SIZE         (sizeof(struct microcode_header_amd))
-#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
-#define DWSIZE                 (sizeof(u32))
-/* For now we support a fixed ucode total size only */
-#define get_totalsize(mc) \
-       ((((struct microcode_amd *)mc)->hdr.mc_patch_data_len * 28) \
-        + MC_HEADER_SIZE)
+#define UCODE_MAX_SIZE                 2048
+#define UCODE_CONTAINER_SECTION_HDR    8
+#define UCODE_CONTAINER_HEADER_SIZE    12
 
 /* serialize access to the physical write */
 static DEFINE_SPINLOCK(microcode_update_lock);
@@ -93,31 +88,24 @@ static struct equiv_cpu_entry *equiv_cpu_table;
 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu);
+       u32 dummy;
 
        memset(csig, 0, sizeof(*csig));
-
        if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-               printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n",
-                      cpu);
+               printk(KERN_WARNING "microcode: CPU%d: AMD CPU family 0x%x not "
+                      "supported\n", cpu, c->x86);
                return -1;
        }
-
-       asm volatile("movl %1, %%ecx; rdmsr"
-                    : "=a" (csig->rev)
-                    : "i" (0x0000008B) : "ecx");
-
-       printk(KERN_INFO "microcode: collect_cpu_info_amd : patch_id=0x%x\n",
-               csig->rev);
-
+       rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy);
+       printk(KERN_INFO "microcode: CPU%d: patch_level=0x%x\n", cpu, csig->rev);
        return 0;
 }
 
 static int get_matching_microcode(int cpu, void *mc, int rev)
 {
        struct microcode_header_amd *mc_header = mc;
-       struct pci_dev *nb_pci_dev, *sb_pci_dev;
        unsigned int current_cpu_id;
-       unsigned int equiv_cpu_id = 0x00;
+       u16 equiv_cpu_id = 0;
        unsigned int i = 0;
 
        BUG_ON(equiv_cpu_table == NULL);
@@ -132,57 +120,25 @@ static int get_matching_microcode(int cpu, void *mc, int rev)
        }
 
        if (!equiv_cpu_id) {
-               printk(KERN_ERR "microcode: CPU%d cpu_id "
-                      "not found in equivalent cpu table \n", cpu);
+               printk(KERN_WARNING "microcode: CPU%d: cpu revision "
+                      "not listed in equivalent cpu table\n", cpu);
                return 0;
        }
 
-       if ((mc_header->processor_rev_id[0]) != (equiv_cpu_id & 0xff)) {
-               printk(KERN_ERR
-                       "microcode: CPU%d patch does not match "
-                       "(patch is %x, cpu extended is %x) \n",
-                       cpu, mc_header->processor_rev_id[0],
-                       (equiv_cpu_id & 0xff));
+       if (mc_header->processor_rev_id != equiv_cpu_id) {
+               printk(KERN_ERR "microcode: CPU%d: patch mismatch "
+                      "(processor_rev_id: %x, equiv_cpu_id: %x)\n",
+                      cpu, mc_header->processor_rev_id, equiv_cpu_id);
                return 0;
        }
 
-       if ((mc_header->processor_rev_id[1]) != ((equiv_cpu_id >> 16) & 0xff)) {
-               printk(KERN_ERR "microcode: CPU%d patch does not match "
-                       "(patch is %x, cpu base id is %x) \n",
-                       cpu, mc_header->processor_rev_id[1],
-                       ((equiv_cpu_id >> 16) & 0xff));
-
+       /* ucode might be chipset specific -- currently we don't support this */
+       if (mc_header->nb_dev_id || mc_header->sb_dev_id) {
+               printk(KERN_ERR "microcode: CPU%d: loading of chipset "
+                      "specific code not yet supported\n", cpu);
                return 0;
        }
 
-       /* ucode may be northbridge specific */
-       if (mc_header->nb_dev_id) {
-               nb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-                                           (mc_header->nb_dev_id & 0xff),
-                                           NULL);
-               if ((!nb_pci_dev) ||
-                   (mc_header->nb_rev_id != nb_pci_dev->revision)) {
-                       printk(KERN_ERR "microcode: CPU%d NB mismatch \n", cpu);
-                       pci_dev_put(nb_pci_dev);
-                       return 0;
-               }
-               pci_dev_put(nb_pci_dev);
-       }
-
-       /* ucode may be southbridge specific */
-       if (mc_header->sb_dev_id) {
-               sb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-                                           (mc_header->sb_dev_id & 0xff),
-                                           NULL);
-               if ((!sb_pci_dev) ||
-                   (mc_header->sb_rev_id != sb_pci_dev->revision)) {
-                       printk(KERN_ERR "microcode: CPU%d SB mismatch \n", cpu);
-                       pci_dev_put(sb_pci_dev);
-                       return 0;
-               }
-               pci_dev_put(sb_pci_dev);
-       }
-
        if (mc_header->patch_id <= rev)
                return 0;
 
@@ -192,12 +148,10 @@ static int get_matching_microcode(int cpu, void *mc, int rev)
 static void apply_microcode_amd(int cpu)
 {
        unsigned long flags;
-       unsigned int eax, edx;
-       unsigned int rev;
+       u32 rev, dummy;
        int cpu_num = raw_smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
        struct microcode_amd *mc_amd = uci->mc;
-       unsigned long addr;
 
        /* We should bind the task to the CPU */
        BUG_ON(cpu_num != cpu);
@@ -206,42 +160,34 @@ static void apply_microcode_amd(int cpu)
                return;
 
        spin_lock_irqsave(&microcode_update_lock, flags);
-
-       addr = (unsigned long)&mc_amd->hdr.data_code;
-       edx = (unsigned int)(((unsigned long)upper_32_bits(addr)));
-       eax = (unsigned int)(((unsigned long)lower_32_bits(addr)));
-
-       asm volatile("movl %0, %%ecx; wrmsr" :
-                    : "i" (0xc0010020), "a" (eax), "d" (edx) : "ecx");
-
+       wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
        /* get patch id after patching */
-       asm volatile("movl %1, %%ecx; rdmsr"
-                    : "=a" (rev)
-                    : "i" (0x0000008B) : "ecx");
-
+       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
        spin_unlock_irqrestore(&microcode_update_lock, flags);
 
        /* check current patch id and patch's id for match */
        if (rev != mc_amd->hdr.patch_id) {
-               printk(KERN_ERR "microcode: CPU%d update from revision "
-                      "0x%x to 0x%x failed\n", cpu_num,
-                      mc_amd->hdr.patch_id, rev);
+               printk(KERN_ERR "microcode: CPU%d: update failed "
+                      "(for patch_level=0x%x)\n", cpu, mc_amd->hdr.patch_id);
                return;
        }
 
-       printk(KERN_INFO "microcode: CPU%d updated from revision "
-              "0x%x to 0x%x \n",
-              cpu_num, uci->cpu_sig.rev, mc_amd->hdr.patch_id);
+       printk(KERN_INFO "microcode: CPU%d: updated (new patch_level=0x%x)\n",
+              cpu, rev);
 
        uci->cpu_sig.rev = rev;
 }
 
-static void * get_next_ucode(u8 *buf, unsigned int size,
-                       int (*get_ucode_data)(void *, const void *, size_t),
-                       unsigned int *mc_size)
+static int get_ucode_data(void *to, const u8 *from, size_t n)
+{
+       memcpy(to, from, n);
+       return 0;
+}
+
+static void *get_next_ucode(const u8 *buf, unsigned int size,
+                           unsigned int *mc_size)
 {
        unsigned int total_size;
-#define UCODE_CONTAINER_SECTION_HDR    8
        u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
        void *mc;
 
@@ -249,39 +195,37 @@ static void * get_next_ucode(u8 *buf, unsigned int size,
                return NULL;
 
        if (section_hdr[0] != UCODE_UCODE_TYPE) {
-               printk(KERN_ERR "microcode: error! "
-                      "Wrong microcode payload type field\n");
+               printk(KERN_ERR "microcode: error: invalid type field in "
+                      "container file section header\n");
                return NULL;
        }
 
        total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
 
-       printk(KERN_INFO "microcode: size %u, total_size %u\n",
-               size, total_size);
+       printk(KERN_DEBUG "microcode: size %u, total_size %u\n",
+              size, total_size);
 
        if (total_size > size || total_size > UCODE_MAX_SIZE) {
-               printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
+               printk(KERN_ERR "microcode: error: size mismatch\n");
                return NULL;
        }
 
        mc = vmalloc(UCODE_MAX_SIZE);
        if (mc) {
                memset(mc, 0, UCODE_MAX_SIZE);
-               if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size)) {
+               if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR,
+                                  total_size)) {
                        vfree(mc);
                        mc = NULL;
                } else
                        *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
        }
-#undef UCODE_CONTAINER_SECTION_HDR
        return mc;
 }
 
 
-static int install_equiv_cpu_table(u8 *buf,
-               int (*get_ucode_data)(void *, const void *, size_t))
+static int install_equiv_cpu_table(const u8 *buf)
 {
-#define UCODE_CONTAINER_HEADER_SIZE    12
        u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE];
        unsigned int *buf_pos = (unsigned int *)container_hdr;
        unsigned long size;
@@ -292,14 +236,15 @@ static int install_equiv_cpu_table(u8 *buf,
        size = buf_pos[2];
 
        if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
-               printk(KERN_ERR "microcode: error! "
-                      "Wrong microcode equivalnet cpu table\n");
+               printk(KERN_ERR "microcode: error: invalid type field in "
+                      "container file section header\n");
                return 0;
        }
 
        equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
        if (!equiv_cpu_table) {
-               printk(KERN_ERR "microcode: error, can't allocate memory for equiv CPU table\n");
+               printk(KERN_ERR "microcode: failed to allocate "
+                      "equivalent CPU table\n");
                return 0;
        }
 
@@ -310,7 +255,6 @@ static int install_equiv_cpu_table(u8 *buf,
        }
 
        return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
-#undef UCODE_CONTAINER_HEADER_SIZE
 }
 
 static void free_equiv_cpu_table(void)
@@ -321,18 +265,20 @@ static void free_equiv_cpu_table(void)
        }
 }
 
-static int generic_load_microcode(int cpu, void *data, size_t size,
-               int (*get_ucode_data)(void *, const void *, size_t))
+static int generic_load_microcode(int cpu, const u8 *data, size_t size)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-       u8 *ucode_ptr = data, *new_mc = NULL, *mc;
+       const u8 *ucode_ptr = data;
+       void *new_mc = NULL;
+       void *mc;
        int new_rev = uci->cpu_sig.rev;
        unsigned int leftover;
        unsigned long offset;
 
-       offset = install_equiv_cpu_table(ucode_ptr, get_ucode_data);
+       offset = install_equiv_cpu_table(ucode_ptr);
        if (!offset) {
-               printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
+               printk(KERN_ERR "microcode: failed to create "
+                      "equivalent cpu table\n");
                return -EINVAL;
        }
 
@@ -343,7 +289,7 @@ static int generic_load_microcode(int cpu, void *data, size_t size,
                unsigned int uninitialized_var(mc_size);
                struct microcode_header_amd *mc_header;
 
-               mc = get_next_ucode(ucode_ptr, leftover, get_ucode_data, &mc_size);
+               mc = get_next_ucode(ucode_ptr, leftover, &mc_size);
                if (!mc)
                        break;
 
@@ -353,7 +299,7 @@ static int generic_load_microcode(int cpu, void *data, size_t size,
                                vfree(new_mc);
                        new_rev = mc_header->patch_id;
                        new_mc  = mc;
-               } else 
+               } else
                        vfree(mc);
 
                ucode_ptr += mc_size;
@@ -365,9 +311,9 @@ static int generic_load_microcode(int cpu, void *data, size_t size,
                        if (uci->mc)
                                vfree(uci->mc);
                        uci->mc = new_mc;
-                       pr_debug("microcode: CPU%d found a matching microcode update with"
-                               " version 0x%x (current=0x%x)\n",
-                               cpu, new_rev, uci->cpu_sig.rev);
+                       pr_debug("microcode: CPU%d found a matching microcode "
+                                "update with version 0x%x (current=0x%x)\n",
+                                cpu, new_rev, uci->cpu_sig.rev);
                } else
                        vfree(new_mc);
        }
@@ -377,12 +323,6 @@ static int generic_load_microcode(int cpu, void *data, size_t size,
        return (int)leftover;
 }
 
-static int get_ucode_fw(void *to, const void *from, size_t n)
-{
-       memcpy(to, from, n);
-       return 0;
-}
-
 static int request_microcode_fw(int cpu, struct device *device)
 {
        const char *fw_name = "amd-ucode/microcode_amd.bin";
@@ -394,12 +334,11 @@ static int request_microcode_fw(int cpu, struct device *device)
 
        ret = request_firmware(&firmware, fw_name, device);
        if (ret) {
-               printk(KERN_ERR "microcode: ucode data file %s load failed\n", fw_name);
+               printk(KERN_ERR "microcode: failed to load file %s\n", fw_name);
                return ret;
        }
 
-       ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
-                       &get_ucode_fw);
+       ret = generic_load_microcode(cpu, firmware->data, firmware->size);
 
        release_firmware(firmware);
 
@@ -408,8 +347,8 @@ static int request_microcode_fw(int cpu, struct device *device)
 
 static int request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-       printk(KERN_WARNING "microcode: AMD microcode update via /dev/cpu/microcode"
-                       "is not supported\n");
+       printk(KERN_INFO "microcode: AMD microcode update via "
+              "/dev/cpu/microcode not supported\n");
        return -1;
 }
 
@@ -433,3 +372,4 @@ struct microcode_ops * __init init_amd_microcode(void)
 {
        return &microcode_amd_ops;
 }
+
index 82fb2809ce32208eacd7231c38597938031d4036..c9b721ba968c8f779a253919b2c7c046542ea974 100644 (file)
@@ -99,7 +99,7 @@ MODULE_LICENSE("GPL");
 
 #define MICROCODE_VERSION      "2.00"
 
-struct microcode_ops *microcode_ops;
+static struct microcode_ops *microcode_ops;
 
 /* no concurrent ->write()s are allowed on /dev/cpu/microcode */
 static DEFINE_MUTEX(microcode_mutex);
@@ -203,7 +203,7 @@ MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
 #endif
 
 /* fake device for request_firmware */
-struct platform_device *microcode_pdev;
+static struct platform_device *microcode_pdev;
 
 static ssize_t reload_store(struct sys_device *dev,
                            struct sysdev_attribute *attr,
@@ -272,13 +272,18 @@ static struct attribute_group mc_attr_group = {
        .name = "microcode",
 };
 
-static void microcode_fini_cpu(int cpu)
+static void __microcode_fini_cpu(int cpu)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-       mutex_lock(&microcode_mutex);
        microcode_ops->microcode_fini_cpu(cpu);
        uci->valid = 0;
+}
+
+static void microcode_fini_cpu(int cpu)
+{
+       mutex_lock(&microcode_mutex);
+       __microcode_fini_cpu(cpu);
        mutex_unlock(&microcode_mutex);
 }
 
@@ -306,12 +311,16 @@ static int microcode_resume_cpu(int cpu)
         * to this cpu (a bit of paranoia):
         */
        if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
-               microcode_fini_cpu(cpu);
+               __microcode_fini_cpu(cpu);
+               printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n",
+                               cpu);
                return -1;
        }
 
-       if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) {
-               microcode_fini_cpu(cpu);
+       if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) {
+               __microcode_fini_cpu(cpu);
+               printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n",
+                               cpu);
                /* Should we look for a new ucode here? */
                return 1;
        }
@@ -319,7 +328,7 @@ static int microcode_resume_cpu(int cpu)
        return 0;
 }
 
-void microcode_update_cpu(int cpu)
+static void microcode_update_cpu(int cpu)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        int err = 0;
index 622dc4a217848d1c8d16941796a4f82f868fc967..b7f4c929e61505d368ac02de9e63e9a917472ca8 100644 (file)
@@ -155,6 +155,7 @@ static DEFINE_SPINLOCK(microcode_update_lock);
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu_num);
+       unsigned long flags;
        unsigned int val[2];
 
        memset(csig, 0, sizeof(*csig));
@@ -174,11 +175,16 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
                csig->pf = 1 << ((val[1] >> 18) & 7);
        }
 
+       /* serialize access to the physical write to MSR 0x79 */
+       spin_lock_irqsave(&microcode_update_lock, flags);
+
        wrmsr(MSR_IA32_UCODE_REV, 0, 0);
        /* see notes above for revision 1.07.  Apparent chip bug */
        sync_core();
        /* get the current revision from MSR 0x8B */
        rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+       spin_unlock_irqrestore(&microcode_update_lock, flags);
+
        pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
                        csig->sig, csig->pf, csig->rev);
 
@@ -465,7 +471,7 @@ static void microcode_fini_cpu(int cpu)
        uci->mc = NULL;
 }
 
-struct microcode_ops microcode_intel_ops = {
+static struct microcode_ops microcode_intel_ops = {
        .request_microcode_user           = request_microcode_user,
        .request_microcode_fw             = request_microcode_fw,
        .collect_cpu_info                 = collect_cpu_info,
index 0f4c1fd5a1f434259afd90480b5af527b326b855..45e3b69808ba6722bcc9617cdd0c883cab1a8ee2 100644 (file)
@@ -586,26 +586,23 @@ static void __init __get_smp_config(unsigned int early)
 {
        struct intel_mp_floating *mpf = mpf_found;
 
-       if (x86_quirks->mach_get_smp_config) {
-               if (x86_quirks->mach_get_smp_config(early))
-                       return;
-       }
+       if (!mpf)
+               return;
+
        if (acpi_lapic && early)
                return;
+
        /*
-        * ACPI supports both logical (e.g. Hyper-Threading) and physical
-        * processors, where MPS only supports physical.
+        * MPS doesn't support hyperthreading, aka only have
+        * thread 0 apic id in MPS table
         */
-       if (acpi_lapic && acpi_ioapic) {
-               printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
-                      "information\n");
+       if (acpi_lapic && acpi_ioapic)
                return;
-       } else if (acpi_lapic)
-               printk(KERN_INFO "Using ACPI for processor (LAPIC) "
-                      "configuration information\n");
 
-       if (!mpf)
-               return;
+       if (x86_quirks->mach_get_smp_config) {
+               if (x86_quirks->mach_get_smp_config(early))
+                       return;
+       }
 
        printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
               mpf->mpf_specification);
index 2c97f07f1c2cb76a705e4e421c018837588f1b6c..8bd1bf9622a7cf1062f33e039c37a2167e5f2918 100644 (file)
@@ -131,6 +131,11 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
        atomic_dec(&nmi_active);
 }
 
+static void __acpi_nmi_disable(void *__unused)
+{
+       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
 int __init check_nmi_watchdog(void)
 {
        unsigned int *prev_nmi_count;
@@ -179,8 +184,12 @@ int __init check_nmi_watchdog(void)
        kfree(prev_nmi_count);
        return 0;
 error:
-       if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
-               disable_8259A_irq(0);
+       if (nmi_watchdog == NMI_IO_APIC) {
+               if (!timer_through_8259)
+                       disable_8259A_irq(0);
+               on_each_cpu(__acpi_nmi_disable, NULL, 1);
+       }
+
 #ifdef CONFIG_X86_32
        timer_ack = 0;
 #endif
@@ -199,12 +208,17 @@ static int __init setup_nmi_watchdog(char *str)
                ++str;
        }
 
-       get_option(&str, &nmi);
-
-       if (nmi >= NMI_INVALID)
-               return 0;
+       if (!strncmp(str, "lapic", 5))
+               nmi_watchdog = NMI_LOCAL_APIC;
+       else if (!strncmp(str, "ioapic", 6))
+               nmi_watchdog = NMI_IO_APIC;
+       else {
+               get_option(&str, &nmi);
+               if (nmi >= NMI_INVALID)
+                       return 0;
+               nmi_watchdog = nmi;
+       }
 
-       nmi_watchdog = nmi;
        return 1;
 }
 __setup("nmi_watchdog=", setup_nmi_watchdog);
@@ -285,11 +299,6 @@ void acpi_nmi_enable(void)
                on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
-static void __acpi_nmi_disable(void *__unused)
-{
-       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
 /*
  * Disable timer based NMIs on all CPUs:
  */
@@ -340,6 +349,8 @@ void stop_apic_nmi_watchdog(void *unused)
                return;
        if (nmi_watchdog == NMI_LOCAL_APIC)
                lapic_watchdog_stop();
+       else
+               __acpi_nmi_disable(NULL);
        __get_cpu_var(wd_enabled) = 0;
        atomic_dec(&nmi_active);
 }
@@ -465,6 +476,24 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 #ifdef CONFIG_SYSCTL
 
+static void enable_ioapic_nmi_watchdog_single(void *unused)
+{
+       __get_cpu_var(wd_enabled) = 1;
+       atomic_inc(&nmi_active);
+       __acpi_nmi_enable(NULL);
+}
+
+static void enable_ioapic_nmi_watchdog(void)
+{
+       on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
+       touch_nmi_watchdog();
+}
+
+static void disable_ioapic_nmi_watchdog(void)
+{
+       on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
+}
+
 static int __init setup_unknown_nmi_panic(char *str)
 {
        unknown_nmi_panic = 1;
@@ -507,6 +536,11 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
                        enable_lapic_nmi_watchdog();
                else
                        disable_lapic_nmi_watchdog();
+       } else if (nmi_watchdog == NMI_IO_APIC) {
+               if (nmi_watchdog_enabled)
+                       enable_ioapic_nmi_watchdog();
+               else
+                       disable_ioapic_nmi_watchdog();
        } else {
                printk(KERN_WARNING
                        "NMI watchdog doesn't know what hardware to touch\n");
index 4caff39078e0d386a0f1d2783fbc7e081582d9a9..0deea37a53cf5613477a17d304a03f8bf254325f 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/numaq.h>
 #include <asm/topology.h>
 #include <asm/processor.h>
-#include <asm/mpspec.h>
+#include <asm/genapic.h>
 #include <asm/e820.h>
 #include <asm/setup.h>
 
@@ -235,6 +235,13 @@ static int __init numaq_setup_ioapic_ids(void)
        return 1;
 }
 
+static int __init numaq_update_genapic(void)
+{
+       genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
+
+       return 0;
+}
+
 static struct x86_quirks numaq_x86_quirks __initdata = {
        .arch_pre_time_init     = numaq_pre_time_init,
        .arch_time_init         = NULL,
@@ -250,6 +257,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
        .mpc_oem_pci_bus        = mpc_oem_pci_bus,
        .smp_read_mpc_oem       = smp_read_mpc_oem,
        .setup_ioapic_ids       = numaq_setup_ioapic_ids,
+       .update_genapic         = numaq_update_genapic,
 };
 
 void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
index 192624820217f9eeeb64ee39ada5ad57a76a0df5..dc572994703d42d69c17f4af1777863b68d69092 100644 (file)
@@ -300,8 +300,8 @@ fs_initcall(pci_iommu_init);
 static __devinit void via_no_dac(struct pci_dev *dev)
 {
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-               printk(KERN_INFO "PCI: VIA PCI bridge detected."
-                                "Disabling DAC.\n");
+               printk(KERN_INFO
+                       "PCI: VIA PCI bridge detected. Disabling DAC.\n");
                forbid_dac = 1;
        }
 }
index ba7ad83e20a8f80d5b0ca7de95d6c1c0fbe6b27c..a35eaa379ff632fb142367df58a8fdeaaad81267 100644 (file)
@@ -745,10 +745,8 @@ void __init gart_iommu_init(void)
        unsigned long scratch;
        long i;
 
-       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-               printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
+       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
                return;
-       }
 
 #ifndef CONFIG_AGP_AMD64
        no_agp = 1;
index c622772744d86fcb0dcd3e6c46a6de5323508c40..9bfc3f719d470289a8be6e3c68534c69c8e6e7b0 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <asm/idle.h>
 #include <linux/smp.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -8,6 +9,7 @@
 #include <linux/pm.h>
 #include <linux/clockchips.h>
 #include <asm/system.h>
+#include <asm/apic.h>
 
 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -122,6 +124,21 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
+void stop_this_cpu(void *dummy)
+{
+       local_irq_disable();
+       /*
+        * Remove this CPU:
+        */
+       cpu_clear(smp_processor_id(), cpu_online_map);
+       disable_local_APIC();
+
+       for (;;) {
+               if (hlt_works(smp_processor_id()))
+                       halt();
+       }
+}
+
 static void do_nothing(void *unused)
 {
 }
index 0a6d8c12e10dc7742c2e33dd29475874cb9f2b50..06180dff5b2e8aefca1ed7bd279a1dc1ed3d0237 100644 (file)
@@ -929,17 +929,16 @@ void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
+               case 0 ... 0xC:
+                       /* sorry, don't know about them */
+                       break;
                case 0xD:
                case 0xE: /* Pentium M */
                        bts_configure(&bts_cfg_pentium_m);
                        break;
-               case 0xF: /* Core2 */
-        case 0x1C: /* Atom */
+               default: /* Core2, Atom, ... */
                        bts_configure(&bts_cfg_core2);
                        break;
-               default:
-                       /* sorry, don't know about them */
-                       break;
                }
                break;
        case 0xF:
index cc5a2545dd41c0ce42b96eafffe85230773162e4..61f718df6eec1fe74dffaa2cb3b110b1deaeb70b 100644 (file)
@@ -21,6 +21,9 @@
 # include <asm/iommu.h>
 #endif
 
+#include <mach_ipi.h>
+
+
 /*
  * Power off function, if any
  */
@@ -36,7 +39,10 @@ int reboot_force;
 static int reboot_cpu = -1;
 #endif
 
-/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old]
+/* This is set by the PCI code if either type 1 or type 2 PCI is detected */
+bool port_cf9_safe = false;
+
+/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci]
    warm   Don't set the cold reboot flag
    cold   Set the cold reboot flag
    bios   Reboot by jumping through the BIOS (only for X86_32)
@@ -45,6 +51,7 @@ static int reboot_cpu = -1;
    kbd    Use the keyboard controller. cold reset (default)
    acpi   Use the RESET_REG in the FADT
    efi    Use efi reset_system runtime service
+   pci    Use the so-called "PCI reset register", CF9
    force  Avoid anything that could hang.
  */
 static int __init reboot_setup(char *str)
@@ -79,6 +86,7 @@ static int __init reboot_setup(char *str)
                case 'k':
                case 't':
                case 'e':
+               case 'p':
                        reboot_type = *str;
                        break;
 
@@ -404,12 +412,27 @@ static void native_machine_emergency_restart(void)
                        reboot_type = BOOT_KBD;
                        break;
 
-
                case BOOT_EFI:
                        if (efi_enabled)
-                               efi.reset_system(reboot_mode ? EFI_RESET_WARM : EFI_RESET_COLD,
+                               efi.reset_system(reboot_mode ?
+                                                EFI_RESET_WARM :
+                                                EFI_RESET_COLD,
                                                 EFI_SUCCESS, 0, NULL);
+                       reboot_type = BOOT_KBD;
+                       break;
 
+               case BOOT_CF9:
+                       port_cf9_safe = true;
+                       /* fall through */
+
+               case BOOT_CF9_COND:
+                       if (port_cf9_safe) {
+                               u8 cf9 = inb(0xcf9) & ~6;
+                               outb(cf9|2, 0xcf9); /* Request hard reset */
+                               udelay(50);
+                               outb(cf9|6, 0xcf9); /* Actually do the reset */
+                               udelay(50);
+                       }
                        reboot_type = BOOT_KBD;
                        break;
                }
@@ -470,6 +493,11 @@ static void native_machine_restart(char *__unused)
 
 static void native_machine_halt(void)
 {
+       /* stop other cpus and apics */
+       machine_shutdown();
+
+       /* stop this cpu */
+       stop_this_cpu(NULL);
 }
 
 static void native_machine_power_off(void)
@@ -523,3 +551,95 @@ void machine_crash_shutdown(struct pt_regs *regs)
        machine_ops.crash_shutdown(regs);
 }
 #endif
+
+
+#if defined(CONFIG_SMP)
+
+/* This keeps a track of which one is crashing cpu. */
+static int crashing_cpu;
+static nmi_shootdown_cb shootdown_callback;
+
+static atomic_t waiting_for_crash_ipi;
+
+static int crash_nmi_callback(struct notifier_block *self,
+                       unsigned long val, void *data)
+{
+       int cpu;
+
+       if (val != DIE_NMI_IPI)
+               return NOTIFY_OK;
+
+       cpu = raw_smp_processor_id();
+
+       /* Don't do anything if this handler is invoked on crashing cpu.
+        * Otherwise, system will completely hang. Crashing cpu can get
+        * an NMI if system was initially booted with nmi_watchdog parameter.
+        */
+       if (cpu == crashing_cpu)
+               return NOTIFY_STOP;
+       local_irq_disable();
+
+       shootdown_callback(cpu, (struct die_args *)data);
+
+       atomic_dec(&waiting_for_crash_ipi);
+       /* Assume hlt works */
+       halt();
+       for (;;)
+               cpu_relax();
+
+       return 1;
+}
+
+static void smp_send_nmi_allbutself(void)
+{
+       cpumask_t mask = cpu_online_map;
+       cpu_clear(safe_smp_processor_id(), mask);
+       if (!cpus_empty(mask))
+               send_IPI_mask(mask, NMI_VECTOR);
+}
+
+static struct notifier_block crash_nmi_nb = {
+       .notifier_call = crash_nmi_callback,
+};
+
+/* Halt all other CPUs, calling the specified function on each of them
+ *
+ * This function can be used to halt all other CPUs on crash
+ * or emergency reboot time. The function passed as parameter
+ * will be called inside a NMI handler on all CPUs.
+ */
+void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+{
+       unsigned long msecs;
+       local_irq_disable();
+
+       /* Make a note of crashing cpu. Will be used in NMI callback.*/
+       crashing_cpu = safe_smp_processor_id();
+
+       shootdown_callback = callback;
+
+       atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+       /* Would it be better to replace the trap vector here? */
+       if (register_die_notifier(&crash_nmi_nb))
+               return;         /* return what? */
+       /* Ensure the new callback function is set before sending
+        * out the NMI
+        */
+       wmb();
+
+       smp_send_nmi_allbutself();
+
+       msecs = 1000; /* Wait at most a second for the other cpus to stop */
+       while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+               mdelay(1);
+               msecs--;
+       }
+
+       /* Leave the nmi callback set */
+}
+#else /* !CONFIG_SMP */
+void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+{
+       /* No other CPUs to shoot down */
+}
+#endif
index 6f50664b2ba50564dfbf10c017f52bcc247a9af8..a160f31197256680c32b835b7c32d62e9dbd184b 100644 (file)
 #include <asm/page.h>
 #include <asm/kexec.h>
 #include <asm/processor-flags.h>
-#include <asm/pgtable.h>
 
 /*
  * Must be relocatable PIC code callable as a C function
  */
 
 #define PTR(x) (x << 2)
-#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define PAE_PGD_ATTR (_PAGE_PRESENT)
 
 /* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
  * ~ control_page + PAGE_SIZE are used as data storage and stack for
@@ -39,7 +36,6 @@
 #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c)
 
        .text
-       .align PAGE_SIZE
        .globl relocate_kernel
 relocate_kernel:
        /* Save the CPU context, used for jumping back */
@@ -60,117 +56,6 @@ relocate_kernel:
        movl    %cr4, %eax
        movl    %eax, CR4(%edi)
 
-#ifdef CONFIG_X86_PAE
-       /* map the control page at its virtual address */
-
-       movl    PTR(VA_PGD)(%ebp), %edi
-       movl    PTR(VA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0xc0000000, %eax
-       shrl    $27, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_PMD_0)(%ebp), %edx
-       orl     $PAE_PGD_ATTR, %edx
-       movl    %edx, (%eax)
-
-       movl    PTR(VA_PMD_0)(%ebp), %edi
-       movl    PTR(VA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0x3fe00000, %eax
-       shrl    $18, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_PTE_0)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-
-       movl    PTR(VA_PTE_0)(%ebp), %edi
-       movl    PTR(VA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0x001ff000, %eax
-       shrl    $9, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-
-       /* identity map the control page at its physical address */
-
-       movl    PTR(VA_PGD)(%ebp), %edi
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0xc0000000, %eax
-       shrl    $27, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_PMD_1)(%ebp), %edx
-       orl     $PAE_PGD_ATTR, %edx
-       movl    %edx, (%eax)
-
-       movl    PTR(VA_PMD_1)(%ebp), %edi
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0x3fe00000, %eax
-       shrl    $18, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_PTE_1)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-
-       movl    PTR(VA_PTE_1)(%ebp), %edi
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0x001ff000, %eax
-       shrl    $9, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-#else
-       /* map the control page at its virtual address */
-
-       movl    PTR(VA_PGD)(%ebp), %edi
-       movl    PTR(VA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0xffc00000, %eax
-       shrl    $20, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_PTE_0)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-
-       movl    PTR(VA_PTE_0)(%ebp), %edi
-       movl    PTR(VA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0x003ff000, %eax
-       shrl    $10, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-
-       /* identity map the control page at its physical address */
-
-       movl    PTR(VA_PGD)(%ebp), %edi
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0xffc00000, %eax
-       shrl    $20, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_PTE_1)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-
-       movl    PTR(VA_PTE_1)(%ebp), %edi
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %eax
-       andl    $0x003ff000, %eax
-       shrl    $10, %eax
-       addl    %edi, %eax
-
-       movl    PTR(PA_CONTROL_PAGE)(%ebp), %edx
-       orl     $PAGE_ATTR, %edx
-       movl    %edx, (%eax)
-#endif
-
-relocate_new_kernel:
        /* read the arguments and say goodbye to the stack */
        movl  20+4(%esp), %ebx /* page_list */
        movl  20+8(%esp), %ebp /* list of pages */
index 9d5674f7b6ccbfbdef7f5ad16901f9dc9dc08ad0..a31223828597bf0f0176921273b1e98614d085bd 100644 (file)
@@ -98,6 +98,7 @@
 
 #include <mach_apic.h>
 #include <asm/paravirt.h>
+#include <asm/hypervisor.h>
 
 #include <asm/percpu.h>
 #include <asm/topology.h>
@@ -448,6 +449,7 @@ static void __init reserve_early_setup_data(void)
  * @size: Size of the crashkernel memory to reserve.
  * Returns the base address on success, and -1ULL on failure.
  */
+static
 unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
 {
        const unsigned long long alignment = 16<<20;    /* 16M */
@@ -583,161 +585,24 @@ static int __init setup_elfcorehdr(char *arg)
 early_param("elfcorehdr", setup_elfcorehdr);
 #endif
 
-static struct x86_quirks default_x86_quirks __initdata;
-
-struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
-
-/*
- * Some BIOSes seem to corrupt the low 64k of memory during events
- * like suspend/resume and unplugging an HDMI cable.  Reserve all
- * remaining free memory in that area and fill it with a distinct
- * pattern.
- */
-#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
-#define MAX_SCAN_AREAS 8
-
-static int __read_mostly memory_corruption_check = -1;
-
-static unsigned __read_mostly corruption_check_size = 64*1024;
-static unsigned __read_mostly corruption_check_period = 60; /* seconds */
-
-static struct e820entry scan_areas[MAX_SCAN_AREAS];
-static int num_scan_areas;
-
-
-static int set_corruption_check(char *arg)
+static int __init default_update_genapic(void)
 {
-       char *end;
-
-       memory_corruption_check = simple_strtol(arg, &end, 10);
-
-       return (*end == 0) ? 0 : -EINVAL;
-}
-early_param("memory_corruption_check", set_corruption_check);
-
-static int set_corruption_check_period(char *arg)
-{
-       char *end;
-
-       corruption_check_period = simple_strtoul(arg, &end, 10);
-
-       return (*end == 0) ? 0 : -EINVAL;
-}
-early_param("memory_corruption_check_period", set_corruption_check_period);
-
-static int set_corruption_check_size(char *arg)
-{
-       char *end;
-       unsigned size;
-
-       size = memparse(arg, &end);
-
-       if (*end == '\0')
-               corruption_check_size = size;
-
-       return (size == corruption_check_size) ? 0 : -EINVAL;
-}
-early_param("memory_corruption_check_size", set_corruption_check_size);
-
-
-static void __init setup_bios_corruption_check(void)
-{
-       u64 addr = PAGE_SIZE;   /* assume first page is reserved anyway */
-
-       if (memory_corruption_check == -1) {
-               memory_corruption_check =
-#ifdef CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
-                       1
-#else
-                       0
+#ifdef CONFIG_X86_SMP
+# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
+       genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
+# endif
 #endif
-                       ;
-       }
-
-       if (corruption_check_size == 0)
-               memory_corruption_check = 0;
-
-       if (!memory_corruption_check)
-               return;
-
-       corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
-
-       while(addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
-               u64 size;
-               addr = find_e820_area_size(addr, &size, PAGE_SIZE);
-
-               if (addr == 0)
-                       break;
 
-               if ((addr + size) > corruption_check_size)
-                       size = corruption_check_size - addr;
-
-               if (size == 0)
-                       break;
-
-               e820_update_range(addr, size, E820_RAM, E820_RESERVED);
-               scan_areas[num_scan_areas].addr = addr;
-               scan_areas[num_scan_areas].size = size;
-               num_scan_areas++;
-
-               /* Assume we've already mapped this early memory */
-               memset(__va(addr), 0, size);
-
-               addr += size;
-       }
-
-       printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
-              num_scan_areas);
-       update_e820();
-}
-
-static struct timer_list periodic_check_timer;
-
-void check_for_bios_corruption(void)
-{
-       int i;
-       int corruption = 0;
-
-       if (!memory_corruption_check)
-               return;
-
-       for(i = 0; i < num_scan_areas; i++) {
-               unsigned long *addr = __va(scan_areas[i].addr);
-               unsigned long size = scan_areas[i].size;
-
-               for(; size; addr++, size -= sizeof(unsigned long)) {
-                       if (!*addr)
-                               continue;
-                       printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n",
-                              addr, __pa(addr), *addr);
-                       corruption = 1;
-                       *addr = 0;
-               }
-       }
-
-       WARN(corruption, KERN_ERR "Memory corruption detected in low memory\n");
-}
-
-static void periodic_check_for_corruption(unsigned long data)
-{
-       check_for_bios_corruption();
-       mod_timer(&periodic_check_timer, round_jiffies(jiffies + corruption_check_period*HZ));
+       return 0;
 }
 
-void start_periodic_check_for_corruption(void)
-{
-       if (!memory_corruption_check || corruption_check_period == 0)
-               return;
-
-       printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
-              corruption_check_period);
+static struct x86_quirks default_x86_quirks __initdata = {
+       .update_genapic         = default_update_genapic,
+};
 
-       init_timer(&periodic_check_timer);
-       periodic_check_timer.function = &periodic_check_for_corruption;
-       periodic_check_for_corruption(0);
-}
-#endif
+struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
 
+#ifdef CONFIG_X86_RESERVE_LOW_64K
 static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
 {
        printk(KERN_NOTICE
@@ -749,6 +614,7 @@ static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
 
        return 0;
 }
+#endif
 
 /* List of systems that have known low memory corruption BIOS problems */
 static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
@@ -794,6 +660,9 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
+       /* VMI may relocate the fixmap; do this before touching ioremap area */
+       vmi_init();
+
        early_cpu_init();
        early_ioremap_init();
 
@@ -880,13 +749,8 @@ void __init setup_arch(char **cmdline_p)
        check_efer();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-       /*
-        * Must be before kernel pagetables are setup
-        * or fixmap area is touched.
-        */
-       vmi_init();
-#endif
+       /* Must be before kernel pagetables are setup */
+       vmi_activate();
 
        /* after early param, so could get panic from serial */
        reserve_early_setup_data();
@@ -909,6 +773,12 @@ void __init setup_arch(char **cmdline_p)
 
        dmi_check_system(bad_bios_dmi_table);
 
+       /*
+        * VMware detection requires dmi to be available, so this
+        * needs to be done after dmi_scan_machine, for the BP.
+        */
+       init_hypervisor(&boot_cpu_data);
+
 #ifdef CONFIG_X86_32
        probe_roms();
 #endif
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
deleted file mode 100644 (file)
index cc673aa..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifdef CONFIG_X86_32
-struct sigframe {
-       char __user *pretcode;
-       int sig;
-       struct sigcontext sc;
-       /*
-        * fpstate is unused. fpstate is moved/allocated after
-        * retcode[] below. This movement allows to have the FP state and the
-        * future state extensions (xsave) stay together.
-        * And at the same time retaining the unused fpstate, prevents changing
-        * the offset of extramask[] in the sigframe and thus prevent any
-        * legacy application accessing/modifying it.
-        */
-       struct _fpstate fpstate_unused;
-       unsigned long extramask[_NSIG_WORDS-1];
-       char retcode[8];
-       /* fp state follows here */
-};
-
-struct rt_sigframe {
-       char __user *pretcode;
-       int sig;
-       struct siginfo __user *pinfo;
-       void __user *puc;
-       struct siginfo info;
-       struct ucontext uc;
-       char retcode[8];
-       /* fp state follows here */
-};
-#else
-struct rt_sigframe {
-       char __user *pretcode;
-       struct ucontext uc;
-       struct siginfo info;
-       /* fp state follows here */
-};
-
-int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-               sigset_t *set, struct pt_regs *regs);
-int ia32_setup_frame(int sig, struct k_sigaction *ka,
-               sigset_t *set, struct pt_regs *regs);
-#endif
similarity index 73%
rename from arch/x86/kernel/signal_32.c
rename to arch/x86/kernel/signal.c
index d6dd057d0f2210784023ee1c591ed4f660e837e2..89bb7668041d63067c1a2b3fd1210ab9ccfcb49d 100644 (file)
@@ -1,36 +1,41 @@
 /*
  *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
  *
  *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
  *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
+ *  2000-2002   x86-64 support by Andi Kleen
  */
-#include <linux/list.h>
 
-#include <linux/personality.h>
-#include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
 #include <linux/kernel.h>
-#include <linux/ptrace.h>
 #include <linux/signal.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
 #include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/ptrace.h>
 #include <linux/tracehook.h>
-#include <linux/elf.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/ucontext.h>
-#include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/vdso.h>
+
+#ifdef CONFIG_X86_64
+#include <asm/proto.h>
+#include <asm/ia32_unistd.h>
+#include <asm/mce.h>
+#endif /* CONFIG_X86_64 */
+
 #include <asm/syscall.h>
 #include <asm/syscalls.h>
 
-#include "sigframe.h"
+#include <asm/sigframe.h>
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 # define FIX_EFLAGS    __FIX_EFLAGS
 #endif
 
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
-{
-       mask &= _BLOCKABLE;
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       siginitset(&current->blocked, mask);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_restore_sigmask();
-
-       return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-             struct old_sigaction __user *oact)
-{
-       struct k_sigaction new_ka, old_ka;
-       int ret;
-
-       if (act) {
-               old_sigset_t mask;
-
-               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-                       return -EFAULT;
-
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
-               siginitset(&new_ka.sa.sa_mask, mask);
-       }
-
-       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
-       if (!ret && oact) {
-               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-                       return -EFAULT;
-
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-       }
-
-       return ret;
-}
-
-asmlinkage int sys_sigaltstack(unsigned long bx)
-{
-       /*
-        * This is needed to make gcc realize it doesn't own the
-        * "struct pt_regs"
-        */
-       struct pt_regs *regs = (struct pt_regs *)&bx;
-       const stack_t __user *uss = (const stack_t __user *)bx;
-       stack_t __user *uoss = (stack_t __user *)regs->cx;
-
-       return do_sigaltstack(uss, uoss, regs->sp);
-}
-
 #define COPY(x)                        {               \
        err |= __get_user(regs->x, &sc->x);     \
 }
@@ -123,7 +60,7 @@ asmlinkage int sys_sigaltstack(unsigned long bx)
                regs->seg = tmp;                        \
 }
 
-#define COPY_SEG_STRICT(seg)   {                       \
+#define COPY_SEG_CPL3(seg)     {                       \
                unsigned short tmp;                     \
                err |= __get_user(tmp, &sc->seg);       \
                regs->seg = tmp | 3;                    \
@@ -135,9 +72,6 @@ asmlinkage int sys_sigaltstack(unsigned long bx)
                loadsegment(seg, tmp);                  \
 }
 
-/*
- * Do a signal return; undo the signal stack.
- */
 static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
                   unsigned long *pax)
@@ -149,14 +83,36 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+#ifdef CONFIG_X86_32
        GET_SEG(gs);
        COPY_SEG(fs);
        COPY_SEG(es);
        COPY_SEG(ds);
+#endif /* CONFIG_X86_32 */
+
        COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
        COPY(dx); COPY(cx); COPY(ip);
-       COPY_SEG_STRICT(cs);
-       COPY_SEG_STRICT(ss);
+
+#ifdef CONFIG_X86_64
+       COPY(r8);
+       COPY(r9);
+       COPY(r10);
+       COPY(r11);
+       COPY(r12);
+       COPY(r13);
+       COPY(r14);
+       COPY(r15);
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_32
+       COPY_SEG_CPL3(cs);
+       COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+       /* Kernel saves and restores only the CS segment register on signals,
+        * which is the bare minimum needed to allow mixed 32/64-bit code.
+        * App's signal handler can save/restore other segments if needed. */
+       COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
        err |= __get_user(tmpflags, &sc->flags);
        regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -169,102 +125,24 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
        return err;
 }
 
-asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
-{
-       struct sigframe __user *frame;
-       struct pt_regs *regs;
-       unsigned long ax;
-       sigset_t set;
-
-       regs = (struct pt_regs *) &__unused;
-       frame = (struct sigframe __user *)(regs->sp - 8);
-
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
-               && __copy_from_user(&set.sig[1], &frame->extramask,
-                                   sizeof(frame->extramask))))
-               goto badframe;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       if (restore_sigcontext(regs, &frame->sc, &ax))
-               goto badframe;
-       return ax;
-
-badframe:
-       if (show_unhandled_signals && printk_ratelimit()) {
-               printk("%s%s[%d] bad frame in sigreturn frame:"
-                       "%p ip:%lx sp:%lx oeax:%lx",
-                   task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
-                   current->comm, task_pid_nr(current), frame, regs->ip,
-                   regs->sp, regs->orig_ax);
-               print_vma_addr(" in ", regs->ip);
-               printk(KERN_CONT "\n");
-       }
-
-       force_sig(SIGSEGV, current);
-
-       return 0;
-}
-
-static long do_rt_sigreturn(struct pt_regs *regs)
-{
-       struct rt_sigframe __user *frame;
-       unsigned long ax;
-       sigset_t set;
-
-       frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-               goto badframe;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
-               goto badframe;
-
-       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
-               goto badframe;
-
-       return ax;
-
-badframe:
-       signal_fault(regs, frame, "rt_sigreturn");
-       return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-{
-       struct pt_regs *regs = (struct pt_regs *)&__unused;
-
-       return do_rt_sigreturn(regs);
-}
-
-/*
- * Set up a signal frame.
- */
 static int
 setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
                 struct pt_regs *regs, unsigned long mask)
 {
-       int tmp, err = 0;
+       int err = 0;
 
-       err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
-       savesegment(gs, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+#ifdef CONFIG_X86_32
+       {
+               unsigned int tmp;
 
+               savesegment(gs, tmp);
+               err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+       }
+       err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
        err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
        err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
+#endif /* CONFIG_X86_32 */
+
        err |= __put_user(regs->di, &sc->di);
        err |= __put_user(regs->si, &sc->si);
        err |= __put_user(regs->bp, &sc->bp);
@@ -273,19 +151,33 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
        err |= __put_user(regs->dx, &sc->dx);
        err |= __put_user(regs->cx, &sc->cx);
        err |= __put_user(regs->ax, &sc->ax);
+#ifdef CONFIG_X86_64
+       err |= __put_user(regs->r8, &sc->r8);
+       err |= __put_user(regs->r9, &sc->r9);
+       err |= __put_user(regs->r10, &sc->r10);
+       err |= __put_user(regs->r11, &sc->r11);
+       err |= __put_user(regs->r12, &sc->r12);
+       err |= __put_user(regs->r13, &sc->r13);
+       err |= __put_user(regs->r14, &sc->r14);
+       err |= __put_user(regs->r15, &sc->r15);
+#endif /* CONFIG_X86_64 */
+
        err |= __put_user(current->thread.trap_no, &sc->trapno);
        err |= __put_user(current->thread.error_code, &sc->err);
        err |= __put_user(regs->ip, &sc->ip);
+#ifdef CONFIG_X86_32
        err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
        err |= __put_user(regs->flags, &sc->flags);
        err |= __put_user(regs->sp, &sc->sp_at_signal);
        err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+#else /* !CONFIG_X86_32 */
+       err |= __put_user(regs->flags, &sc->flags);
+       err |= __put_user(regs->cs, &sc->cs);
+       err |= __put_user(0, &sc->gs);
+       err |= __put_user(0, &sc->fs);
+#endif /* CONFIG_X86_32 */
 
-       tmp = save_i387_xstate(fpstate);
-       if (tmp < 0)
-               err = 1;
-       else
-               err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+       err |= __put_user(fpstate, &sc->fpstate);
 
        /* non-iBCS2 extensions.. */
        err |= __put_user(mask, &sc->oldmask);
@@ -294,6 +186,32 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
        return err;
 }
 
+/*
+ * Set up a signal frame.
+ */
+#ifdef CONFIG_X86_32
+static const struct {
+       u16 poplmovl;
+       u32 val;
+       u16 int80;
+} __attribute__((packed)) retcode = {
+       0xb858,         /* popl %eax; movl $..., %eax */
+       __NR_sigreturn,
+       0x80cd,         /* int $0x80 */
+};
+
+static const struct {
+       u8  movl;
+       u32 val;
+       u16 int80;
+       u8  pad;
+} __attribute__((packed)) rt_retcode = {
+       0xb8,           /* movl $..., %eax */
+       __NR_rt_sigreturn,
+       0x80cd,         /* int $0x80 */
+       0
+};
+
 /*
  * Determine which stack to use..
  */
@@ -328,6 +246,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
        if (used_math()) {
                sp = sp - sig_xstate_size;
                *fpstate = (struct _fpstate *) sp;
+               if (save_i387_xstate(*fpstate) < 0)
+                       return (void __user *)-1L;
        }
 
        sp -= frame_size;
@@ -383,9 +303,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
         * reasons and because gdb uses it as a signature to notice
         * signal handler stack frames.
         */
-       err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
-       err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
-       err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
+       err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
 
        if (err)
                return -EFAULT;
@@ -454,9 +372,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         * reasons and because gdb uses it as a signature to notice
         * signal handler stack frames.
         */
-       err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
-       err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
-       err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
+       err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
 
        if (err)
                return -EFAULT;
@@ -475,23 +391,293 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
        return 0;
 }
+#else /* !CONFIG_X86_32 */
+/*
+ * Determine which stack to use..
+ */
+static void __user *
+get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
+{
+       /* Default to using normal stack - redzone*/
+       sp -= 128;
+
+       /* This is the X/Open sanctioned signal stack switching.  */
+       if (ka->sa.sa_flags & SA_ONSTACK) {
+               if (sas_ss_flags(sp) == 0)
+                       sp = current->sas_ss_sp + current->sas_ss_size;
+       }
+
+       return (void __user *)round_down(sp - size, 64);
+}
+
+static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+                           sigset_t *set, struct pt_regs *regs)
+{
+       struct rt_sigframe __user *frame;
+       void __user *fp = NULL;
+       int err = 0;
+       struct task_struct *me = current;
+
+       if (used_math()) {
+               fp = get_stack(ka, regs->sp, sig_xstate_size);
+               frame = (void __user *)round_down(
+                       (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
+
+               if (save_i387_xstate(fp) < 0)
+                       return -EFAULT;
+       } else
+               frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               return -EFAULT;
+
+       if (ka->sa.sa_flags & SA_SIGINFO) {
+               if (copy_siginfo_to_user(&frame->info, info))
+                       return -EFAULT;
+       }
+
+       /* Create the ucontext.  */
+       if (cpu_has_xsave)
+               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
+       else
+               err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(0, &frame->uc.uc_link);
+       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+       err |= __put_user(sas_ss_flags(regs->sp),
+                         &frame->uc.uc_stack.ss_flags);
+       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+       err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+       /* Set up to return from userspace.  If provided, use a stub
+          already in userspace.  */
+       /* x86-64 should always use SA_RESTORER. */
+       if (ka->sa.sa_flags & SA_RESTORER) {
+               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
+       } else {
+               /* could use a vstub here */
+               return -EFAULT;
+       }
+
+       if (err)
+               return -EFAULT;
+
+       /* Set up registers for signal handler */
+       regs->di = sig;
+       /* In case the signal handler was declared without prototypes */
+       regs->ax = 0;
+
+       /* This also works for non SA_SIGINFO handlers because they expect the
+          next argument after the signal number on the stack. */
+       regs->si = (unsigned long)&frame->info;
+       regs->dx = (unsigned long)&frame->uc;
+       regs->ip = (unsigned long) ka->sa.sa_handler;
+
+       regs->sp = (unsigned long)frame;
+
+       /* Set up the CS register to run signal handlers in 64-bit mode,
+          even if the handler happens to be interrupting 32-bit code. */
+       regs->cs = __USER_CS;
+
+       return 0;
+}
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_X86_32
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+{
+       mask &= _BLOCKABLE;
+       spin_lock_irq(&current->sighand->siglock);
+       current->saved_sigmask = current->blocked;
+       siginitset(&current->blocked, mask);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_restore_sigmask();
+
+       return -ERESTARTNOHAND;
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+             struct old_sigaction __user *oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+       if (act) {
+               old_sigset_t mask;
+
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                       return -EFAULT;
+
+               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+               __get_user(mask, &act->sa_mask);
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                       return -EFAULT;
+
+               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+       }
+
+       return ret;
+}
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_X86_32
+asmlinkage int sys_sigaltstack(unsigned long bx)
+{
+       /*
+        * This is needed to make gcc realize it doesn't own the
+        * "struct pt_regs"
+        */
+       struct pt_regs *regs = (struct pt_regs *)&bx;
+       const stack_t __user *uss = (const stack_t __user *)bx;
+       stack_t __user *uoss = (stack_t __user *)regs->cx;
+
+       return do_sigaltstack(uss, uoss, regs->sp);
+}
+#else /* !CONFIG_X86_32 */
+asmlinkage long
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+               struct pt_regs *regs)
+{
+       return do_sigaltstack(uss, uoss, regs->sp);
+}
+#endif /* CONFIG_X86_32 */
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+#ifdef CONFIG_X86_32
+asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
+{
+       struct sigframe __user *frame;
+       struct pt_regs *regs;
+       unsigned long ax;
+       sigset_t set;
+
+       regs = (struct pt_regs *) &__unused;
+       frame = (struct sigframe __user *)(regs->sp - 8);
+
+       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
+               && __copy_from_user(&set.sig[1], &frame->extramask,
+                                   sizeof(frame->extramask))))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       if (restore_sigcontext(regs, &frame->sc, &ax))
+               goto badframe;
+       return ax;
+
+badframe:
+       signal_fault(regs, frame, "sigreturn");
+
+       return 0;
+}
+#endif /* CONFIG_X86_32 */
+
+static long do_rt_sigreturn(struct pt_regs *regs)
+{
+       struct rt_sigframe __user *frame;
+       unsigned long ax;
+       sigset_t set;
+
+       frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
+       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+               goto badframe;
+
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+               goto badframe;
+
+       return ax;
+
+badframe:
+       signal_fault(regs, frame, "rt_sigreturn");
+       return 0;
+}
+
+#ifdef CONFIG_X86_32
+asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+{
+       return do_rt_sigreturn(&regs);
+}
+#else /* !CONFIG_X86_32 */
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+       return do_rt_sigreturn(regs);
+}
+#endif /* CONFIG_X86_32 */
 
 /*
  * OK, we're invoking a handler:
  */
 static int signr_convert(int sig)
 {
+#ifdef CONFIG_X86_32
        struct thread_info *info = current_thread_info();
 
        if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32)
                return info->exec_domain->signal_invmap[sig];
+#endif /* CONFIG_X86_32 */
        return sig;
 }
 
+#ifdef CONFIG_X86_32
+
 #define is_ia32        1
 #define ia32_setup_frame       __setup_frame
 #define ia32_setup_rt_frame    __setup_rt_frame
 
+#else /* !CONFIG_X86_32 */
+
+#ifdef CONFIG_IA32_EMULATION
+#define is_ia32        test_thread_flag(TIF_IA32)
+#else /* !CONFIG_IA32_EMULATION */
+#define is_ia32        0
+#endif /* CONFIG_IA32_EMULATION */
+
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+               sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
+               sigset_t *set, struct pt_regs *regs);
+
+#endif /* CONFIG_X86_32 */
+
 static int
 setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
               sigset_t *set, struct pt_regs *regs)
@@ -592,7 +778,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        return 0;
 }
 
+#ifdef CONFIG_X86_32
 #define NR_restart_syscall     __NR_restart_syscall
+#else /* !CONFIG_X86_32 */
+#define NR_restart_syscall     \
+       test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
+#endif /* CONFIG_X86_32 */
+
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
@@ -704,8 +896,9 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
        struct task_struct *me = current;
 
        if (show_unhandled_signals && printk_ratelimit()) {
-               printk(KERN_INFO
+               printk("%s"
                       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
+                      task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
                       me->comm, me->pid, where, frame,
                       regs->ip, regs->sp, regs->orig_ax);
                print_vma_addr(" in ", regs->ip);
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
deleted file mode 100644 (file)
index a5c9627..0000000
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
- *
- *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
- *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
- *  2000-2002   x86-64 support by Andi Kleen
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/tracehook.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/compiler.h>
-#include <linux/uaccess.h>
-
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/i387.h>
-#include <asm/proto.h>
-#include <asm/ia32_unistd.h>
-#include <asm/mce.h>
-#include <asm/syscall.h>
-#include <asm/syscalls.h>
-#include "sigframe.h"
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-#define __FIX_EFLAGS   (X86_EFLAGS_AC | X86_EFLAGS_OF | \
-                        X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
-                        X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-                        X86_EFLAGS_CF)
-
-#ifdef CONFIG_X86_32
-# define FIX_EFLAGS    (__FIX_EFLAGS | X86_EFLAGS_RF)
-#else
-# define FIX_EFLAGS    __FIX_EFLAGS
-#endif
-
-asmlinkage long
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
-               struct pt_regs *regs)
-{
-       return do_sigaltstack(uss, uoss, regs->sp);
-}
-
-#define COPY(x)                        {               \
-       err |= __get_user(regs->x, &sc->x);     \
-}
-
-#define COPY_SEG_STRICT(seg)   {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               regs->seg = tmp | 3;                    \
-}
-
-/*
- * Do a signal return; undo the signal stack.
- */
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
-                  unsigned long *pax)
-{
-       void __user *buf;
-       unsigned int tmpflags;
-       unsigned int err = 0;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-       COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-       COPY(dx); COPY(cx); COPY(ip);
-       COPY(r8);
-       COPY(r9);
-       COPY(r10);
-       COPY(r11);
-       COPY(r12);
-       COPY(r13);
-       COPY(r14);
-       COPY(r15);
-
-       /* Kernel saves and restores only the CS segment register on signals,
-        * which is the bare minimum needed to allow mixed 32/64-bit code.
-        * App's signal handler can save/restore other segments if needed. */
-       COPY_SEG_STRICT(cs);
-
-       err |= __get_user(tmpflags, &sc->flags);
-       regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-       regs->orig_ax = -1;             /* disable syscall checks */
-
-       err |= __get_user(buf, &sc->fpstate);
-       err |= restore_i387_xstate(buf);
-
-       err |= __get_user(*pax, &sc->ax);
-       return err;
-}
-
-static long do_rt_sigreturn(struct pt_regs *regs)
-{
-       struct rt_sigframe __user *frame;
-       unsigned long ax;
-       sigset_t set;
-
-       frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-               goto badframe;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
-               goto badframe;
-
-       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
-               goto badframe;
-
-       return ax;
-
-badframe:
-       signal_fault(regs, frame, "rt_sigreturn");
-       return 0;
-}
-
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
-{
-       return do_rt_sigreturn(regs);
-}
-
-/*
- * Set up a signal frame.
- */
-
-static inline int
-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-               unsigned long mask, struct task_struct *me)
-{
-       int err = 0;
-
-       err |= __put_user(regs->cs, &sc->cs);
-       err |= __put_user(0, &sc->gs);
-       err |= __put_user(0, &sc->fs);
-
-       err |= __put_user(regs->di, &sc->di);
-       err |= __put_user(regs->si, &sc->si);
-       err |= __put_user(regs->bp, &sc->bp);
-       err |= __put_user(regs->sp, &sc->sp);
-       err |= __put_user(regs->bx, &sc->bx);
-       err |= __put_user(regs->dx, &sc->dx);
-       err |= __put_user(regs->cx, &sc->cx);
-       err |= __put_user(regs->ax, &sc->ax);
-       err |= __put_user(regs->r8, &sc->r8);
-       err |= __put_user(regs->r9, &sc->r9);
-       err |= __put_user(regs->r10, &sc->r10);
-       err |= __put_user(regs->r11, &sc->r11);
-       err |= __put_user(regs->r12, &sc->r12);
-       err |= __put_user(regs->r13, &sc->r13);
-       err |= __put_user(regs->r14, &sc->r14);
-       err |= __put_user(regs->r15, &sc->r15);
-       err |= __put_user(me->thread.trap_no, &sc->trapno);
-       err |= __put_user(me->thread.error_code, &sc->err);
-       err |= __put_user(regs->ip, &sc->ip);
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(me->thread.cr2, &sc->cr2);
-
-       return err;
-}
-
-/*
- * Determine which stack to use..
- */
-
-static void __user *
-get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
-{
-       unsigned long sp;
-
-       /* Default to using normal stack - redzone*/
-       sp = regs->sp - 128;
-
-       /* This is the X/Open sanctioned signal stack switching.  */
-       if (ka->sa.sa_flags & SA_ONSTACK) {
-               if (sas_ss_flags(sp) == 0)
-                       sp = current->sas_ss_sp + current->sas_ss_size;
-       }
-
-       return (void __user *)round_down(sp - size, 64);
-}
-
-static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-                           sigset_t *set, struct pt_regs *regs)
-{
-       struct rt_sigframe __user *frame;
-       void __user *fp = NULL;
-       int err = 0;
-       struct task_struct *me = current;
-
-       if (used_math()) {
-               fp = get_stack(ka, regs, sig_xstate_size);
-               frame = (void __user *)round_down(
-                       (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
-
-               if (save_i387_xstate(fp) < 0)
-                       return -EFAULT;
-       } else
-               frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               return -EFAULT;
-
-       if (ka->sa.sa_flags & SA_SIGINFO) {
-               if (copy_siginfo_to_user(&frame->info, info))
-                       return -EFAULT;
-       }
-
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
-       err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
-       if (sizeof(*set) == 16) {
-               __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
-               __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
-       } else
-               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
-       /* Set up to return from userspace.  If provided, use a stub
-          already in userspace.  */
-       /* x86-64 should always use SA_RESTORER. */
-       if (ka->sa.sa_flags & SA_RESTORER) {
-               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-       } else {
-               /* could use a vstub here */
-               return -EFAULT;
-       }
-
-       if (err)
-               return -EFAULT;
-
-       /* Set up registers for signal handler */
-       regs->di = sig;
-       /* In case the signal handler was declared without prototypes */
-       regs->ax = 0;
-
-       /* This also works for non SA_SIGINFO handlers because they expect the
-          next argument after the signal number on the stack. */
-       regs->si = (unsigned long)&frame->info;
-       regs->dx = (unsigned long)&frame->uc;
-       regs->ip = (unsigned long) ka->sa.sa_handler;
-
-       regs->sp = (unsigned long)frame;
-
-       /* Set up the CS register to run signal handlers in 64-bit mode,
-          even if the handler happens to be interrupting 32-bit code. */
-       regs->cs = __USER_CS;
-
-       return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static int signr_convert(int sig)
-{
-       return sig;
-}
-
-#ifdef CONFIG_IA32_EMULATION
-#define is_ia32        test_thread_flag(TIF_IA32)
-#else
-#define is_ia32        0
-#endif
-
-static int
-setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-              sigset_t *set, struct pt_regs *regs)
-{
-       int usig = signr_convert(sig);
-       int ret;
-
-       /* Set up the stack frame */
-       if (is_ia32) {
-               if (ka->sa.sa_flags & SA_SIGINFO)
-                       ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
-               else
-                       ret = ia32_setup_frame(usig, ka, set, regs);
-       } else
-               ret = __setup_rt_frame(sig, ka, info, set, regs);
-
-       if (ret) {
-               force_sigsegv(sig, current);
-               return -EFAULT;
-       }
-
-       return ret;
-}
-
-static int
-handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs *regs)
-{
-       int ret;
-
-       /* Are we from a system call? */
-       if (syscall_get_nr(current, regs) >= 0) {
-               /* If so, check system call restarting.. */
-               switch (syscall_get_error(current, regs)) {
-               case -ERESTART_RESTARTBLOCK:
-               case -ERESTARTNOHAND:
-                       regs->ax = -EINTR;
-                       break;
-
-               case -ERESTARTSYS:
-                       if (!(ka->sa.sa_flags & SA_RESTART)) {
-                               regs->ax = -EINTR;
-                               break;
-                       }
-               /* fallthrough */
-               case -ERESTARTNOINTR:
-                       regs->ax = regs->orig_ax;
-                       regs->ip -= 2;
-                       break;
-               }
-       }
-
-       /*
-        * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
-        * flag so that register information in the sigcontext is correct.
-        */
-       if (unlikely(regs->flags & X86_EFLAGS_TF) &&
-           likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
-               regs->flags &= ~X86_EFLAGS_TF;
-
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
-
-       if (ret)
-               return ret;
-
-#ifdef CONFIG_X86_64
-       /*
-        * This has nothing to do with segment registers,
-        * despite the name.  This magic affects uaccess.h
-        * macros' behavior.  Reset it to the normal setting.
-        */
-       set_fs(USER_DS);
-#endif
-
-       /*
-        * Clear the direction flag as per the ABI for function entry.
-        */
-       regs->flags &= ~X86_EFLAGS_DF;
-
-       /*
-        * Clear TF when entering the signal handler, but
-        * notify any tracer that was single-stepping it.
-        * The tracer may want to single-step inside the
-        * handler too.
-        */
-       regs->flags &= ~X86_EFLAGS_TF;
-
-       spin_lock_irq(&current->sighand->siglock);
-       sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
-       if (!(ka->sa.sa_flags & SA_NODEFER))
-               sigaddset(&current->blocked, sig);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       tracehook_signal_handler(sig, info, ka, regs,
-                                test_thread_flag(TIF_SINGLESTEP));
-
-       return 0;
-}
-
-#define NR_restart_syscall     \
-       test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-static void do_signal(struct pt_regs *regs)
-{
-       struct k_sigaction ka;
-       siginfo_t info;
-       int signr;
-       sigset_t *oldset;
-
-       /*
-        * We want the common case to go fast, which is why we may in certain
-        * cases get here from kernel mode. Just return without doing anything
-        * if so.
-        * X86_32: vm86 regs switched out by assembly code before reaching
-        * here, so testing against kernel CS suffices.
-        */
-       if (!user_mode(regs))
-               return;
-
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
-       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-       if (signr > 0) {
-               /*
-                * Re-enable any watchpoints before delivering the
-                * signal to user space. The processor register will
-                * have been cleared if the watchpoint triggered
-                * inside the kernel.
-                */
-               if (current->thread.debugreg7)
-                       set_debugreg(current->thread.debugreg7, 7);
-
-               /* Whee! Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
-               return;
-       }
-
-       /* Did we come from a system call? */
-       if (syscall_get_nr(current, regs) >= 0) {
-               /* Restart the system call - no handlers present */
-               switch (syscall_get_error(current, regs)) {
-               case -ERESTARTNOHAND:
-               case -ERESTARTSYS:
-               case -ERESTARTNOINTR:
-                       regs->ax = regs->orig_ax;
-                       regs->ip -= 2;
-                       break;
-
-               case -ERESTART_RESTARTBLOCK:
-                       regs->ax = NR_restart_syscall;
-                       regs->ip -= 2;
-                       break;
-               }
-       }
-
-       /*
-        * If there's no signal to deliver, we just put the saved sigmask
-        * back.
-        */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-}
-
-/*
- * notification of userspace execution resumption
- * - triggered by the TIF_WORK_MASK flags
- */
-void
-do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
-{
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
-       /* notify userspace of pending MCEs */
-       if (thread_info_flags & _TIF_MCE_NOTIFY)
-               mce_notify_user();
-#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
-
-       /* deal with pending signal delivery */
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs);
-
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-               clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(regs);
-       }
-
-#ifdef CONFIG_X86_32
-       clear_thread_flag(TIF_IRET);
-#endif /* CONFIG_X86_32 */
-}
-
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
-{
-       struct task_struct *me = current;
-
-       if (show_unhandled_signals && printk_ratelimit()) {
-               printk(KERN_INFO
-                      "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
-                      me->comm, me->pid, where, frame,
-                      regs->ip, regs->sp, regs->orig_ax);
-               print_vma_addr(" in ", regs->ip);
-               printk(KERN_CONT "\n");
-       }
-
-       force_sig(SIGSEGV, me);
-}
index 18f9b19f5f8f5d46582b64501d83d11cb4376eb6..3f92b134ab902067c63ed05f51f95eac7dd0de99 100644 (file)
@@ -140,19 +140,6 @@ void native_send_call_func_ipi(cpumask_t mask)
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 }
 
-static void stop_this_cpu(void *dummy)
-{
-       local_irq_disable();
-       /*
-        * Remove this CPU:
-        */
-       cpu_clear(smp_processor_id(), cpu_online_map);
-       disable_local_APIC();
-       if (hlt_works(smp_processor_id()))
-               for (;;) halt();
-       for (;;);
-}
-
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
index 7b1093397319766b2e4848a5a883b0dc35f49804..7a430c4d15516f8ade11c87b50adcb02660b118a 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/mtrr.h>
 #include <asm/vmi.h>
 #include <asm/genapic.h>
+#include <asm/setup.h>
 #include <linux/mc146818rtc.h>
 
 #include <mach_apic.h>
@@ -294,9 +295,7 @@ static void __cpuinit start_secondary(void *unused)
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
-#ifdef CONFIG_VMI
        vmi_bringup();
-#endif
        cpu_init();
        preempt_disable();
        smp_callin();
@@ -536,7 +535,7 @@ static void impress_friends(void)
        pr_debug("Before bogocount - setting activated=1.\n");
 }
 
-static inline void __inquire_remote_apic(int apicid)
+void __inquire_remote_apic(int apicid)
 {
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
@@ -575,14 +574,13 @@ static inline void __inquire_remote_apic(int apicid)
        }
 }
 
-#ifdef WAKE_SECONDARY_VIA_NMI
 /*
  * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
  */
-static int __devinit
-wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
+int __devinit
+wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
 {
        unsigned long send_status, accept_status = 0;
        int maxlvt;
@@ -599,7 +597,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
-       if (APIC_INTEGRATED(apic_version[phys_apicid])) {
+       if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                maxlvt = lapic_get_maxlvt();
                if (maxlvt > 3)                 /* Due to the Pentium erratum 3AP.  */
                        apic_write(APIC_ESR, 0);
@@ -614,11 +612,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 
        return (send_status | accept_status);
 }
-#endif /* WAKE_SECONDARY_VIA_NMI */
 
-#ifdef WAKE_SECONDARY_VIA_INIT
-static int __devinit
-wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
+int __devinit
+wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 {
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;
@@ -737,7 +733,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 
        return (send_status | accept_status);
 }
-#endif /* WAKE_SECONDARY_VIA_INIT */
 
 struct create_idle {
        struct work_struct work;
@@ -1086,8 +1081,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
 #endif
 
        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
-               printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
-                                   "by the BIOS.\n", hard_smp_processor_id());
+               printk(KERN_WARNING
+                       "weird, boot CPU (#%d) not listed by the BIOS.\n",
+                       hard_smp_processor_id());
+
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }
 
index cb19d650c21643599aa1423a57b64a85ee26afe8..1b7711b31037ed034ccf55defc306ab35bfc74ba 100644 (file)
@@ -49,7 +49,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 }
 EXPORT_SYMBOL(profile_pc);
 
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        add_pda(irq0_irqs, 1);
 
@@ -80,6 +80,8 @@ unsigned long __init calibrate_cpu(void)
                        break;
        no_ctr_free = (i == 4);
        if (no_ctr_free) {
+               WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+                    "cpu_khz value may be incorrect.\n");
                i = 3;
                rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
                wrmsrl(MSR_K7_EVNTSEL3, 0);
index f4049f3513b6b5e01e71523a5490fbedd5db9992..4290d918b58af67f22fa8a25f182d2eebf133819 100644 (file)
@@ -34,9 +34,8 @@ static DEFINE_SPINLOCK(tlbstate_lock);
  */
 void leave_mm(int cpu)
 {
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-               BUG();
-       cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+       BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
+       cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -104,8 +103,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
                 * BUG();
                 */
 
-       if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-               if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+       if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
+               if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
@@ -238,7 +237,7 @@ static void do_flush_tlb_all(void *info)
        unsigned long cpu = smp_processor_id();
 
        __flush_tlb_all();
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(cpu);
 }
 
index 04431f34fd16f24946bb66b4eedf69ed7e51cf6f..6a00e5faaa74375d5b709488a0b9c371c3fe5c79 100644 (file)
@@ -566,14 +566,10 @@ static int __init uv_ptc_init(void)
        if (!is_uv_system())
                return 0;
 
-       if (!proc_mkdir("sgi_uv", NULL))
-               return -EINVAL;
-
        proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
        if (!proc_uv_ptc) {
                printk(KERN_ERR "unable to create %s proc entry\n",
                       UV_PTC_BASENAME);
-               remove_proc_entry("sgi_uv", NULL);
                return -EINVAL;
        }
        proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
index 1106fac6024d412fae02df4780464ecd2f9abf4b..808031a5ba19a62bee01b404a812d57409b60b7d 100644 (file)
@@ -1,10 +1,26 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
+#include <asm/e820.h>
 
 /* ready for x86_64 and x86 */
 unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
 
+void __init reserve_trampoline_memory(void)
+{
+#ifdef CONFIG_X86_32
+       /*
+        * But first pinch a few for the stack/trampoline stuff
+        * FIXME: Don't need the extra page at 4K, but need to fix
+        * trampoline before removing it. (see the GDT stuff)
+        */
+       reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
+#endif
+       /* Has to be in very low memory so we can execute real-mode AP code. */
+       reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE,
+                       "TRAMPOLINE");
+}
+
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
@@ -12,7 +28,6 @@ unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
  */
 unsigned long setup_trampoline(void)
 {
-       memcpy(trampoline_base, trampoline_data,
-              trampoline_end - trampoline_data);
+       memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
        return virt_to_phys(trampoline_base);
 }
index 04d242ab0161967985bcb7f9e24f9fe54fb9f33f..c320c29255c216b071755c5988a08baf3f127827 100644 (file)
@@ -664,7 +664,7 @@ void math_error(void __user *ip)
 {
        struct task_struct *task;
        siginfo_t info;
-       unsigned short cwd, swd;
+       unsigned short cwd, swd, err;
 
        /*
         * Save the info for the exception handler and clear the error.
@@ -675,7 +675,6 @@ void math_error(void __user *ip)
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
-       info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -689,34 +688,31 @@ void math_error(void __user *ip)
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
-       switch (swd & ~cwd & 0x3f) {
-       case 0x000: /* No unmasked exception */
-#ifdef CONFIG_X86_32
+
+       err = swd & ~cwd & 0x3f;
+
+#if CONFIG_X86_32
+       if (!err)
                return;
 #endif
-       default: /* Multiple exceptions */
-               break;
-       case 0x001: /* Invalid Op */
+
+       if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
-               break;
-       case 0x002: /* Denormalize */
-       case 0x010: /* Underflow */
-               info.si_code = FPE_FLTUND;
-               break;
-       case 0x004: /* Zero Divide */
+       } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
-               break;
-       case 0x008: /* Overflow */
+       } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
-               break;
-       case 0x020: /* Precision */
+       } else if (err & 0x012) { /* Denormal, Underflow */
+               info.si_code = FPE_FLTUND;
+       } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
-               break;
+       } else {
+               info.si_code = __SI_FAULT|SI_KERNEL; /* WTF? */
        }
        force_sig_info(SIGFPE, &info, task);
 }
index 424093b157d363dcad0bb58b8ed08636a9e0b61e..599e58168631e22e5ff69e15c365273bd2f9b03c 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/vgtod.h>
 #include <asm/time.h>
 #include <asm/delay.h>
+#include <asm/hypervisor.h>
 
 unsigned int cpu_khz;           /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -31,6 +32,7 @@ static int tsc_unstable;
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int tsc_disabled = -1;
 
+static int tsc_clocksource_reliable;
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -98,6 +100,15 @@ int __init notsc_setup(char *str)
 
 __setup("notsc", notsc_setup);
 
+static int __init tsc_setup(char *str)
+{
+       if (!strcmp(str, "reliable"))
+               tsc_clocksource_reliable = 1;
+       return 1;
+}
+
+__setup("tsc=", tsc_setup);
+
 #define MAX_RETRIES     5
 #define SMI_TRESHOLD    50000
 
@@ -352,9 +363,15 @@ unsigned long native_calibrate_tsc(void)
 {
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
-       unsigned long flags, latch, ms, fast_calibrate;
+       unsigned long flags, latch, ms, fast_calibrate, tsc_khz;
        int hpet = is_hpet_enabled(), i, loopmin;
 
+       tsc_khz = get_hypervisor_tsc_freq();
+       if (tsc_khz) {
+               printk(KERN_INFO "TSC: Frequency read from the hypervisor\n");
+               return tsc_khz;
+       }
+
        local_irq_save(flags);
        fast_calibrate = quick_pit_calibrate();
        local_irq_restore(flags);
@@ -731,24 +748,21 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {}
 };
 
-/*
- * Geode_LX - the OLPC CPU has a possibly a very reliable TSC
- */
+static void __init check_system_tsc_reliable(void)
+{
 #ifdef CONFIG_MGEODE_LX
-/* RTSC counts during suspend */
+       /* RTSC counts during suspend */
 #define RTSC_SUSP 0x100
-
-static void __init check_geode_tsc_reliable(void)
-{
        unsigned long res_low, res_high;
 
        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+       /* Geode_LX - the OLPC CPU has a possibly a very reliable TSC */
        if (res_low & RTSC_SUSP)
-               clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-}
-#else
-static inline void check_geode_tsc_reliable(void) { }
+               tsc_clocksource_reliable = 1;
 #endif
+       if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+               tsc_clocksource_reliable = 1;
+}
 
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
@@ -783,6 +797,8 @@ static void __init init_tsc_clocksource(void)
 {
        clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
                        clocksource_tsc.shift);
+       if (tsc_clocksource_reliable)
+               clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
        /* lower the rating if we already know its unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
@@ -843,7 +859,7 @@ void __init tsc_init(void)
        if (unsynchronized_tsc())
                mark_tsc_unstable("TSCs unsynchronized");
 
-       check_geode_tsc_reliable();
+       check_system_tsc_reliable();
        init_tsc_clocksource();
 }
 
index 1c0dfbca87c18a05beb42cebaa8e9ffed6d508a1..bf36328f6ef9289c4b1a1a7d0ffd9c67878f3f32 100644 (file)
@@ -112,6 +112,12 @@ void __cpuinit check_tsc_sync_source(int cpu)
        if (unsynchronized_tsc())
                return;
 
+       if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+               printk(KERN_INFO
+                      "Skipping synchronization checks as TSC is reliable.\n");
+               return;
+       }
+
        printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
                          smp_processor_id(), cpu);
 
@@ -165,7 +171,7 @@ void __cpuinit check_tsc_sync_target(void)
 {
        int cpus = 2;
 
-       if (unsynchronized_tsc())
+       if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                return;
 
        /*
index 8b6c393ab9fd14f06b86d9deb656b3643d9006f3..23206ba16874ce5d6ace3d046ee35e95e721427c 100644 (file)
@@ -266,109 +266,6 @@ static void vmi_nop(void)
 {
 }
 
-#ifdef CONFIG_DEBUG_PAGE_TYPE
-
-#ifdef CONFIG_X86_PAE
-#define MAX_BOOT_PTS (2048+4+1)
-#else
-#define MAX_BOOT_PTS (1024+1)
-#endif
-
-/*
- * During boot, mem_map is not yet available in paging_init, so stash
- * all the boot page allocations here.
- */
-static struct {
-       u32 pfn;
-       int type;
-} boot_page_allocations[MAX_BOOT_PTS];
-static int num_boot_page_allocations;
-static int boot_allocations_applied;
-
-void vmi_apply_boot_page_allocations(void)
-{
-       int i;
-       BUG_ON(!mem_map);
-       for (i = 0; i < num_boot_page_allocations; i++) {
-               struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
-               page->type = boot_page_allocations[i].type;
-               page->type = boot_page_allocations[i].type &
-                               ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
-       }
-       boot_allocations_applied = 1;
-}
-
-static void record_page_type(u32 pfn, int type)
-{
-       BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
-       boot_page_allocations[num_boot_page_allocations].pfn = pfn;
-       boot_page_allocations[num_boot_page_allocations].type = type;
-       num_boot_page_allocations++;
-}
-
-static void check_zeroed_page(u32 pfn, int type, struct page *page)
-{
-       u32 *ptr;
-       int i;
-       int limit = PAGE_SIZE / sizeof(int);
-
-       if (page_address(page))
-               ptr = (u32 *)page_address(page);
-       else
-               ptr = (u32 *)__va(pfn << PAGE_SHIFT);
-       /*
-        * When cloning the root in non-PAE mode, only the userspace
-        * pdes need to be zeroed.
-        */
-       if (type & VMI_PAGE_CLONE)
-               limit = KERNEL_PGD_BOUNDARY;
-       for (i = 0; i < limit; i++)
-               BUG_ON(ptr[i]);
-}
-
-/*
- * We stash the page type into struct page so we can verify the page
- * types are used properly.
- */
-static void vmi_set_page_type(u32 pfn, int type)
-{
-       /* PAE can have multiple roots per page - don't track */
-       if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
-               return;
-
-       if (boot_allocations_applied) {
-               struct page *page = pfn_to_page(pfn);
-               if (type != VMI_PAGE_NORMAL)
-                       BUG_ON(page->type);
-               else
-                       BUG_ON(page->type == VMI_PAGE_NORMAL);
-               page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
-               if (type & VMI_PAGE_ZEROED)
-                       check_zeroed_page(pfn, type, page);
-       } else {
-               record_page_type(pfn, type);
-       }
-}
-
-static void vmi_check_page_type(u32 pfn, int type)
-{
-       /* PAE can have multiple roots per page - skip checks */
-       if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
-               return;
-
-       type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
-       if (boot_allocations_applied) {
-               struct page *page = pfn_to_page(pfn);
-               BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
-               BUG_ON(type == VMI_PAGE_NORMAL && page->type);
-               BUG_ON((type & page->type) == 0);
-       }
-}
-#else
-#define vmi_set_page_type(p,t) do { } while (0)
-#define vmi_check_page_type(p,t) do { } while (0)
-#endif
-
 #ifdef CONFIG_HIGHPTE
 static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 {
@@ -395,7 +292,6 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 
 static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
 {
-       vmi_set_page_type(pfn, VMI_PAGE_L1);
        vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
 }
 
@@ -406,27 +302,22 @@ static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
         * It is called only for swapper_pg_dir, which already has
         * data on it.
         */
-       vmi_set_page_type(pfn, VMI_PAGE_L2);
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
 }
 
 static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
 {
-       vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
-       vmi_check_page_type(clonepfn, VMI_PAGE_L2);
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
 }
 
 static void vmi_release_pte(unsigned long pfn)
 {
        vmi_ops.release_page(pfn, VMI_PAGE_L1);
-       vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
 }
 
 static void vmi_release_pmd(unsigned long pfn)
 {
        vmi_ops.release_page(pfn, VMI_PAGE_L2);
-       vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
 }
 
 /*
@@ -450,26 +341,22 @@ static void vmi_release_pmd(unsigned long pfn)
 
 static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-       vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
        vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
 }
 
 static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-       vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
        vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
 }
 
 static void vmi_set_pte(pte_t *ptep, pte_t pte)
 {
        /* XXX because of set_pmd_pte, this can be called on PT or PD layers */
-       vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
        vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
 }
 
 static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
 {
-       vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
 }
 
@@ -477,10 +364,8 @@ static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
 #ifdef CONFIG_X86_PAE
        const pte_t pte = { .pte = pmdval.pmd };
-       vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
 #else
        const pte_t pte = { pmdval.pud.pgd.pgd };
-       vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
 #endif
        vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
 }
@@ -502,7 +387,6 @@ static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
 
 static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
 {
-       vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
 }
 
@@ -510,21 +394,18 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
 {
        /* Um, eww */
        const pte_t pte = { .pte = pudval.pgd.pgd };
-       vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
        vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
 }
 
 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        const pte_t pte = { .pte = 0 };
-       vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
 }
 
 static void vmi_pmd_clear(pmd_t *pmd)
 {
        const pte_t pte = { .pte = 0 };
-       vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
        vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
 }
 #endif
@@ -960,8 +841,6 @@ static inline int __init activate_vmi(void)
 
 void __init vmi_init(void)
 {
-       unsigned long flags;
-
        if (!vmi_rom)
                probe_vmi_rom();
        else
@@ -973,13 +852,21 @@ void __init vmi_init(void)
 
        reserve_top_address(-vmi_rom->virtual_top);
 
-       local_irq_save(flags);
-       activate_vmi();
-
 #ifdef CONFIG_X86_IO_APIC
        /* This is virtual hardware; timer routing is wired correctly */
        no_timer_check = 1;
 #endif
+}
+
+void vmi_activate(void)
+{
+       unsigned long flags;
+
+       if (!vmi_rom)
+               return;
+
+       local_irq_save(flags);
+       activate_vmi();
        local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
index 0b8b6690a86d184959703177b9cb6011bfd605ca..ebf2f12900f5d999aa8f6a57322e34743dae2bfb 100644 (file)
@@ -128,7 +128,16 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
                        gettimeofday(tv,NULL);
                        return;
                }
+
+               /*
+                * Surround the RDTSC by barriers, to make sure it's not
+                * speculated to outside the seqlock critical section and
+                * does not cause time warps:
+                */
+               rdtsc_barrier();
                now = vread();
+               rdtsc_barrier();
+
                base = __vsyscall_gtod_data.clock.cycle_last;
                mask = __vsyscall_gtod_data.clock.mask;
                mult = __vsyscall_gtod_data.clock.mult;
index 3c3b471ea496225e5a53029b92cfd5eeee658e43..3624a364b7f37ad8f7105956c15f4e1385dd66e8 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/bigsmp/apic.h>
 #include <asm/bigsmp/ipi.h>
 #include <asm/mach-default/mach_mpparse.h>
+#include <asm/mach-default/mach_wakecpu.h>
 
 static int dmi_bigsmp; /* can be set by dmi scanners */
 
index 9e835a11a13a7cba9c16f2de4e5db0e54d9ecbc5..e63a4a76d8cdd2a966aec1299d941b5e5fb7196b 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/mach-default/mach_apic.h>
 #include <asm/mach-default/mach_ipi.h>
 #include <asm/mach-default/mach_mpparse.h>
+#include <asm/mach-default/mach_wakecpu.h>
 
 /* should be called last. */
 static int probe_default(void)
index 28459cab3ddb5fdae0d560e5f766b141412e00bb..7b4e6d0d169001480eebb37c8b4bc3a32d1c6333 100644 (file)
 #include <asm/es7000/apic.h>
 #include <asm/es7000/ipi.h>
 #include <asm/es7000/mpparse.h>
-#include <asm/es7000/wakecpu.h>
+#include <asm/mach-default/mach_wakecpu.h>
+
+void __init es7000_update_genapic_to_cluster(void)
+{
+       genapic->target_cpus = target_cpus_cluster;
+       genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
+       genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
+       genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
+
+       genapic->init_apic_ldr = init_apic_ldr_cluster;
+
+       genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
+}
 
 static int probe_es7000(void)
 {
index 5a7e4619e1c47589fc6745d5843c7ef743170318..c346d9d0226f313129917b091e4a4e94bcde6105 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/genapic.h>
+#include <asm/setup.h>
 
 extern struct genapic apic_numaq;
 extern struct genapic apic_summit;
@@ -57,6 +58,9 @@ static int __init parse_apic(char *arg)
                }
        }
 
+       if (x86_quirks->update_genapic)
+               x86_quirks->update_genapic();
+
        /* Parsed again by __setup for debug/verbose */
        return 0;
 }
@@ -72,12 +76,15 @@ void __init generic_bigsmp_probe(void)
         * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
         */
 
-       if (!cmdline_apic && genapic == &apic_default)
+       if (!cmdline_apic && genapic == &apic_default) {
                if (apic_bigsmp.probe()) {
                        genapic = &apic_bigsmp;
+                       if (x86_quirks->update_genapic)
+                               x86_quirks->update_genapic();
                        printk(KERN_INFO "Overriding APIC driver with %s\n",
                               genapic->name);
                }
+       }
 #endif
 }
 
@@ -94,6 +101,9 @@ void __init generic_apic_probe(void)
                /* Not visible without early console */
                if (!apic_probe[i])
                        panic("Didn't find an APIC driver");
+
+               if (x86_quirks->update_genapic)
+                       x86_quirks->update_genapic();
        }
        printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
 }
@@ -108,6 +118,8 @@ int __init mps_oem_check(struct mp_config_table *mpc, char *oem,
                if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
                        if (!cmdline_apic) {
                                genapic = apic_probe[i];
+                               if (x86_quirks->update_genapic)
+                                       x86_quirks->update_genapic();
                                printk(KERN_INFO "Switched to APIC driver `%s'.\n",
                                       genapic->name);
                        }
@@ -124,6 +136,8 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
                        if (!cmdline_apic) {
                                genapic = apic_probe[i];
+                               if (x86_quirks->update_genapic)
+                                       x86_quirks->update_genapic();
                                printk(KERN_INFO "Switched to APIC driver `%s'.\n",
                                       genapic->name);
                        }
index 6272b5e69da62b28f660105d9a8788e4944432b3..2c6d234e0009ccd5b2ca50db816f599a5cd133b3 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/summit/apic.h>
 #include <asm/summit/ipi.h>
 #include <asm/summit/mpparse.h>
+#include <asm/mach-default/mach_wakecpu.h>
 
 static int probe_summit(void)
 {
index 31e8730fa2463214f36c2f6b3df9d0f75f6be346..20ef272c412cc9ca113b0caaab17121626c1486f 100644 (file)
@@ -413,6 +413,7 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
                                 unsigned long error_code)
 {
        unsigned long flags = oops_begin();
+       int sig = SIGKILL;
        struct task_struct *tsk;
 
        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
@@ -423,8 +424,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
        if (__die("Bad pagetable", regs, error_code))
-               regs = NULL;
-       oops_end(flags, regs, SIGKILL);
+               sig = 0;
+       oops_end(flags, regs, sig);
 }
 #endif
 
@@ -590,6 +591,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
        int fault;
 #ifdef CONFIG_X86_64
        unsigned long flags;
+       int sig;
 #endif
 
        tsk = current;
@@ -849,11 +851,12 @@ no_context:
        bust_spinlocks(0);
        do_exit(SIGKILL);
 #else
+       sig = SIGKILL;
        if (__die("Oops", regs, error_code))
-               regs = NULL;
+               sig = 0;
        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_EMERG "CR2: %016lx\n", address);
-       oops_end(flags, regs, SIGKILL);
+       oops_end(flags, regs, sig);
 #endif
 
 /*
index c483f424207938cc16ff3a2eb6b92eee5175e6b5..800e1d94c1b5580e627ddb0a2ef81cfd10d031cd 100644 (file)
@@ -67,7 +67,7 @@ static unsigned long __meminitdata table_top;
 
 static int __initdata after_init_bootmem;
 
-static __init void *alloc_low_page(unsigned long *phys)
+static __init void *alloc_low_page(void)
 {
        unsigned long pfn = table_end++;
        void *adr;
@@ -77,7 +77,6 @@ static __init void *alloc_low_page(unsigned long *phys)
 
        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
-       *phys  = pfn * PAGE_SIZE;
        return adr;
 }
 
@@ -92,16 +91,17 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
        pmd_t *pmd_table;
 
 #ifdef CONFIG_X86_PAE
-       unsigned long phys;
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
-                       pmd_table = (pmd_t *)alloc_low_page(&phys);
+                       pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
+
+               return pmd_table;
        }
 #endif
        pud = pud_offset(pgd, 0);
@@ -126,10 +126,8 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-               } else {
-                       unsigned long phys;
-                       page_table = (pte_t *)alloc_low_page(&phys);
-               }
+               } else
+                       page_table = (pte_t *)alloc_low_page();
 
                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
@@ -969,8 +967,6 @@ void __init mem_init(void)
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
-       start_periodic_check_for_corruption();
-
 #ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
 #endif
@@ -1040,11 +1036,25 @@ void __init mem_init(void)
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
+       /*
+        * Check boundaries twice: Some fundamental inconsistencies can
+        * be detected at build time already.
+        */
+#define __FIXADDR_TOP (-PAGE_SIZE)
+#ifdef CONFIG_HIGHMEM
+       BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE  > FIXADDR_START);
+       BUILD_BUG_ON(VMALLOC_END                        > PKMAP_BASE);
+#endif
+#define high_memory (-128UL << 20)
+       BUILD_BUG_ON(VMALLOC_START                      >= VMALLOC_END);
+#undef high_memory
+#undef __FIXADDR_TOP
+
 #ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
 #endif
-       BUG_ON(VMALLOC_START                            > VMALLOC_END);
+       BUG_ON(VMALLOC_START                            >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);
 
        if (boot_cpu_data.wp_works_ok < 0)
index 9db01db6e3cdf4cb0e11444d49e4d598be280b55..9f7a0d24d42a93b47e36d3ce40c70876f5d09e16 100644 (file)
@@ -902,8 +902,6 @@ void __init mem_init(void)
        long codesize, reservedpages, datasize, initsize;
        unsigned long absent_pages;
 
-       start_periodic_check_for_corruption();
-
        pci_iommu_alloc();
 
        /* clear_bss() already clear the empty_zero_page */
index d4c4307ff3e049f1454c3629fb2989f63395ae68..bd85d42819e1eadad218a266cdade7e00bf7659a 100644 (file)
@@ -223,7 +223,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
-       WARN_ON(iomem_map_sanity_check(phys_addr, size));
+       WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
+                 KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
 
        /*
         * Don't allow anybody to remap normal RAM that we're using..
index eb1bf000d12e9b1aa1769284e263d43593095183..541bcc944a5b8c70450cb53a8a1a2b2a3ae31ddf 100644 (file)
@@ -596,6 +596,242 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
        free_memtype(addr, addr + size);
 }
 
+/*
+ * Internal interface to reserve a range of physical memory with prot.
+ * Reserves non-RAM regions only; after a successful reserve_memtype,
+ * this func also keeps identity mapping (if any) in sync with this new prot.
+ */
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+{
+       int is_ram = 0;
+       int id_sz, ret;
+       unsigned long flags;
+       /* requested cache attribute, taken from the vma's page protection */
+       unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+
+       is_ram = pagerange_is_ram(paddr, paddr + size);
+
+       if (is_ram != 0) {
+               /*
+                * For mapping RAM pages, drivers need to call
+                * set_memory_[uc|wc|wb] directly, for reserve and free, before
+                * setting up the PTE.
+                */
+               WARN_ON_ONCE(1);
+               return 0;
+       }
+
+       /* reserve_memtype may hand back a different (compatible) attribute */
+       ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+       if (ret)
+               return ret;
+
+       if (flags != want_flags) {
+               /* conflicting attribute already registered: undo and fail */
+               free_memtype(paddr, paddr + size);
+               printk(KERN_ERR
+               "%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
+                       current->comm, current->pid,
+                       cattr_name(want_flags),
+                       (unsigned long long)paddr,
+                       (unsigned long long)(paddr + size),
+                       cattr_name(flags));
+               return -EINVAL;
+       }
+
+       /* Need to keep identity mapping in sync */
+       if (paddr >= __pa(high_memory))
+               return 0;
+
+       /* clamp the identity-mapped portion to what lies below high_memory */
+       id_sz = (__pa(high_memory) < paddr + size) ?
+                               __pa(high_memory) - paddr :
+                               size;
+
+       if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+               /* identity map could not be updated: drop the reservation */
+               free_memtype(paddr, paddr + size);
+               printk(KERN_ERR
+                       "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
+                       "for %Lx-%Lx\n",
+                       current->comm, current->pid,
+                       cattr_name(flags),
+                       (unsigned long long)paddr,
+                       (unsigned long long)(paddr + size));
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * Internal interface to free a range of physical memory.
+ * Frees non RAM regions only.
+ */
+static void free_pfn_range(u64 paddr, unsigned long size)
+{
+       /* Only non-RAM ranges are tracked via memtype; RAM is left alone. */
+       if (!pagerange_is_ram(paddr, paddr + size))
+               free_memtype(paddr, paddr + size);
+}
+
+/*
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ *
+ * If the vma has a linear pfn mapping for the entire range, we get the prot
+ * from pte and reserve the entire vma range with single reserve_pfn_range call.
+ * Otherwise, we reserve the entire vma range, by walking through the PTEs page
+ * by page to get physical address and protection.
+ */
+int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+       int retval = 0;
+       unsigned long i, j;
+       u64 paddr;
+       unsigned long prot;
+       unsigned long vma_start = vma->vm_start;
+       unsigned long vma_end = vma->vm_end;
+       unsigned long vma_size = vma_end - vma_start;
+
+       /* pfn-range tracking is only needed when PAT is in use */
+       if (!pat_enabled)
+               return 0;
+
+       if (is_linear_pfn_mapping(vma)) {
+               /*
+                * reserve the whole chunk covered by vma. We need the
+                * starting address and protection from pte.
+                */
+               if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+                       WARN_ON_ONCE(1);
+                       return -EINVAL;
+               }
+               return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+       }
+
+       /* reserve entire vma page by page, using pfn and prot from pte */
+       for (i = 0; i < vma_size; i += PAGE_SIZE) {
+               /* holes (no pte present) are simply skipped */
+               if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+                       continue;
+
+               retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+               if (retval)
+                       goto cleanup_ret;
+       }
+       return 0;
+
+cleanup_ret:
+       /* Reserve error: Cleanup partial reservation and return error */
+       for (j = 0; j < i; j += PAGE_SIZE) {
+               if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
+                       continue;
+
+               free_pfn_range(paddr, PAGE_SIZE);
+       }
+
+       return retval;
+}
+
+/*
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ *
+ * prot is passed in as a parameter for the new mapping. If the vma has a
+ * linear pfn mapping for the entire range reserve the entire vma range with
+ * single reserve_pfn_range call.
+ * Otherwise, we look at the pfn and size and reserve only the specified range
+ * page by page.
+ *
+ * Note that this function can be called with caller trying to map only a
+ * subrange/page inside the vma.
+ */
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+                       unsigned long pfn, unsigned long size)
+{
+       int retval = 0;
+       unsigned long i, j;
+       u64 base_paddr;
+       u64 paddr;
+       unsigned long vma_start = vma->vm_start;
+       unsigned long vma_end = vma->vm_end;
+       unsigned long vma_size = vma_end - vma_start;
+
+       /* pfn-range tracking is only needed when PAT is in use */
+       if (!pat_enabled)
+               return 0;
+
+       if (is_linear_pfn_mapping(vma)) {
+               /* reserve the whole chunk starting from vm_pgoff */
+               paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+               return reserve_pfn_range(paddr, vma_size, prot);
+       }
+
+       /* reserve page by page, using pfn and size */
+       base_paddr = (u64)pfn << PAGE_SHIFT;
+       for (i = 0; i < size; i += PAGE_SIZE) {
+               paddr = base_paddr + i;
+               retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+               if (retval)
+                       goto cleanup_ret;
+       }
+       return 0;
+
+cleanup_ret:
+       /* Reserve error: Cleanup partial reservation and return error */
+       for (j = 0; j < i; j += PAGE_SIZE) {
+               paddr = base_paddr + j;
+               free_pfn_range(paddr, PAGE_SIZE);
+       }
+
+       return retval;
+}
+
+/*
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+                       unsigned long size)
+{
+       unsigned long i;
+       u64 paddr;
+       unsigned long prot;
+       unsigned long vma_start = vma->vm_start;
+       unsigned long vma_end = vma->vm_end;
+       unsigned long vma_size = vma_end - vma_start;
+
+       /* pfn-range tracking is only needed when PAT is in use */
+       if (!pat_enabled)
+               return;
+
+       if (is_linear_pfn_mapping(vma)) {
+               /* free the whole chunk starting from vm_pgoff */
+               paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+               free_pfn_range(paddr, vma_size);
+               return;
+       }
+
+       if (size != 0 && size != vma_size) {
+               /* free page by page, using pfn and size */
+               paddr = (u64)pfn << PAGE_SHIFT;
+               for (i = 0; i < size; i += PAGE_SIZE) {
+                       /*
+                        * Compute each page address from the fixed base.
+                        * The previous "paddr = paddr + i" accumulated the
+                        * offset into paddr itself, so every iteration after
+                        * the first freed the wrong physical range.
+                        */
+                       free_pfn_range(paddr + i, PAGE_SIZE);
+               }
+       } else {
+               /* free entire vma, page by page, using the pfn from pte */
+               for (i = 0; i < vma_size; i += PAGE_SIZE) {
+                       /* holes (no pte present) are simply skipped */
+                       if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+                               continue;
+
+                       free_pfn_range(paddr, PAGE_SIZE);
+               }
+       }
+}
+
+pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+       /* Without PAT there is no WC type; fall back to uncached. */
+       if (!pat_enabled)
+               return pgprot_noncached(prot);
+
+       return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+}
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
index b67732bbb85a3a562063c36e579ee769ccdf86c1..bb1a01f089e29c817432907a820ca7a06f727347 100644 (file)
@@ -23,6 +23,12 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
 unsigned int pci_early_dump_regs;
 static int pci_bf_sort;
 int pci_routeirq;
+int noioapicquirk;
+#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
+int noioapicreroute = 0;
+#else
+int noioapicreroute = 1;
+#endif
 int pcibios_last_bus = -1;
 unsigned long pirq_table_addr;
 struct pci_bus *pci_root_bus;
@@ -519,6 +525,17 @@ char * __devinit  pcibios_setup(char *str)
        } else if (!strcmp(str, "skip_isa_align")) {
                pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
                return NULL;
+       } else if (!strcmp(str, "noioapicquirk")) {
+               noioapicquirk = 1;
+               return NULL;
+       } else if (!strcmp(str, "ioapicreroute")) {
+               if (noioapicreroute != -1)
+                       noioapicreroute = 0;
+               return NULL;
+       } else if (!strcmp(str, "noioapicreroute")) {
+               if (noioapicreroute != -1)
+                       noioapicreroute = 1;
+               return NULL;
        }
        return str;
 }
index 9915293500fb69ebdae0b8d80b69155f180f1274..9a5af6c8fbe9fc445fdca3358b90666c9ecae7bb 100644 (file)
@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
 
 #undef PCI_CONF2_ADDRESS
 
-static struct pci_raw_ops pci_direct_conf2 = {
+struct pci_raw_ops pci_direct_conf2 = {
        .read =         pci_conf2_read,
        .write =        pci_conf2_write,
 };
@@ -289,6 +289,7 @@ int __init pci_direct_probe(void)
 
        if (pci_check_type1()) {
                raw_pci_ops = &pci_direct_conf1;
+               port_cf9_safe = true;
                return 1;
        }
        release_resource(region);
@@ -305,6 +306,7 @@ int __init pci_direct_probe(void)
 
        if (pci_check_type2()) {
                raw_pci_ops = &pci_direct_conf2;
+               port_cf9_safe = true;
                return 2;
        }
 
index 15b9cf6be729c0c7cddee3ff54d1ce9809f32d58..1959018aac025ea3a38048973e6cc7a107396684 100644 (file)
@@ -96,6 +96,7 @@ extern struct pci_raw_ops *raw_pci_ops;
 extern struct pci_raw_ops *raw_pci_ext_ops;
 
 extern struct pci_raw_ops pci_direct_conf1;
+extern bool port_cf9_safe;
 
 /* arch_initcall level */
 extern int pci_direct_probe(void);
index 5e4686d70f629de22f81adc96b658ab06216be24..bea215230b20cdf4fc41ac5fd4e060dcbaec4ec1 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/console.h>
 
 #include <xen/interface/xen.h>
+#include <xen/interface/version.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/features.h>
@@ -793,7 +794,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 
        ret = 0;
 
-       switch(msr) {
+       switch (msr) {
 #ifdef CONFIG_X86_64
                unsigned which;
                u64 base;
@@ -1453,7 +1454,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 
        ident_pte = 0;
        pfn = 0;
-       for(pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
                pte_t *pte_page;
 
                /* Reuse or allocate a page of ptes */
@@ -1471,7 +1472,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                }
 
                /* Install mappings */
-               for(pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                        pte_t pte;
 
                        if (pfn > max_pfn_mapped)
@@ -1485,7 +1486,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                }
        }
 
-       for(pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
                set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
 
        set_page_prot(pmd, PAGE_KERNEL_RO);
@@ -1499,7 +1500,7 @@ static void convert_pfn_mfn(void *v)
 
        /* All levels are converted the same way, so just treat them
           as ptes. */
-       for(i = 0; i < PTRS_PER_PTE; i++)
+       for (i = 0; i < PTRS_PER_PTE; i++)
                pte[i] = xen_make_pte(pte[i].pte);
 }
 
@@ -1514,7 +1515,8 @@ static void convert_pfn_mfn(void *v)
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                               unsigned long max_pfn)
 {
        pud_t *l3;
        pmd_t *l2;
@@ -1577,7 +1579,8 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pf
 #else  /* !CONFIG_X86_64 */
 static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
 
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                               unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
index 636ef4caa52d3dee2a24fc3b1913fe23b8ab34db..773d68d3e9128eba414a31ae3fa3dbc989033d72 100644 (file)
@@ -154,13 +154,13 @@ void xen_setup_mfn_list_list(void)
 {
        unsigned pfn, idx;
 
-       for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
+       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
 
                p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
        }
 
-       for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
+       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
                unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
                p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
        }
@@ -179,7 +179,7 @@ void __init xen_build_dynamic_phys_to_machine(void)
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned pfn;
 
-       for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+       for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
 
                p2m_top[topidx] = &mfn_list[pfn];
@@ -207,7 +207,7 @@ static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
        p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
        BUG_ON(p == NULL);
 
-       for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
+       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
                p[i] = INVALID_P2M_ENTRY;
 
        if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
@@ -407,7 +407,8 @@ out:
                preempt_enable();
 }
 
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+                                unsigned long addr, pte_t *ptep)
 {
        /* Just return the pte as-is.  We preserve the bits on commit */
        return *ptep;
@@ -878,7 +879,8 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 
                if (user_pgd) {
                        xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
-                       xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
+                       xen_do_pin(MMUEXT_PIN_L4_TABLE,
+                                  PFN_DOWN(__pa(user_pgd)));
                }
        }
 #else /* CONFIG_X86_32 */
@@ -993,7 +995,8 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
                pgd_t *user_pgd = xen_get_user_pgd(pgd);
 
                if (user_pgd) {
-                       xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
+                       xen_do_pin(MMUEXT_UNPIN_TABLE,
+                                  PFN_DOWN(__pa(user_pgd)));
                        xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
                }
        }
index 8ea8a0d0b0deeff69a438a5ad4fc0552d250d68f..c738644b543502d6577ee07b42c25f6e52493131 100644 (file)
@@ -154,7 +154,7 @@ void xen_mc_flush(void)
                               ret, smp_processor_id());
                        dump_stack();
                        for (i = 0; i < b->mcidx; i++) {
-                               printk("  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
+                               printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
                                       i+1, b->mcidx,
                                       b->debug[i].op,
                                       b->debug[i].args[0],
index d679010838883195da63cfa7a7bbef7cfa71fe03..15c6c68db6a25f4941d25e8fc35857a3d518dc5d 100644 (file)
@@ -28,6 +28,9 @@
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
+extern void xen_sysenter_target(void);
+extern void xen_syscall_target(void);
+extern void xen_syscall32_target(void);
 
 
 /**
@@ -110,7 +113,6 @@ static __cpuinit int register_callback(unsigned type, const void *func)
 
 void __cpuinit xen_enable_sysenter(void)
 {
-       extern void xen_sysenter_target(void);
        int ret;
        unsigned sysenter_feature;
 
@@ -132,8 +134,6 @@ void __cpuinit xen_enable_syscall(void)
 {
 #ifdef CONFIG_X86_64
        int ret;
-       extern void xen_syscall_target(void);
-       extern void xen_syscall32_target(void);
 
        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
@@ -160,7 +160,8 @@ void __init xen_arch_setup(void)
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
 
        if (!xen_feature(XENFEAT_auto_translated_physmap))
-               HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
+               HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                                    VMASST_TYPE_pae_extended_cr3);
 
        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
index c029d3eb9ef0a76ed248deb4a40e47511ba12afe..595b78672b36ad90e27cf50ed28126bb43970fb5 100644 (file)
@@ -53,10 +53,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
        int xor_src_cnt;
        dma_addr_t dma_dest;
 
-       dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
-       for (i = 0; i < src_cnt; i++)
+       /* map the dest bidirectional in case it is re-used as a source */
+       dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+       for (i = 0; i < src_cnt; i++) {
+               /* only map the dest once */
+               if (unlikely(src_list[i] == dest)) {
+                       dma_src[i] = dma_dest;
+                       continue;
+               }
                dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
                                          len, DMA_TO_DEVICE);
+       }
 
        while (src_cnt) {
                async_flags = flags;
index 11acaee14d66ddb1752f9432107f46bf0fc1b146..bf79d83bdfbbbade9a942a297e90b0223eef5946 100644 (file)
@@ -384,6 +384,27 @@ acpi_pci_free_irq(struct acpi_prt_entry *entry,
        return irq;
 }
 
+#ifdef CONFIG_X86_IO_APIC
+extern int noioapicquirk;
+
+static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
+{
+       struct pci_bus *b;
+
+       /* Walk up towards the root bus; the root has no bridge device. */
+       for (b = bus; b; b = b->parent) {
+               if (!b->self)
+                       return 0;
+
+               printk(KERN_INFO "vendor=%04x device=%04x\n",
+                               b->self->vendor, b->self->device);
+
+               /* First bridge with a known reroute variant decides. */
+               if (b->self->irq_reroute_variant)
+                       return b->self->irq_reroute_variant;
+       }
+       return 0;
+}
+#endif /* CONFIG_X86_IO_APIC */
+
 /*
  * acpi_pci_irq_lookup
  * success: return IRQ >= 0
@@ -413,6 +434,41 @@ acpi_pci_irq_lookup(struct pci_bus *bus,
        }
 
        ret = func(entry, triggering, polarity, link);
+
+#ifdef CONFIG_X86_IO_APIC
+       /*
+        * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the
+        * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel
+        * does during interrupt handling). When this INTx generation cannot be
+        * disabled, we reroute these interrupts to their legacy equivalent to
+        * get rid of spurious interrupts.
+        */
+        if (!noioapicquirk) {
+               switch (bridge_has_boot_interrupt_variant(bus)) {
+               case 0:
+                       /* no rerouting necessary */
+                       break;
+
+               case INTEL_IRQ_REROUTE_VARIANT:
+                       /*
+                        * Remap according to INTx routing table in 6700PXH
+                        * specs, intel order number 302628-002, section
+                        * 2.15.2. Other chipsets (80332, ...) have the same
+                        * mapping and are handled here as well.
+                        */
+                       printk(KERN_INFO "pci irq %d -> rerouted to legacy "
+                                        "irq %d\n", ret, (ret % 4) + 16);
+                       ret = (ret % 4) + 16;
+                       break;
+
+               default:
+                       printk(KERN_INFO "not rerouting irq %d to legacy irq: "
+                                        "unknown mapping\n", ret);
+                       break;
+               }
+       }
+#endif /* CONFIG_X86_IO_APIC */
+
        return ret;
 }
 
index 25f531d892deacbb5082b0696f0ba460c48a5494..40e60fc2e596daead5c656396aa84fca0587868b 100644 (file)
@@ -824,32 +824,36 @@ static int __init toshiba_acpi_init(void)
                        toshiba_acpi_exit();
                        return -ENOMEM;
                }
-       }
 
-       /* Register input device for kill switch */
-       toshiba_acpi.poll_dev = input_allocate_polled_device();
-       if (!toshiba_acpi.poll_dev) {
-               printk(MY_ERR "unable to allocate kill-switch input device\n");
-               toshiba_acpi_exit();
-               return -ENOMEM;
-       }
-       toshiba_acpi.poll_dev->private = &toshiba_acpi;
-       toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
-       toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
-
-       toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
-       toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
-       toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
-       set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
-       set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
-       input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
-       input_sync(toshiba_acpi.poll_dev->input);
-
-       ret = input_register_polled_device(toshiba_acpi.poll_dev);
-       if (ret) {
-               printk(MY_ERR "unable to register kill-switch input device\n");
-               toshiba_acpi_exit();
-               return ret;
+               /* Register input device for kill switch */
+               toshiba_acpi.poll_dev = input_allocate_polled_device();
+               if (!toshiba_acpi.poll_dev) {
+                       printk(MY_ERR
+                              "unable to allocate kill-switch input device\n");
+                       toshiba_acpi_exit();
+                       return -ENOMEM;
+               }
+               toshiba_acpi.poll_dev->private = &toshiba_acpi;
+               toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
+               toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
+
+               toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
+               toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
+               /* Toshiba USB ID */
+               toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
+               set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
+               set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
+               input_report_switch(toshiba_acpi.poll_dev->input,
+                                   SW_RFKILL_ALL, TRUE);
+               input_sync(toshiba_acpi.poll_dev->input);
+
+               ret = input_register_polled_device(toshiba_acpi.poll_dev);
+               if (ret) {
+                       printk(MY_ERR
+                              "unable to register kill-switch input device\n");
+                       toshiba_acpi_exit();
+                       return ret;
+               }
        }
 
        return 0;
index 5e2eb740df46cff3ddb41d434b4e41a3c67cbb8b..bc6695e3c8482927e5b6de7ae4017c9884bea4a8 100644 (file)
@@ -4050,17 +4050,70 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3160023AS",        "3.42",         ATA_HORKAGE_NONCQ },
 
        /* Seagate NCQ + FLUSH CACHE firmware bug */
-       { "ST31500341AS",       "9JU138",       ATA_HORKAGE_NONCQ |
+       { "ST31500341AS",       "SD15",         ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
-       { "ST31000333AS",       "9FZ136",       ATA_HORKAGE_NONCQ |
+       { "ST31500341AS",       "SD16",         ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
-       { "ST3640623AS",        "9FZ164",       ATA_HORKAGE_NONCQ |
+       { "ST31500341AS",       "SD17",         ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
-       { "ST3640323AS",        "9FZ134",       ATA_HORKAGE_NONCQ |
+       { "ST31500341AS",       "SD18",         ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
-       { "ST3320813AS",        "9FZ182",       ATA_HORKAGE_NONCQ |
+       { "ST31500341AS",       "SD19",         ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
-       { "ST3320613AS",        "9FZ162",       ATA_HORKAGE_NONCQ |
+
+       { "ST31000333AS",       "SD15",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST31000333AS",       "SD16",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST31000333AS",       "SD17",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST31000333AS",       "SD18",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST31000333AS",       "SD19",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+
+       { "ST3640623AS",        "SD15",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640623AS",        "SD16",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640623AS",        "SD17",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640623AS",        "SD18",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640623AS",        "SD19",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+
+       { "ST3640323AS",        "SD15",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640323AS",        "SD16",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640323AS",        "SD17",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640323AS",        "SD18",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3640323AS",        "SD19",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+
+       { "ST3320813AS",        "SD15",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320813AS",        "SD16",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320813AS",        "SD17",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320813AS",        "SD18",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320813AS",        "SD19",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+
+       { "ST3320613AS",        "SD15",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320613AS",        "SD16",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320613AS",        "SD17",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320613AS",        "SD18",         ATA_HORKAGE_NONCQ |
+                                               ATA_HORKAGE_FIRMWARE_WARN },
+       { "ST3320613AS",        "SD19",         ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
index a098ba8eaab6141ed52ec9e90666cbe0b8081cc3..e0c4f05d7d579a807becb30af2bf44b3a9ea7260 100644 (file)
@@ -183,7 +183,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
                        mask &= ~(0xF8 << ATA_SHIFT_UDMA);
                if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
                        mask &= ~(0xF0 << ATA_SHIFT_UDMA);
-       }
+       } else if (adev->class == ATA_DEV_ATAPI)
+               mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+
        return ata_bmdma_mode_filter(adev, mask);
 }
 
@@ -211,11 +213,15 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
 
 static int hpt36x_cable_detect(struct ata_port *ap)
 {
-       u8 ata66;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+       u8 ata66;
 
+       /*
+        * Each channel of pata_hpt366 occupies separate PCI function
+        * as the primary channel and bit1 indicates the cable type.
+        */
        pci_read_config_byte(pdev, 0x5A, &ata66);
-       if (ata66 & (1 << ap->port_no))
+       if (ata66 & 2)
                return ATA_CBL_PATA40;
        return ATA_CBL_PATA80;
 }
index 9364dc554257e5af44a3a4a2aff7fc3f6598fa27..9f7c543cc04b265344b0e37443c4349147e88574 100644 (file)
@@ -1693,6 +1693,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
        for (i = 0; i <= h->highest_lun; i++) {
                int j;
                drv_found = 0;
+
+               /* skip holes in the array from already deleted drives */
+               if (h->drv[i].raid_level == -1)
+                       continue;
+
                for (j = 0; j < num_luns; j++) {
                        memcpy(&lunid, &ld_buff->LUN[j][0], 4);
                        lunid = le32_to_cpu(lunid);
index aa7f7962a9a0e678b0e3cdfdd9da1058c3798d14..05d897764f027489a346774cedc0ded600f28d2c 100644 (file)
@@ -21,9 +21,6 @@
  *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  *     FOR A PARTICULAR PURPOSE.
  *
- *     Xilinx products are not intended for use in life support appliances,
- *     devices, or systems. Use in such applications is expressly prohibited.
- *
  *     (c) Copyright 2003-2008 Xilinx Inc.
  *     All rights reserved.
  *
index 8b0252bf06e221ea89a0b9b6b3f89f0eda301722..d4f419ee87abbfc6fb0d41fd98ab8665524514cf 100644 (file)
@@ -21,9 +21,6 @@
  *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  *     FOR A PARTICULAR PURPOSE.
  *
- *     Xilinx products are not intended for use in life support appliances,
- *     devices, or systems. Use in such applications is expressly prohibited.
- *
  *     (c) Copyright 2003-2008 Xilinx Inc.
  *     All rights reserved.
  *
index 776b505284783cc0f128763a65925c9dbbd622e1..02225eb19cf6bd2e6d99085b6edea7aa4801c30b 100644 (file)
@@ -21,9 +21,6 @@
  *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  *     FOR A PARTICULAR PURPOSE.
  *
- *     Xilinx products are not intended for use in life support appliances,
- *     devices, or systems. Use in such applications is expressly prohibited.
- *
  *     (c) Copyright 2007-2008 Xilinx Inc.
  *     All rights reserved.
  *
index 62bda453c90b8a6101e91d11f00ec24406ab48b4..4c9dd9a3b62ac4b0266389141a4f66b9e2e34fd6 100644 (file)
@@ -21,9 +21,6 @@
  *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  *     FOR A PARTICULAR PURPOSE.
  *
- *     Xilinx products are not intended for use in life support appliances,
- *     devices, or systems. Use in such applications is expressly prohibited.
- *
  *     (c) Copyright 2007-2008 Xilinx Inc.
  *     All rights reserved.
  *
index d16131949097696cc8a72d2d07ae6a393094ebb7..f40ab699860f4645a693f76a239473812e767493 100644 (file)
@@ -21,9 +21,6 @@
  *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  *     FOR A PARTICULAR PURPOSE.
  *
- *     Xilinx products are not intended for use in life support appliances,
- *     devices, or systems. Use in such applications is expressly prohibited.
- *
  *     (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
  *     (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
  *     (c) Copyright 2007-2008 Xilinx Inc.
index 24d0d9b938fb39e485c56d9e74d1d9ff6effa5e6..8cca11981c5ff73036eaaeb915fbcf0a050a87c5 100644 (file)
@@ -21,9 +21,6 @@
  *     INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  *     FOR A PARTICULAR PURPOSE.
  *
- *     Xilinx products are not intended for use in life support appliances,
- *     devices, or systems. Use in such applications is expressly prohibited.
- *
  *     (c) Copyright 2003-2007 Xilinx Inc.
  *     All rights reserved.
  *
index 5317e08221ecb2a34e699c77bac4f72a505f59e6..657996517374f30f51f10257dd0ef0ee40ae66b9 100644 (file)
@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device)
 
        init_completion(&device->done);
        kref_init(&device->refcount);
+
+       mutex_lock(&dma_list_mutex);
        device->dev_id = id++;
+       mutex_unlock(&dma_list_mutex);
 
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
index ecd743f7cc617b9921b7d08e553da0264d8c401c..6607fdd00b1cd3a31655cc6b53fca580919f3354 100644 (file)
@@ -1341,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
+DECLARE_COMPLETION(test_completion);
 static void ioat_dma_test_callback(void *dma_async_param)
 {
        printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
                dma_async_param);
+       complete(&test_completion);
 }
 
 /**
@@ -1410,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
                goto free_resources;
        }
        device->common.device_issue_pending(dma_chan);
-       msleep(1);
+
+       wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
 
        if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
                                        != DMA_SUCCESS) {
index c7a9306d951d2c61f622c601ef2d49af422b679e..6be3172622009ff67ec502b049f167d6ccebe1cd 100644 (file)
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
                        enum dma_ctrl_flags flags = desc->async_tx.flags;
                        u32 src_cnt;
                        dma_addr_t addr;
+                       dma_addr_t dest;
 
+                       src_cnt = unmap->unmap_src_cnt;
+                       dest = iop_desc_get_dest_addr(unmap, iop_chan);
                        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-                               addr = iop_desc_get_dest_addr(unmap, iop_chan);
-                               dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+                               enum dma_data_direction dir;
+
+                               if (src_cnt > 1) /* is xor? */
+                                       dir = DMA_BIDIRECTIONAL;
+                               else
+                                       dir = DMA_FROM_DEVICE;
+
+                               dma_unmap_page(dev, dest, len, dir);
                        }
 
                        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-                               src_cnt = unmap->unmap_src_cnt;
                                while (src_cnt--) {
                                        addr = iop_desc_get_src_addr(unmap,
                                                                     iop_chan,
                                                                     src_cnt);
+                                       if (addr == dest)
+                                               continue;
                                        dma_unmap_page(dev, addr, len,
                                                       DMA_TO_DEVICE);
                                }
index 0328da020a1084e6c7f0fcfd0e729a00e079702d..bcda17426411ea6f4c311aa5b077a1e91e906b63 100644 (file)
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                        enum dma_ctrl_flags flags = desc->async_tx.flags;
                        u32 src_cnt;
                        dma_addr_t addr;
+                       dma_addr_t dest;
 
+                       src_cnt = unmap->unmap_src_cnt;
+                       dest = mv_desc_get_dest_addr(unmap);
                        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-                               addr = mv_desc_get_dest_addr(unmap);
-                               dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+                               enum dma_data_direction dir;
+
+                               if (src_cnt > 1) /* is xor ? */
+                                       dir = DMA_BIDIRECTIONAL;
+                               else
+                                       dir = DMA_FROM_DEVICE;
+                               dma_unmap_page(dev, dest, len, dir);
                        }
 
                        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-                               src_cnt = unmap->unmap_src_cnt;
                                while (src_cnt--) {
                                        addr = mv_desc_get_src_addr(unmap,
                                                                    src_cnt);
+                                       if (addr == dest)
+                                               continue;
                                        dma_unmap_page(dev, addr, len,
                                                       DMA_TO_DEVICE);
                                }
index 8daf4793ac32485824194af5e14f332c50fc6f26..4a597d8c2f708ae4bb0e52a7c16a4fe0a0891092 100644 (file)
@@ -467,6 +467,17 @@ const char *dmi_get_system_info(int field)
 }
 EXPORT_SYMBOL(dmi_get_system_info);
 
+/**
+ *     dmi_name_in_serial -    Check if string is in the DMI product serial
+ *                             information.
+ */
+int dmi_name_in_serial(const char *str)
+{
+       int f = DMI_PRODUCT_SERIAL;
+       if (dmi_ident[f] && strstr(dmi_ident[f], str))
+               return 1;
+       return 0;
+}
 
 /**
  *     dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information.
index 553dd4bc307547cc1ac4928209c7f8b3423ce8a3..afa8a12cd00902c1c34ea4f4ba83e55c3b7a74e2 100644 (file)
@@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
-               value = 1;
+               value = dev_priv->has_gem;
                break;
        default:
                DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -830,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        dev_priv->regs = ioremap(base, size);
 
+#ifdef CONFIG_HIGHMEM64G
+       /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
+       dev_priv->has_gem = 0;
+#else
+       /* enable GEM by default */
+       dev_priv->has_gem = 1;
+#endif
+
        i915_gem_load(dev);
 
        /* Init HWS */
index adc972cc6bfc03a28038841b2f6a43ff578e0fb3..b3cc4731aa7c07b43d2d9825068f3e8a70ce34bd 100644 (file)
@@ -106,6 +106,8 @@ struct intel_opregion {
 typedef struct drm_i915_private {
        struct drm_device *dev;
 
+       int has_gem;
+
        void __iomem *regs;
        drm_local_map_t *sarea;
 
index ad672d8548289b8e7a934123d2ff4878f272b6e0..24fe8c10b4b22c6bac1cfa17d79d55f02dd59179 100644 (file)
@@ -2309,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        }
 
        obj_priv = obj->driver_private;
-       args->busy = obj_priv->active;
+       /* Don't count being on the flushing list against the object being
+        * done.  Otherwise, a buffer left on the flushing list but not getting
+        * flushed (because nobody's flushing that domain) won't ever return
+        * unbusy and get reused by libdrm's bo cache.  The other expected
+        * consumer of this interface, OpenGL's occlusion queries, also specs
+        * that the objects get unbusy "eventually" without any interference.
+        */
+       args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
 
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
index 228f75723063968c4e77fbb374e5421e5c87e393..3fcf78e906db185187d10c87466786fb3c3280c2 100644 (file)
@@ -365,6 +365,7 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                pmsg = &msgs[tptr];
                if (pmsg->flags & I2C_M_RD)
                        ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+                               (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
                                !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
                                1 * HZ);
                else
index 1fac4e233133879c8b06b07045c5318d87b91840..b7434d24904ebfa9750d655df8ab42f521ec2c40 100644 (file)
@@ -56,6 +56,7 @@ enum s3c24xx_i2c_state {
 struct s3c24xx_i2c {
        spinlock_t              lock;
        wait_queue_head_t       wait;
+       unsigned int            suspended:1;
 
        struct i2c_msg          *msg;
        unsigned int            msg_num;
@@ -507,7 +508,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int
        unsigned long timeout;
        int ret;
 
-       if (!(readl(i2c->regs + S3C2410_IICCON) & S3C2410_IICCON_IRQEN))
+       if (i2c->suspended)
                return -EIO;
 
        ret = s3c24xx_i2c_set_master(i2c);
@@ -986,17 +987,26 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
+static int s3c24xx_i2c_suspend_late(struct platform_device *dev,
+                                   pm_message_t msg)
+{
+       struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
+       i2c->suspended = 1;
+       return 0;
+}
+
 static int s3c24xx_i2c_resume(struct platform_device *dev)
 {
        struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
 
-       if (i2c != NULL)
-               s3c24xx_i2c_init(i2c);
+       i2c->suspended = 0;
+       s3c24xx_i2c_init(i2c);
 
        return 0;
 }
 
 #else
+#define s3c24xx_i2c_suspend_late NULL
 #define s3c24xx_i2c_resume NULL
 #endif
 
@@ -1005,6 +1015,7 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
 static struct platform_driver s3c2410_i2c_driver = {
        .probe          = s3c24xx_i2c_probe,
        .remove         = s3c24xx_i2c_remove,
+       .suspend_late   = s3c24xx_i2c_suspend_late,
        .resume         = s3c24xx_i2c_resume,
        .driver         = {
                .owner  = THIS_MODULE,
@@ -1015,6 +1026,7 @@ static struct platform_driver s3c2410_i2c_driver = {
 static struct platform_driver s3c2440_i2c_driver = {
        .probe          = s3c24xx_i2c_probe,
        .remove         = s3c24xx_i2c_remove,
+       .suspend_late   = s3c24xx_i2c_suspend_late,
        .resume         = s3c24xx_i2c_resume,
        .driver         = {
                .owner  = THIS_MODULE,
index d333ae22459c9d697c9d71ba7dd462f2af2f51d6..79ef5fd928ae42c0d8252f1d8bf07c479782be9c 100644 (file)
@@ -115,8 +115,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
        return error;
 }
 
+#define OUI_FREECOM_TECHNOLOGIES_GMBH 0x0001db
+
 static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
 {
+       /* Freecom FireWire Hard Drive firmware bug */
+       if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH)
+               return 0;
+
        return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
 }
 
index ac89a5deaca2e12fa62ccb78f1d62b088039675c..ab7c8e4a61f943c516ae5a3534e8775c33180f6b 100644 (file)
@@ -208,15 +208,18 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
  */
 
 /* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
+static struct page *read_sb_page(mddev_t *mddev, long offset,
+                                struct page *page,
+                                unsigned long index, int size)
 {
        /* choose a good rdev and read the page from there */
 
        mdk_rdev_t *rdev;
        struct list_head *tmp;
-       struct page *page = alloc_page(GFP_KERNEL);
        sector_t target;
 
+       if (!page)
+               page = alloc_page(GFP_KERNEL);
        if (!page)
                return ERR_PTR(-ENOMEM);
 
@@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
 
                target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 
-               if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
+               if (sync_page_io(rdev->bdev, target,
+                                roundup(size, bdev_hardsect_size(rdev->bdev)),
+                                page, READ)) {
                        page->index = index;
                        attach_page_buffers(page, NULL); /* so that free_buffer will
                                                          * quietly no-op */
@@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 
                bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
        } else {
-               bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
+               bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
+                                              NULL,
+                                              0, sizeof(bitmap_super_t));
        }
        if (IS_ERR(bitmap->sb_page)) {
                err = PTR_ERR(bitmap->sb_page);
@@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                                 */
                                page = bitmap->sb_page;
                                offset = sizeof(bitmap_super_t);
+                               read_sb_page(bitmap->mddev, bitmap->offset,
+                                            page,
+                                            index, count);
                        } else if (file) {
                                page = read_page(file, index, bitmap, count);
                                offset = 0;
                        } else {
-                               page = read_sb_page(bitmap->mddev, bitmap->offset, index);
+                               page = read_sb_page(bitmap->mddev, bitmap->offset,
+                                                   NULL,
+                                                   index, count);
                                offset = 0;
                        }
                        if (IS_ERR(page)) { /* read error */
index d62fd4f6b52e054db684e2af1e30ac12fa3a9240..ee090413e598e4254cad01d211e465e46e081f48 100644 (file)
@@ -2008,6 +2008,9 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
                return FAILED;
        }
 
+       /* make sure we have no outstanding commands at this stage */
+       mptscsih_flush_running_cmds(hd);
+
        ioc = hd->ioc;
        printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
            ioc->name, SCpnt);
index 533923f83f1aa061635571b86934ad2715014be7..73b0ca061bb567401cc4d30a145c9daf05e72160 100644 (file)
@@ -317,7 +317,6 @@ int gru_proc_init(void)
 {
        struct proc_entry *p;
 
-       proc_mkdir("sgi_uv", NULL);
        proc_gru = proc_mkdir("sgi_uv/gru", NULL);
 
        for (p = proc_files; p->name; p++)
index ed1722e50049ff2aea256edbb574bb9969cb3bda..7b4cbd5e03e99a3e4297dc6d4b6de219d30f7389 100644 (file)
@@ -194,9 +194,10 @@ enum xp_retval {
        xpGruSendMqError,       /* 59: gru send message queue related error */
 
        xpBadChannelNumber,     /* 60: invalid channel number */
-       xpBadMsgType,           /* 60: invalid message type */
+       xpBadMsgType,           /* 61: invalid message type */
+       xpBiosError,            /* 62: BIOS error */
 
-       xpUnknownReason         /* 61: unknown reason - must be last in enum */
+       xpUnknownReason         /* 63: unknown reason - must be last in enum */
 };
 
 /*
@@ -345,6 +346,8 @@ extern unsigned long (*xp_pa) (void *);
 extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
                       size_t);
 extern int (*xp_cpu_to_nasid) (int);
+extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
+extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
 
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
index 66a1d19e08ad73ccd651f22efaddc4605fc0857a..9a2e77172d942f548402e9ce8ccce25d9e3bf935 100644 (file)
@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy);
 int (*xp_cpu_to_nasid) (int cpuid);
 EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
 
+enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
+                                       unsigned long size);
+EXPORT_SYMBOL_GPL(xp_expand_memprotect);
+enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
+                                         unsigned long size);
+EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
+
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
index 1440134caf3166d25339771b73df741b814e35e1..fb3ec9d735a901fb7825c1d254f188d0334a2509 100644 (file)
@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid)
        return cpuid_to_nasid(cpuid);
 }
 
+static enum xp_retval
+xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+       u64 nasid_array = 0;
+       int ret;
+
+       ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+                                  &nasid_array);
+       if (ret != 0) {
+               dev_err(xp, "sn_change_memprotect(,, "
+                       "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+               return xpSalError;
+       }
+       return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+       u64 nasid_array = 0;
+       int ret;
+
+       ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+                                  &nasid_array);
+       if (ret != 0) {
+               dev_err(xp, "sn_change_memprotect(,, "
+                       "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+               return xpSalError;
+       }
+       return xpSuccess;
+}
+
 enum xp_retval
 xp_init_sn2(void)
 {
@@ -132,6 +164,8 @@ xp_init_sn2(void)
        xp_pa = xp_pa_sn2;
        xp_remote_memcpy = xp_remote_memcpy_sn2;
        xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+       xp_expand_memprotect = xp_expand_memprotect_sn2;
+       xp_restrict_memprotect = xp_restrict_memprotect_sn2;
 
        return xp_register_nofault_code_sn2();
 }
index d9f7ce2510bcc26fe6f7c31fd287ebd4ad91f96b..d238576b26fa9b6c8f5d96b8f41760db444d43f1 100644 (file)
 
 #include <linux/device.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/grukservices.h"
 #include "xp.h"
 
@@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid)
        return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
 }
 
+static enum xp_retval
+xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+       int ret;
+
+#if defined CONFIG_X86_64
+       ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
+       if (ret != BIOS_STATUS_SUCCESS) {
+               dev_err(xp, "uv_bios_change_memprotect(,, "
+                       "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
+               return xpBiosError;
+       }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       u64 nasid_array;
+
+       ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+                                  &nasid_array);
+       if (ret != 0) {
+               dev_err(xp, "sn_change_memprotect(,, "
+                       "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+               return xpSalError;
+       }
+#else
+       #error not a supported configuration
+#endif
+       return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+       int ret;
+
+#if defined CONFIG_X86_64
+       ret = uv_bios_change_memprotect(phys_addr, size,
+                                       UV_MEMPROT_RESTRICT_ACCESS);
+       if (ret != BIOS_STATUS_SUCCESS) {
+               dev_err(xp, "uv_bios_change_memprotect(,, "
+                       "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
+               return xpBiosError;
+       }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       u64 nasid_array;
+
+       ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+                                  &nasid_array);
+       if (ret != 0) {
+               dev_err(xp, "sn_change_memprotect(,, "
+                       "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+               return xpSalError;
+       }
+#else
+       #error not a supported configuration
+#endif
+       return xpSuccess;
+}
+
 enum xp_retval
 xp_init_uv(void)
 {
        BUG_ON(!is_uv());
 
        xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
-       xp_partition_id = 0;    /* !!! not correct value */
-       xp_region_size = 0;     /* !!! not correct value */
+       xp_partition_id = sn_partition_id;
+       xp_region_size = sn_region_size;
 
        xp_pa = xp_pa_uv;
        xp_remote_memcpy = xp_remote_memcpy_uv;
        xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+       xp_expand_memprotect = xp_expand_memprotect_uv;
+       xp_restrict_memprotect = xp_restrict_memprotect_uv;
 
        return xpSuccess;
 }
index 619208d618627692c8cbaec3e0448b1d15479577..a5bd658c2e83b13a5d9dee5805a71eb191750e21 100644 (file)
@@ -180,6 +180,18 @@ struct xpc_vars_part_sn2 {
                                 (XPC_RP_MACH_NASIDS(_rp) + \
                                  xpc_nasid_mask_nlongs))
 
+/*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+       void *address;          /* address of GRU message queue */
+       unsigned int order;     /* size of GRU message queue as a power of 2 */
+       int irq;                /* irq raised when message is received in mq */
+       int mmr_blade;          /* blade where watchlist was allocated from */
+       unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+       int watchlist_num;      /* number of watchlist allocatd by BIOS */
+};
+
 /*
  * The activate_mq is used to send/receive GRU messages that affect XPC's
  * heartbeat, partition active state, and channel state. This is UV only.
index b4882ccf6344a69607ee8c62f1ff3c84c32ce894..73b7fb8de47a32e0e38b8abe048ede7333fbe832 100644 (file)
@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
 static enum xp_retval
 xpc_allow_amo_ops_sn2(struct amo *amos_page)
 {
-       u64 nasid_array = 0;
-       int ret;
+       enum xp_retval ret = xpSuccess;
 
        /*
         * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
         * collides with memory operations. On those systems we call
         * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
         */
-       if (!enable_shub_wars_1_1()) {
-               ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
-                                          SN_MEMPROT_ACCESS_CLASS_1,
-                                          &nasid_array);
-               if (ret != 0)
-                       return xpSalError;
-       }
-       return xpSuccess;
+       if (!enable_shub_wars_1_1())
+               ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
+
+       return ret;
 }
 
 /*
index 1ac694c01623b99419e3b7d2afb9a2c16d8ceda7..91a55b1b1037011c5e403b19dac8c710d18632d5 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/gru.h"
 #include "../sgi-gru/grukservices.h"
 #include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
 
 #define XPC_ACTIVATE_MSG_SIZE_UV       (1 * GRU_CACHE_LINE_BYTES)
-#define XPC_NOTIFY_MSG_SIZE_UV         (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV                (4 * XP_MAX_NPARTITIONS_UV * \
+                                        XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME          "xpc_activate"
 
-#define XPC_ACTIVATE_MQ_SIZE_UV        (4 * XP_MAX_NPARTITIONS_UV * \
-                                XPC_ACTIVATE_MSG_SIZE_UV)
-#define XPC_NOTIFY_MQ_SIZE_UV  (4 * XP_MAX_NPARTITIONS_UV * \
-                                XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_MSG_SIZE_UV         (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV          (4 * XP_MAX_NPARTITIONS_UV * \
+                                        XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME            "xpc_notify"
 
-static void *xpc_activate_mq_uv;
-static void *xpc_notify_mq_uv;
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
 static int
 xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
        return 0;
 }
 
-static void *
-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+#if defined CONFIG_X86_64
+       mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+       if (mq->irq < 0) {
+               dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+                       mq->irq);
+       }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       int mmr_pnode;
+       unsigned long mmr_value;
+
+       if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+               mq->irq = SGI_XPC_ACTIVATE;
+       else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+               mq->irq = SGI_XPC_NOTIFY;
+       else
+               return -EINVAL;
+
+       mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+       mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+
+       uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+       #error not a supported configuration
+#endif
+
+       return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+       uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       int mmr_pnode;
+       unsigned long mmr_value;
+
+       mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+       mmr_value = 1UL << 16;
+
+       uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+       #error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+       int ret;
+
+#if defined CONFIG_X86_64
+       ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
+                                        mq->order, &mq->mmr_offset);
+       if (ret < 0) {
+               dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+                       "ret=%d\n", ret);
+               return ret;
+       }
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
+                                   mq->order, &mq->mmr_offset);
+       if (ret < 0) {
+               dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+                       ret);
+               return -EBUSY;
+       }
+#else
+       #error not a supported configuration
+#endif
+
+       mq->watchlist_num = ret;
+       return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+       int ret;
+
+#if defined CONFIG_X86_64
+       ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+       BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+       BUG_ON(ret != SALRET_OK);
+#else
+       #error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                     irq_handler_t irq_handler)
 {
+       enum xp_retval xp_ret;
        int ret;
        int nid;
-       int mq_order;
+       int pg_order;
        struct page *page;
-       void *mq;
+       struct xpc_gru_mq_uv *mq;
+
+       mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+       if (mq == NULL) {
+               dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+                       "a xpc_gru_mq_uv structure\n");
+               ret = -ENOMEM;
+               goto out_1;
+       }
+
+       pg_order = get_order(mq_size);
+       mq->order = pg_order + PAGE_SHIFT;
+       mq_size = 1UL << mq->order;
+
+       mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
-       nid = cpu_to_node(cpuid);
-       mq_order = get_order(mq_size);
+       nid = cpu_to_node(cpu);
        page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-                               mq_order);
+                               pg_order);
        if (page == NULL) {
                dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
                        "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
-               return NULL;
+               ret = -ENOMEM;
+               goto out_2;
        }
+       mq->address = page_address(page);
 
-       mq = page_address(page);
-       ret = gru_create_message_queue(mq, mq_size);
+       ret = gru_create_message_queue(mq->address, mq_size);
        if (ret != 0) {
                dev_err(xpc_part, "gru_create_message_queue() returned "
                        "error=%d\n", ret);
-               free_pages((unsigned long)mq, mq_order);
-               return NULL;
+               ret = -EINVAL;
+               goto out_3;
        }
 
-       /* !!! Need to do some other things to set up IRQ */
+       /* enable generation of irq when GRU mq operation occurs to this mq */
+       ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+       if (ret != 0)
+               goto out_3;
 
-       ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+       ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+       if (ret != 0)
+               goto out_4;
+
+       ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
        if (ret != 0) {
                dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
-                       irq, ret);
-               free_pages((unsigned long)mq, mq_order);
-               return NULL;
+                       mq->irq, ret);
+               goto out_5;
        }
 
-       /* !!! enable generation of irq when GRU mq op occurs to this mq */
-
-       /* ??? allow other partitions to access GRU mq? */
+       /* allow other partitions to access this GRU mq */
+       xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+       if (xp_ret != xpSuccess) {
+               ret = -EACCES;
+               goto out_6;
+       }
 
        return mq;
+
+       /* something went wrong */
+out_6:
+       free_irq(mq->irq, NULL);
+out_5:
+       xpc_release_gru_mq_irq_uv(mq);
+out_4:
+       xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+       free_pages((unsigned long)mq->address, pg_order);
+out_2:
+       kfree(mq);
+out_1:
+       return ERR_PTR(ret);
 }
 
 static void
-xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
+xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
 {
-       /* ??? disallow other partitions to access GRU mq? */
+       unsigned int mq_size;
+       int pg_order;
+       int ret;
+
+       /* disallow other partitions to access GRU mq */
+       mq_size = 1UL << mq->order;
+       ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
+       BUG_ON(ret != xpSuccess);
 
-       /* !!! disable generation of irq when GRU mq op occurs to this mq */
+       /* unregister irq handler and release mq irq/vector mapping */
+       free_irq(mq->irq, NULL);
+       xpc_release_gru_mq_irq_uv(mq);
 
-       free_irq(irq, NULL);
+       /* disable generation of irq when GRU mq op occurs to this mq */
+       xpc_gru_mq_watchlist_free_uv(mq);
 
-       free_pages((unsigned long)mq, get_order(mq_size));
+       pg_order = mq->order - PAGE_SHIFT;
+       free_pages((unsigned long)mq->address, pg_order);
+
+       kfree(mq);
 }
 
 static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
        struct xpc_partition *part;
        int wakeup_hb_checker = 0;
 
-       while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+       while (1) {
+               msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
+               if (msg_hdr == NULL)
+                       break;
 
                partid = msg_hdr->partid;
                if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
                        }
                }
 
-               gru_free_message(xpc_activate_mq_uv, msg_hdr);
+               gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
        }
 
        if (wakeup_hb_checker)
@@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
        struct xpc_partition_uv *part_uv = &part->sn.uv;
 
        /*
-        * !!! Make our side think that the remote parition sent an activate
+        * !!! Make our side think that the remote partition sent an activate
         * !!! message our way by doing what the activate IRQ handler would
         * !!! do had one really been sent.
         */
@@ -500,14 +660,39 @@ static enum xp_retval
 xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
                                  size_t *len)
 {
-       /* !!! call the UV version of sn_partition_reserved_page_pa() */
-       return xpUnsupported;
+       s64 status;
+       enum xp_retval ret;
+
+#if defined CONFIG_X86_64
+       status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
+                                         (u64 *)len);
+       if (status == BIOS_STATUS_SUCCESS)
+               ret = xpSuccess;
+       else if (status == BIOS_STATUS_MORE_PASSES)
+               ret = xpNeedMoreInfo;
+       else
+               ret = xpBiosError;
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
+       if (status == SALRET_OK)
+               ret = xpSuccess;
+       else if (status == SALRET_MORE_PASSES)
+               ret = xpNeedMoreInfo;
+       else
+               ret = xpSalError;
+
+#else
+       #error not a supported configuration
+#endif
+
+       return ret;
 }
 
 static int
 xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
 {
-       rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+       rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
        return 0;
 }
 
@@ -1411,22 +1596,18 @@ xpc_init_uv(void)
                return -E2BIG;
        }
 
-       /* ??? The cpuid argument's value is 0, is that what we want? */
-       /* !!! The irq argument's value isn't correct. */
-       xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+       xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+                                                 XPC_ACTIVATE_IRQ_NAME,
                                                  xpc_handle_activate_IRQ_uv);
-       if (xpc_activate_mq_uv == NULL)
-               return -ENOMEM;
+       if (IS_ERR(xpc_activate_mq_uv))
+               return PTR_ERR(xpc_activate_mq_uv);
 
-       /* ??? The cpuid argument's value is 0, is that what we want? */
-       /* !!! The irq argument's value isn't correct. */
-       xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+       xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+                                               XPC_NOTIFY_IRQ_NAME,
                                                xpc_handle_notify_IRQ_uv);
-       if (xpc_notify_mq_uv == NULL) {
-               /* !!! The irq argument's value isn't correct. */
-               xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
-                                     XPC_ACTIVATE_MQ_SIZE_UV, 0);
-               return -ENOMEM;
+       if (IS_ERR(xpc_notify_mq_uv)) {
+               xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+               return PTR_ERR(xpc_notify_mq_uv);
        }
 
        return 0;
@@ -1435,9 +1616,6 @@ xpc_init_uv(void)
 void
 xpc_exit_uv(void)
 {
-       /* !!! The irq argument's value isn't correct. */
-       xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
-
-       /* !!! The irq argument's value isn't correct. */
-       xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
+       xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+       xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
index a1a3d0e5d2b4ac6c30637ca3f8a308a7d56c5d3a..9e8222f9e90ee0f69416ffe985103d0f6b183df5 100644 (file)
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-                                                   rxr->rx_pg_desc_ring[i],
-                                                   rxr->rx_pg_desc_mapping[i]);
-                       rxr->rx_pg_desc_ring[i] = NULL;
+                                                   rxr->rx_pg_desc_ring[j],
+                                                   rxr->rx_pg_desc_mapping[j]);
+                       rxr->rx_pg_desc_ring[j] = NULL;
                }
                if (rxr->rx_pg_ring)
                        vfree(rxr->rx_pg_ring);
index 523b9716a543aec21d923b58a07cd5162b3edab3..d115a6d30f29c0b3acb81c8038c6215993980e8d 100644 (file)
@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
                ctrl |= E1000_CTRL_PHY_RST;
        }
        ret_val = e1000_acquire_swflag_ich8lan(hw);
+       /* Whether or not the swflag was acquired, we need to reset the part */
        hw_dbg(hw, "Issuing a global reset to ich8lan");
        ew32(CTRL, (ctrl | E1000_CTRL_RST));
        msleep(20);
 
-       /* release the swflag because it is not reset by hardware reset */
-       e1000_release_swflag_ich8lan(hw);
+       if (!ret_val) {
+               /* release the swflag because it is not reset by
+                * hardware reset
+                */
+               e1000_release_swflag_ich8lan(hw);
+       }
 
        ret_val = e1000e_get_auto_rd_done(hw);
        if (ret_val) {
index c414554ac3217f22845b8e71901117bc5fffaaad..36cb6e95b465c2545b822d30c14a675b6bd39e07 100644 (file)
@@ -959,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += len;
                        ndev->last_rx = jiffies;
-                       netif_rx(skb);
+                       netif_rx_ni(skb);
                }
        }
        /*
index f863aee6648b49b0db38d2cdc077640669a2a424..3f5d91543246c88936d8fc086abf37c2cd3f6686 100644 (file)
@@ -22,7 +22,7 @@
  */
 
 #ifndef __JME_H_INCLUDED__
-#define __JME_H_INCLUDEE__
+#define __JME_H_INCLUDED__
 
 #define DRV_NAME       "jme"
 #define DRV_VERSION    "1.0.3"
index 536bda1f428b6c09360be41448f7b0b7d4c16679..289fc267edf30eb5094d24f416ca644c9494d401 100644 (file)
@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
                return -EINVAL;
        }
 
-       bus->state = MDIOBUS_REGISTERED;
-
        mutex_init(&bus->mdio_lock);
 
        if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
                }
        }
 
+       if (!err)
+               bus->state = MDIOBUS_REGISTERED;
+
        pr_info("%s: probed\n", bus->name);
 
        return err;
index 1d2ef8f47780904da79f621bf2ac09d284b50753..5a40f2d78beb1a9114478ac9e0385556c29d30bf 100644 (file)
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
                desc->status = 0;
                np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
        }
+
+       if (*quota == 0) {      /* out of rx quota */
+               retcode = 1;
+               goto out;
+       }
        writew(np->rx_done, np->base + CompletionQConsumerIdx);
 
  out:
index 1349e419673ca348dbe5aa22b3b5b27f1a1056dd..fed7eba65ead6268e292eddc66c73905d8e263c7 100644 (file)
@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+static void gem_pcs_reset(struct gem *gp)
+{
+       int limit;
+       u32 val;
+
+       /* Reset PCS unit. */
+       val = readl(gp->regs + PCS_MIICTRL);
+       val |= PCS_MIICTRL_RST;
+       writel(val, gp->regs + PCS_MIICTRL);
+
+       limit = 32;
+       while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+               udelay(100);
+               if (limit-- <= 0)
+                       break;
+       }
+       if (limit <= 0)
+               printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+                      gp->dev->name);
+}
+
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+       u32 val;
+
+       /* Make sure PCS is disabled while changing advertisement
+        * configuration.
+        */
+       val = readl(gp->regs + PCS_CFG);
+       val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+       writel(val, gp->regs + PCS_CFG);
+
+       /* Advertise all capabilities except assymetric
+        * pause.
+        */
+       val = readl(gp->regs + PCS_MIIADV);
+       val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+               PCS_MIIADV_SP | PCS_MIIADV_AP);
+       writel(val, gp->regs + PCS_MIIADV);
+
+       /* Enable and restart auto-negotiation, disable wrapback/loopback,
+        * and re-enable PCS.
+        */
+       val = readl(gp->regs + PCS_MIICTRL);
+       val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+       val &= ~PCS_MIICTRL_WB;
+       writel(val, gp->regs + PCS_MIICTRL);
+
+       val = readl(gp->regs + PCS_CFG);
+       val |= PCS_CFG_ENABLE;
+       writel(val, gp->regs + PCS_CFG);
+
+       /* Make sure serialink loopback is off.  The meaning
+        * of this bit is logically inverted based upon whether
+        * you are in Serialink or SERDES mode.
+        */
+       val = readl(gp->regs + PCS_SCTRL);
+       if (gp->phy_type == phy_serialink)
+               val &= ~PCS_SCTRL_LOOP;
+       else
+               val |= PCS_SCTRL_LOOP;
+       writel(val, gp->regs + PCS_SCTRL);
+}
+
 #define STOP_TRIES 32
 
 /* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
 
        if (limit <= 0)
                printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+
+       if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+               gem_pcs_reinit_adv(gp);
 }
 
 /* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
                   gp->phy_type == phy_serdes) {
                u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
 
-               if (pcs_lpa & PCS_MIIADV_FD)
+               if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
                        full_duplex = 1;
                speed = SPEED_1000;
        }
@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
                        val = readl(gp->regs + PCS_MIISTAT);
 
                if ((val & PCS_MIISTAT_LS) != 0) {
+                       if (gp->lstate == link_up)
+                               goto restart;
+
                        gp->lstate = link_up;
                        netif_carrier_on(gp->dev);
                        (void)gem_set_link_modes(gp);
@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
                if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
                        gp->phy_mii.def->ops->init(&gp->phy_mii);
        } else {
-               u32 val;
-               int limit;
-
-               /* Reset PCS unit. */
-               val = readl(gp->regs + PCS_MIICTRL);
-               val |= PCS_MIICTRL_RST;
-               writel(val, gp->regs + PCS_MIICTRL);
-
-               limit = 32;
-               while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
-                       udelay(100);
-                       if (limit-- <= 0)
-                               break;
-               }
-               if (limit <= 0)
-                       printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
-                              gp->dev->name);
-
-               /* Make sure PCS is disabled while changing advertisement
-                * configuration.
-                */
-               val = readl(gp->regs + PCS_CFG);
-               val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
-               writel(val, gp->regs + PCS_CFG);
-
-               /* Advertise all capabilities except assymetric
-                * pause.
-                */
-               val = readl(gp->regs + PCS_MIIADV);
-               val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
-                       PCS_MIIADV_SP | PCS_MIIADV_AP);
-               writel(val, gp->regs + PCS_MIIADV);
-
-               /* Enable and restart auto-negotiation, disable wrapback/loopback,
-                * and re-enable PCS.
-                */
-               val = readl(gp->regs + PCS_MIICTRL);
-               val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
-               val &= ~PCS_MIICTRL_WB;
-               writel(val, gp->regs + PCS_MIICTRL);
-
-               val = readl(gp->regs + PCS_CFG);
-               val |= PCS_CFG_ENABLE;
-               writel(val, gp->regs + PCS_CFG);
-
-               /* Make sure serialink loopback is off.  The meaning
-                * of this bit is logically inverted based upon whether
-                * you are in Serialink or SERDES mode.
-                */
-               val = readl(gp->regs + PCS_SCTRL);
-               if (gp->phy_type == phy_serialink)
-                       val &= ~PCS_SCTRL_LOOP;
-               else
-                       val |= PCS_SCTRL_LOOP;
-               writel(val, gp->regs + PCS_SCTRL);
+               gem_pcs_reset(gp);
+               gem_pcs_reinit_adv(gp);
        }
 
        /* Default aneg parameters */
@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                cmd->speed = 0;
                cmd->duplex = cmd->port = cmd->phy_address =
                        cmd->transceiver = cmd->autoneg = 0;
+
+               /* serdes means usually a Fibre connector, with most fixed */
+               if (gp->phy_type == phy_serdes) {
+                       cmd->port = PORT_FIBRE;
+                       cmd->supported = (SUPPORTED_1000baseT_Half |
+                               SUPPORTED_1000baseT_Full |
+                               SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+                               SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+                       cmd->advertising = cmd->supported;
+                       cmd->transceiver = XCVR_INTERNAL;
+                       if (gp->lstate == link_up)
+                               cmd->speed = SPEED_1000;
+                       cmd->duplex = DUPLEX_FULL;
+                       cmd->autoneg = 1;
+               }
        }
        cmd->maxtxpkt = cmd->maxrxpkt = 0;
 
index c41d68761364550ebe2d2d8376f2a8c293b5d799..e60498232b94f83dede6c6d4ae153d8d135d7b19 100644 (file)
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
        dma_addr_t      tail_list_phys;
        u8              *tail_buffer;
        unsigned long   flags;
+       unsigned int    txlen;
 
        if ( ! priv->phyOnline ) {
                TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 
        if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
                return 0;
+       txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
 
        tail_list = priv->txList + priv->txTail;
        tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 
        if ( bbuf ) {
                tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
-               skb_copy_from_linear_data(skb, tail_buffer, skb->len);
+               skb_copy_from_linear_data(skb, tail_buffer, txlen);
        } else {
                tail_list->buffer[0].address = pci_map_single(priv->pciDev,
-                                                             skb->data, skb->len,
+                                                             skb->data, txlen,
                                                              PCI_DMA_TODEVICE);
                TLan_StoreSKB(tail_list, skb);
        }
 
-       tail_list->frameSize = (u16) skb->len;
-       tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+       tail_list->frameSize = (u16) txlen;
+       tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
        tail_list->buffer[1].count = 0;
        tail_list->buffer[1].address = 0;
 
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
                if ( ! bbuf ) {
                        struct sk_buff *skb = TLan_GetSKB(head_list);
                        pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
-                                        skb->len, PCI_DMA_TODEVICE);
+                                        max(skb->len,
+                                            (unsigned int)TLAN_MIN_FRAME_SIZE),
+                                        PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(skb);
                        head_list->buffer[8].address = 0;
                        head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
                        list = priv->txList + i;
                        skb = TLan_GetSKB(list);
                        if ( skb ) {
-                               pci_unmap_single(priv->pciDev,
-                                                list->buffer[0].address, skb->len,
-                                                PCI_DMA_TODEVICE);
+                               pci_unmap_single(
+                                       priv->pciDev,
+                                       list->buffer[0].address,
+                                       max(skb->len,
+                                           (unsigned int)TLAN_MIN_FRAME_SIZE),
+                                       PCI_DMA_TODEVICE);
                                dev_kfree_skb_any( skb );
                                list->buffer[8].address = 0;
                                list->buffer[9].address = 0;
index f9e244da30aef9ea9e170c5cce58a524ab5f87a6..9bcb6cbd5aa93988e39bdf2b8c9285512d74df22 100644 (file)
@@ -113,7 +113,7 @@ struct acpiphp_slot {
 
        u8              device;         /* pci device# */
 
-       u32             sun;            /* ACPI _SUN (slot unique number) */
+       unsigned long long sun;         /* ACPI _SUN (slot unique number) */
        u32             flags;          /* see below */
 };
 
index 95b536a23d25d8d9f76d5aa757a411540017fef0..43c10bd261b48612b4ec2fa2620fa14207628805 100644 (file)
@@ -337,7 +337,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
        slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
 
        acpiphp_slot->slot = slot;
-       snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
+       snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
 
        retval = pci_hp_register(slot->hotplug_slot,
                                        acpiphp_slot->bridge->pci_bus,
index 955aae4071f7a64eeb5cb716aaacf8ebb26c7a11..3affc6472e65253c94b77bcc8aa97061480b85d7 100644 (file)
@@ -255,13 +255,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
                bridge->nr_slots++;
 
-               dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n",
+               dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
                                slot->sun, pci_domain_nr(bridge->pci_bus),
                                bridge->pci_bus->number, slot->device);
                retval = acpiphp_register_hotplug_slot(slot);
                if (retval) {
                        if (retval == -EBUSY)
-                               warn("Slot %d already registered by another "
+                               warn("Slot %llu already registered by another "
                                        "hotplug driver\n", slot->sun);
                        else
                                warn("acpiphp_register_hotplug_slot failed "
index c892daae74d6dc325a77dc9050c0f6cd8a5105c8..633e743442ac711d0b02d6b85a8252a1ee74dbb5 100644 (file)
@@ -1402,10 +1402,6 @@ static int __init ibmphp_init(void)
                goto error;
        }
 
-       /* lock ourselves into memory with a module 
-        * count of -1 so that no one can unload us. */
-       module_put(THIS_MODULE);
-
 exit:
        return rc;
 
@@ -1423,4 +1419,3 @@ static void __exit ibmphp_exit(void)
 }
 
 module_init(ibmphp_init);
-module_exit(ibmphp_exit);
index 4b23bc39b11e513be82a54df9f01db2b268b5bb4..39cf248d24e3cd0592b61dee392f748e441301c0 100644 (file)
@@ -432,18 +432,19 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
                goto err_out_release_ctlr;
        }
 
+       /* Check if slot is occupied */
        t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-
-       t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */
-       if (value && pciehp_force) {
-               rc = pciehp_enable_slot(t_slot);
-               if (rc) /* -ENODEV: shouldn't happen, but deal with it */
-                       value = 0;
-       }
-       if ((POWER_CTRL(ctrl)) && !value) {
-               rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
-               if (rc)
-                       goto err_out_free_ctrl_slot;
+       t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+       if (value) {
+               if (pciehp_force)
+                       pciehp_enable_slot(t_slot);
+       } else {
+               /* Power off slot if not occupied */
+               if (POWER_CTRL(ctrl)) {
+                       rc = t_slot->hpc_ops->power_off_slot(t_slot);
+                       if (rc)
+                               goto err_out_free_ctrl_slot;
+               }
        }
 
        return 0;
index dfc63d01f20a0a7c072f9a65039f2776537b3e8e..aac7006949f15c9eb52b56968a4dca6ef812e3c2 100644 (file)
@@ -252,7 +252,7 @@ static void report_resume(struct pci_dev *dev, void *data)
 
        if (!dev->driver ||
                !dev->driver->err_handler ||
-               !dev->driver->err_handler->slot_reset)
+               !dev->driver->err_handler->resume)
                return;
 
        err_handler = dev->driver->err_handler;
index 5f4f85f56cb7c7b592f4583f61f7a0d0657ea9d7..ce0985615133df0da3b84f71b370e0e1fb6f6988 100644 (file)
@@ -606,27 +606,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev)
                sis_apic_bug = 1;
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,      PCI_ANY_ID,                     quirk_ioapic_rmw);
-
-#define AMD8131_revA0        0x01
-#define AMD8131_revB0        0x11
-#define AMD8131_MISC         0x40
-#define AMD8131_NIOAMODE_BIT 0
-static void quirk_amd_8131_ioapic(struct pci_dev *dev)
-{ 
-        unsigned char tmp;
-        
-        if (nr_ioapics == 0) 
-                return;
-
-        if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) {
-                dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n");
-                pci_read_config_byte( dev, AMD8131_MISC, &tmp);
-                tmp &= ~(1 << AMD8131_NIOAMODE_BIT);
-                pci_write_config_byte( dev, AMD8131_MISC, tmp);
-        }
-} 
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
 #endif /* CONFIG_X86_IO_APIC */
 
 /*
@@ -1423,6 +1402,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    0x2609, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   0x260a, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   0x260b, quirk_intel_pcie_pm);
 
+#ifdef CONFIG_X86_IO_APIC
+/*
+ * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
+ * remap the original interrupt in the linux kernel to the boot interrupt, so
+ * that a PCI device's interrupt handler is installed on the boot interrupt
+ * line instead.
+ */
+static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
+{
+       if (noioapicquirk || noioapicreroute)
+               return;
+
+       dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
+
+       printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n",
+                       dev->vendor, dev->device);
+       return;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_80333_0,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_80333_1,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB2_0,     quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_PXH_0,      quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_PXH_1,      quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_PXHV,       quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_80332_0,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_80332_1,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_80333_0,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_80333_1,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_ESB2_0,     quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_PXH_0,      quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_PXH_1,      quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_PXHV,       quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_80332_0,    quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_80332_1,    quirk_reroute_to_boot_interrupts_intel);
+
+/*
+ * On some chipsets we can disable the generation of legacy INTx boot
+ * interrupts.
+ */
+
+/*
+ * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no
+ * 300641-004US, section 5.7.3.
+ */
+#define INTEL_6300_IOAPIC_ABAR         0x40
+#define INTEL_6300_DISABLE_BOOT_IRQ    (1<<14)
+
+static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
+{
+       u16 pci_config_word;
+
+       if (noioapicquirk)
+               return;
+
+       pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
+       pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+       pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
+
+       printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n",
+               dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10,     quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10,    quirk_disable_intel_boot_interrupt);
+
+/*
+ * disable boot interrupts on HT-1000
+ */
+#define BC_HT1000_FEATURE_REG          0x64
+#define BC_HT1000_PIC_REGS_ENABLE      (1<<0)
+#define BC_HT1000_MAP_IDX              0xC00
+#define BC_HT1000_MAP_DATA             0xC01
+
+static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
+{
+       u32 pci_config_dword;
+       u8 irq;
+
+       if (noioapicquirk)
+               return;
+
+       pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
+       pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
+                       BC_HT1000_PIC_REGS_ENABLE);
+
+       for (irq = 0x10; irq < 0x10 + 32; irq++) {
+               outb(irq, BC_HT1000_MAP_IDX);
+               outb(0x00, BC_HT1000_MAP_DATA);
+       }
+
+       pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
+
+       printk(KERN_INFO "disabled boot interrupts on PCI device"
+                       "0x%04x:0x%04x\n", dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,       quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,      quirk_disable_broadcom_boot_interrupt);
+
+/*
+ * disable boot interrupts on AMD and ATI chipsets
+ */
+/*
+ * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
+ * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
+ * (due to an erratum).
+ */
+#define AMD_813X_MISC                  0x40
+#define AMD_813X_NOIOAMODE             (1<<0)
+
+static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
+{
+       u32 pci_config_dword;
+
+       if (noioapicquirk)
+               return;
+
+       pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
+       pci_config_dword &= ~AMD_813X_NOIOAMODE;
+       pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
+
+       printk(KERN_INFO "disabled boot interrupts on PCI device "
+                       "0x%04x:0x%04x\n", dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8131_BRIDGE,    quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8132_BRIDGE,   quirk_disable_amd_813x_boot_interrupt);
+
+#define AMD_8111_PCI_IRQ_ROUTING       0x56
+
+static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
+{
+       u16 pci_config_word;
+
+       if (noioapicquirk)
+               return;
+
+       pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
+       if (!pci_config_word) {
+               printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x "
+                               "already disabled\n",
+                               dev->vendor, dev->device);
+               return;
+       }
+       pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
+       printk(KERN_INFO "disabled boot interrupts on PCI device "
+                       "0x%04x:0x%04x\n", dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8111_SMBUS,     quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8111_SMBUS,    quirk_disable_amd_8111_boot_interrupt);
+#endif /* CONFIG_X86_IO_APIC */
+
 /*
  * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
  * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
index bb7338863fb9aabeedfbb941b59f026176919673..b59d4115d20f541e5176ba309cb694687c1c1d17 100644 (file)
@@ -334,6 +334,6 @@ static void __exit bfin_cf_exit(void)
 module_init(bfin_cf_init);
 module_exit(bfin_cf_exit);
 
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>")
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
 MODULE_LICENSE("GPL");
index 162cd927d94b8e975edb1438db13cd9d0bcc6f8f..94acbeed4e7cfe7bc59ebc8003fdd7ae96e66ec4 100644 (file)
@@ -175,8 +175,8 @@ static struct aac_driver_ident aac_drivers[] = {
        { aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
        { aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
        { aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
-       { aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */
-       { aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */
+       { aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },                     /* Adaptec 2120S (Crusader) */
+       { aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },                     /* Adaptec 2200S (Vulcan) */
        { aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
        { aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
        { aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
index 2a5b29d12172e4a72bb774453d2662b7d29192ee..e2dd6a45924a5f598f6dafa05eff545b2aa5292b 100644 (file)
@@ -864,21 +864,23 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        INIT_WORK(&vport->crq_work, handle_crq);
 
-       err = crq_queue_create(&vport->crq_queue, target);
+       err = scsi_add_host(shost, target->dev);
        if (err)
                goto free_srp_target;
 
-       err = scsi_add_host(shost, target->dev);
+       err = scsi_tgt_alloc_queue(shost);
        if (err)
-               goto destroy_queue;
+               goto remove_host;
 
-       err = scsi_tgt_alloc_queue(shost);
+       err = crq_queue_create(&vport->crq_queue, target);
        if (err)
-               goto destroy_queue;
+               goto free_queue;
 
        return 0;
-destroy_queue:
-       crq_queue_destroy(target);
+free_queue:
+       scsi_tgt_free_queue(shost);
+remove_host:
+       scsi_remove_host(shost);
 free_srp_target:
        srp_target_free(target);
 put_host:
index 801c7cf54d2ebb29b209fbd692eb7c807514fdc6..3fdee7370cccd235d9ad37aa8482c29cac715b7a 100644 (file)
@@ -489,12 +489,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                if (!__kfifo_get(session->cmdpool.queue,
                                 (void*)&task, sizeof(void*)))
                        return NULL;
-
-               if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
-                    hdr->ttt == RESERVED_ITT) {
-                       conn->ping_task = task;
-                       conn->last_ping = jiffies;
-               }
        }
        /*
         * released in complete pdu for task we expect a response for, and
@@ -703,6 +697,11 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
        task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
        if (!task)
                iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+       else if (!rhdr) {
+               /* only track our nops */
+               conn->ping_task = task;
+               conn->last_ping = jiffies;
+       }
 }
 
 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
index fa45a1a668676c667dbe2bc1749ad44227061d6b..148d3af92aefad04fa42c48780ba4138e8033e59 100644 (file)
@@ -648,8 +648,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
        struct request *req = cmd->request;
        unsigned long flags;
 
-       scsi_unprep_request(req);
        spin_lock_irqsave(q->queue_lock, flags);
+       scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
index d1812d32f47d40c4c5e3c028aae8727c755f451f..63f0de29aa14eeb508645dd0b917e7c4163950c1 100644 (file)
@@ -827,7 +827,7 @@ static int __init maple_bus_init(void)
 
        maple_queue_cache =
            kmem_cache_create("maple_queue_cache", 0x400, 0,
-                             SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);
+                             SLAB_HWCACHE_ALIGN, NULL);
 
        if (!maple_queue_cache)
                goto cleanup_bothirqs;
index c95b286a1239e8a6cbdb4da4a292e5d85c9079f1..5d457c96bd7ed596bdaab11852fc9ad45acaa635 100644 (file)
@@ -22,6 +22,8 @@ menuconfig STAGING
          If in doubt, say N here.
 
 
+if STAGING
+
 config STAGING_EXCLUDE_BUILD
        bool "Exclude Staging drivers from being built" if STAGING
        default y
@@ -62,3 +64,4 @@ source "drivers/staging/at76_usb/Kconfig"
 source "drivers/staging/poch/Kconfig"
 
 endif # !STAGING_EXCLUDE_BUILD
+endif # STAGING
index 8e74657f106c01d152c8e5244e75100bdd94f682..43a863c5cc430367ad045625eb4c3faecbfba7f9 100644 (file)
@@ -51,6 +51,7 @@ static struct usb_device_id usbtmc_devices[] = {
        { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
        { 0, } /* terminating entry */
 };
+MODULE_DEVICE_TABLE(usb, usbtmc_devices);
 
 /*
  * This structure is the capabilities for the device
index 3d7793d93031525400d3ed746a60639ebed1d52f..8c081308b0e2d8bf2349d6b9e9703de0548213ac 100644 (file)
@@ -279,7 +279,9 @@ static int usb_unbind_interface(struct device *dev)
         * altsetting means creating new endpoint device entries).
         * When either of these happens, defer the Set-Interface.
         */
-       if (!error && intf->dev.power.status == DPM_ON)
+       if (intf->cur_altsetting->desc.bAlternateSetting == 0)
+               ;       /* Already in altsetting 0 so skip Set-Interface */
+       else if (!error && intf->dev.power.status == DPM_ON)
                usb_set_interface(udev, intf->altsetting[0].
                                desc.bInterfaceNumber, 0);
        else
index 428b5993575a9126104da3ec32ea695e7881c9e8..3a8bb53fc473269e31366d0c11069490a30cb7bf 100644 (file)
@@ -651,6 +651,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
                                fs_in_desc.bEndpointAddress;
                hs_out_desc.bEndpointAddress =
                                fs_out_desc.bEndpointAddress;
+               hs_notify_desc.bEndpointAddress =
+                               fs_notify_desc.bEndpointAddress;
 
                /* copy descriptors, and track endpoint copies */
                f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
                                f->hs_descriptors, &hs_in_desc);
                rndis->hs.out = usb_find_endpoint(eth_hs_function,
                                f->hs_descriptors, &hs_out_desc);
+               rndis->hs.notify = usb_find_endpoint(eth_hs_function,
+                               f->hs_descriptors, &hs_notify_desc);
        }
 
        rndis->port.open = rndis_open;
index aad1359a3eb18719c30dff180f70482b131ae886..fb6f2933b01badb0075c285078fb6067fc7b6827 100644 (file)
@@ -143,6 +143,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
 static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -166,6 +167,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
index 07a3992abad2b546095e35d247a3d7e68aec1eec..373ee09975bbd8c80b2a158aa32b9c6595a3bfbe 100644 (file)
@@ -40,6 +40,9 @@
 /* AlphaMicro Components AMC-232USB01 device */
 #define FTDI_AMC232_PID 0xFF00 /* Product Id */
 
+/* www.candapter.com Ewert Energy Systems CANdapter device */
+#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
 /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
 /* the VID is the standard ftdi vid (FTDI_VID) */
 #define FTDI_SCS_DEVICE_0_PID 0xD010    /* SCS PTC-IIusb */
@@ -75,6 +78,9 @@
 /* OpenDCC (www.opendcc.de) product id */
 #define FTDI_OPENDCC_PID       0xBFD8
 
+/* Sprog II (Andrew Crosland's SprogII DCC interface) */
+#define FTDI_SPROG_II          0xF0C8
+
 /* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
 /* they use the ftdi chipset for the USB interface and the vendor id is the same */
 #define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
index 491c8857b6448277a5cd49c51e88f6ac496233ad..1aed584be5eb9a528d904b3b3209afef29f40383 100644 (file)
@@ -91,6 +91,8 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
        { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
        { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
+       { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
        { }                                     /* Terminating entry */
 };
 
index a3bd039c78e96769beeff504e4da6c32f63daa14..54974f446a8c2f51f213d293d3a2f53bcaf6479d 100644 (file)
 /* Y.C. Cable U.S.A., Inc - USB to RS-232 */
 #define YCCABLE_VENDOR_ID      0x05ad
 #define YCCABLE_PRODUCT_ID     0x0fba
+
+/* "Superial" USB - Serial */
+#define SUPERIAL_VENDOR_ID     0x5372
+#define SUPERIAL_PRODUCT_ID    0x2303
+
+/* Hewlett-Packard LD220-HP POS Pole Display */
+#define HP_VENDOR_ID           0x03f0
+#define HP_LD220_PRODUCT_ID    0x3524
index 31c42d1cae13cf746e49cce12c9f588247da2c5c..01d0c70d60e9b41129b44e76893958990f780b10 100644 (file)
  * For questions or problems with this driver, contact Texas Instruments
  * technical support, or Al Borchers <alborchers@steinerpoint.com>, or
  * Peter Berger <pberger@brimson.com>.
- *
- * This driver needs this hotplug script in /etc/hotplug/usb/ti_usb_3410_5052
- * or in /etc/hotplug.d/usb/ti_usb_3410_5052.hotplug to set the device
- * configuration.
- *
- * #!/bin/bash
- *
- * BOOT_CONFIG=1
- * ACTIVE_CONFIG=2
- *
- * if [[ "$ACTION" != "add" ]]
- * then
- *     exit
- * fi
- *
- * CONFIG_PATH=/sys${DEVPATH%/?*}/bConfigurationValue
- *
- * if [[ 0`cat $CONFIG_PATH` -ne $BOOT_CONFIG ]]
- * then
- *     exit
- * fi
- *
- * PRODUCT=${PRODUCT%/?*}              # delete version
- * VENDOR_ID=`printf "%d" 0x${PRODUCT%/?*}`
- * PRODUCT_ID=`printf "%d" 0x${PRODUCT#*?/}`
- *
- * PARAM_PATH=/sys/module/ti_usb_3410_5052/parameters
- *
- * function scan() {
- *     s=$1
- *     shift
- *     for i
- *     do
- *             if [[ $s -eq $i ]]
- *             then
- *                     return 0
- *             fi
- *     done
- *     return 1
- * }
- *
- * IFS=$IFS,
- *
- * if (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_3410` &&
- * scan $PRODUCT_ID 13328 `cat $PARAM_PATH/product_3410`) ||
- * (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_5052` &&
- * scan $PRODUCT_ID 20562 20818 20570 20575 `cat $PARAM_PATH/product_5052`)
- * then
- *     echo $ACTIVE_CONFIG > $CONFIG_PATH
- * fi
  */
 
 #include <linux/kernel.h>
@@ -457,9 +407,10 @@ static int ti_startup(struct usb_serial *serial)
                goto free_tdev;
        }
 
-       /* the second configuration must be set (in sysfs by hotplug script) */
+       /* the second configuration must be set */
        if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) {
-               status = -ENODEV;
+               status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG);
+               status = status ? status : -ENODEV;
                goto free_tdev;
        }
 
index e61f2bfc64add77ab938367c45c08db542c589e0..bfcc1fe82518d0e5739b1e936cf43d1b1430390c 100644 (file)
@@ -167,8 +167,22 @@ UNUSUAL_DEV(  0x0421, 0x005d, 0x0001, 0x0600,
                US_SC_DEVICE, US_PR_DEVICE, NULL,
                US_FL_FIX_CAPACITY ),
 
+/* Reported by Ozan Sener <themgzzy@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x0060, 0x0551, 0x0551,
+               "Nokia",
+               "3500c",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY ),
+
+/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
+UNUSUAL_DEV(  0x0421, 0x0063, 0x0001, 0x0601,
+               "Nokia",
+               "Nokia 3109c",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY ),
+
 /* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0591,
+UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0701,
                "Nokia",
                "5310",
                US_SC_DEVICE, US_PR_DEVICE, NULL,
index 526c191e84ea9cfd32d0a6252e27046a90cfc6f4..8dc7109d61b760ab6162402797b24d98cff80b88 100644 (file)
 #include <linux/list.h>
 #include <linux/sysdev.h>
 
-#include <asm/xen/hypervisor.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 #include <xen/xenbus.h>
 #include <xen/features.h>
index 0707714e40d699d16032eae63fe0119c1ef3cad0..99eda169c77967ff72ca23ba25ac6026102ea18f 100644 (file)
@@ -8,7 +8,11 @@
 #include <linux/types.h>
 #include <linux/cache.h>
 #include <linux/module.h>
-#include <asm/xen/hypervisor.h>
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
 #include <xen/features.h>
 
 u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
index 06592b9da83c454a0117d8d9bf6c86633787e3d7..7d8f531fb8e8a60a342f2fce3f2168fe5127db39 100644 (file)
@@ -40,6 +40,7 @@
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <asm/xen/hypercall.h>
 
 #include <asm/pgtable.h>
 #include <asm/sync_bitops.h>
index 3031e3233dd625302715ff10c3e49ddc4b1aa7f7..2a983d49d19cba78984b5ae317ded774605121c1 100644 (file)
@@ -45,7 +45,7 @@ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
        struct v9fs_dentry *dent;
 
        P9_DPRINTK(P9_DEBUG_VFS, "fid %d dentry %s\n",
-                                       fid->fid, dentry->d_iname);
+                                       fid->fid, dentry->d_name.name);
 
        dent = dentry->d_fsdata;
        if (!dent) {
@@ -79,7 +79,7 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any)
        struct p9_fid *fid, *ret;
 
        P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
-               dentry->d_iname, dentry, uid, any);
+               dentry->d_name.name, dentry, uid, any);
        dent = (struct v9fs_dentry *) dentry->d_fsdata;
        ret = NULL;
        if (dent) {
index 24eb01087b6d63313404dff82b4f192177189342..332b5ff02fec11b8e8e43645c93ae09679ac9a61 100644 (file)
@@ -160,7 +160,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses)
                                v9ses->flags |= V9FS_ACCESS_ANY;
                        else {
                                v9ses->flags |= V9FS_ACCESS_SINGLE;
-                               v9ses->uid = simple_strtol(s, &e, 10);
+                               v9ses->uid = simple_strtoul(s, &e, 10);
                                if (*e != '\0')
                                        v9ses->uid = ~0;
                        }
index f9534f18df0a1a2bafdf70cd1b00466b8c5fb6b7..06dcc7c4f2343ebb9cced85a8da46c880849794f 100644 (file)
@@ -52,7 +52,8 @@
 
 static int v9fs_dentry_delete(struct dentry *dentry)
 {
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);
+       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
+                                                                       dentry);
 
        return 1;
 }
@@ -69,7 +70,8 @@ static int v9fs_dentry_delete(struct dentry *dentry)
 static int v9fs_cached_dentry_delete(struct dentry *dentry)
 {
        struct inode *inode = dentry->d_inode;
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);
+       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
+                                                                       dentry);
 
        if(!inode)
                return 1;
@@ -88,7 +90,8 @@ void v9fs_dentry_release(struct dentry *dentry)
        struct v9fs_dentry *dent;
        struct p9_fid *temp, *current_fid;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);
+       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
+                                                                       dentry);
        dent = dentry->d_fsdata;
        if (dent) {
                list_for_each_entry_safe(current_fid, temp, &dent->fidlist,
index 8314d3f43b716f41560e4517d42eaade5cfc1459..2dfcf5487efebff886a9f42c23733875d4264608 100644 (file)
@@ -963,7 +963,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
        if (buflen > PATH_MAX)
                buflen = PATH_MAX;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);
+       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
+                                                                       dentry);
 
        retval = v9fs_readlink(dentry, link, buflen);
 
@@ -1022,7 +1023,8 @@ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
        char *s = nd_get_link(nd);
 
-       P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, s);
+       P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
+               IS_ERR(s) ? "<error>" : s);
        if (!IS_ERR(s))
                __putname(s);
 }
index 2af8626ced435c10bea3654e38570f7decaa8a7d..6d51696dc762d3b6656ed027fe2d7ed40d549409 100644 (file)
@@ -3983,7 +3983,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
 
                node->flags = le16_to_cpu(pSMBr->DFSFlags);
                if (is_unicode) {
-                       __le16 *tmp = kmalloc(strlen(searchName)*2, GFP_KERNEL);
+                       __le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
+                                               GFP_KERNEL);
                        cifsConvertToUCS((__le16 *) tmp, searchName,
                                        PATH_MAX, nls_codepage, remap);
                        node->path_consumed = hostlen_fromUCS(tmp,
index 5f180cf7abbd6f4e21dc1a662932f06ad56a6407..5e0c0d0aef7dcbcf1c9baae7b108cc47bce1f9cf 100644 (file)
@@ -86,7 +86,8 @@
 #define OCFS2_CLEAR_INCOMPAT_FEATURE(sb,mask)                  \
        OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
 
-#define OCFS2_FEATURE_COMPAT_SUPP      OCFS2_FEATURE_COMPAT_BACKUP_SB
+#define OCFS2_FEATURE_COMPAT_SUPP      (OCFS2_FEATURE_COMPAT_BACKUP_SB \
+                                        | OCFS2_FEATURE_COMPAT_JBD2_SB)
 #define OCFS2_FEATURE_INCOMPAT_SUPP    (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
                                         | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \
                                         | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \
  */
 #define OCFS2_FEATURE_COMPAT_BACKUP_SB         0x0001
 
+/*
+ * The filesystem will correctly handle journal feature bits.
+ */
+#define OCFS2_FEATURE_COMPAT_JBD2_SB           0x0002
+
 /*
  * Unwritten extents support.
  */
index 054e2efb0b7e71f196025c777d61f2cc5954e0f9..74d7367ade13e1e36cb74a13327ca65573fabf86 100644 (file)
@@ -2645,9 +2645,9 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode,
                                return ret;
                        }
 
-                       i = xs->here - old_xh->xh_entries;
-                       xs->here = &xs->header->xh_entries[i];
                }
+               i = xs->here - old_xh->xh_entries;
+               xs->here = &xs->header->xh_entries[i];
        }
 
        return ret;
index 12c07c1866b2072f9c2529829c27dac5077c96a3..4c794d73fb8484e47fb0708beb7a470724707046 100644 (file)
@@ -8,9 +8,17 @@
 #ifdef CONFIG_GENERIC_BUG
 #ifndef __ASSEMBLY__
 struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        unsigned long   bug_addr;
+#else
+       signed int      bug_addr_disp;
+#endif
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        const char      *file;
+#else
+       signed int      file_disp;
+#endif
        unsigned short  line;
 #endif
        unsigned short  flags;
index ef87f889ef62c2e215b22a0b4d29a53d3b8f61fe..72ebe91005a8de190f795e91d77030d921ea86a5 100644 (file)
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)        (pte)
 #endif
 
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
@@ -289,6 +293,52 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #define arch_flush_lazy_cpu_mode()     do {} while (0)
 #endif
 
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+                                       unsigned long pfn, unsigned long size)
+{
+       return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+       return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+static inline void untrack_pfn_vma(struct vm_area_struct *vma,
+                                       unsigned long pfn, unsigned long size)
+{
+}
+#else
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+                               unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+                               unsigned long size);
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
index e5084eb5943a5f35fc82680e3a83103f19a46019..2bfda178f274ef5dfbe782fb117d5ea0d80391d4 100644 (file)
@@ -44,6 +44,7 @@ extern const struct dmi_device * dmi_find_device(int type, const char *name,
 extern void dmi_scan_machine(void);
 extern int dmi_get_year(int field);
 extern int dmi_name_in_vendors(const char *str);
+extern int dmi_name_in_serial(const char *str);
 extern int dmi_available;
 extern int dmi_walk(void (*decode)(const struct dmi_header *));
 
@@ -56,6 +57,7 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na
 static inline void dmi_scan_machine(void) { return; }
 static inline int dmi_get_year(int year) { return 0; }
 static inline int dmi_name_in_vendors(const char *s) { return 0; }
+static inline int dmi_name_in_serial(const char *s) { return 0; }
 #define dmi_available 0
 static inline int dmi_walk(void (*decode)(const struct dmi_header *))
        { return -1; }
index 17f76fc0517377094cf318a544eb55b2e8eaeb56..adc34f2c6eff9eb9dd3eb5e159cf730f5baba457 100644 (file)
@@ -100,6 +100,10 @@ struct kimage {
 #define KEXEC_TYPE_DEFAULT 0
 #define KEXEC_TYPE_CRASH   1
        unsigned int preserve_context : 1;
+
+#ifdef ARCH_HAS_KIMAGE_ARCH
+       struct kimage_arch arch;
+#endif
 };
 
 
index ffee2f74341856275ead62f71c6093c8de457199..d3ddd735e3755d288720742eddd2b7b7a7ff2368 100644 (file)
@@ -145,6 +145,23 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_WRITE       0x01    /* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR   0x02    /* Fault was via a nonlinear mapping */
 
+/*
+ * This interface is used by x86 PAT code to identify a pfn mapping that is
+ * linear over entire vma. This is to optimize PAT code that deals with
+ * marking the physical region with a particular prot. This is not for generic
+ * mm use. Note also that this check will not work if the pfn mapping is
+ * linear for a vma starting at physical address 0. In which case PAT code
+ * falls back to slow path of reserving physical range page by page.
+ */
+static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
+{
+       return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+}
+
+static inline int is_pfn_mapping(struct vm_area_struct *vma)
+{
+       return (vma->vm_flags & VM_PFNMAP);
+}
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -781,6 +798,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+               unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                        void *buf, int len, int write);
 
index 9d77b1d7dca806e540838d27bbc632961a340f0e..e26f54952892e9c46be142344e36ceb37f19f6f0 100644 (file)
@@ -319,6 +319,7 @@ enum
 {
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
+       NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
 };
 
 extern void __napi_schedule(struct napi_struct *n);
@@ -1497,6 +1498,12 @@ static inline void netif_rx_complete(struct net_device *dev,
 {
        unsigned long flags;
 
+       /*
+        * don't let napi dequeue from the cpu poll list
+        * just in case its running on a different cpu
+        */
+       if (unlikely(test_bit(NAPI_STATE_NPSVC, &napi->state)))
+               return;
        local_irq_save(flags);
        __netif_rx_complete(dev, napi);
        local_irq_restore(flags);
index c19595c8930491d8e0f64a2795d36cb127ee3b60..29fe9ea1d3463a5964ee158eeb1a39211b1c756f 100644 (file)
@@ -141,6 +141,7 @@ enum ctattr_protonat {
 #define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
 
 enum ctattr_natseq {
+       CTA_NAT_SEQ_UNSPEC,
        CTA_NAT_SEQ_CORRECTION_POS,
        CTA_NAT_SEQ_OFFSET_BEFORE,
        CTA_NAT_SEQ_OFFSET_AFTER,
index feb4657bb04338ee7140a4774237aaf914fcebac..03b0b8c3c81b5858282004b60ba9a0893d39a6b5 100644 (file)
@@ -134,6 +134,11 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
 };
 
+enum pci_irq_reroute_variant {
+       INTEL_IRQ_REROUTE_VARIANT = 1,
+       MAX_IRQ_REROUTE_VARIANTS = 3
+};
+
 typedef unsigned short __bitwise pci_bus_flags_t;
 enum pci_bus_flags {
        PCI_BUS_FLAGS_NO_MSI   = (__force pci_bus_flags_t) 1,
@@ -218,6 +223,7 @@ struct pci_dev {
        unsigned int    no_msi:1;       /* device may not use msi */
        unsigned int    block_ucfg_access:1;    /* userspace config space access is blocked */
        unsigned int    broken_parity_status:1; /* Device generates false positive parity */
+       unsigned int    irq_reroute_variant:2;  /* device needs IRQ rerouting variant */
        unsigned int    msi_enabled:1;
        unsigned int    msix_enabled:1;
        unsigned int    ari_enabled:1;  /* ARI forwarding */
index 1800f1d6e40dd1c22ff973e6f86f654ee27755f4..b6e6944542807e75d43d17b4d2e432c8f875b090 100644 (file)
 #define PCI_DEVICE_ID_INTEL_PXH_0      0x0329
 #define PCI_DEVICE_ID_INTEL_PXH_1      0x032A
 #define PCI_DEVICE_ID_INTEL_PXHV       0x032C
+#define PCI_DEVICE_ID_INTEL_80332_0    0x0330
+#define PCI_DEVICE_ID_INTEL_80332_1    0x0332
+#define PCI_DEVICE_ID_INTEL_80333_0    0x0370
+#define PCI_DEVICE_ID_INTEL_80333_1    0x0372
 #define PCI_DEVICE_ID_INTEL_82375      0x0482
 #define PCI_DEVICE_ID_INTEL_82424      0x0483
 #define PCI_DEVICE_ID_INTEL_82378      0x0484
 #define PCI_DEVICE_ID_INTEL_ESB_4      0x25a4
 #define PCI_DEVICE_ID_INTEL_ESB_5      0x25a6
 #define PCI_DEVICE_ID_INTEL_ESB_9      0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10     0x25ac
 #define PCI_DEVICE_ID_INTEL_82820_HB   0x2500
 #define PCI_DEVICE_ID_INTEL_82820_UP_HB        0x2501
 #define PCI_DEVICE_ID_INTEL_82850_HB   0x2530
index 3f9a60043a975617a3b8217f8e69b24751c13e01..6e7ba16ff454f878d28315c02f66572b9734bd09 100644 (file)
@@ -146,6 +146,8 @@ static inline void smp_send_reschedule(int cpu) { }
 })
 #define smp_call_function_mask(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
+#define smp_call_function_many(mask, func, info, wait) \
+                       (up_smp_call_function(func, info))
 static inline void init_call_single_data(void)
 {
 }
index 73a2f4eb1f7ae1a6f9c23131a56f5d1d6d631d58..9b42baed39009c75c8e3932a8c9be3cfbb0984f0 100644 (file)
@@ -158,8 +158,12 @@ struct usb_ctrlrequest {
  * (rarely) accepted by SET_DESCRIPTOR.
  *
  * Note that all multi-byte values here are encoded in little endian
- * byte order "on the wire".  But when exposed through Linux-USB APIs,
- * they've been converted to cpu byte order.
+ * byte order "on the wire".  Within the kernel and when exposed
+ * through the Linux-USB APIs, they are not converted to cpu byte
+ * order; it is the responsibility of the client code to do this.
+ * The single exception is when device and configuration descriptors (but
+ * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
+ * in this case the fields are converted to host endianness by the kernel.
  */
 
 /*
index 3025ae17ddbeb46207ca71e88b3d31d6aaad44a8..94c852d47d0f8d5c7d033279bf2f67ca3fd861dd 100644 (file)
@@ -135,9 +135,11 @@ struct dongle_reg {
 
 /* 
  * Per-packet information we need to hide inside sk_buff 
- * (must not exceed 48 bytes, check with struct sk_buff) 
+ * (must not exceed 48 bytes, check with struct sk_buff)
+ * The default_qdisc_pad field is a temporary hack.
  */
 struct irda_skb_cb {
+       unsigned int default_qdisc_pad;
        magic_t magic;       /* Be sure that we can trust the information */
        __u32   next_speed;  /* The Speed to be set *after* this frame */
        __u16   mtt;         /* Minimum turn around time */
index 919b5bdcb2bdcfe31f2e8f970e14a9533af7eb04..2090881c3650f57e3808f12d4743e1dbb8f1c93a 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
 #define __XEN_PUBLIC_EVENT_CHANNEL_H__
 
+#include <xen/interface/xen.h>
+
 typedef uint32_t evtchn_port_t;
 DEFINE_GUEST_HANDLE(evtchn_port_t);
 
index fe00b3b983a86387332234703217abb1d28ca202..8185a0f09594fd904ca2ab032dca27013708ff1c 100644 (file)
@@ -702,7 +702,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
         * any child cgroups exist. This is theoretically supportable
         * but involves complex error handling, so it's being left until
         * later */
-       if (!list_empty(&cgrp->children))
+       if (root->number_of_cgroups > 1)
                return -EBUSY;
 
        /* Process each subsystem */
index 5e79c662294bf542750af232be67a1e92bed858d..a140e44eebbacbe88b989e98b1bbdc744a99c94f 100644 (file)
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
        return 0;
 }
 
+static int no_timer_create(struct k_itimer *new_timer)
+{
+       return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
                .clock_getres = hrtimer_get_res,
                .clock_get = posix_get_monotonic_raw,
                .clock_set = do_posix_clock_nosettime,
+               .timer_create = no_timer_create,
        };
 
        register_posix_clock(CLOCK_REALTIME, &clock_realtime);
index 81787248b60fda5bc58b6231be84a6a54a763cb0..e8ab096ddfe399dc3007e32b169b46d02ed9cb48 100644 (file)
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 
        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
-        *                    max(scd->tick_gtod, scd->clock),
-        *                    max(scd->clock, scd->tick_gtod + TICK_NSEC));
+        *                    max(scd->tick_gtod, scd->clock),
+        *                    scd->tick_gtod + TICK_NSEC);
         */
 
        clock = scd->tick_gtod + delta;
        min_clock = wrap_max(scd->tick_gtod, scd->clock);
-       max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+       max_clock = scd->tick_gtod + TICK_NSEC;
 
        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);
index bfeafd60ee9fce4ba8e643c2beb5d8e66cff74e3..300e41afbf97b976006b9dc6f933e36c62172372 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
 
   CONFIG_BUG - emit BUG traps.  Nothing happens without this.
   CONFIG_GENERIC_BUG - enable this code.
+  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
+       the containing struct bug_entry for bug_addr and file.
   CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
 
   CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
 
 extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
 
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+       return bug->bug_addr;
+#else
+       return (unsigned long)bug + bug->bug_addr_disp;
+#endif
+}
+
 #ifdef CONFIG_MODULES
 static LIST_HEAD(module_bug_list);
 
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
                unsigned i;
 
                for (i = 0; i < mod->num_bugs; ++i, ++bug)
-                       if (bugaddr == bug->bug_addr)
+                       if (bugaddr == bug_addr(bug))
                                return bug;
        }
        return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
        const struct bug_entry *bug;
 
        for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-               if (bugaddr == bug->bug_addr)
+               if (bugaddr == bug_addr(bug))
                        return bug;
 
        return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 
        if (bug) {
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
                file = bug->file;
+#else
+               file = (const char *)bug + bug->file_disp;
+#endif
                line = bug->line;
 #endif
                warning = (bug->flags & BUGFLAG_WARNING) != 0;
index d83660fd6fdde53617121f3f72e38b58158126b9..8e30295e8566579515ca8126a694150e290107aa 100644 (file)
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
        nr_entries--;
 out:
        up(&debug_list_mutex);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
 
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
                                        dynamic_enabled = DYNAMIC_ENABLED_SOME;
                                        err = 0;
                                        printk(KERN_DEBUG
-                                              "debugging enabled for module %s",
+                                              "debugging enabled for module %s\n",
                                               elem->name);
                                } else if (!value && (elem->enable == 1)) {
                                        elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
                                        err = 0;
                                        printk(KERN_DEBUG
                                               "debugging disabled for module "
-                                              "%s", elem->name);
+                                              "%s\n", elem->name);
                                }
                        }
                }
index 164951c473058a25c081d5e47260d872068cdbb7..f01b7eed6e16c4e3f32e039b7f2223ca598f40aa 100644 (file)
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+       if (unlikely(is_pfn_mapping(vma))) {
+               /*
+                * We do not free on error cases below as remove_vma
+                * gets called on error from higher level routine
+                */
+               ret = track_pfn_vma_copy(vma);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * We need to invalidate the secondary MMU mappings only when
         * there could be a permission downgrade on the ptes of the
@@ -915,6 +925,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                if (vma->vm_flags & VM_ACCOUNT)
                        *nr_accounted += (end - start) >> PAGE_SHIFT;
 
+               if (unlikely(is_pfn_mapping(vma)))
+                       untrack_pfn_vma(vma, 0, 0);
+
                while (start != end) {
                        if (!tlb_start_valid) {
                                tlb_start = start;
@@ -1430,6 +1443,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
+       int ret;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1444,7 +1458,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+               return -EINVAL;
+
+       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+       if (ret)
+               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+       return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
@@ -1575,14 +1597,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (is_cow_mapping(vma->vm_flags)) {
-               if (addr != vma->vm_start || end != vma->vm_end)
-                       return -EINVAL;
+       if (addr == vma->vm_start && end == vma->vm_end)
                vma->vm_pgoff = pfn;
-       }
+       else if (is_cow_mapping(vma->vm_flags))
+               return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+       if (err)
+               return -EINVAL;
+
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
@@ -1594,6 +1619,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+
+       if (err)
+               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -2865,9 +2894,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif /* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-                       unsigned long address, unsigned int flags,
-                       unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -2876,24 +2905,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
        spinlock_t *ptl;
        resource_size_t phys_addr = 0;
        struct mm_struct *mm = vma->vm_mm;
+       int ret = -EINVAL;
 
-       VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
 
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto no_page_table;
+               goto out;
 
        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               goto no_page_table;
+               goto out;
 
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto no_page_table;
+               goto out;
 
        /* We cannot handle huge page PFN maps. Luckily they don't exist. */
        if (pmd_huge(*pmd))
-               goto no_page_table;
+               goto out;
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
@@ -2908,13 +2939,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
        phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
        *prot = pgprot_val(pte_pgprot(pte));
+       *phys = phys_addr;
+       ret = 0;
 
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
-       return phys_addr;
-no_page_table:
-       return 0;
+       return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2956,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
        void *maddr;
        int offset = addr & (PAGE_SIZE-1);
 
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-               return -EINVAL;
-
-       phys_addr = follow_phys(vma, addr, write, &prot);
-
-       if (!phys_addr)
+       if (follow_phys(vma, addr, write, &prot, &phys_addr))
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
index d8f07667fc8045e6690ab74a6e58806c7093ed7c..037b0967c1e3b40cda18e57a775d61867bafb95f 100644 (file)
@@ -998,7 +998,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
-               int err;
+               int err = -EFAULT;
 
                vma = find_vma(mm, addr);
                if (!vma)
index cb675d1267914dc263100b9c4a9ea5eb20fefa48..bf7e8fc3aed806542e44cc7b1d222d9e2b56dc3a 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        struct kmem_cache *c;
 
        c = slob_alloc(sizeof(struct kmem_cache),
-               flags, ARCH_KMALLOC_MINALIGN, -1);
+               GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
        if (c) {
                c->name = name;
index 90cb67a5417cccea9f0352b6316f747bec4c608d..54a9f87e5162ab0a24a00e057be2fcd7f5498022 100644 (file)
@@ -1462,6 +1462,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+       MAX_SWAPFILES_CHECK();
+       return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
index 6c7af390be0a515cfb1d09d2db79f16776e0dbe4..dadac6281f20392d0431d80a9001a6169ab94ca7 100644 (file)
@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 
        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
+       set_bit(NAPI_STATE_NPSVC, &napi->state);
 
        work = napi->poll(napi, budget);
 
+       clear_bit(NAPI_STATE_NPSVC, &napi->state);
        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
index bea54a685109ed3b6fa1fe3f621fb7522ceb2de8..8d489e746b21cb513d0d1b49b3984b514f6e634c 100644 (file)
@@ -61,7 +61,7 @@ static struct
 static struct xt_table nat_table = {
        .name           = "nat",
        .valid_hooks    = NAT_VALID_HOOKS,
-       .lock           = __RW_LOCK_UNLOCKED(__nat_table.lock),
+       .lock           = __RW_LOCK_UNLOCKED(nat_table.lock),
        .me             = THIS_MODULE,
        .af             = AF_INET,
 };
index 7cd22262de3af3fa0f2917eefaea885c068855ac..a453aac91bd3b740ef44363ef10e5be5390bf6dc 100644 (file)
 
 #include "tcp_vegas.h"
 
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-static int alpha = 2<<V_PARAM_SHIFT;
-static int beta  = 4<<V_PARAM_SHIFT;
-static int gamma = 1<<V_PARAM_SHIFT;
+static int alpha = 2;
+static int beta  = 4;
+static int gamma = 1;
 
 module_param(alpha, int, 0644);
-MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(alpha, "lower bound of packets in network");
 module_param(beta, int, 0644);
-MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(beta, "upper bound of packets in network");
 module_param(gamma, int, 0644);
 MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
 
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                return;
        }
 
-       /* The key players are v_beg_snd_una and v_beg_snd_nxt.
-        *
-        * These are so named because they represent the approximate values
-        * of snd_una and snd_nxt at the beginning of the current RTT. More
-        * precisely, they represent the amount of data sent during the RTT.
-        * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
-        * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
-        * bytes of data have been ACKed during the course of the RTT, giving
-        * an "actual" rate of:
-        *
-        *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
-        *
-        * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
-        * because delayed ACKs can cover more than one segment, so they
-        * don't line up nicely with the boundaries of RTTs.
-        *
-        * Another unfortunate fact of life is that delayed ACKs delay the
-        * advance of the left edge of our send window, so that the number
-        * of bytes we send in an RTT is often less than our cwnd will allow.
-        * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
-        */
-
        if (after(ack, vegas->beg_snd_nxt)) {
                /* Do the Vegas once-per-RTT cwnd adjustment. */
-               u32 old_wnd, old_snd_cwnd;
-
-
-               /* Here old_wnd is essentially the window of data that was
-                * sent during the previous RTT, and has all
-                * been acknowledged in the course of the RTT that ended
-                * with the ACK we just received. Likewise, old_snd_cwnd
-                * is the cwnd during the previous RTT.
-                */
-               old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
-                       tp->mss_cache;
-               old_snd_cwnd = vegas->beg_snd_cwnd;
 
                /* Save the extent of the current window so we can use this
                 * at the end of the next RTT.
                 */
-               vegas->beg_snd_una  = vegas->beg_snd_nxt;
                vegas->beg_snd_nxt  = tp->snd_nxt;
-               vegas->beg_snd_cwnd = tp->snd_cwnd;
 
                /* We do the Vegas calculations only if we got enough RTT
                 * samples that we can be reasonably sure that we got
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                         *
                         * This is:
                         *     (actual rate in segments) * baseRTT
-                        * We keep it as a fixed point number with
-                        * V_PARAM_SHIFT bits to the right of the binary point.
                         */
-                       target_cwnd = ((u64)old_wnd * vegas->baseRTT);
-                       target_cwnd <<= V_PARAM_SHIFT;
-                       do_div(target_cwnd, rtt);
+                       target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
 
                        /* Calculate the difference between the window we had,
                         * and the window we would like to have. This quantity
                         * is the "Diff" from the Arizona Vegas papers.
-                        *
-                        * Again, this is a fixed point number with
-                        * V_PARAM_SHIFT bits to the right of the binary
-                        * point.
                         */
-                       diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
+                       diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
                        if (diff > gamma && tp->snd_ssthresh > 2 ) {
                                /* Going too fast. Time to slow down
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                 * truncation robs us of full link
                                 * utilization.
                                 */
-                               tp->snd_cwnd = min(tp->snd_cwnd,
-                                                  ((u32)target_cwnd >>
-                                                   V_PARAM_SHIFT)+1);
+                               tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
 
                        } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
                                /* Slow start.  */
                                tcp_slow_start(tp);
                        } else {
                                /* Congestion avoidance. */
-                               u32 next_snd_cwnd;
 
                                /* Figure out where we would like cwnd
                                 * to be.
@@ -300,26 +249,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                        /* The old window was too fast, so
                                         * we slow down.
                                         */
-                                       next_snd_cwnd = old_snd_cwnd - 1;
+                                       tp->snd_cwnd--;
                                } else if (diff < alpha) {
                                        /* We don't have enough extra packets
                                         * in the network, so speed up.
                                         */
-                                       next_snd_cwnd = old_snd_cwnd + 1;
+                                       tp->snd_cwnd++;
                                } else {
                                        /* Sending just as fast as we
                                         * should be.
                                         */
-                                       next_snd_cwnd = old_snd_cwnd;
                                }
-
-                               /* Adjust cwnd upward or downward, toward the
-                                * desired value.
-                                */
-                               if (next_snd_cwnd > tp->snd_cwnd)
-                                       tp->snd_cwnd++;
-                               else if (next_snd_cwnd < tp->snd_cwnd)
-                                       tp->snd_cwnd--;
                        }
 
                        if (tp->snd_cwnd < 2)
index 172438320eec74287b045cd6b784bb3de0b38b0f..d0f54d18e19b92de1407eb9a36fed5344363d01d 100644 (file)
@@ -912,8 +912,13 @@ static void ndisc_recv_na(struct sk_buff *skb)
                   is invalid, but ndisc specs say nothing
                   about it. It could be misconfiguration, or
                   an smart proxy agent tries to help us :-)
+
+                  We should not print the error if NA has been
+                  received from loopback - it is just our own
+                  unsolicited advertisement.
                 */
-               ND_PRINTK1(KERN_WARNING
+               if (skb->pkt_type != PACKET_LOOPBACK)
+                       ND_PRINTK1(KERN_WARNING
                           "ICMPv6 NA: someone advertises our address on %s!\n",
                           ifp->idev->dev->name);
                in6_ifa_put(ifp);
index 90c8506a0aac913afd2ad4d62d0d234039fc9315..8c03080321784fe7fee76ec1ea2ebd3d85187765 100644 (file)
@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
                                      const struct in_addr *mask,
                                      struct netlbl_audit *audit_info)
 {
-       int ret_val = 0;
        struct netlbl_af4list *list_entry;
        struct netlbl_unlhsh_addr4 *entry;
        struct audit_buffer *audit_buf;
@@ -577,7 +576,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
        if (list_entry != NULL)
                entry = netlbl_unlhsh_addr4_entry(list_entry);
        else
-               ret_val = -ENOENT;
+               entry = NULL;
 
        audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
                                              audit_info);
@@ -588,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
                                          addr->s_addr, mask->s_addr);
                if (dev != NULL)
                        dev_put(dev);
-               if (entry && security_secid_to_secctx(entry->secid,
-                                                     &secctx,
-                                                     &secctx_len) == 0) {
+               if (entry != NULL &&
+                   security_secid_to_secctx(entry->secid,
+                                            &secctx, &secctx_len) == 0) {
                        audit_log_format(audit_buf, " sec_obj=%s", secctx);
                        security_release_secctx(secctx, secctx_len);
                }
-               audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+               audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
                audit_log_end(audit_buf);
        }
 
-       if (ret_val == 0)
-               call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
-       return ret_val;
+       if (entry == NULL)
+               return -ENOENT;
+
+       call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
+       return 0;
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -624,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
                                      const struct in6_addr *mask,
                                      struct netlbl_audit *audit_info)
 {
-       int ret_val = 0;
        struct netlbl_af6list *list_entry;
        struct netlbl_unlhsh_addr6 *entry;
        struct audit_buffer *audit_buf;
@@ -638,7 +638,7 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
        if (list_entry != NULL)
                entry = netlbl_unlhsh_addr6_entry(list_entry);
        else
-               ret_val = -ENOENT;
+               entry = NULL;
 
        audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
                                              audit_info);
@@ -649,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
                                          addr, mask);
                if (dev != NULL)
                        dev_put(dev);
-               if (entry && security_secid_to_secctx(entry->secid,
-                                                     &secctx,
-                                                     &secctx_len) == 0) {
+               if (entry != NULL &&
+                   security_secid_to_secctx(entry->secid,
+                                            &secctx, &secctx_len) == 0) {
                        audit_log_format(audit_buf, " sec_obj=%s", secctx);
                        security_release_secctx(secctx, secctx_len);
                }
-               audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+               audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
                audit_log_end(audit_buf);
        }
 
-       if (ret_val == 0)
-               call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
-       return ret_val;
+       if (entry == NULL)
+               return -ENOENT;
+
+       call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
+       return 0;
 }
 #endif /* IPv6 */
 
index 9978afbd9f2ad5033375df896358f88b929ecfcc..803eeef0aa856877ddced0e45fc283f4641da2ab 100644 (file)
@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len)
 static void gprs_write_space(struct sock *sk)
 {
        struct gprs_dev *dev = sk->sk_user_data;
+       struct net_device *net = dev->net;
        unsigned credits = pep_writeable(sk);
 
        spin_lock_bh(&dev->tx_lock);
        dev->tx_max = credits;
-       if (credits > skb_queue_len(&dev->tx_queue))
-               netif_wake_queue(dev->net);
+       if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
+               netif_wake_queue(net);
        spin_unlock_bh(&dev->tx_lock);
 }
 
@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk)
  * Network device callbacks
  */
 
+static int gprs_open(struct net_device *dev)
+{
+       struct gprs_dev *gp = netdev_priv(dev);
+
+       gprs_write_space(gp->sk);
+       return 0;
+}
+
+static int gprs_close(struct net_device *dev)
+{
+       struct gprs_dev *gp = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       flush_work(&gp->tx_work);
+       return 0;
+}
+
 static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
 {
        struct gprs_dev *dev = netdev_priv(net);
@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net)
        net->tx_queue_len       = 10;
 
        net->destructor         = free_netdev;
+       net->open               = gprs_open;
+       net->stop               = gprs_close;
        net->hard_start_xmit    = gprs_xmit; /* mandatory */
        net->change_mtu         = gprs_set_mtu;
        net->get_stats          = gprs_get_stats;
@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk)
        dev->sk = sk;
 
        printk(KERN_DEBUG"%s: attached\n", net->name);
-       gprs_write_space(sk); /* kick off TX */
        return net->ifindex;
 
 out_rel:
@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk)
 
        printk(KERN_DEBUG"%s: detached\n", net->name);
        unregister_netdev(net);
-       flush_scheduled_work();
        sock_put(sk);
-       skb_queue_purge(&dev->tx_queue);
 }
index a11959908d9a68c405114d56c5073c0d77647402..98402f0efa47727677b8d57acfe2ecd7cb9441b2 100644 (file)
@@ -46,9 +46,6 @@
         layering other disciplines.  It does not need to do bandwidth
         control either since that can be handled by using token
         bucket or other rate control.
-
-        The simulator is limited by the Linux timer resolution
-        and will create packet bursts on the HZ boundary (1ms).
 */
 
 struct netem_sched_data {
index 5dd3e89f620a2753511097d1fe86b4e6673afefa..596ceabd6504849b7925bc08c9d7e5ea1bbab136 100644 (file)
@@ -69,6 +69,7 @@ enum {
 };
 
 enum {
+       STAC_92HD73XX_NO_JD, /* no jack-detection */
        STAC_92HD73XX_REF,
        STAC_DELL_M6_AMIC,
        STAC_DELL_M6_DMIC,
@@ -127,6 +128,7 @@ enum {
 };
 
 enum {
+       STAC_D965_REF_NO_JD, /* no jack-detection */
        STAC_D965_REF,
        STAC_D965_3ST,
        STAC_D965_5ST,
@@ -1611,6 +1613,7 @@ static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
 };
 
 static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
+       [STAC_92HD73XX_NO_JD] = "no-jd",
        [STAC_92HD73XX_REF] = "ref",
        [STAC_DELL_M6_AMIC] = "dell-m6-amic",
        [STAC_DELL_M6_DMIC] = "dell-m6-dmic",
@@ -1640,6 +1643,8 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
                                "unknown Dell", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x029f,
                                "Dell Studio 1537", STAC_DELL_M6_DMIC),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a0,
+                               "Dell Studio 17", STAC_DELL_M6_DMIC),
        {} /* terminator */
 };
 
@@ -2027,6 +2032,7 @@ static unsigned int dell_3st_pin_configs[14] = {
 };
 
 static unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = {
+       [STAC_D965_REF_NO_JD] = ref927x_pin_configs,
        [STAC_D965_REF]  = ref927x_pin_configs,
        [STAC_D965_3ST]  = d965_3st_pin_configs,
        [STAC_D965_5ST]  = d965_5st_pin_configs,
@@ -2035,6 +2041,7 @@ static unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = {
 };
 
 static const char *stac927x_models[STAC_927X_MODELS] = {
+       [STAC_D965_REF_NO_JD]   = "ref-no-jd",
        [STAC_D965_REF]         = "ref",
        [STAC_D965_3ST]         = "3stack",
        [STAC_D965_5ST]         = "5stack",
@@ -2896,7 +2903,7 @@ static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec,
        }
 
        if ((spec->multiout.num_dacs - cfg->line_outs) > 0 &&
-                       cfg->hp_outs && !spec->multiout.hp_nid)
+           cfg->hp_outs == 1 && !spec->multiout.hp_nid)
                spec->multiout.hp_nid = nid;
 
        if (cfg->hp_outs > 1 && cfg->line_out_type == AUTO_PIN_LINE_OUT) {
@@ -4254,14 +4261,17 @@ again:
 
        switch (spec->multiout.num_dacs) {
        case 0x3: /* 6 Channel */
+               spec->multiout.hp_nid = 0x17;
                spec->mixer = stac92hd73xx_6ch_mixer;
                spec->init = stac92hd73xx_6ch_core_init;
                break;
        case 0x4: /* 8 Channel */
+               spec->multiout.hp_nid = 0x18;
                spec->mixer = stac92hd73xx_8ch_mixer;
                spec->init = stac92hd73xx_8ch_core_init;
                break;
        case 0x5: /* 10 Channel */
+               spec->multiout.hp_nid = 0x19;
                spec->mixer = stac92hd73xx_10ch_mixer;
                spec->init = stac92hd73xx_10ch_core_init;
        };
@@ -4297,6 +4307,7 @@ again:
                spec->amp_nids = &stac92hd73xx_amp_nids[DELL_M6_AMP];
                spec->eapd_switch = 0;
                spec->num_amps = 1;
+               spec->multiout.hp_nid = 0; /* dual HPs */
 
                if (!spec->init)
                        spec->init = dell_m6_core_init;
@@ -4351,6 +4362,9 @@ again:
                return err;
        }
 
+       if (spec->board_config == STAC_92HD73XX_NO_JD)
+               spec->hp_detect = 0;
+
        codec->patch_ops = stac92xx_patch_ops;
 
        return 0;
@@ -4899,6 +4913,10 @@ static int patch_stac927x(struct hda_codec *codec)
         */
        codec->bus->needs_damn_long_delay = 1;
 
+       /* no jack detecion for ref-no-jd model */
+       if (spec->board_config == STAC_D965_REF_NO_JD)
+               spec->hp_detect = 0;
+
        return 0;
 }
 
index e9084fdd2082efcbc14695e801ec6ad89ff66599..acd68efb2b758dc59692036d4bd6f445854dc985 100644 (file)
@@ -233,7 +233,7 @@ static int omap_pcm_open(struct snd_pcm_substream *substream)
        if (ret < 0)
                goto out;
 
-       prtd = kzalloc(sizeof(prtd), GFP_KERNEL);
+       prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
        if (prtd == NULL) {
                ret = -ENOMEM;
                goto out;