www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branches 'x86/apic', 'x86/asm', 'x86/fixmap', 'x86/memtest', 'x86/mm', 'x86...
author Ingo Molnar <mingo@elte.hu>
Tue, 10 Mar 2009 08:26:38 +0000 (09:26 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 10 Mar 2009 08:26:38 +0000 (09:26 +0100)
214 files changed:
Documentation/filesystems/squashfs.txt
Documentation/x86/earlyprintk.txt [new file with mode: 0644]
Makefile
arch/arm/kernel/setup.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/include/mach/board.h
arch/arm/mach-at91/pm.c
arch/arm/mm/abort-ev6.S
arch/arm/plat-s3c64xx/irq-eint.c
arch/blackfin/Kconfig
arch/blackfin/Kconfig.debug
arch/blackfin/configs/BF518F-EZBRD_defconfig
arch/blackfin/configs/BF527-EZKIT_defconfig
arch/blackfin/configs/BF533-EZKIT_defconfig
arch/blackfin/configs/BF533-STAMP_defconfig
arch/blackfin/configs/BF537-STAMP_defconfig
arch/blackfin/configs/BF538-EZKIT_defconfig
arch/blackfin/configs/BF548-EZKIT_defconfig
arch/blackfin/configs/BF561-EZKIT_defconfig
arch/blackfin/configs/BlackStamp_defconfig
arch/blackfin/configs/CM-BF527_defconfig
arch/blackfin/configs/CM-BF548_defconfig
arch/blackfin/configs/IP0X_defconfig
arch/blackfin/configs/SRV1_defconfig
arch/blackfin/include/asm/Kbuild
arch/blackfin/include/asm/bfin_sport.h
arch/blackfin/include/asm/ipipe.h
arch/blackfin/include/asm/ipipe_base.h
arch/blackfin/include/asm/irq.h
arch/blackfin/include/asm/thread_info.h
arch/blackfin/kernel/Makefile
arch/blackfin/kernel/cplb-nompu/cplbinit.c
arch/blackfin/kernel/ipipe.c
arch/blackfin/kernel/irqchip.c
arch/blackfin/kernel/kgdb_test.c
arch/blackfin/kernel/ptrace.c
arch/blackfin/kernel/setup.c
arch/blackfin/kernel/time.c
arch/blackfin/mach-bf518/boards/ezbrd.c
arch/blackfin/mach-bf518/include/mach/anomaly.h
arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-bf527/boards/cm_bf527.c
arch/blackfin/mach-bf527/boards/ezbrd.c
arch/blackfin/mach-bf527/include/mach/anomaly.h
arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-bf533/boards/Kconfig
arch/blackfin/mach-bf533/boards/Makefile
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf533/boards/cm_bf533.c
arch/blackfin/mach-bf533/boards/generic_board.c [deleted file]
arch/blackfin/mach-bf533/boards/ip0x.c
arch/blackfin/mach-bf533/include/mach/anomaly.h
arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-bf537/boards/Kconfig
arch/blackfin/mach-bf537/boards/Makefile
arch/blackfin/mach-bf537/boards/cm_bf537.c
arch/blackfin/mach-bf537/boards/generic_board.c [deleted file]
arch/blackfin/mach-bf537/boards/minotaur.c
arch/blackfin/mach-bf537/boards/pnav10.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf537/include/mach/anomaly.h
arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-bf538/include/mach/anomaly.h
arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-bf548/include/mach/anomaly.h
arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-bf548/include/mach/irq.h
arch/blackfin/mach-bf561/boards/Kconfig
arch/blackfin/mach-bf561/boards/Makefile
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/generic_board.c [deleted file]
arch/blackfin/mach-bf561/include/mach/anomaly.h
arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
arch/blackfin/mach-common/arch_checks.c
arch/blackfin/mach-common/cache.S
arch/blackfin/mach-common/clocks-init.c
arch/blackfin/mach-common/dpmc_modes.S
arch/blackfin/mach-common/entry.S
arch/blackfin/mach-common/interrupt.S
arch/blackfin/mach-common/ints-priority.c
arch/blackfin/mach-common/smp.c
arch/blackfin/mm/init.c
arch/ia64/sn/pci/pcibr/pcibr_dma.c
arch/mips/include/asm/compat.h
arch/powerpc/platforms/86xx/gef_sbc610.c
arch/s390/crypto/aes_s390.c
arch/x86/Kconfig
arch/x86/include/asm/apic.h
arch/x86/include/asm/apicdef.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/entry_arch.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/i387.h
arch/x86/include/asm/init.h [new file with mode: 0644]
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/linkage.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/page_types.h
arch/x86/include/asm/pat.h
arch/x86/include/asm/pgtable_32_types.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/kernel/Makefile
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/Makefile
arch/x86/kernel/cpu/mcheck/mce_32.c
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/cpu/mcheck/mce_intel_64.c
arch/x86/kernel/cpu/mcheck/threshold.c [new file with mode: 0644]
arch/x86/kernel/ds.c
arch/x86/kernel/efi.c
arch/x86/kernel/efi_64.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/i387.c
arch/x86/kernel/irq.c
arch/x86/kernel/irqinit_32.c
arch/x86/kernel/irqinit_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/uv_time.c [new file with mode: 0644]
arch/x86/math-emu/fpu_aux.c
arch/x86/mm/highmem_32.c
arch/x86/mm/init.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/kmmio.c
arch/x86/mm/memtest.c
arch/x86/mm/numa_32.c
arch/x86/mm/testmmiotrace.c
block/blk-merge.c
crypto/api.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/sata_nv.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/padlock-aes.c
drivers/crypto/padlock-sha.c
drivers/dca/dca-core.c
drivers/dma/dmatest.c
drivers/dma/fsldma.c
drivers/dma/ioat.c
drivers/dma/ioat_dca.c
drivers/dma/ioat_dma.c
drivers/dma/ioatdma.h
drivers/dma/ioatdma_hw.h
drivers/dma/ioatdma_registers.h
drivers/dma/iop-adma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/mv_xor.c
drivers/gpu/drm/drm_stub.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/ide/Kconfig
drivers/ide/Makefile
drivers/ide/at91_ide.c [new file with mode: 0644]
drivers/ide/ide-disk_proc.c
drivers/ide/ide-floppy_proc.c
drivers/ide/ide-io.c
drivers/ide/ide-iops.c
drivers/ide/ide-probe.c
drivers/ide/ide-proc.c
drivers/ide/ide-tape.c
drivers/mmc/core/mmc_ops.c
drivers/mtd/nand/orion_nand.c
drivers/net/arm/Makefile
drivers/net/arm/etherh.c
drivers/video/pxafb.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/ks8695_wdt.c
drivers/watchdog/orion5x_wdt.c
drivers/watchdog/rc32434_wdt.c
fs/ext4/ialloc.c
fs/squashfs/block.c
fs/squashfs/cache.c
fs/squashfs/inode.c
fs/squashfs/squashfs.h
fs/squashfs/super.c
include/linux/ata.h
include/linux/dmaengine.h
include/linux/hdreg.h
include/linux/ide.h
include/linux/libata.h
include/linux/rcuclassic.h
include/linux/rcupdate.h
include/linux/rcupreempt.h
include/linux/rcutree.h
include/linux/sched.h
include/linux/serio.h
init/main.c
kernel/rcuclassic.c
kernel/rcupdate.c
kernel/rcupreempt.c
kernel/rcutree.c
kernel/sched.c
kernel/softirq.c
kernel/sys.c
kernel/user.c
security/smack/smack_lsm.c
security/smack/smackfs.c
sound/pci/hda/patch_sigmatel.c

index 3e79e4a7a3920ac659ad2b3b76ed339aaf04e9f3..b324c033035ad097fda59b86897ae0720df9f09b 100644 (file)
@@ -22,7 +22,7 @@ Squashfs filesystem features versus Cramfs:
 
                                Squashfs                Cramfs
 
-Max filesystem size:           2^64                    16 MiB
+Max filesystem size:           2^64                    256 MiB
 Max file size:                 ~ 2 TiB                 16 MiB
 Max files:                     unlimited               unlimited
 Max directories:               unlimited               unlimited
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt
new file mode 100644 (file)
index 0000000..607b1a0
--- /dev/null
@@ -0,0 +1,101 @@
+
+Mini-HOWTO for using the earlyprintk=dbgp boot option with a
+USB2 Debug port key and a debug cable, on x86 systems.
+
+You need two computers, the 'USB debug key' special gadget and
+two USB cables, connected like this:
+
+  [host/target] <-------> [USB debug key] <-------> [client/console]
+
+1. There are three specific hardware requirements:
+
+ a.) Host/target system needs to have USB debug port capability.
+
+ You can check this capability by looking at a 'Debug port' bit in
+ the lspci -vvv output:
+
+ # lspci -vvv
+ ...
+ 00:1d.7 USB Controller: Intel Corporation 82801H (ICH8 Family) USB2 EHCI Controller #1 (rev 03) (prog-if 20 [EHCI])
+         Subsystem: Lenovo ThinkPad T61
+         Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx-
+         Status: Cap+ 66MHz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
+         Latency: 0
+         Interrupt: pin D routed to IRQ 19
+         Region 0: Memory at fe227000 (32-bit, non-prefetchable) [size=1K]
+         Capabilities: [50] Power Management version 2
+                 Flags: PMEClk- DSI- D1- D2- AuxCurrent=375mA PME(D0+,D1-,D2-,D3hot+,D3cold+)
+                 Status: D0 PME-Enable- DSel=0 DScale=0 PME+
+         Capabilities: [58] Debug port: BAR=1 offset=00a0
+                            ^^^^^^^^^^^ <==================== [ HERE ]
+        Kernel driver in use: ehci_hcd
+         Kernel modules: ehci-hcd
+ ...
+
+( If your system does not list a debug port capability then you probably
+  won't be able to use the USB debug key. )
+
+ b.) You also need a Netchip USB debug cable/key:
+
+        http://www.plxtech.com/products/NET2000/NET20DC/default.asp
+
+     This is a small blue plastic connector with two USB connections;
+     it draws power from its USB connections.
+
+ c.) Thirdly, you need a second client/console system with a regular USB port.
+
+2. Software requirements:
+
+ a.) On the host/target system:
+
+    You need to enable the following kernel config option:
+
+      CONFIG_EARLY_PRINTK_DBGP=y
+
+    And you need to add the boot command line: "earlyprintk=dbgp".
+    (If you are using Grub, append it to the 'kernel' line in
+     /etc/grub.conf)
+
+    NOTE: normally the earlyprintk console gets turned off once the
+    regular console is alive - use "earlyprintk=dbgp,keep" to keep
+    this channel open beyond early bootup. This can be useful for
+    debugging crashes under Xorg, etc.
+
+ b.) On the client/console system:
+
+    You should enable the following kernel config option:
+
+      CONFIG_USB_SERIAL_DEBUG=y
+
+    On the next bootup with the modified kernel you should
+    get one or more /dev/ttyUSBx devices.
+
+    Now this channel of kernel messages is ready to be used: start
+    your favorite terminal emulator (minicom, etc.) and set
+    it up to use /dev/ttyUSB0 - or use a raw 'cat /dev/ttyUSBx' to
+    see the raw output.
+
+ c.) On Nvidia Southbridge based systems: the kernel will try to probe
+     and find out which port has the debug device connected.
+
+3. Testing that it works fine:
+
+   You can test the output by using earlyprintk=dbgp,keep and provoking
+   kernel messages on the host/target system. You can provoke a harmless
+   kernel message, for example, by doing:
+
+     echo h > /proc/sysrq-trigger
+
+   On the host/target system you should see this help line in "dmesg" output:
+
+     SysRq : HELP : loglevel(0-9) reBoot Crashdump terminate-all-tasks(E) memory-full-oom-kill(F) kill-all-tasks(I) saK show-backtrace-all-active-cpus(L) show-memory-usage(M) nice-all-RT-tasks(N) powerOff show-registers(P) show-all-timers(Q) unRaw Sync show-task-states(T) Unmount show-blocked-tasks(W) dump-ftrace-buffer(Z)
+
+   On the client/console system do:
+
+       cat /dev/ttyUSB0
+
+   And you should see the help line above displayed shortly after you've
+   provoked it on the host system.
+
+If it does not work then please ask about it on the linux-kernel@vger.kernel.org
+mailing list or contact the x86 maintainers.
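
The HOWTO above uses a plain 'cat /dev/ttyUSB0' on the client/console side. For completeness, a minimal C equivalent is sketched below; it assumes the USB serial debug driver has created /dev/ttyUSB0 (the actual ttyUSBx node may differ) and simply relays the raw byte stream to stdout.

/*
 * Minimal client-side reader: a sketch equivalent to the
 * 'cat /dev/ttyUSB0' step in the HOWTO above.  The device node
 * name is an assumption; use whatever ttyUSBx node shows up.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/ttyUSB0";	/* assumed node name */
	char buf[256];
	ssize_t n;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	/* Relay raw bytes from the debug channel to stdout. */
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		if (write(STDOUT_FILENO, buf, n) < 0)
			break;
	}
	close(fd);
	return 0;
}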
index 27fb890a2bffe029236a2199637518bdaf951c6c..c40d83aedebef6cddffa500c6c97271340a79a21 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
index 7049815d66d566e7d89c08fd3c25e33b8c48c995..68d6494c0389751d5584e94e833ae139ae53d76b 100644 (file)
@@ -233,12 +233,13 @@ static void __init cacheid_init(void)
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();
 
-       if (arch >= CPU_ARCH_ARMv7) {
-               cacheid = CACHEID_VIPT_NONALIASING;
-               if ((cachetype & (3 << 14)) == 1 << 14)
-                       cacheid |= CACHEID_ASID_TAGGED;
-       } else if (arch >= CPU_ARCH_ARMv6) {
-               if (cachetype & (1 << 23))
+       if (arch >= CPU_ARCH_ARMv6) {
+               if ((cachetype & (7 << 29)) == 4 << 29) {
+                       /* ARMv7 register format */
+                       cacheid = CACHEID_VIPT_NONALIASING;
+                       if ((cachetype & (3 << 14)) == 1 << 14)
+                               cacheid |= CACHEID_ASID_TAGGED;
+               } else if (cachetype & (1 << 23))
                        cacheid = CACHEID_VIPT_ALIASING;
                else
                        cacheid = CACHEID_VIPT_NONALIASING;
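
The cacheid_init() change above stops keying the cache classification on the CPU architecture alone and instead recognises the ARMv7-format cache type register by its top bits: (cachetype & (7 << 29)) == 4 << 29, i.e. bits [31:29] equal to 0b100. A standalone sketch of just that classification step follows; the CACHEID_* values are illustrative stand-ins rather than the kernel's definitions, and the surrounding arch >= CPU_ARCH_ARMv6 check is omitted.

/*
 * Sketch of the new cache-type classification from cacheid_init().
 * The CACHEID_* values here are illustrative, not the kernel's.
 */
#include <stdio.h>

#define CACHEID_VIPT_NONALIASING	(1 << 0)
#define CACHEID_VIPT_ALIASING		(1 << 1)
#define CACHEID_ASID_TAGGED		(1 << 2)

static unsigned int classify_cache(unsigned int cachetype)
{
	unsigned int cacheid;

	if ((cachetype & (7 << 29)) == 4 << 29) {
		/* ARMv7 register format (bits [31:29] == 0b100) */
		cacheid = CACHEID_VIPT_NONALIASING;
		if ((cachetype & (3 << 14)) == 1 << 14)
			cacheid |= CACHEID_ASID_TAGGED;
	} else if (cachetype & (1 << 23)) {
		cacheid = CACHEID_VIPT_ALIASING;
	} else {
		cacheid = CACHEID_VIPT_NONALIASING;
	}
	return cacheid;
}

int main(void)
{
	/* 0x80048004: an example value with the ARMv7 format bits set */
	printf("cacheid = %#x\n", classify_cache(0x80048004u));
	return 0;
}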
index 134af97ff3403f1c309b4b6c962665c860fa796d..b7f23324231560635b0d053ac7fac703a249e19f 100644 (file)
@@ -347,6 +347,111 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
 void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
 #endif
 
+/* --------------------------------------------------------------------
+ *  Compact Flash (PCMCIA or IDE)
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) || \
+    defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE)
+
+static struct at91_cf_data cf0_data;
+
+static struct resource cf0_resources[] = {
+       [0] = {
+               .start  = AT91_CHIPSELECT_4,
+               .end    = AT91_CHIPSELECT_4 + SZ_256M - 1,
+               .flags  = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
+       }
+};
+
+static struct platform_device cf0_device = {
+       .id             = 0,
+       .dev            = {
+                               .platform_data  = &cf0_data,
+       },
+       .resource       = cf0_resources,
+       .num_resources  = ARRAY_SIZE(cf0_resources),
+};
+
+static struct at91_cf_data cf1_data;
+
+static struct resource cf1_resources[] = {
+       [0] = {
+               .start  = AT91_CHIPSELECT_5,
+               .end    = AT91_CHIPSELECT_5 + SZ_256M - 1,
+               .flags  = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
+       }
+};
+
+static struct platform_device cf1_device = {
+       .id             = 1,
+       .dev            = {
+                               .platform_data  = &cf1_data,
+       },
+       .resource       = cf1_resources,
+       .num_resources  = ARRAY_SIZE(cf1_resources),
+};
+
+void __init at91_add_device_cf(struct at91_cf_data *data)
+{
+       unsigned long ebi0_csa;
+       struct platform_device *pdev;
+
+       if (!data)
+               return;
+
+       /*
+        * assign CS4 or CS5 to SMC with Compact Flash logic support,
+        * we assume SMC timings are configured by board code,
+        * except True IDE where timings are controlled by driver
+        */
+       ebi0_csa = at91_sys_read(AT91_MATRIX_EBI0CSA);
+       switch (data->chipselect) {
+       case 4:
+               at91_set_A_periph(AT91_PIN_PD6, 0);  /* EBI0_NCS4/CFCS0 */
+               ebi0_csa |= AT91_MATRIX_EBI0_CS4A_SMC_CF1;
+               cf0_data = *data;
+               pdev = &cf0_device;
+               break;
+       case 5:
+               at91_set_A_periph(AT91_PIN_PD7, 0);  /* EBI0_NCS5/CFCS1 */
+               ebi0_csa |= AT91_MATRIX_EBI0_CS5A_SMC_CF2;
+               cf1_data = *data;
+               pdev = &cf1_device;
+               break;
+       default:
+               printk(KERN_ERR "AT91 CF: bad chip-select requested (%u)\n",
+                      data->chipselect);
+               return;
+       }
+       at91_sys_write(AT91_MATRIX_EBI0CSA, ebi0_csa);
+
+       if (data->det_pin) {
+               at91_set_gpio_input(data->det_pin, 1);
+               at91_set_deglitch(data->det_pin, 1);
+       }
+
+       if (data->irq_pin) {
+               at91_set_gpio_input(data->irq_pin, 1);
+               at91_set_deglitch(data->irq_pin, 1);
+       }
+
+       if (data->vcc_pin)
+               /* initially off */
+               at91_set_gpio_output(data->vcc_pin, 0);
+
+       /* enable EBI controlled pins */
+       at91_set_A_periph(AT91_PIN_PD5, 1);  /* NWAIT */
+       at91_set_A_periph(AT91_PIN_PD8, 0);  /* CFCE1 */
+       at91_set_A_periph(AT91_PIN_PD9, 0);  /* CFCE2 */
+       at91_set_A_periph(AT91_PIN_PD14, 0); /* CFNRW */
+
+       pdev->name = (data->flags & AT91_CF_TRUE_IDE) ? "at91_ide" : "at91_cf";
+       platform_device_register(pdev);
+}
+#else
+void __init at91_add_device_cf(struct at91_cf_data *data) {}
+#endif
 
 /* --------------------------------------------------------------------
  *  NAND / SmartMedia
index 0b3ae21b4565abb471fbedd59c15c96a3e4450f5..793fe7b25f367653292110e5936c1c8b4bcdc3d8 100644 (file)
@@ -56,6 +56,9 @@ struct at91_cf_data {
        u8      vcc_pin;                /* power switching */
        u8      rst_pin;                /* card reset */
        u8      chipselect;             /* EBI Chip Select number */
+       u8      flags;
+#define AT91_CF_TRUE_IDE       0x01
+#define AT91_IDE_SWAP_A0_A2    0x02
 };
 extern void __init at91_add_device_cf(struct at91_cf_data *data);
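
The new at91_add_device_cf() helper and the flags field added to struct at91_cf_data are meant to be called from board setup code. A hypothetical board-file fragment is sketched below; the pin assignments and chip select are invented for illustration, and only the API itself comes from this patch.

/*
 * Hypothetical board setup fragment.  Pin assignments are invented
 * for illustration; only the at91_add_device_cf() API and the
 * AT91_CF_TRUE_IDE flag come from the patch above.
 */
#include <mach/board.h>		/* struct at91_cf_data, at91_add_device_cf() */
#include <mach/gpio.h>		/* AT91_PIN_* definitions (path assumed) */

static struct at91_cf_data __initdata ek_cf_data = {
	.chipselect	= 4,			/* EBI0_NCS4/CFCS0 */
	.det_pin	= AT91_PIN_PC10,	/* assumed card-detect GPIO */
	.rst_pin	= AT91_PIN_PC11,	/* assumed reset GPIO */
	.flags		= AT91_CF_TRUE_IDE,	/* register as "at91_ide" */
};

static void __init ek_board_init(void)
{
	/* ... other on-board devices ... */
	at91_add_device_cf(&ek_cf_data);
}

When AT91_CF_TRUE_IDE is set, the device is registered under the "at91_ide" name and is picked up by the new drivers/ide/at91_ide.c driver added elsewhere in this merge; otherwise it registers as "at91_cf".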
 
index 9bb4f043aa22beb3fd08d0750cb75c155097b113..7ac812dc055a792476ffdc5a93e6568ae75e069a 100644 (file)
@@ -332,7 +332,6 @@ static int at91_pm_enter(suspend_state_t state)
                        at91_sys_read(AT91_AIC_IPR) & at91_sys_read(AT91_AIC_IMR));
 
 error:
-       sdram_selfrefresh_disable();
        target_state = PM_SUSPEND_ON;
        at91_irq_resume();
        at91_gpio_resume();
index 8a7f65ba14b761cb81ceaa87689f1f0d9a91ed3f..94077fbd96b7691850275d71114eb2b9d328f5d3 100644 (file)
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
        clrex
 #else
-       strex   r0, r1, [sp]                    @ Clear the exclusive monitor
+       sub     r1, sp, #4                      @ Get unused stack location
+       strex   r0, r1, [r1]                    @ Clear the exclusive monitor
 #endif
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
index 1f7cc0067f5cdd19fad4c487b2a2821687de3b92..ebb305ce7689f2aa0499f6d582ceebc453cddaf0 100644 (file)
@@ -55,7 +55,7 @@ static void s3c_irq_eint_unmask(unsigned int irq)
        u32 mask;
 
        mask = __raw_readl(S3C64XX_EINT0MASK);
-       mask |= eint_irq_to_bit(irq);
+       mask &= ~eint_irq_to_bit(irq);
        __raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
index 8f1f97d56e1ecfec191b185350e7f374cd0df00b..0c1f86e3e44a0bfa99700609e69dfe82551e3f3e 100644 (file)
@@ -1129,6 +1129,7 @@ endchoice
 
 config PM_WAKEUP_BY_GPIO
        bool "Allow Wakeup from Standby by GPIO"
+       depends on PM && !BF54x
 
 config PM_WAKEUP_GPIO_NUMBER
        int "GPIO number"
@@ -1168,6 +1169,12 @@ config PM_BFIN_WAKE_GP
        default n
        help
          Enable General-Purpose Wake-Up (Voltage Regulator Power-Up)
+         (all processors, except ADSP-BF549). This option sets
+         the general-purpose wake-up enable (GPWE) control bit to enable
+         wake-up upon detection of an active low signal on the /GPW (PH7) pin.
+         On ADSP-BF549 this option enables the same functionality on the
+         /MRXON pin (also PH7).
+
 endmenu
 
 menu "CPU Frequency scaling"
index 5f981d9ca625c4540ef6090e68d25d137ec29793..79e7e63ab70985ddea291a2656052dcd6f8a2c5c 100644 (file)
@@ -21,12 +21,6 @@ config DEBUG_STACK_USAGE
 config HAVE_ARCH_KGDB
        def_bool y
 
-config KGDB_TESTCASE
-       tristate "KGDB: for test case in expect"
-       default n
-       help
-         This is a kgdb test case for automated testing.
-
 config DEBUG_VERBOSE
        bool "Verbose fault messages"
        default y
index 4fdb9e04759f779abdcfaa6d6ee2e932a7e41ca7..281f4b60e603fe52cb8327d9069c081a183f824a 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc2
-# Fri Jan  9 17:58:41 2009
+# Linux kernel version: 2.6.28
+# Fri Feb 20 10:01:44 2009
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -133,10 +133,15 @@ CONFIG_BF518=y
 # CONFIG_BF538 is not set
 # CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
 # CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
 # CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
 # CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
 # CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
 # CONFIG_BF561 is not set
 CONFIG_BF_REV_MIN=0
 CONFIG_BF_REV_MAX=2
@@ -426,7 +431,17 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
-# CONFIG_NET_DSA is not set
+CONFIG_NET_DSA=y
+# CONFIG_NET_DSA_TAG_DSA is not set
+# CONFIG_NET_DSA_TAG_EDSA is not set
+# CONFIG_NET_DSA_TAG_TRAILER is not set
+CONFIG_NET_DSA_TAG_STPID=y
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_NET_DSA_KSZ8893M=y
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -529,6 +544,8 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
 #
 # Self-contained MTD device drivers
 #
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_MTD_PHRAM is not set
 # CONFIG_MTD_MTDRAM is not set
@@ -561,7 +578,9 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
 # CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -607,6 +626,7 @@ CONFIG_BFIN_RX_DESC_NUM=20
 # CONFIG_SMC91X is not set
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -764,7 +784,23 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-# CONFIG_SPI is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
 CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
 # CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
@@ -788,8 +824,10 @@ CONFIG_BFIN_WDT=y
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
 # CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -861,10 +899,18 @@ CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
 
 #
 # SPI RTC drivers
 #
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
@@ -1062,12 +1108,20 @@ CONFIG_DEBUG_INFO=y
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_FAULT_INJECTION is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+#
+# Tracers
+#
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
 # CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 # CONFIG_DEBUG_STACKOVERFLOW is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_KGDB_TESTCASE is not set
 CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
 # CONFIG_DEBUG_HWERR is not set
@@ -1100,6 +1154,7 @@ CONFIG_CRYPTO=y
 #
 # CONFIG_CRYPTO_FIPS is not set
 # CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_CRYPTD is not set
index 833128b3972496931aaa2028751bfaab52d8ba2b..a50050f17706efdf17de663ef30289b6c605a224 100644 (file)
@@ -327,8 +327,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
index 334c94b51c402fb11af7e25be76d9b27f40f1c99..0a2a00d638872361d6d2f6fec0a6ccf22d34e16a 100644 (file)
@@ -290,8 +290,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
index 9d733436e3009ed0e8fbdb0eeb53a1a051156bd2..eb027587a355d4095e939c769c3ab5ec7c014bc9 100644 (file)
@@ -290,8 +290,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
index 4fb4108d310322fbc9b82dc0e02d51a5fc461d67..9e62b9f40eb1ce8eec06ae8b1115214b538100f9 100644 (file)
@@ -298,8 +298,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
@@ -568,15 +568,7 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
 # CONFIG_MTD_DOC2000 is not set
 # CONFIG_MTD_DOC2001 is not set
 # CONFIG_MTD_DOC2001PLUS is not set
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-# CONFIG_MTD_NAND_BFIN is not set
-CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
-# CONFIG_MTD_NAND_NANDSIM is not set
-CONFIG_MTD_NAND_PLATFORM=m
+# CONFIG_MTD_NAND is not set
 # CONFIG_MTD_ONENAND is not set
 
 #
index cb32f5624a1b5b859bfbd23ed0f352fa99030a71..dd6ad6be1c872d4f271d58b80ae624cf908d31d0 100644 (file)
@@ -306,8 +306,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
index 0f8697618aa5605f29e52774e7db0d3bde4fb37a..6bc2fb1b2a70bb5e03ea25f2ad6b66e6ded1105d 100644 (file)
@@ -361,8 +361,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_BFIN_L2_CACHEABLE is not set
 # CONFIG_MPU is not set
 
@@ -680,7 +680,7 @@ CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
 # CONFIG_SCSI_TGT is not set
 # CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
 
 #
 # SCSI support type (disk, tape, CD-ROM)
index 042c7adfccfa994e42baba76eed0a56c081be836..69714fb3e608eb933b9f08ffdb70f27c61a50c26 100644 (file)
@@ -329,8 +329,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_BFIN_L2_CACHEABLE is not set
 # CONFIG_MPU is not set
 
index 3a20e281d23c7d26a79c18ce6e97d3c2d48c77dd..017c6ea071b5f1b42dffbebd478a8ed380a609ec 100644 (file)
@@ -288,8 +288,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
index 865ed85a5760e4ce27ac4d848c074fe8865dca6a..d880ef786770e3c4ab367a061e3d6e8379b428b1 100644 (file)
@@ -332,8 +332,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
index efe9741b1f146dd51e670a8c674741500bba5ac2..f410430b4e3d79a71e5ee2feaf0db29b1e3e5dc0 100644 (file)
@@ -336,8 +336,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 CONFIG_L1_MAX_PIECE=16
 # CONFIG_MPU is not set
 
@@ -595,7 +595,7 @@ CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
 # CONFIG_SCSI_TGT is not set
 # CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
 
 #
 # SCSI support type (disk, tape, CD-ROM)
index eae83b5de92f1ceca0360c9f2d4f130840c79713..7db93874c9875cfada3c72cb4138927f9840e9f0 100644 (file)
@@ -612,7 +612,7 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
 CONFIG_SCSI=y
 # CONFIG_SCSI_TGT is not set
 # CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
 
 #
 # SCSI support type (disk, tape, CD-ROM)
index fa580affc9d6eaa26779e5182946ae2ff481e5e5..a46529c6ade337d406f7037e2e2d8b6ebdd919fe 100644 (file)
@@ -282,8 +282,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 CONFIG_L1_MAX_PIECE=16
 
 #
index 606ecfdcc962e19dc68a8af35d730b2623603ebf..09c31418cc08dda85cb646998995bff825d00dbe 100644 (file)
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
+unifdef-y += bfin_sport.h
 unifdef-y += fixed_code.h
index fe88a2c19213285ca49ceb17a38c756ae95b9682..65a651db5b072bf72d22b544ac9d3f1b7a809056 100644 (file)
@@ -1,30 +1,9 @@
 /*
- * File:         include/asm-blackfin/bfin_sport.h
- * Based on:
- * Author:       Roy Huang (roy.huang@analog.com)
+ * bfin_sport.h - userspace header for bfin sport driver
  *
- * Created:      Thu Aug. 24 2006
- * Description:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef __BFIN_SPORT_H__
 #define NORM_FORMAT    0x0
 #define ALAW_FORMAT    0x2
 #define ULAW_FORMAT    0x3
-struct sport_register;
 
 /* Function driver which use sport must initialize the structure */
 struct sport_config {
-       /*TDM (multichannels), I2S or other mode */
+       /* TDM (multichannels), I2S or other mode */
        unsigned int mode:3;
 
        /* if TDM mode is selected, channels must be set */
@@ -72,12 +50,18 @@ struct sport_config {
        int serial_clk;
        int fsync_clk;
 
-       unsigned int data_format:2;     /*Normal, u-law or a-law */
+       unsigned int data_format:2;     /* Normal, u-law or a-law */
 
        int word_len;           /* Length of the word in bits, 3-32 bits */
        int dma_enabled;
 };
 
+/* Userspace interface */
+#define SPORT_IOC_MAGIC                'P'
+#define SPORT_IOC_CONFIG       _IOWR('P', 0x01, struct sport_config)
+
+#ifdef __KERNEL__
+
 struct sport_register {
        unsigned short tcr1;
        unsigned short reserved0;
@@ -117,9 +101,6 @@ struct sport_register {
        unsigned long mrcs3;
 };
 
-#define SPORT_IOC_MAGIC                'P'
-#define SPORT_IOC_CONFIG       _IOWR('P', 0x01, struct sport_config)
-
 struct sport_dev {
        struct cdev cdev;       /* Char device structure */
 
@@ -149,6 +130,8 @@ struct sport_dev {
        struct sport_config config;
 };
 
+#endif
+
 #define SPORT_TCR1     0
 #define        SPORT_TCR2      1
 #define        SPORT_TCLKDIV   2
@@ -169,4 +152,4 @@ struct sport_dev {
 #define SPORT_MRCS2    22
 #define SPORT_MRCS3    23
 
-#endif                         /*__BFIN_SPORT_H__*/
+#endif
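
With the SPORT_IOC_MAGIC/SPORT_IOC_CONFIG definitions moved outside the new #ifdef __KERNEL__ block, user space can configure the driver through the ioctl. A rough sketch of such a caller follows; the /dev/sport0 node name, the header install path and the concrete field values are assumptions, while struct sport_config and SPORT_IOC_CONFIG come from the header above.

/*
 * Rough userspace sketch for the bfin sport ioctl interface.
 * "/dev/sport0" and the field values are assumptions; the struct
 * and SPORT_IOC_CONFIG come from bfin_sport.h as patched above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/bfin_sport.h>	/* install path of the header is assumed */

int main(void)
{
	struct sport_config cfg;
	int fd = open("/dev/sport0", O_RDWR);

	if (fd < 0) {
		perror("/dev/sport0");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.data_format = NORM_FORMAT;	/* normal (linear) samples */
	cfg.word_len = 16;		/* 3-32 bits per word */
	cfg.dma_enabled = 1;

	if (ioctl(fd, SPORT_IOC_CONFIG, &cfg) < 0) {
		perror("SPORT_IOC_CONFIG");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}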
index 76f53d8b9a0d2e25650d34bc48c9b46cef536721..343b56361ec98db86252f75a3e0f01f07bbea7c1 100644 (file)
@@ -35,9 +35,9 @@
 #include <asm/atomic.h>
 #include <asm/traps.h>
 
-#define IPIPE_ARCH_STRING     "1.8-00"
+#define IPIPE_ARCH_STRING     "1.9-00"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    8
+#define IPIPE_MINOR_NUMBER    9
 #define IPIPE_PATCH_NUMBER    0
 
 #ifdef CONFIG_SMP
@@ -83,9 +83,9 @@ struct ipipe_sysinfo {
                                "%2 = CYCLES2\n"                \
                                "CC = %2 == %0\n"               \
                                "if ! CC jump 1b\n"             \
-                               : "=r" (((unsigned long *)&t)[1]),      \
-                                 "=r" (((unsigned long *)&t)[0]),      \
-                                 "=r" (__cy2)                          \
+                               : "=d,a" (((unsigned long *)&t)[1]),    \
+                                 "=d,a" (((unsigned long *)&t)[0]),    \
+                                 "=d,a" (__cy2)                                \
                                : /*no input*/ : "CC");                 \
        t;                                                              \
        })
@@ -118,35 +118,40 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
 
 #define __ipipe_disable_irq(irq)       (irq_desc[irq].chip->mask(irq))
 
-#define __ipipe_lock_root()                                    \
-       set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+static inline int __ipipe_check_tickdev(const char *devname)
+{
+       return 1;
+}
 
-#define __ipipe_unlock_root()                                  \
-       clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+static inline void __ipipe_lock_root(void)
+{
+       set_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+static inline void __ipipe_unlock_root(void)
+{
+       clear_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
+}
 
 void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline(syncmask)                                        \
-       do {                                                            \
-               struct ipipe_domain *ipd = ipipe_current_domain;        \
-               if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
-                       __ipipe_sync_stage(syncmask);                   \
-       } while (0)
+#define __ipipe_sync_pipeline  ___ipipe_sync_pipeline
+void ___ipipe_sync_pipeline(unsigned long syncmask);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
 int __ipipe_get_irq_priority(unsigned irq);
 
-int __ipipe_get_irqthread_priority(unsigned irq);
-
 void __ipipe_stall_root_raw(void);
 
 void __ipipe_unstall_root_raw(void);
 
 void __ipipe_serial_debug(const char *fmt, ...);
 
+asmlinkage void __ipipe_call_irqtail(unsigned long addr);
+
 DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
 extern unsigned long __ipipe_core_clock;
@@ -162,42 +167,25 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 
 #define __ipipe_run_irqtail()  /* Must be a macro */                   \
        do {                                                            \
-               asmlinkage void __ipipe_call_irqtail(void);             \
                unsigned long __pending;                                \
-               CSYNC();                                        \
+               CSYNC();                                                \
                __pending = bfin_read_IPEND();                          \
                if (__pending & 0x8000) {                               \
                        __pending &= ~0x8010;                           \
                        if (__pending && (__pending & (__pending - 1)) == 0) \
-                               __ipipe_call_irqtail();                 \
+                               __ipipe_call_irqtail(__ipipe_irq_tail_hook); \
                }                                                       \
        } while (0)
 
 #define __ipipe_run_isr(ipd, irq)                                      \
        do {                                                            \
                if (ipd == ipipe_root_domain) {                         \
-                       /*                                              \
-                        * Note: the I-pipe implements a threaded interrupt model on \
-                        * this arch for Linux external IRQs. The interrupt handler we \
-                        * call here only wakes up the associated IRQ thread. \
-                        */                                             \
-                       if (ipipe_virtual_irq_p(irq)) {                 \
-                               /* No irqtail here; virtual interrupts have no effect \
-                                  on IPEND so there is no need for processing \
-                                  deferral. */                         \
-                               local_irq_enable_nohead(ipd);           \
+                       local_irq_enable_hw();                          \
+                       if (ipipe_virtual_irq_p(irq))                   \
                                ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-                               local_irq_disable_nohead(ipd);          \
-                       } else                                          \
-                               /*                                      \
-                                * No need to run the irqtail here either; \
-                                * we can't be preempted by hw IRQs, so \
-                                * non-Linux IRQs cannot stack over the short \
-                                * thread wakeup code. Which in turn means \
-                                * that no irqtail condition could be pending \
-                                * for domains above Linux in the pipeline. \
-                                */                                     \
+                       else                                            \
                                ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
+                       local_irq_disable_hw();                         \
                } else {                                                \
                        __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
                        local_irq_enable_nohead(ipd);                   \
@@ -217,42 +205,24 @@ void ipipe_init_irq_threads(void);
 
 int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
-#define IS_SYSIRQ(irq)         ((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
-#define IS_GPIOIRQ(irq)                ((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)
-
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#define IRQ_SYSTMR             IRQ_CORETMR
+#define IRQ_PRIOTMR            IRQ_CORETMR
+#else
 #define IRQ_SYSTMR             IRQ_TIMER0
 #define IRQ_PRIOTMR            CONFIG_IRQ_TIMER0
+#endif
 
-#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533)
-#define PRIO_GPIODEMUX(irq)    CONFIG_PFA
-#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
-#define PRIO_GPIODEMUX(irq)    CONFIG_IRQ_PROG_INTA
-#elif defined(CONFIG_BF52x)
-#define PRIO_GPIODEMUX(irq)    ((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
-                                (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
-                                (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
-                                -1)
-#elif defined(CONFIG_BF561)
-#define PRIO_GPIODEMUX(irq)    ((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
-                                (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
-                                (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
-                                -1)
+#ifdef CONFIG_BF561
 #define bfin_write_TIMER_DISABLE(val)  bfin_write_TMRS8_DISABLE(val)
 #define bfin_write_TIMER_ENABLE(val)   bfin_write_TMRS8_ENABLE(val)
 #define bfin_write_TIMER_STATUS(val)   bfin_write_TMRS8_STATUS(val)
 #define bfin_read_TIMER_STATUS()       bfin_read_TMRS8_STATUS()
 #elif defined(CONFIG_BF54x)
-#define PRIO_GPIODEMUX(irq)    ((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
-                                (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
-                                (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
-                                (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
-                                -1)
 #define bfin_write_TIMER_DISABLE(val)  bfin_write_TIMER_DISABLE0(val)
 #define bfin_write_TIMER_ENABLE(val)   bfin_write_TIMER_ENABLE0(val)
 #define bfin_write_TIMER_STATUS(val)   bfin_write_TIMER_STATUS0(val)
 #define bfin_read_TIMER_STATUS(val)    bfin_read_TIMER_STATUS0(val)
-#else
-# error "no PRIO_GPIODEMUX() for this part"
 #endif
 
 #define __ipipe_root_tick_p(regs)      ((regs->ipend & 0x10) != 0)
@@ -275,4 +245,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
 #endif /* !CONFIG_IPIPE */
 
+#define ipipe_update_tick_evtdev(evtdev)       do { } while (0)
+
 #endif /* !__ASM_BLACKFIN_IPIPE_H */
index cb1025aeabcfc1c6ff89f74bf1ae2fa3cc37ba63..3e8acbd1a3bee6843e348a942e7cc476bfd51918 100644 (file)
@@ -1,5 +1,5 @@
 /*   -*- linux-c -*-
- *   include/asm-blackfin/_baseipipe.h
+ *   include/asm-blackfin/ipipe_base.h
  *
  *   Copyright (C) 2007 Philippe Gerum.
  *
@@ -27,8 +27,9 @@
 #define IPIPE_NR_XIRQS         NR_IRQS
 #define IPIPE_IRQ_ISHIFT       5       /* 2^5 for 32bits arch. */
 
-/* Blackfin-specific, global domain flags */
-#define IPIPE_ROOTLOCK_FLAG    1       /* Lock pipeline for root */
+/* Blackfin-specific, per-cpu pipeline status */
+#define IPIPE_SYNCDEFER_FLAG   15
+#define IPIPE_SYNCDEFER_MASK   (1L << IPIPE_SYNCDEFER_FLAG)
 
  /* Blackfin traps -- i.e. exception vector numbers */
 #define IPIPE_NR_FAULTS                52 /* We leave a gap after VEC_ILL_RES. */
 
 #ifndef __ASSEMBLY__
 
-#include <linux/bitops.h>
-
-extern int test_bit(int nr, const void *addr);
-
-
 extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
 
 static inline void __ipipe_stall_root(void)
index 3d977909ce7da0a41179af244608d85eb0ab2c1b..7645e85a5f6f70319c2a20158c0f5d39fbe84ce7 100644 (file)
@@ -61,20 +61,38 @@ void __ipipe_restore_root(unsigned long flags);
 #define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
 #define local_test_iflag_hw(x)         irqs_enabled_from_flags_hw(x)
 
-#define local_save_flags(x)                                            \
-       do {                                                            \
-               (x) = __ipipe_test_root() ? \
+#define local_save_flags(x)                                     \
+       do {                                                     \
+               (x) = __ipipe_test_root() ?                      \
                        __all_masked_irq_flags : bfin_irq_flags; \
+               barrier();                                       \
        } while (0)
 
-#define local_irq_save(x)                              \
-       do {                                            \
-               (x) = __ipipe_test_and_stall_root();    \
+#define local_irq_save(x)                                       \
+       do {                                                     \
+               (x) = __ipipe_test_and_stall_root() ?            \
+                       __all_masked_irq_flags : bfin_irq_flags; \
+               barrier();                                       \
+       } while (0)
+
+static inline void local_irq_restore(unsigned long x)
+{
+       barrier();
+       __ipipe_restore_root(x == __all_masked_irq_flags);
+}
+
+#define local_irq_disable()                    \
+       do {                                    \
+               __ipipe_stall_root();           \
+               barrier();                      \
        } while (0)
 
-#define local_irq_restore(x)   __ipipe_restore_root(x)
-#define local_irq_disable()    __ipipe_stall_root()
-#define local_irq_enable()     __ipipe_unstall_root()
+static inline void local_irq_enable(void)
+{
+       barrier();
+       __ipipe_unstall_root();
+}
+
 #define irqs_disabled()                __ipipe_test_root()
 
 #define local_save_flags_hw(x) \
index e721ce55956c6a96782e3093c76aa88b11914eb6..2920087516f2a08175b54ed3086844434db937c9 100644 (file)
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE              4
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
 #define TIF_FREEZE              6       /* is freezing for suspend */
+#define TIF_IRQ_SYNC            7       /* sync pipeline stage */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_FREEZE             (1<<TIF_FREEZE)
+#define _TIF_IRQ_SYNC           (1<<TIF_IRQ_SYNC)
 
 #define _TIF_WORK_MASK         0x0000FFFE      /* work to do on interrupt/exception return */
 
index 4a92a86824b7bd1d6164eae8d8dcb2ca08bbfd80..fd4d4328a0f2aa94fb5ef97afcc01f0018e89edf 100644 (file)
@@ -15,13 +15,15 @@ else
     obj-y += time.o
 endif
 
-CFLAGS_kgdb_test.o := -mlong-calls -O0
-
 obj-$(CONFIG_IPIPE)                  += ipipe.o
 obj-$(CONFIG_IPIPE_TRACE_MCOUNT)     += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS)          += gptimers.o
 obj-$(CONFIG_CPLB_INFO)              += cplbinfo.o
 obj-$(CONFIG_MODULES)                += module.o
 obj-$(CONFIG_KGDB)                   += kgdb.o
-obj-$(CONFIG_KGDB_TESTCASE)          += kgdb_test.o
+obj-$(CONFIG_KGDB_TESTS)             += kgdb_test.o
 obj-$(CONFIG_EARLY_PRINTK)           += early_printk.o
+
+# the kgdb test puts code into L2 and without linker
+# relaxation, we need to force long calls to/from it
+CFLAGS_kgdb_test.o := -mlong-calls -O0
index 0e28f75957330d92406b08adbc13024b9e5017c8..d6c067782e638987ba406164f6df65adeb0b15db 100644 (file)
@@ -53,9 +53,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
        i_d = i_i = 0;
 
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
        /* Set up the zero page.  */
        d_tbl[i_d].addr = 0;
        d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+       i_tbl[i_i].addr = 0;
+       i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+#endif
 
        /* Cover kernel memory with 4M pages.  */
        addr = 0;
index 339be5a3ae6a64f3f6bcc7bf9f9514a86b167ed8..a5de8d45424cda8b29f21d338e6224c94892cb1b 100644 (file)
 #include <asm/atomic.h>
 #include <asm/io.h>
 
-static int create_irq_threads;
-
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
-static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
-
-static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
-
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 static void __ipipe_no_irqtail(void);
@@ -93,6 +87,7 @@ void __ipipe_enable_pipeline(void)
  */
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 {
+       struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
        struct ipipe_domain *this_domain, *next_domain;
        struct list_head *head, *pos;
        int m_ack, s = -1;
@@ -104,7 +99,6 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
         * interrupt.
         */
        m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-
        this_domain = ipipe_current_domain;
 
        if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
@@ -114,49 +108,28 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
                next_domain = list_entry(head, struct ipipe_domain, p_link);
                if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
-                               next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
-                       if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-                               s = __test_and_set_bit(IPIPE_STALL_FLAG,
-                                                      &ipipe_root_cpudom_var(status));
+                               next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+                       if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+                               s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
                        __ipipe_dispatch_wired(next_domain, irq);
-                               goto finalize;
-                       return;
+                       goto out;
                }
        }
 
        /* Ack the interrupt. */
 
        pos = head;
-
        while (pos != &__ipipe_pipeline) {
                next_domain = list_entry(pos, struct ipipe_domain, p_link);
-               /*
-                * For each domain handling the incoming IRQ, mark it
-                * as pending in its log.
-                */
                if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
-                       /*
-                        * Domains that handle this IRQ are polled for
-                        * acknowledging it by decreasing priority
-                        * order. The interrupt must be made pending
-                        * _first_ in the domain's status flags before
-                        * the PIC is unlocked.
-                        */
                        __ipipe_set_irq_pending(next_domain, irq);
-
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
-                               next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+                               next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
                                m_ack = 1;
                        }
                }
-
-               /*
-                * If the domain does not want the IRQ to be passed
-                * down the interrupt pipe, exit the loop now.
-                */
                if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
                        break;
-
                pos = next_domain->p_link.next;
        }
 
@@ -166,18 +139,24 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
         * immediately to the current domain if the interrupt has been
         * marked as 'sticky'. This search does not go beyond the
         * current domain in the pipeline. We also enforce the
-        * additional root stage lock (blackfin-specific). */
+        * additional root stage lock (blackfin-specific).
+        */
+       if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+               s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
 
-       if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-               s = __test_and_set_bit(IPIPE_STALL_FLAG,
-                                      &ipipe_root_cpudom_var(status));
-finalize:
+       /*
+        * If the interrupt preempted the head domain, then do not
+        * even try to walk the pipeline, unless an interrupt is
+        * pending for it.
+        */
+       if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
+           ipipe_head_cpudom_var(irqpend_himask) == 0)
+               goto out;
 
        __ipipe_walk_pipeline(head);
-
+out:
        if (!s)
-               __clear_bit(IPIPE_STALL_FLAG,
-                           &ipipe_root_cpudom_var(status));
+               __clear_bit(IPIPE_STALL_FLAG, &p->status);
 }
 
 int __ipipe_check_root(void)
@@ -187,7 +166,7 @@ int __ipipe_check_root(void)
 
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-       struct irq_desc *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
        int prio = desc->ic_prio;
 
        desc->depth = 0;
@@ -199,7 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
 
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-       struct irq_desc *desc = irq_desc + irq;
+       struct irq_desc *desc = irq_to_desc(irq);
        int prio = desc->ic_prio;
 
        if (ipd != &ipipe_root &&
@@ -236,15 +215,18 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 {
        unsigned long flags;
 
-       /* We need to run the IRQ tail hook whenever we don't
+       /*
+        * We need to run the IRQ tail hook whenever we don't
         * propagate a syscall to higher domains, because we know that
         * important operations might be pending there (e.g. Xenomai
-        * deferred rescheduling). */
+        * deferred rescheduling).
+        */
 
-       if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+       if (regs->orig_p0 < NR_syscalls) {
                void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
                hook();
-               return 0;
+               if ((current->flags & PF_EVNOTIFY) == 0)
+                       return 0;
        }
 
        /*
@@ -312,112 +294,46 @@ int ipipe_trigger_irq(unsigned irq)
 {
        unsigned long flags;
 
+#ifdef CONFIG_IPIPE_DEBUG
        if (irq >= IPIPE_NR_IRQS ||
            (ipipe_virtual_irq_p(irq)
             && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
                return -EINVAL;
+#endif
 
        local_irq_save_hw(flags);
-
        __ipipe_handle_irq(irq, NULL);
-
        local_irq_restore_hw(flags);
 
        return 1;
 }
 
-/* Move Linux IRQ to threads. */
-
-static int do_irqd(void *__desc)
+asmlinkage void __ipipe_sync_root(void)
 {
-       struct irq_desc *desc = __desc;
-       unsigned irq = desc - irq_desc;
-       int thrprio = desc->thr_prio;
-       int thrmask = 1 << thrprio;
-       int cpu = smp_processor_id();
-       cpumask_t cpumask;
-
-       sigfillset(&current->blocked);
-       current->flags |= PF_NOFREEZE;
-       cpumask = cpumask_of_cpu(cpu);
-       set_cpus_allowed(current, cpumask);
-       ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
-
-       while (!kthread_should_stop()) {
-               local_irq_disable();
-               if (!(desc->status & IRQ_SCHEDULED)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-resched:
-                       local_irq_enable();
-                       schedule();
-                       local_irq_disable();
-               }
-               __set_current_state(TASK_RUNNING);
-               /*
-                * If higher priority interrupt servers are ready to
-                * run, reschedule immediately. We need this for the
-                * GPIO demux IRQ handler to unmask the interrupt line
-                * _last_, after all GPIO IRQs have run.
-                */
-               if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
-                       goto resched;
-               if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
-                       per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
-               desc->status &= ~IRQ_SCHEDULED;
-               desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
-               local_irq_enable();
-       }
-       __set_current_state(TASK_RUNNING);
-       return 0;
-}
+       unsigned long flags;
 
-static void kick_irqd(unsigned irq, void *cookie)
-{
-       struct irq_desc *desc = irq_desc + irq;
-       int thrprio = desc->thr_prio;
-       int thrmask = 1 << thrprio;
-       int cpu = smp_processor_id();
-
-       if (!(desc->status & IRQ_SCHEDULED)) {
-               desc->status |= IRQ_SCHEDULED;
-               per_cpu(pending_irqthread_mask, cpu) |= thrmask;
-               ++per_cpu(pending_irq_count[thrprio], cpu);
-               wake_up_process(desc->thread);
-       }
-}
+       BUG_ON(irqs_disabled());
 
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
-{
-       if (desc->thread || !create_irq_threads)
-               return 0;
-
-       desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
-       if (desc->thread == NULL) {
-               printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
-               return -ENOMEM;
-       }
+       local_irq_save_hw(flags);
 
-       wake_up_process(desc->thread);
+       clear_thread_flag(TIF_IRQ_SYNC);
 
-       desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
-       ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+       if (ipipe_root_cpudom_var(irqpend_himask) != 0)
+               __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
 
-       return 0;
+       local_irq_restore_hw(flags);
 }
 
-void __init ipipe_init_irq_threads(void)
+void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-       unsigned irq;
-       struct irq_desc *desc;
-
-       create_irq_threads = 1;
+       struct ipipe_domain *ipd = ipipe_current_domain;
 
-       for (irq = 0; irq < NR_IRQS; irq++) {
-               desc = irq_desc + irq;
-               if (desc->action != NULL ||
-                       (desc->status & IRQ_NOREQUEST) != 0)
-                       ipipe_start_irq_thread(irq, desc);
+       if (ipd == ipipe_root_domain) {
+               if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+                       return;
        }
+
+       __ipipe_sync_stage(syncmask);
 }
 
 EXPORT_SYMBOL(show_stack);
index 23e9aa080710f095e3389b74892c2b6933dbdbda..1ab5b532ec724c97e02dc1f71282aa9918d9a83f 100644 (file)
@@ -149,11 +149,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 #endif
        generic_handle_irq(irq);
 
-#ifndef CONFIG_IPIPE   /* Useless and bugous over the I-pipe: IRQs are threaded. */
-       /* If we're the only interrupt running (ignoring IRQ15 which is for
-          syscalls), lower our priority to IRQ14 so that softirqs run at
-          that level.  If there's another, lower-level interrupt, irq_exit
-          will defer softirqs to that.  */
+#ifndef CONFIG_IPIPE
+       /*
+        * If we're the only interrupt running (ignoring IRQ15 which
+        * is for syscalls), lower our priority to IRQ14 so that
+        * softirqs run at that level.  If there's another,
+        * lower-level interrupt, irq_exit will defer softirqs to
+        * that. If the interrupt pipeline is enabled, we are already
+        * running at IRQ14 priority, so we don't need this code.
+        */
        CSYNC();
        pending = bfin_read_IPEND() & ~0x8000;
        other_ints = pending & (pending - 1);
index 3dba9c17304a86006a7a3ad2709ea0cf361c3998..dbcf3e45cb0baefef5136967e33744fef40b64fb 100644 (file)
@@ -20,6 +20,7 @@
 static char cmdline[256];
 static unsigned long len;
 
+#ifndef CONFIG_SMP
 static int num1 __attribute__((l1_data));
 
 void kgdb_l1_test(void) __attribute__((l1_text));
@@ -32,6 +33,8 @@ void kgdb_l1_test(void)
        printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
        return ;
 }
+#endif
+
 #if L2_LENGTH
 
 static int num2 __attribute__((l2));
@@ -59,10 +62,12 @@ int kgdb_test(char *name, int len, int count, int z)
 static int test_proc_output(char *buf)
 {
        kgdb_test("hello world!", 12, 0x55, 0x10);
+#ifndef CONFIG_SMP
        kgdb_l1_test();
-       #if L2_LENGTH
+#endif
+#if L2_LENGTH
        kgdb_l2_test();
-       #endif
+#endif
 
        return 0;
 }
index 594e325b40e4fa078ce1c7aee07041a20a0ac4d0..d76618db50df8c4e3f4c06cb44d09906e5b41147 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/dma.h>
 #include <asm/fixed_code.h>
+#include <asm/cacheflush.h>
 #include <asm/mem_map.h>
 
 #define TEXT_OFFSET 0
@@ -240,7 +241,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                        } else if (addr >= FIXED_CODE_START
                            && addr + sizeof(tmp) <= FIXED_CODE_END) {
-                               memcpy(&tmp, (const void *)(addr), sizeof(tmp));
+                               copy_from_user_page(0, 0, 0, &tmp, (const void *)(addr), sizeof(tmp));
                                copied = sizeof(tmp);
 
                        } else
@@ -320,7 +321,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                        } else if (addr >= FIXED_CODE_START
                            && addr + sizeof(data) <= FIXED_CODE_END) {
-                               memcpy((void *)(addr), &data, sizeof(data));
+                               copy_to_user_page(0, 0, 0, (void *)(addr), &data, sizeof(data));
                                copied = sizeof(data);
 
                        } else
index e5c11623080015fec2cf4a2f671639448f745277..a58687bdee6a6580031d6903526774c3f35e1625 100644 (file)
@@ -889,6 +889,10 @@ void __init setup_arch(char **cmdline_p)
                               CPU, bfin_revid());
        }
 
+       /* We can't run on BF548-0.1 due to ANOMALY 05000448 */
+       if (bfin_cpuid() == 0x27de && bfin_revid() == 1)
+               panic("You can't run on this processor due to 05000448\n");
+
        printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
 
        printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
@@ -1141,12 +1145,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                icache_size = 0;
 
        seq_printf(m, "cache size\t: %d KB(L1 icache) "
-               "%d KB(L1 dcache-%s) %d KB(L2 cache)\n",
+               "%d KB(L1 dcache%s) %d KB(L2 cache)\n",
                icache_size, dcache_size,
 #if defined CONFIG_BFIN_WB
-               "wb"
+               "-wb"
 #elif defined CONFIG_BFIN_WT
-               "wt"
+               "-wt"
 #endif
                "", 0);
 
index 172b4c588467ff2a51fbcc5993a4507d39e83d91..1bbacfbd4c5d7c6198496fa939a7920dc36319d4 100644 (file)
@@ -134,7 +134,10 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
 
        write_seqlock(&xtime_lock);
 #if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
-/* FIXME: Here TIMIL0 is not set when IPIPE enabled, why? */
+       /*
+        * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is
+        * enabled.
+        */
        if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
 #endif
                do_timer(1);
index 0e175342112e45340d72fbe558234d97840027a2..41f2eacfef207339d53d7dbf1de90b78e7d3a8f2 100644 (file)
@@ -113,7 +113,6 @@ static struct platform_device bfin_mac_device = {
        .name = "bfin_mac",
        .dev.platform_data = &bfin_mii_bus,
 };
-#endif
 
 #if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
 static struct dsa_platform_data ksz8893m_switch_data = {
@@ -132,6 +131,7 @@ static struct platform_device ksz8893m_switch_device = {
        .dev.platform_data = &ksz8893m_switch_data,
 };
 #endif
+#endif
 
 #if defined(CONFIG_MTD_M25P80) \
        || defined(CONFIG_MTD_M25P80_MODULE)
@@ -171,6 +171,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
 };
 #endif
 
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 #if defined(CONFIG_NET_DSA_KSZ8893M) \
        || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
 /* SPI SWITCH CHIP */
@@ -179,10 +180,11 @@ static struct bfin5xx_spi_chip spi_switch_info = {
        .bits_per_word = 8,
 };
 #endif
+#endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -259,6 +261,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 #if defined(CONFIG_NET_DSA_KSZ8893M) \
        || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
        {
@@ -271,24 +274,15 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .mode = SPI_MODE_3,
        },
 #endif
+#endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc_dummy",
+               .modalias = "mmc_spi",
                .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
-       {
-               .modalias = "spi_mmc",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
@@ -630,11 +624,10 @@ static struct platform_device *stamp_devices[] __initdata = {
 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
        &bfin_mii_bus,
        &bfin_mac_device,
-#endif
-
 #if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
        &ksz8893m_switch_device,
 #endif
+#endif
 
 #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
        &bfin_spi0_device,
index e5b4bef0edaea0c9966297e608946cd9a87ee959..c847bb101076399cbb3bc510adf5946e42ddecff 100644 (file)
@@ -2,12 +2,12 @@
  * File: include/asm-blackfin/mach-bf518/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 /* This file should be up to date with:
- *  - ????
+ *  - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
@@ -19,6 +19,8 @@
 #define ANOMALY_05000122 (1)
 /* False Hardware Error from an Access in the Shadow of a Conditional Branch */
 #define ANOMALY_05000245 (1)
+/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
+#define ANOMALY_05000254 (1)
 /* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
 #define ANOMALY_05000265 (1)
 /* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
 #define ANOMALY_05000443 (1)
 /* Incorrect L1 Instruction Bank B Memory Map Location */
 #define ANOMALY_05000444 (1)
+/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
+#define ANOMALY_05000452 (1)
+/* PWM_TRIPB Signal Not Available on PG10 */
+#define ANOMALY_05000453 (1)
+/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */
+#define ANOMALY_05000455 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000125 (0)
 #define ANOMALY_05000263 (0)
 #define ANOMALY_05000266 (0)
 #define ANOMALY_05000273 (0)
+#define ANOMALY_05000278 (0)
 #define ANOMALY_05000285 (0)
+#define ANOMALY_05000305 (0)
 #define ANOMALY_05000307 (0)
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000312 (0)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000353 (0)
 #define ANOMALY_05000363 (0)
+#define ANOMALY_05000380 (0)
 #define ANOMALY_05000386 (0)
 #define ANOMALY_05000412 (0)
 #define ANOMALY_05000432 (0)
+#define ANOMALY_05000447 (0)
+#define ANOMALY_05000448 (0)
 
 #endif
index b50a63b975a2cbade4a24069db52d7b564183d24..e21c1c3e4ec7e8129b87989c81e49f9f4001a6a0 100644 (file)
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
         CH_UART0_TX,
         CH_UART0_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
         CONFIG_UART0_CTS_PIN,
         CONFIG_UART0_RTS_PIN,
 #endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
         CH_UART1_TX,
         CH_UART1_RX,
 #endif
-#ifdef CONFIG_BFIN_UART1_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
         CONFIG_UART1_CTS_PIN,
         CONFIG_UART1_RTS_PIN,
 #endif
index 856c097b5317eda49cc70cf8456f8dfee1bfbc6c..48e69eecdba42b4e690b146b71866f1127c6d6e8 100644 (file)
@@ -487,9 +487,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip  mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -585,23 +585,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .controller_data = &ad9960_spi_chip_info,
        },
 #endif
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
-       {
-               .modalias = "spi_mmc",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+               .modalias = "mmc_spi",
+               .max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
index 83606fcdde2757ea8a6d7361864c27c7d2240b9c..7fe480e4ebe83c1022e275373cc1d77550091c20 100644 (file)
@@ -256,9 +256,9 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -366,23 +366,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc_dummy",
+               .modalias = "mmc_spi",
                .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
-       {
-               .modalias = "spi_mmc",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
index 035e8d83505870a246be7f05a0d81611c3a2cc37..df6808d8a6efce3f5317cfc849c9bbb969e7dcda 100644 (file)
@@ -2,7 +2,7 @@
  * File: include/asm-blackfin/mach-bf527/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #define ANOMALY_05000263 (0)
 #define ANOMALY_05000266 (0)
 #define ANOMALY_05000273 (0)
+#define ANOMALY_05000278 (0)
 #define ANOMALY_05000285 (0)
+#define ANOMALY_05000305 (0)
 #define ANOMALY_05000307 (0)
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000312 (0)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000363 (0)
 #define ANOMALY_05000412 (0)
+#define ANOMALY_05000447 (0)
+#define ANOMALY_05000448 (0)
 
 #endif
index 75722d6008b0cade6651e72911d7bfdb3543cbf6..e8c41fd842b5d818d70e83cc4ce78fda0288f01b 100644 (file)
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
         CH_UART0_TX,
         CH_UART0_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
         CONFIG_UART0_CTS_PIN,
         CONFIG_UART0_RTS_PIN,
 #endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
         CH_UART1_TX,
         CH_UART1_RX,
 #endif
-#ifdef CONFIG_BFIN_UART1_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
         CONFIG_UART1_CTS_PIN,
         CONFIG_UART1_RTS_PIN,
 #endif
index 308c98dc5aba6a2bf2bdecf0aef4239bbe86a8b0..8d8b3e7321e628fb52d1aecb9c34bc0ee9f00772 100644 (file)
@@ -38,9 +38,4 @@ config BFIN532_IP0X
        help
          Core support for IP04/IP08 open hardware IP-PBX.
 
-config GENERIC_BF533_BOARD
-       bool "Generic"
-       help
-         Generic or Custom board support.
-
 endchoice
index 9afbe72b484f7159a0a1ab5068ee5e54db9baf78..ff1e832f80d2ea912e1317cc96de3c1ccb1d1abe 100644 (file)
@@ -2,7 +2,6 @@
 # arch/blackfin/mach-bf533/boards/Makefile
 #
 
-obj-$(CONFIG_GENERIC_BF533_BOARD)      += generic_board.o
 obj-$(CONFIG_BFIN533_STAMP)            += stamp.o
 obj-$(CONFIG_BFIN532_IP0X)             += ip0x.o
 obj-$(CONFIG_BFIN533_EZKIT)            += ezkit.o
index 015c18f85e7faf2d272c2fc5dffb6cdc89547a44..0765872a8ada70a9114bfc98266ad7798e384ca9 100644 (file)
@@ -101,9 +101,9 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -129,23 +129,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-       {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
+               .modalias = "mmc_spi",
                .max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
index e7061c7e8c42e5c0aa84acd06034dd64e41792f9..e8974878d8c2807dc34bb2baa4b5822959558a5c 100644 (file)
@@ -96,9 +96,9 @@ static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -138,23 +138,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-       {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
+               .modalias = "mmc_spi",
                .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
diff --git a/arch/blackfin/mach-bf533/boards/generic_board.c b/arch/blackfin/mach-bf533/boards/generic_board.c
deleted file mode 100644 (file)
index 986eeec..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf533/generic_board.c
- * Based on:     arch/blackfin/mach-bf533/ezkit.c
- * Author:       Aidan Williams <aidan@nicta.com.au>
- *
- * Created:      2005
- * Description:
- *
- * Modified:
- *               Copyright 2005 National ICT Australia (NICTA)
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-
-/*
- * Name the Board for the /proc/cpuinfo
- */
-const char bfin_board_name[] = "UNKNOWN BOARD";
-
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
-static struct platform_device rtc_device = {
-       .name = "rtc-bfin",
-       .id   = -1,
-};
-#endif
-
-/*
- *  Driver needs to know address, irq and flag pin.
- */
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-static struct resource smc91x_resources[] = {
-       {
-               .start = 0x20300300,
-               .end = 0x20300300 + 16,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = IRQ_PROG_INTB,
-               .end = IRQ_PROG_INTB,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       }, {
-               .start = IRQ_PF7,
-               .end = IRQ_PF7,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       },
-};
-
-static struct platform_device smc91x_device = {
-       .name = "smc91x",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(smc91x_resources),
-       .resource = smc91x_resources,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-#ifdef CONFIG_BFIN_SIR0
-static struct resource bfin_sir0_resources[] = {
-       {
-               .start = 0xFFC00400,
-               .end = 0xFFC004FF,
-               .flags = IORESOURCE_MEM,
-       },
-       {
-               .start = IRQ_UART0_RX,
-               .end = IRQ_UART0_RX+1,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .start = CH_UART0_RX,
-               .end = CH_UART0_RX+1,
-               .flags = IORESOURCE_DMA,
-       },
-};
-
-static struct platform_device bfin_sir0_device = {
-       .name = "bfin_sir",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(bfin_sir0_resources),
-       .resource = bfin_sir0_resources,
-};
-#endif
-#endif
-
-static struct platform_device *generic_board_devices[] __initdata = {
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
-       &rtc_device,
-#endif
-
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-       &smc91x_device,
-#endif
-
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-#ifdef CONFIG_BFIN_SIR0
-       &bfin_sir0_device,
-#endif
-#endif
-};
-
-static int __init generic_board_init(void)
-{
-       printk(KERN_INFO "%s(): registering device resources\n", __func__);
-       return platform_add_devices(generic_board_devices, ARRAY_SIZE(generic_board_devices));
-}
-
-arch_initcall(generic_board_init);
index e30b1b7d144265803e37f1d5dab9826a3a72c026..f19b63378b1299ec086b0358bbb3261cc7c9e113 100644 (file)
@@ -127,8 +127,8 @@ static struct platform_device dm9000_device2 = {
 #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
 /* all SPI peripherals info goes here */
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
 /*
  * CPOL (Clock Polarity)
  *  0 - Active high SCK
@@ -152,14 +152,13 @@ static struct bfin5xx_spi_chip spi_mmc_chip_info = {
 /* Notice: for blackfin, the speed_hz is the value of register
  * SPI_BAUD, not the real baudrate */
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
+               .modalias = "mmc_spi",
                .max_speed_hz = 2,
                .bus_num = 1,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
        },
 #endif
 };
index 0d3a03429fb984174f75d03a1c904c920bf22aca..1cf893e2e55baffa27cf4aa8aa2d2757505f76f1 100644 (file)
@@ -2,7 +2,7 @@
  * File: include/asm-blackfin/mach-bf533/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #define ANOMALY_05000301 (__SILICON_REVISION__ < 6)
 /* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
 #define ANOMALY_05000302 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional Hysteresis on SPORT Input Pins (Not Available On Older Silicon) */
+/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
 #define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
 /* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */
 #define ANOMALY_05000306 (__SILICON_REVISION__ < 5)
 #define ANOMALY_05000266 (0)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000353 (1)
+#define ANOMALY_05000380 (0)
 #define ANOMALY_05000386 (1)
 #define ANOMALY_05000412 (0)
 #define ANOMALY_05000432 (0)
 #define ANOMALY_05000435 (0)
+#define ANOMALY_05000447 (0)
+#define ANOMALY_05000448 (0)
 
 #endif
index f3d9e495230c04439357b1499874c918092a817e..5f517f53b0fd8d17fb8f9ab104269ffa51290f09 100644 (file)
@@ -134,7 +134,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART_TX,
        CH_UART_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART0_CTS_PIN,
        CONFIG_UART0_RTS_PIN,
 #endif
index 42a57b0acb292d4046b674afffe720f2d6f94cda..77c59da87e85d93f2d62478fff1f6a9cef4c19df 100644 (file)
@@ -33,9 +33,4 @@ config CAMSIG_MINOTAUR
        help
          Board supply package for CSP Minotaur
 
-config GENERIC_BF537_BOARD
-       bool "Generic"
-       help
-         Generic or Custom board support.
-
 endchoice
index 7168cc14afd82a7011821649fdd48283d24266b2..68b98a7af6a618dc642b9413ec0b7c613798bf84 100644 (file)
@@ -2,7 +2,6 @@
 # arch/blackfin/mach-bf537/boards/Makefile
 #
 
-obj-$(CONFIG_GENERIC_BF537_BOARD)      += generic_board.o
 obj-$(CONFIG_BFIN537_STAMP)            += stamp.o
 obj-$(CONFIG_BFIN537_BLUETECHNIX_CM)   += cm_bf537.o
 obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM)  += tcm_bf537.o
index 9cd8fb2a30d32ba3d81ef2409e9078df884b55f0..41c75b9bfac03dde78f1269738fd750193efe4a2 100644 (file)
@@ -108,9 +108,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip  mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -160,23 +160,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-       {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 7,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+               .modalias = "mmc_spi",
+               .max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 1,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c
deleted file mode 100644 (file)
index da710fd..0000000
+++ /dev/null
@@ -1,745 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf537/boards/generic_board.c
- * Based on:     arch/blackfin/mach-bf533/boards/ezkit.c
- * Author:       Aidan Williams <aidan@nicta.com.au>
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright 2005 National ICT Australia (NICTA)
- *               Copyright 2004-2008 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/device.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-#include <linux/usb/isp1362.h>
-#endif
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/usb/sl811.h>
-#include <asm/dma.h>
-#include <asm/bfin5xx_spi.h>
-#include <asm/reboot.h>
-#include <asm/portmux.h>
-#include <linux/spi/ad7877.h>
-
-/*
- * Name the Board for the /proc/cpuinfo
- */
-const char bfin_board_name[] = "UNKNOWN BOARD";
-
-/*
- *  Driver needs to know address, irq and flag pin.
- */
-
-#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-#include <linux/usb/isp1760.h>
-static struct resource bfin_isp1760_resources[] = {
-       [0] = {
-               .start  = 0x203C0000,
-               .end    = 0x203C0000 + 0x000fffff,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_PF7,
-               .end    = IRQ_PF7,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct isp1760_platform_data isp1760_priv = {
-       .is_isp1761 = 0,
-       .port1_disable = 0,
-       .bus_width_16 = 1,
-       .port1_otg = 0,
-       .analog_oc = 0,
-       .dack_polarity_high = 0,
-       .dreq_polarity_high = 0,
-};
-
-static struct platform_device bfin_isp1760_device = {
-       .name           = "isp1760-hcd",
-       .id             = 0,
-       .dev = {
-               .platform_data = &isp1760_priv,
-       },
-       .num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
-       .resource       = bfin_isp1760_resources,
-};
-#endif
-
-#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
-static struct resource bfin_pcmcia_cf_resources[] = {
-       {
-               .start = 0x20310000, /* IO PORT */
-               .end = 0x20312000,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = 0x20311000, /* Attribute Memory */
-               .end = 0x20311FFF,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = IRQ_PF4,
-               .end = IRQ_PF4,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       }, {
-               .start = 6, /* Card Detect PF6 */
-               .end = 6,
-               .flags = IORESOURCE_IRQ,
-       },
-};
-
-static struct platform_device bfin_pcmcia_cf_device = {
-       .name = "bfin_cf_pcmcia",
-       .id = -1,
-       .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
-       .resource = bfin_pcmcia_cf_resources,
-};
-#endif
-
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
-static struct platform_device rtc_device = {
-       .name = "rtc-bfin",
-       .id   = -1,
-};
-#endif
-
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-static struct resource smc91x_resources[] = {
-       {
-               .name = "smc91x-regs",
-               .start = 0x20300300,
-               .end = 0x20300300 + 16,
-               .flags = IORESOURCE_MEM,
-       }, {
-
-               .start = IRQ_PF7,
-               .end = IRQ_PF7,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       },
-};
-static struct platform_device smc91x_device = {
-       .name = "smc91x",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(smc91x_resources),
-       .resource = smc91x_resources,
-};
-#endif
-
-#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
-static struct resource dm9000_resources[] = {
-       [0] = {
-               .start  = 0x203FB800,
-               .end    = 0x203FB800 + 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = 0x203FB800 + 4,
-               .end    = 0x203FB800 + 5,
-               .flags  = IORESOURCE_MEM,
-       },
-       [2] = {
-               .start  = IRQ_PF9,
-               .end    = IRQ_PF9,
-               .flags  = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE),
-       },
-};
-
-static struct platform_device dm9000_device = {
-       .name           = "dm9000",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(dm9000_resources),
-       .resource       = dm9000_resources,
-};
-#endif
-
-#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
-static struct resource sl811_hcd_resources[] = {
-       {
-               .start = 0x20340000,
-               .end = 0x20340000,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = 0x20340004,
-               .end = 0x20340004,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = CONFIG_USB_SL811_BFIN_IRQ,
-               .end = CONFIG_USB_SL811_BFIN_IRQ,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       },
-};
-
-#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
-void sl811_port_power(struct device *dev, int is_on)
-{
-       gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
-       gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
-
-}
-#endif
-
-static struct sl811_platform_data sl811_priv = {
-       .potpg = 10,
-       .power = 250,       /* == 500mA */
-#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
-       .port_power = &sl811_port_power,
-#endif
-};
-
-static struct platform_device sl811_hcd_device = {
-       .name = "sl811-hcd",
-       .id = 0,
-       .dev = {
-               .platform_data = &sl811_priv,
-       },
-       .num_resources = ARRAY_SIZE(sl811_hcd_resources),
-       .resource = sl811_hcd_resources,
-};
-#endif
-
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-static struct resource isp1362_hcd_resources[] = {
-       {
-               .start = 0x20360000,
-               .end = 0x20360000,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = 0x20360004,
-               .end = 0x20360004,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
-               .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       },
-};
-
-static struct isp1362_platform_data isp1362_priv = {
-       .sel15Kres = 1,
-       .clknotstop = 0,
-       .oc_enable = 0,
-       .int_act_high = 0,
-       .int_edge_triggered = 0,
-       .remote_wakeup_connected = 0,
-       .no_power_switching = 1,
-       .power_switching_mode = 0,
-};
-
-static struct platform_device isp1362_hcd_device = {
-       .name = "isp1362-hcd",
-       .id = 0,
-       .dev = {
-               .platform_data = &isp1362_priv,
-       },
-       .num_resources = ARRAY_SIZE(isp1362_hcd_resources),
-       .resource = isp1362_hcd_resources,
-};
-#endif
-
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-static struct platform_device bfin_mii_bus = {
-       .name = "bfin_mii_bus",
-};
-
-static struct platform_device bfin_mac_device = {
-       .name = "bfin_mac",
-       .dev.platform_data = &bfin_mii_bus,
-};
-#endif
-
-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
-static struct resource net2272_bfin_resources[] = {
-       {
-               .start = 0x20300000,
-               .end = 0x20300000 + 0x100,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = IRQ_PF7,
-               .end = IRQ_PF7,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       },
-};
-
-static struct platform_device net2272_bfin_device = {
-       .name = "net2272",
-       .id = -1,
-       .num_resources = ARRAY_SIZE(net2272_bfin_resources),
-       .resource = net2272_bfin_resources,
-};
-#endif
-
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
-/* all SPI peripherals info goes here */
-
-#if defined(CONFIG_MTD_M25P80) \
-       || defined(CONFIG_MTD_M25P80_MODULE)
-static struct mtd_partition bfin_spi_flash_partitions[] = {
-       {
-               .name = "bootloader(spi)",
-               .size = 0x00020000,
-               .offset = 0,
-               .mask_flags = MTD_CAP_ROM
-       }, {
-               .name = "linux kernel(spi)",
-               .size = 0xe0000,
-               .offset = 0x20000
-       }, {
-               .name = "file system(spi)",
-               .size = 0x700000,
-               .offset = 0x00100000,
-       }
-};
-
-static struct flash_platform_data bfin_spi_flash_data = {
-       .name = "m25p80",
-       .parts = bfin_spi_flash_partitions,
-       .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
-       .type = "m25p64",
-};
-
-/* SPI flash chip (m25p64) */
-static struct bfin5xx_spi_chip spi_flash_chip_info = {
-       .enable_dma = 0,         /* use dma transfer with this chip*/
-       .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_ADC_BF533) \
-       || defined(CONFIG_SPI_ADC_BF533_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
-       .enable_dma = 1,         /* use dma transfer with this chip*/
-       .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BLACKFIN_AD1836) \
-       || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE)
-static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
-       .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_PBX)
-static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
-       .ctl_reg        = 0x4, /* send zero */
-       .enable_dma     = 0,
-       .bits_per_word  = 8,
-       .cs_change_per_word = 1,
-};
-#endif
-
-#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-
-static const struct ad7877_platform_data bfin_ad7877_ts_info = {
-       .model                  = 7877,
-       .vref_delay_usecs       = 50,   /* internal, no capacitor */
-       .x_plate_ohms           = 419,
-       .y_plate_ohms           = 486,
-       .pressure_max           = 1000,
-       .pressure_min           = 0,
-       .stopacq_polarity       = 1,
-       .first_conversion_delay = 3,
-       .acquisition_time       = 1,
-       .averaging              = 1,
-       .pen_down_acc_interval  = 1,
-};
-#endif
-
-static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) \
-       || defined(CONFIG_MTD_M25P80_MODULE)
-       {
-               /* the modalias must be the same as spi device driver name */
-               .modalias = "m25p80", /* Name of spi_driver for this device */
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0, /* Framework bus number */
-               .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
-               .platform_data = &bfin_spi_flash_data,
-               .controller_data = &spi_flash_chip_info,
-               .mode = SPI_MODE_3,
-       },
-#endif
-
-#if defined(CONFIG_SPI_ADC_BF533) \
-       || defined(CONFIG_SPI_ADC_BF533_MODULE)
-       {
-               .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
-               .max_speed_hz = 6250000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0, /* Framework bus number */
-               .chip_select = 1, /* Framework chip select. */
-               .platform_data = NULL, /* No spi_driver specific config */
-               .controller_data = &spi_adc_chip_info,
-       },
-#endif
-
-#if defined(CONFIG_SND_BLACKFIN_AD1836) \
-       || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
-       {
-               .modalias = "ad1836-spi",
-               .max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT,
-               .controller_data = &ad1836_spi_chip_info,
-       },
-#endif
-#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE)
-       {
-               .modalias = "ad9960-spi",
-               .max_speed_hz = 10000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 1,
-               .controller_data = &ad9960_spi_chip_info,
-       },
-#endif
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-       {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
-       {
-               .modalias = "spi_mmc",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
-#endif
-#if defined(CONFIG_PBX)
-       {
-               .modalias = "fxs-spi",
-               .max_speed_hz = 12500000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 8 - CONFIG_J11_JUMPER,
-               .controller_data = &spi_si3xxx_chip_info,
-               .mode = SPI_MODE_3,
-       },
-       {
-               .modalias = "fxo-spi",
-               .max_speed_hz = 12500000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 8 - CONFIG_J19_JUMPER,
-               .controller_data = &spi_si3xxx_chip_info,
-               .mode = SPI_MODE_3,
-       },
-#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-       {
-               .modalias               = "ad7877",
-               .platform_data          = &bfin_ad7877_ts_info,
-               .irq                    = IRQ_PF6,
-               .max_speed_hz   = 12500000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num        = 0,
-               .chip_select  = 1,
-               .controller_data = &spi_ad7877_chip_info,
-       },
-#endif
-};
-
-/* SPI controller data */
-static struct bfin5xx_spi_master bfin_spi0_info = {
-       .num_chipselect = 8,
-       .enable_dma = 1,  /* master has the ability to do dma transfer */
-       .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
-};
-
-/* SPI (0) */
-static struct resource bfin_spi0_resource[] = {
-       [0] = {
-               .start = SPI0_REGBASE,
-               .end   = SPI0_REGBASE + 0xFF,
-               .flags = IORESOURCE_MEM,
-               },
-       [1] = {
-               .start = CH_SPI,
-               .end   = CH_SPI,
-               .flags = IORESOURCE_IRQ,
-       },
-};
-
-static struct platform_device bfin_spi0_device = {
-       .name = "bfin-spi",
-       .id = 0, /* Bus number */
-       .num_resources = ARRAY_SIZE(bfin_spi0_resource),
-       .resource = bfin_spi0_resource,
-       .dev = {
-               .platform_data = &bfin_spi0_info, /* Passed to driver */
-       },
-};
-#endif  /* spi master and devices */
-
-#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
-static struct platform_device bfin_fb_device = {
-       .name = "bf537-lq035",
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
-static struct platform_device bfin_fb_adv7393_device = {
-       .name = "bfin-adv7393",
-};
-#endif
-
-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
-static struct resource bfin_uart_resources[] = {
-       {
-               .start = 0xFFC00400,
-               .end = 0xFFC004FF,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = 0xFFC02000,
-               .end = 0xFFC020FF,
-               .flags = IORESOURCE_MEM,
-       },
-};
-
-static struct platform_device bfin_uart_device = {
-       .name = "bfin-uart",
-       .id = 1,
-       .num_resources = ARRAY_SIZE(bfin_uart_resources),
-       .resource = bfin_uart_resources,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-#ifdef CONFIG_BFIN_SIR0
-static struct resource bfin_sir0_resources[] = {
-       {
-               .start = 0xFFC00400,
-               .end = 0xFFC004FF,
-               .flags = IORESOURCE_MEM,
-       },
-       {
-               .start = IRQ_UART0_RX,
-               .end = IRQ_UART0_RX+1,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .start = CH_UART0_RX,
-               .end = CH_UART0_RX+1,
-               .flags = IORESOURCE_DMA,
-       },
-};
-
-static struct platform_device bfin_sir0_device = {
-       .name = "bfin_sir",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(bfin_sir0_resources),
-       .resource = bfin_sir0_resources,
-};
-#endif
-#ifdef CONFIG_BFIN_SIR1
-static struct resource bfin_sir1_resources[] = {
-       {
-               .start = 0xFFC02000,
-               .end = 0xFFC020FF,
-               .flags = IORESOURCE_MEM,
-       },
-       {
-               .start = IRQ_UART1_RX,
-               .end = IRQ_UART1_RX+1,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .start = CH_UART1_RX,
-               .end = CH_UART1_RX+1,
-               .flags = IORESOURCE_DMA,
-       },
-};
-
-static struct platform_device bfin_sir1_device = {
-       .name = "bfin_sir",
-       .id = 1,
-       .num_resources = ARRAY_SIZE(bfin_sir1_resources),
-       .resource = bfin_sir1_resources,
-};
-#endif
-#endif
-
-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
-static struct resource bfin_twi0_resource[] = {
-       [0] = {
-               .start = TWI0_REGBASE,
-               .end   = TWI0_REGBASE + 0xFF,
-               .flags = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start = IRQ_TWI,
-               .end   = IRQ_TWI,
-               .flags = IORESOURCE_IRQ,
-       },
-};
-
-static struct platform_device i2c_bfin_twi_device = {
-       .name = "i2c-bfin-twi",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(bfin_twi0_resource),
-       .resource = bfin_twi0_resource,
-};
-#endif
-
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
-static struct platform_device bfin_sport0_uart_device = {
-       .name = "bfin-sport-uart",
-       .id = 0,
-};
-
-static struct platform_device bfin_sport1_uart_device = {
-       .name = "bfin-sport-uart",
-       .id = 1,
-};
-#endif
-
-static struct platform_device *stamp_devices[] __initdata = {
-#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
-       &bfin_pcmcia_cf_device,
-#endif
-
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
-       &rtc_device,
-#endif
-
-#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
-       &sl811_hcd_device,
-#endif
-
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-       &isp1362_hcd_device,
-#endif
-
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-       &smc91x_device,
-#endif
-
-#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
-       &dm9000_device,
-#endif
-
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-       &bfin_mii_bus,
-       &bfin_mac_device,
-#endif
-
-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
-       &net2272_bfin_device,
-#endif
-
-#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-       &bfin_isp1760_device,
-#endif
-
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
-       &bfin_spi0_device,
-#endif
-
-#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
-       &bfin_fb_device,
-#endif
-
-#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
-       &bfin_fb_adv7393_device,
-#endif
-
-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
-       &bfin_uart_device,
-#endif
-
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-#ifdef CONFIG_BFIN_SIR0
-       &bfin_sir0_device,
-#endif
-#ifdef CONFIG_BFIN_SIR1
-       &bfin_sir1_device,
-#endif
-#endif
-
-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
-       &i2c_bfin_twi_device,
-#endif
-
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
-       &bfin_sport0_uart_device,
-       &bfin_sport1_uart_device,
-#endif
-};
-
-static int __init generic_init(void)
-{
-       printk(KERN_INFO "%s(): registering device resources\n", __func__);
-       platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
-       spi_register_board_info(bfin_spi_board_info,
-                               ARRAY_SIZE(bfin_spi_board_info));
-#endif
-
-       return 0;
-}
-
-arch_initcall(generic_init);
-
-void native_machine_restart(char *cmd)
-{
-       /* workaround reboot hang when booting from SPI */
-       if ((bfin_read_SYSCR() & 0x7) == 0x3)
-               bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
-}
-
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-void bfin_get_ether_addr(char *addr)
-{
-       random_ether_addr(addr);
-       printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__);
-}
-EXPORT_SYMBOL(bfin_get_ether_addr);
-#endif
index db7d3a385e4bda4ade36365ca439080a3c4787aa..3c159819e5550ee178d93cf33a2a4efbbeacd6c2 100644 (file)
@@ -134,9 +134,9 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -156,23 +156,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc_dummy",
+               .modalias = "mmc_spi",
                .max_speed_hz = 5000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = 0,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
-       {
-               .modalias = "spi_mmc",
-               .max_speed_hz = 5000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
index 590eb3a139b7a7542a070aa4b885a561997c188a..4e1de1e53f89f396a38ff5faf71fa04300434381 100644 (file)
@@ -289,9 +289,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -364,23 +364,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .controller_data = &ad9960_spi_chip_info,
        },
 #endif
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-       {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 7,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
+               .modalias = "mmc_spi",
                .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
index 3f4f203a06ec9b0c0ffb570d7e00b8e1b611db60..53ad10f3cd76fb5a7fe945f032e80a172dc0f7fd 100644 (file)
@@ -108,9 +108,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -160,23 +160,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-       {
-               .modalias = "spi_mmc_dummy",
-               .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 7,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
-               .mode = SPI_MODE_3,
-       },
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
+               .modalias = "mmc_spi",
                .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
index 9cb39121d1cba7a359e8a14a053f8435a8d04a42..1bfd80c26c90c35fe0921585a177d4e215eb3bdd 100644 (file)
@@ -2,7 +2,7 @@
  * File: include/asm-blackfin/mach-bf537/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #define ANOMALY_05000301 (1)
 /* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
 #define ANOMALY_05000304 (__SILICON_REVISION__ < 3)
-/* New Feature: Additional Hysteresis on SPORT Input Pins (Not Available On Older Silicon) */
+/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
 #define ANOMALY_05000305 (__SILICON_REVISION__ < 3)
 /* SCKELOW Bit Does Not Maintain State Through Hibernate */
 #define ANOMALY_05000307 (__SILICON_REVISION__ < 3)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000353 (1)
 #define ANOMALY_05000363 (0)
+#define ANOMALY_05000380 (0)
 #define ANOMALY_05000386 (1)
 #define ANOMALY_05000412 (0)
 #define ANOMALY_05000432 (0)
 #define ANOMALY_05000435 (0)
+#define ANOMALY_05000447 (0)
+#define ANOMALY_05000448 (0)
 
 #endif
index b3f87e1d16a2f5ffa18156b35e76edc01c451870..9e34700844a294278921a36fe7ca411fd9c3a61f 100644 (file)
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART0_TX,
        CH_UART0_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART0_CTS_PIN,
        CONFIG_UART0_RTS_PIN,
 #endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART1_TX,
        CH_UART1_RX,
 #endif
-#ifdef CONFIG_BFIN_UART1_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART1_CTS_PIN,
        CONFIG_UART1_RTS_PIN,
 #endif
index e130b4f8a05dd9b1d044ce08088f47bea4a71f28..3a5699827363b9e68c547c2c7628a5852660ab48 100644 (file)
@@ -2,7 +2,7 @@
  * File: include/asm-blackfin/mach-bf538/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #define ANOMALY_05000198 (0)
 #define ANOMALY_05000230 (0)
 #define ANOMALY_05000263 (0)
+#define ANOMALY_05000305 (0)
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000353 (1)
 #define ANOMALY_05000363 (0)
+#define ANOMALY_05000380 (0)
 #define ANOMALY_05000386 (1)
 #define ANOMALY_05000412 (0)
 #define ANOMALY_05000432 (0)
 #define ANOMALY_05000435 (0)
+#define ANOMALY_05000447 (0)
+#define ANOMALY_05000448 (0)
 
 #endif
index 40503b6b89a392152e436e3d28d6747bd7272545..3c2811ebecddd520fec44339c7bba04460ae52ca 100644 (file)
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART0_TX,
        CH_UART0_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART0_CTS_PIN,
        CONFIG_UART0_RTS_PIN,
 #endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART1_TX,
        CH_UART1_RX,
 #endif
-#ifdef CONFIG_BFIN_UART1_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART1_CTS_PIN,
        CONFIG_UART1_RTS_PIN,
 #endif
index 23d03c52f4b46df8d8a7e5aa7c638a8d5e0d7889..882e40ccf0d16f752563bdffffc46f98f2ed6f99 100644 (file)
@@ -2,12 +2,12 @@
  * File: include/asm-blackfin/mach-bf548/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 /* This file should be up to date with:
- *  - Revision G, 08/07/2008; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
+ *  - Revision H, 01/16/2009; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
@@ -91,8 +91,6 @@
 #define ANOMALY_05000371 (__SILICON_REVISION__ < 2)
 /* USB DP/DM Data Pins May Lose State When Entering Hibernate */
 #define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
-/* Mobile DDR Operation Not Functional */
-#define ANOMALY_05000377 (1)
 /* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */
 #define ANOMALY_05000378 (__SILICON_REVISION__ < 2)
 /* 16-Bit NAND FLASH Boot Mode Is Not Functional */
 #define ANOMALY_05000429 (__SILICON_REVISION__ < 2)
 /* Software System Reset Corrupts PLL_LOCKCNT Register */
 #define ANOMALY_05000430 (__SILICON_REVISION__ >= 2)
+/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
+#define ANOMALY_05000431 (__SILICON_REVISION__ < 3)
+/* OTP Write Accesses Not Supported */
+#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
 /* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
 #define ANOMALY_05000443 (1)
+/* CDMAPRIO and L2DMAPRIO Bits in the SYSCR Register Are Not Functional */
+#define ANOMALY_05000446 (1)
+/* UART IrDA Receiver Fails on Extended Bit Pulses */
+#define ANOMALY_05000447 (1)
+/* DDR Clock Duty Cycle Spec Violation (tCH, tCL) */
+#define ANOMALY_05000448 (__SILICON_REVISION__ == 1)
+/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */
+#define ANOMALY_05000449 (__SILICON_REVISION__ == 1)
+/* USB DMA Mode 1 Short Packet Data Corruption */
+#define ANOMALY_05000450 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000125 (0)
 #define ANOMALY_05000263 (0)
 #define ANOMALY_05000266 (0)
 #define ANOMALY_05000273 (0)
+#define ANOMALY_05000278 (0)
+#define ANOMALY_05000305 (0)
 #define ANOMALY_05000307 (0)
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000323 (0)
index e4cf35e7ab9fd6c8a4adf80d10090e821a266ba3..c05e79cba257b0870629ed815bca9efe752bc809 100644 (file)
@@ -63,7 +63,7 @@
 #define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
 #define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
 
-#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART2_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
 # ifndef CONFIG_UART0_CTS_PIN
 #  define CONFIG_UART0_RTS_PIN -1
 # endif
 
-# ifndef CONFIG_UART1_CTS_PIN
-#  define CONFIG_UART1_CTS_PIN -1
+# ifndef CONFIG_UART2_CTS_PIN
+#  define CONFIG_UART2_CTS_PIN -1
 # endif
 
-# ifndef CONFIG_UART1_RTS_PIN
-#  define CONFIG_UART1_RTS_PIN -1
+# ifndef CONFIG_UART2_RTS_PIN
+#  define CONFIG_UART2_RTS_PIN -1
 # endif
 #endif
 
@@ -130,7 +130,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART0_TX,
        CH_UART0_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART0_CTS_PIN,
        CONFIG_UART0_RTS_PIN,
 #endif
@@ -143,6 +143,10 @@ struct bfin_serial_res bfin_serial_resource[] = {
 #ifdef CONFIG_SERIAL_BFIN_DMA
        CH_UART1_TX,
        CH_UART1_RX,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       0,
+       0,
 #endif
        },
 #endif
@@ -154,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART2_TX,
        CH_UART2_RX,
 #endif
-#ifdef CONFIG_BFIN_UART2_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART2_CTS_PIN,
        CONFIG_UART2_RTS_PIN,
 #endif
@@ -167,6 +171,10 @@ struct bfin_serial_res bfin_serial_resource[] = {
 #ifdef CONFIG_SERIAL_BFIN_DMA
        CH_UART3_TX,
        CH_UART3_RX,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       0,
+       0,
 #endif
        },
 #endif
index 60299a71e0905fe3b97566afce8efef8265e1ab5..f194625f68216375bdf720f743826d31e28d7581 100644 (file)
@@ -123,8 +123,8 @@ Events         (highest priority)  EMU         0
 #define IRQ_MXVR_ERROR         BFIN_IRQ(51)    /* MXVR Status (Error) Interrupt */
 #define IRQ_MXVR_MSG           BFIN_IRQ(52)    /* MXVR Message Interrupt */
 #define IRQ_MXVR_PKT           BFIN_IRQ(53)    /* MXVR Packet Interrupt */
-#define IRQ_EPP1_ERROR         BFIN_IRQ(54)    /* EPPI1 Error Interrupt */
-#define IRQ_EPP2_ERROR         BFIN_IRQ(55)    /* EPPI2 Error Interrupt */
+#define IRQ_EPPI1_ERROR                BFIN_IRQ(54)    /* EPPI1 Error Interrupt */
+#define IRQ_EPPI2_ERROR                BFIN_IRQ(55)    /* EPPI2 Error Interrupt */
 #define IRQ_UART3_ERROR                BFIN_IRQ(56)    /* UART3 Status (Error) Interrupt */
 #define IRQ_HOST_ERROR         BFIN_IRQ(57)    /* HOST Status (Error) Interrupt */
 #define IRQ_PIXC_ERROR         BFIN_IRQ(59)    /* PIXC Status (Error) Interrupt */
@@ -361,8 +361,8 @@ Events         (highest priority)  EMU         0
 #define IRQ_UART2_ERR          IRQ_UART2_ERROR
 #define IRQ_CAN0_ERR           IRQ_CAN0_ERROR
 #define IRQ_MXVR_ERR           IRQ_MXVR_ERROR
-#define IRQ_EPP1_ERR           IRQ_EPP1_ERROR
-#define IRQ_EPP2_ERR           IRQ_EPP2_ERROR
+#define IRQ_EPPI1_ERR                  IRQ_EPPI1_ERROR
+#define IRQ_EPPI2_ERR                  IRQ_EPPI2_ERROR
 #define IRQ_UART3_ERR          IRQ_UART3_ERROR
 #define IRQ_HOST_ERR           IRQ_HOST_ERROR
 #define IRQ_PIXC_ERR           IRQ_PIXC_ERROR
index e41a67b1fb53e53933a0d41aa5f86cb309bf9b72..e4bc6d7c5a6a1e83ba33a287cc7feb6aabcc7d7a 100644 (file)
@@ -19,9 +19,4 @@ config BFIN561_BLUETECHNIX_CM
        help
          CM-BF561 support for EVAL- and DEV-Board.
 
-config GENERIC_BF561_BOARD
-       bool "Generic"
-       help
-         Generic or Custom board support.
-
 endchoice
index 04add010b568459da6d324afd87bf4ae46d40b55..3a152559e957d1ab98cac121399256be9a72bc4a 100644 (file)
@@ -2,7 +2,6 @@
 # arch/blackfin/mach-bf561/boards/Makefile
 #
 
-obj-$(CONFIG_GENERIC_BF561_BOARD)      += generic_board.o
 obj-$(CONFIG_BFIN561_BLUETECHNIX_CM)   += cm_bf561.o
 obj-$(CONFIG_BFIN561_EZKIT)            += ezkit.o
 obj-$(CONFIG_BFIN561_TEPLA)            += tepla.o
index 6880d1ebfe60f087b1ec19297b8d0d164f851bc5..f623c6b0719fbc2711f1942e19e4a7143f359c4a 100644 (file)
@@ -105,9 +105,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-       .enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+       .enable_dma = 0,
        .bits_per_word = 8,
 };
 #endif
@@ -155,14 +155,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .controller_data = &ad9960_spi_chip_info,
        },
 #endif
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
        {
-               .modalias = "spi_mmc",
+               .modalias = "mmc_spi",
                .max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
-               .chip_select = CONFIG_SPI_MMC_CS_CHAN,
-               .platform_data = NULL,
-               .controller_data = &spi_mmc_chip_info,
+               .chip_select = 5,
+               .controller_data = &mmc_spi_chip_info,
                .mode = SPI_MODE_3,
        },
 #endif
diff --git a/arch/blackfin/mach-bf561/boards/generic_board.c b/arch/blackfin/mach-bf561/boards/generic_board.c
deleted file mode 100644 (file)
index 0ba366a..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf561/generic_board.c
- * Based on:     arch/blackfin/mach-bf533/ezkit.c
- * Author:       Aidan Williams <aidan@nicta.com.au>
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright 2005 National ICT Australia (NICTA)
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-
-const char bfin_board_name[] = "UNKNOWN BOARD";
-
-/*
- *  Driver needs to know address, irq and flag pin.
- */
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-static struct resource smc91x_resources[] = {
-       {
-               .start = 0x2C010300,
-               .end = 0x2C010300 + 16,
-               .flags = IORESOURCE_MEM,
-       }, {
-               .start = IRQ_PROG_INTB,
-               .end = IRQ_PROG_INTB,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       }, {
-               .start = IRQ_PF9,
-               .end = IRQ_PF9,
-               .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
-       },
-};
-
-static struct platform_device smc91x_device = {
-       .name = "smc91x",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(smc91x_resources),
-       .resource = smc91x_resources,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-#ifdef CONFIG_BFIN_SIR0
-static struct resource bfin_sir0_resources[] = {
-       {
-               .start = 0xFFC00400,
-               .end = 0xFFC004FF,
-               .flags = IORESOURCE_MEM,
-       },
-       {
-               .start = IRQ_UART0_RX,
-               .end = IRQ_UART0_RX+1,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .start = CH_UART0_RX,
-               .end = CH_UART0_RX+1,
-               .flags = IORESOURCE_DMA,
-       },
-};
-
-static struct platform_device bfin_sir0_device = {
-       .name = "bfin_sir",
-       .id = 0,
-       .num_resources = ARRAY_SIZE(bfin_sir0_resources),
-       .resource = bfin_sir0_resources,
-};
-#endif
-#endif
-
-static struct platform_device *generic_board_devices[] __initdata = {
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-       &smc91x_device,
-#endif
-
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-#ifdef CONFIG_BFIN_SIR0
-       &bfin_sir0_device,
-#endif
-#endif
-};
-
-static int __init generic_board_init(void)
-{
-       printk(KERN_INFO "%s(): registering device resources\n", __func__);
-       return platform_add_devices(generic_board_devices,
-                                   ARRAY_SIZE(generic_board_devices));
-}
-
-arch_initcall(generic_board_init);
index 1a9e17562821cd754ba1815aa449a2d8d4f5223e..d0b0b3506440fee5373c850b19cf703a94941681 100644 (file)
@@ -2,7 +2,7 @@
  * File: include/asm-blackfin/mach-bf561/anomaly.h
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #define ANOMALY_05000301 (1)
 /* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
 #define ANOMALY_05000302 (1)
-/* New Feature: Additional Hysteresis on SPORT Input Pins (Not Available On Older Silicon) */
+/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
 #define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
 /* SCKELOW Bit Does Not Maintain State Through Hibernate */
 #define ANOMALY_05000307 (__SILICON_REVISION__ < 5)
 #define ANOMALY_05000273 (0)
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000353 (1)
+#define ANOMALY_05000380 (0)
 #define ANOMALY_05000386 (1)
 #define ANOMALY_05000432 (0)
 #define ANOMALY_05000435 (0)
+#define ANOMALY_05000447 (0)
+#define ANOMALY_05000448 (0)
 
 #endif
index 043bfcf26c52a2250c4399a484904e0e0906e9af..ca8c5f6452093c7c8d9247906e35a8b1baf4fb9b 100644 (file)
@@ -134,7 +134,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
        CH_UART_TX,
        CH_UART_RX,
 #endif
-#ifdef CONFIG_BFIN_UART0_CTSRTS
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
        CONFIG_UART0_CTS_PIN,
        CONFIG_UART0_RTS_PIN,
 #endif
index 98133b968f7b9eabab64d7f1e76d903fb3af2745..80d39b2f9db295d2709574664528a9c71b6ac1db 100644 (file)
 #if (CONFIG_BOOT_LOAD & 0x3)
 # error "The kernel load address must be 4 byte aligned"
 #endif
+
+/* The entire kernel must be able to make a 24bit pcrel call to start of L1 */
+#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
+# error "The kernel load address is too high; keep it below 10meg for safety"
+#endif
+
+#if ANOMALY_05000448
+# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
+#endif
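
The new build-time check is straight address arithmetic: a Blackfin 24-bit PC-relative call reaches at most 16 MiB, and the kernel must be able to call into on-chip L1 instruction memory, which sits just below the 32-bit wrap. A worked example, assuming L1_CODE_START is 0xFFA00000 as on most BF5xx parts (an assumption, not stated in this hunk):

	/* Sketch of the arithmetic behind the check above (L1 base assumed). */
	/* distance from address 0, through the 32-bit wrap, to L1:           */
	/*     (0xffffffff - 0xFFA00000 + 1) = 0x00600000   (6 MiB)           */
	/* a 24-bit pcrel call spans 0x1000000 (16 MiB), so:                  */
	/*     CONFIG_BOOT_LOAD < 0x1000000 - 0x600000 = 0x00A00000 (10 MiB)  */
	/* which is where the "keep it below 10meg" message comes from.       */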
index 3c98dacbf2892960ec4053f3c3bfde85bbc01fd9..aa0648c6a9feb19111d2b8e60e96dfef12bc9340 100644 (file)
 
 /* Invalidate all instruction cache lines associated with this memory area */
 ENTRY(_blackfin_icache_flush_range)
+/*
+ * Workaround to avoid loading a wrong instruction after invalidating the
+ * icache, when the following sequence occurs:
+ *
+ * 1) One instruction address is cached in the instruction cache.
+ * 2) This instruction in SDRAM is changed.
+ * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
+ * 4) This instruction is executed again, but the old one is loaded.
+ */
+       P0 = R0;
+       IFLUSH[P0];
        do_flush IFLUSH, , nop
 ENDPROC(_blackfin_icache_flush_range)
 
 /* Flush all cache lines associated with this area of memory. */
 ENTRY(_blackfin_icache_dcache_flush_range)
+/*
+ * Workaround to avoid loading a wrong instruction after invalidating the
+ * icache, when the following sequence occurs:
+ *
+ * 1) One instruction address is cached in the instruction cache.
+ * 2) This instruction in SDRAM is changed.
+ * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
+ * 4) This instruction is executed again, but the old one is loaded.
+ */
+       P0 = R0;
+       IFLUSH[P0];
        do_flush FLUSH, IFLUSH
 ENDPROC(_blackfin_icache_dcache_flush_range)
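
Both routines above gain an explicit IFLUSH of the first address before the flush loop, so a just-modified instruction cannot be refetched from a stale line. Seen from C, a caller that patches code in SDRAM would follow the usual flush discipline; a minimal sketch, assuming the exported Blackfin cache helpers in <asm/cacheflush.h> keep their (unsigned long start, unsigned long end) signatures:

	#include <asm/cacheflush.h>

	/* Sketch: make a freshly patched code region safe to execute. */
	static void publish_patched_code(void *addr, unsigned long len)
	{
		unsigned long start = (unsigned long)addr;
		unsigned long end   = start + len;

		/* push the new bytes out of the data cache ... */
		blackfin_dcache_flush_range(start, end);
		/* ... then drop stale instruction-cache lines; the routine
		 * patched above now issues the extra IFLUSH internally. */
		blackfin_icache_flush_range(start, end);
	}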
 
index 9dddb6f8cc855296c5e98213bc45b8d76d123276..35393651359bfc004d996154fb7d8d2497e45b5b 100644 (file)
@@ -17,7 +17,7 @@
 #define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
 #define PLL_CTL_VAL \
        (((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
-        (PLL_BYPASS << 8) | (ANOMALY_05000265 ? 0x8000 : 0))
+        (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
 
 __attribute__((l1_text))
 static void do_sync(void)
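
PLL_CTL_VAL now keys the SPORT_HYS bit (bit 15, 0x8000) off anomaly 05000305 instead of 05000265: on parts where that bit is documented as non-functional it is left clear. A worked example of the expression with illustrative values (CONFIG_VCO_MULT = 20, CLKIN_HALF = 0 and PLL_BYPASS = 0 are assumptions made only for the arithmetic):

	/*  (20 & 63) << 9            = 0x2800
	 *  | CLKIN_HALF (0)          = 0x2800
	 *  | (PLL_BYPASS << 8) = 0   = 0x2800
	 *  | 0x8000 unless ANOMALY_05000305 applies
	 *  => PLL_CTL_VAL = 0xA800 on unaffected silicon, 0x2800 otherwise.
	 */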
index 4da50bcd9300de6fb96952bd0bdc0991b15bc183..8009a512fb1186214af8998e3400b73bb40d4f67 100644 (file)
@@ -376,10 +376,22 @@ ENTRY(_do_hibernate)
 #endif
 
 #ifdef PINT0_ASSIGN
+       PM_SYS_PUSH(PINT0_MASK_SET)
+       PM_SYS_PUSH(PINT1_MASK_SET)
+       PM_SYS_PUSH(PINT2_MASK_SET)
+       PM_SYS_PUSH(PINT3_MASK_SET)
        PM_SYS_PUSH(PINT0_ASSIGN)
        PM_SYS_PUSH(PINT1_ASSIGN)
        PM_SYS_PUSH(PINT2_ASSIGN)
        PM_SYS_PUSH(PINT3_ASSIGN)
+       PM_SYS_PUSH(PINT0_INVERT_SET)
+       PM_SYS_PUSH(PINT1_INVERT_SET)
+       PM_SYS_PUSH(PINT2_INVERT_SET)
+       PM_SYS_PUSH(PINT3_INVERT_SET)
+       PM_SYS_PUSH(PINT0_EDGE_SET)
+       PM_SYS_PUSH(PINT1_EDGE_SET)
+       PM_SYS_PUSH(PINT2_EDGE_SET)
+       PM_SYS_PUSH(PINT3_EDGE_SET)
 #endif
 
        PM_SYS_PUSH(EBIU_AMBCTL0)
@@ -714,10 +726,22 @@ ENTRY(_do_hibernate)
        PM_SYS_POP(EBIU_AMBCTL0)
 
 #ifdef PINT0_ASSIGN
+       PM_SYS_POP(PINT3_EDGE_SET)
+       PM_SYS_POP(PINT2_EDGE_SET)
+       PM_SYS_POP(PINT1_EDGE_SET)
+       PM_SYS_POP(PINT0_EDGE_SET)
+       PM_SYS_POP(PINT3_INVERT_SET)
+       PM_SYS_POP(PINT2_INVERT_SET)
+       PM_SYS_POP(PINT1_INVERT_SET)
+       PM_SYS_POP(PINT0_INVERT_SET)
        PM_SYS_POP(PINT3_ASSIGN)
        PM_SYS_POP(PINT2_ASSIGN)
        PM_SYS_POP(PINT1_ASSIGN)
        PM_SYS_POP(PINT0_ASSIGN)
+       PM_SYS_POP(PINT3_MASK_SET)
+       PM_SYS_POP(PINT2_MASK_SET)
+       PM_SYS_POP(PINT1_MASK_SET)
+       PM_SYS_POP(PINT0_MASK_SET)
 #endif
 
 #ifdef SICA_IWR1
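
The hibernate path now saves and restores the PINTx mask, invert and edge registers as well as the pin assignments, so GPIO interrupt configuration survives suspend. The real code stays in assembly (PM_SYS_PUSH/PM_SYS_POP); the same idea in C would be a symmetric save/restore over the register block, sketched here under the assumption that the usual bfin_read_*/bfin_write_* MMR accessors exist for these registers:

	/* Sketch only: save/restore of the PINT0 block around hibernate. */
	static u32 pint0_save[4];

	static void pint0_suspend(void)
	{
		pint0_save[0] = bfin_read_PINT0_MASK_SET();
		pint0_save[1] = bfin_read_PINT0_ASSIGN();
		pint0_save[2] = bfin_read_PINT0_INVERT_SET();
		pint0_save[3] = bfin_read_PINT0_EDGE_SET();
	}

	static void pint0_resume(void)
	{
		/* restore in reverse order, mirroring the pop sequence above */
		bfin_write_PINT0_EDGE_SET(pint0_save[3]);
		bfin_write_PINT0_INVERT_SET(pint0_save[2]);
		bfin_write_PINT0_ASSIGN(pint0_save[1]);
		bfin_write_PINT0_MASK_SET(pint0_save[0]);
	}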
index 88de053bbe8ed58dcdb6e7b4bad80657a3e011db..21e65a339a22aacb857d1a76cdbe93863743298b 100644 (file)
@@ -600,6 +600,19 @@ ENTRY(_system_call)
        p2 = [p2];
 
        [p2+(TASK_THREAD+THREAD_KSP)] = sp;
+#ifdef CONFIG_IPIPE
+       r0 = sp;
+       SP += -12;
+       call ___ipipe_syscall_root;
+       SP += 12;
+       cc = r0 == 1;
+       if cc jump .Lsyscall_really_exit;
+       cc = r0 == -1;
+       if cc jump .Lresume_userspace;
+       r3 = [sp + PT_R3];
+       r4 = [sp + PT_R4];
+       p0 = [sp + PT_ORIG_P0];
+#endif /* CONFIG_IPIPE */
 
        /* Check the System Call */
        r7 = __NR_syscall;
@@ -654,6 +667,17 @@ ENTRY(_system_call)
        r7 =  r7 & r4;
 
 .Lsyscall_resched:
+#ifdef CONFIG_IPIPE
+       cc = BITTST(r7, TIF_IRQ_SYNC);
+       if !cc jump .Lsyscall_no_irqsync;
+       [--sp] = reti;
+       r0 = [sp++];
+       SP += -12;
+       call ___ipipe_sync_root;
+       SP += 12;
+       jump .Lresume_userspace_1;
+.Lsyscall_no_irqsync:
+#endif
        cc = BITTST(r7, TIF_NEED_RESCHED);
        if !cc jump .Lsyscall_sigpending;
 
@@ -685,6 +709,10 @@ ENTRY(_system_call)
 .Lsyscall_really_exit:
        r5 = [sp + PT_RESERVED];
        rets = r5;
+#ifdef CONFIG_IPIPE
+       [--sp] = reti;
+       r5 = [sp++];
+#endif /* CONFIG_IPIPE */
        rts;
 ENDPROC(_system_call)
 
@@ -771,6 +799,15 @@ _new_old_task:
 ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
+#ifdef CONFIG_IPIPE
+       [--sp] = rets;
+       SP += -12;
+       call ___ipipe_check_root
+       SP += 12
+       rets = [sp++];
+       cc = r0 == 0;
+       if cc jump 4f;                /* not on behalf of Linux, get out */
+#endif /* CONFIG_IPIPE */
        p2.l = lo(IPEND);
        p2.h = hi(IPEND);
 
@@ -827,6 +864,28 @@ ENTRY(_ret_from_exception)
        rts;
 ENDPROC(_ret_from_exception)
 
+#ifdef CONFIG_IPIPE
+
+_sync_root_irqs:
+       [--sp] = reti;          /* Reenable interrupts */
+       r0 = [sp++];
+       jump.l ___ipipe_sync_root
+
+_resume_kernel_from_int:
+       r0.l = _sync_root_irqs
+       r0.h = _sync_root_irqs
+       [--sp] = rets;
+       [--sp] = ( r7:4, p5:3 );
+       SP += -12;
+       call ___ipipe_call_irqtail
+       SP += 12;
+       ( r7:4, p5:3 ) = [sp++];
+       rets = [sp++];
+       rts
+#else
+#define _resume_kernel_from_int         2f
+#endif
+
 ENTRY(_return_from_int)
        /* If someone else already raised IRQ 15, do nothing.  */
        csync;
@@ -848,7 +907,7 @@ ENTRY(_return_from_int)
        r1 = r0 - r1;
        r2 = r0 & r1;
        cc = r2 == 0;
-       if !cc jump 2f;
+       if !cc jump _resume_kernel_from_int;
 
        /* Lower the interrupt level to 15.  */
        p0.l = lo(EVT15);
index 43c4eb9acb65e11b0ce1c0fdc480cb1849005a4d..0069c2dd462520db2a1abb7ad4c54108efcc5203 100644 (file)
@@ -235,6 +235,7 @@ ENDPROC(_evt_system_call)
 
 #ifdef CONFIG_IPIPE
 ENTRY(___ipipe_call_irqtail)
+       p0 = r0;
        r0.l = 1f;
        r0.h = 1f;
        reti = r0;
@@ -242,9 +243,6 @@ ENTRY(___ipipe_call_irqtail)
 1:
        [--sp] = rets;
        [--sp] = ( r7:4, p5:3 );
-       p0.l = ___ipipe_irq_tail_hook;
-       p0.h = ___ipipe_irq_tail_hook;
-       p0 = [p0];
        sp += -12;
        call (p0);
        sp += 12;
@@ -259,7 +257,7 @@ ENTRY(___ipipe_call_irqtail)
        p0.h = hi(EVT14);
        [p0] = r0;
        csync;
-       r0 = 0x401f;
+       r0 = 0x401f (z);
        sti r0;
        raise 14;
        [--sp] = reti;          /* IRQs on. */
@@ -277,11 +275,7 @@ ENTRY(___ipipe_call_irqtail)
        p0.h = _bfin_irq_flags;
        r0 = [p0];
        sti r0;
-#if 0 /* FIXME: this actually raises scheduling latencies */
-       /* Reenable interrupts */
-       [--sp] = reti;
-       r0 = [sp++];
-#endif
        rts;
 ENDPROC(___ipipe_call_irqtail)
+
 #endif /* CONFIG_IPIPE */
index 202494568c6c0303b76cafb279b1b06f97fc9963..a7d7b2dd4059a7f5685cec710bb9409174c31589 100644 (file)
@@ -161,11 +161,15 @@ static void bfin_core_unmask_irq(unsigned int irq)
 
 static void bfin_internal_mask_irq(unsigned int irq)
 {
+       unsigned long flags;
+
 #ifdef CONFIG_BF53x
+       local_irq_save_hw(flags);
        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
                             ~(1 << SIC_SYSIRQ(irq)));
 #else
        unsigned mask_bank, mask_bit;
+       local_irq_save_hw(flags);
        mask_bank = SIC_SYSIRQ(irq) / 32;
        mask_bit = SIC_SYSIRQ(irq) % 32;
        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
@@ -175,15 +179,20 @@ static void bfin_internal_mask_irq(unsigned int irq)
                             ~(1 << mask_bit));
 #endif
 #endif
+       local_irq_restore_hw(flags);
 }
 
 static void bfin_internal_unmask_irq(unsigned int irq)
 {
+       unsigned long flags;
+
 #ifdef CONFIG_BF53x
+       local_irq_save_hw(flags);
        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
                             (1 << SIC_SYSIRQ(irq)));
 #else
        unsigned mask_bank, mask_bit;
+       local_irq_save_hw(flags);
        mask_bank = SIC_SYSIRQ(irq) / 32;
        mask_bit = SIC_SYSIRQ(irq) % 32;
        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
@@ -193,6 +202,7 @@ static void bfin_internal_unmask_irq(unsigned int irq)
                             (1 << mask_bit));
 #endif
 #endif
+       local_irq_restore_hw(flags);
 }
 
 #ifdef CONFIG_PM
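
bfin_internal_mask_irq()/bfin_internal_unmask_irq() above now bracket their read-modify-write of SIC_IMASK with local_irq_save_hw()/local_irq_restore_hw(): with CONFIG_IPIPE an interrupt taken between the read and the write could otherwise lose an update. The protected pattern, as a minimal sketch for a BF53x-style part (the bit number is illustrative):

	/* Sketch: atomically clear one bit in SIC_IMASK. */
	static void example_mask_sic_bit(unsigned int bit)
	{
		unsigned long flags;

		local_irq_save_hw(flags);	/* hard-disable, even under I-pipe */
		bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & ~(1 << bit));
		local_irq_restore_hw(flags);
	}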
@@ -390,7 +400,7 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-       _set_irq_handler(irq, handle_edge_irq);
+       _set_irq_handler(irq, handle_level_irq);
 #else
        struct irq_desc *desc = irq_desc + irq;
        /* May not call generic set_irq_handler() due to spinlock
@@ -1055,13 +1065,18 @@ int __init init_arch_irq(void)
 #endif
                default:
 #ifdef CONFIG_IPIPE
-       /*
-        * We want internal interrupt sources to be masked, because
-        * ISRs may trigger interrupts recursively (e.g. DMA), but
-        * interrupts are _not_ masked at CPU level. So let's handle
-        * them as level interrupts.
-        */
-                       set_irq_handler(irq, handle_level_irq);
+                       /*
+                        * We want internal interrupt sources to be
+                        * masked, because ISRs may trigger interrupts
+                        * recursively (e.g. DMA), but interrupts are
+                        * _not_ masked at CPU level. So let's handle
+                        * most of them as level interrupts, except
+                        * the timer interrupt which is special.
+                        */
+                       if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
+                               set_irq_handler(irq, handle_simple_irq);
+                       else
+                               set_irq_handler(irq, handle_level_irq);
 #else /* !CONFIG_IPIPE */
                        set_irq_handler(irq, handle_simple_irq);
 #endif /* !CONFIG_IPIPE */
@@ -1123,9 +1138,8 @@ int __init init_arch_irq(void)
 
 #ifdef CONFIG_IPIPE
        for (irq = 0; irq < NR_IRQS; irq++) {
-               struct irq_desc *desc = irq_desc + irq;
+               struct irq_desc *desc = irq_to_desc(irq);
                desc->ic_prio = __ipipe_get_irq_priority(irq);
-               desc->thr_prio = __ipipe_get_irqthread_priority(irq);
        }
 #endif /* CONFIG_IPIPE */
 
@@ -1208,76 +1222,21 @@ int __ipipe_get_irq_priority(unsigned irq)
        return IVG15;
 }
 
-int __ipipe_get_irqthread_priority(unsigned irq)
-{
-       int ient, prio;
-       int demux_irq;
-
-       /* The returned priority value is rescaled to [0..IVG13+1]
-        * with 0 being the lowest effective priority level. */
-
-       if (irq <= IRQ_CORETMR)
-               return IVG13 - irq + 1;
-
-       /* GPIO IRQs are given the priority of the demux
-        * interrupt. */
-       if (IS_GPIOIRQ(irq)) {
-#if defined(CONFIG_BF54x)
-               u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
-               demux_irq = (bank == 0 ? IRQ_PINT0 :
-                               bank == 1 ? IRQ_PINT1 :
-                               bank == 2 ? IRQ_PINT2 :
-                               IRQ_PINT3);
-#elif defined(CONFIG_BF561)
-               demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
-                               irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
-                               IRQ_PROG0_INTA);
-#elif defined(CONFIG_BF52x)
-               demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
-                               irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
-                               IRQ_PORTF_INTA);
-#else
-               demux_irq = irq;
-#endif
-               return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
-       }
-
-       /* The GPIO demux interrupt is given a lower priority
-        * than the GPIO IRQs, so that its threaded handler
-        * unmasks the interrupt line after the decoded IRQs
-        * have been processed. */
-       prio = PRIO_GPIODEMUX(irq);
-       /* demux irq? */
-       if (prio != -1)
-               return IVG13 - prio;
-
-       for (ient = 0; ient < NR_PERI_INTS; ient++) {
-               struct ivgx *ivg = ivg_table + ient;
-               if (ivg->irqno == irq) {
-                       for (prio = 0; prio <= IVG13-IVG7; prio++) {
-                               if (ivg7_13[prio].ifirst <= ivg &&
-                                   ivg7_13[prio].istop > ivg)
-                                       return IVG7 - prio;
-                       }
-               }
-       }
-
-       return 0;
-}
-
 /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
 #ifdef CONFIG_DO_IRQ_L1
 __attribute__((l1_text))
 #endif
 asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 {
+       struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
+       struct ipipe_domain *this_domain = ipipe_current_domain;
        struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
        struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-       int irq;
+       int irq, s;
 
        if (likely(vec == EVT_IVTMR_P)) {
                irq = IRQ_CORETMR;
-               goto handle_irq;
+               goto core_tick;
        }
 
        SSYNC();
@@ -1319,24 +1278,39 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
        irq = ivg->irqno;
 
        if (irq == IRQ_SYSTMR) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#else
                bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+#endif
                /* This is basically what we need from the register frame. */
                __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
                __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
-               if (!ipipe_root_domain_p)
-                       __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
-               else
+               if (this_domain != ipipe_root_domain)
                        __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+               else
+                       __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
        }
 
-handle_irq:
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#endif
+       if (this_domain == ipipe_root_domain) {
+               s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+               barrier();
+       }
 
        ipipe_trace_irq_entry(irq);
        __ipipe_handle_irq(irq, regs);
-       ipipe_trace_irq_exit(irq);
+       ipipe_trace_irq_exit(irq);
 
-       if (ipipe_root_domain_p)
-               return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+       if (this_domain == ipipe_root_domain) {
+               set_thread_flag(TIF_IRQ_SYNC);
+               if (!s) {
+                       __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+                       return !test_bit(IPIPE_STALL_FLAG, &p->status);
+               }
+       }
 
        return 0;
 }
index 77c9928470944d0c4ceeecb7a691b45482e1fb09..93eab61460792b8a10d08f7ce33ee3d1d9f1d853 100644 (file)
@@ -158,10 +158,14 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
                        kfree(msg);
                        break;
                case BFIN_IPI_CALL_FUNC:
+                       spin_unlock(&msg_queue->lock);
                        ipi_call_function(cpu, msg);
+                       spin_lock(&msg_queue->lock);
                        break;
                case BFIN_IPI_CPU_STOP:
+                       spin_unlock(&msg_queue->lock);
                        ipi_cpu_stop(cpu);
+                       spin_lock(&msg_queue->lock);
                        kfree(msg);
                        break;
                default:
@@ -457,7 +461,7 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
        smp_flush_data.start = start;
        smp_flush_data.end = end;
 
-       if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
+       if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
                printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
 }
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
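
In the ipi_handler() hunk above, msg_queue->lock is dropped around ipi_call_function() and ipi_cpu_stop(), so those callbacks never run with the per-queue spinlock held (they may take other locks or run for a long time). The shape of that pattern, sketched with placeholder types and a hypothetical handle_entry() callback:

	/* Sketch: run per-entry work without holding the queue lock. */
	struct example_queue {
		spinlock_t lock;
		struct list_head head;
	};

	static void drain_queue(struct example_queue *q)
	{
		unsigned long flags;
		struct list_head *pos;

		spin_lock_irqsave(&q->lock, flags);
		while (!list_empty(&q->head)) {
			pos = q->head.next;
			list_del(pos);
			spin_unlock(&q->lock);	/* callback runs unlocked */
			handle_entry(pos);	/* hypothetical callback */
			spin_lock(&q->lock);	/* queue state must be re-read */
		}
		spin_unlock_irqrestore(&q->lock, flags);
	}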
index d0532b72bba56d34f31526fd5c889d71c89217b3..9c3629b9a689420edb42a34e3d7611c9650fbe57 100644 (file)
@@ -104,7 +104,7 @@ void __init paging_init(void)
        }
 }
 
-asmlinkage void init_pda(void)
+asmlinkage void __init init_pda(void)
 {
        unsigned int cpu = raw_smp_processor_id();
 
index e626e50a938a3ce91b2d9926f06d036d63f29609..060df4aa9916853297a0be09d4221c5a95e37654 100644 (file)
@@ -135,11 +135,10 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
        if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
                pci_addr = IS_PIC_SOFT(pcibus_info) ?
                                PHYS_TO_DMA(paddr) :
-                               PHYS_TO_TIODMA(paddr) | dma_attributes;
+                               PHYS_TO_TIODMA(paddr);
        else
-               pci_addr = IS_PIC_SOFT(pcibus_info) ?
-                               paddr :
-                               paddr | dma_attributes;
+               pci_addr = paddr;
+       pci_addr |= dma_attributes;
 
        /* Handle Bus mode */
        if (IS_PCIX(pcibus_info))
index ac5d541368e934a30ca17ac03e58bd990b9eb7ca..6c5b40905dd66daff610de074e5f17f7dd17fc8d 100644 (file)
@@ -3,6 +3,8 @@
 /*
  * Architecture specific compatibility types
  */
+#include <linux/seccomp.h>
+#include <linux/thread_info.h>
 #include <linux/types.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -218,4 +220,9 @@ struct compat_shmid64_ds {
        compat_ulong_t  __unused2;
 };
 
+static inline int is_compat_task(void)
+{
+       return test_thread_flag(TIF_32BIT);
+}
+
 #endif /* _ASM_COMPAT_H */
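
The new is_compat_task() helper lets common code ask whether the current task runs the 32-bit ABI on a 64-bit kernel, here keyed off TIF_32BIT. A sketch of typical use (the function and return values below are illustrative only):

	#include <linux/compat.h>
	#include <linux/errno.h>

	/* Sketch: branch on the calling task's ABI. */
	static long example_dispatch(unsigned int nr)
	{
		if (is_compat_task())
			return -ENOSYS;		/* e.g. route to a compat table instead */
		return 0;			/* native 64-bit path */
	}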
index fb371f5ce132c657ab910824d6d4c998b59c574d..d6b772ba3b8f8238ea3ecd5d27c2b77843b9dcda 100644 (file)
@@ -142,6 +142,10 @@ static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
+       /* Do not do the fixup on other platforms! */
+       if (!machine_is(gef_sbc610))
+               return;
+
        printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
 
        /* Ensure ports 1, 2, 3, 4 & 5 are enabled */
index c42cd898f68bfdece3e98706e44ab048abe61038..6118890c946d89244e587cdc880726a1d7893073 100644 (file)
@@ -556,7 +556,7 @@ static void __exit aes_s390_fini(void)
 module_init(aes_s390_init);
 module_exit(aes_s390_fini);
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
index f5cef3fbf9a5b21588b71add9f3abff0af847e99..31758378bcd2707f7dd2803ecd9a98d2d9a9a063 100644 (file)
@@ -783,6 +783,11 @@ config X86_MCE_AMD
           Additional support for AMD specific MCE features such as
           the DRAM Error Threshold.
 
+config X86_MCE_THRESHOLD
+       depends on X86_MCE_AMD || X86_MCE_INTEL
+       bool
+       default y
+
 config X86_MCE_NONFATAL
        tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
        depends on X86_32 && X86_MCE
index 4ef949c1972e6bfacb80d477fefc427cadfecd8f..394d177d721b0fdec5917b6cfc9d410ede7cb09a 100644 (file)
@@ -379,6 +379,7 @@ static inline u32 safe_apic_wait_icr_idle(void)
 
 static inline void ack_APIC_irq(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * ack_APIC_irq() actually gets compiled as a single instruction
         * ... yummie.
@@ -386,6 +387,7 @@ static inline void ack_APIC_irq(void)
 
        /* Docs say use 0 for future compatibility */
        apic_write(APIC_EOI, 0);
+#endif
 }
 
 static inline unsigned default_get_apic_id(unsigned long x)
index 63134e31e8b933acb973ee2e66f62a2f815bb326..bc9514fb3b13f70d75b11f037546eca8e43d07fd 100644 (file)
@@ -53,6 +53,7 @@
 #define                APIC_ESR_SENDILL        0x00020
 #define                APIC_ESR_RECVILL        0x00040
 #define                APIC_ESR_ILLREGA        0x00080
+#define        APIC_LVTCMCI    0x2f0
 #define        APIC_ICR        0x300
 #define                APIC_DEST_SELF          0x40000
 #define                APIC_DEST_ALLINC        0x80000
index ca5ffb2856b6810ec399a48ec98cc36d020edd69..edc90f23e70814a80491b45db2409656c1b35be4 100644 (file)
@@ -37,8 +37,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
 #else /* !CONFIG_X86_32 */
 
-#define MAX_EFI_IO_PAGES       100
-
 extern u64 efi_call0(void *fp);
 extern u64 efi_call1(void *fp, u64 arg1);
 extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
index 854d538ae85797a27601df6ff051ac3c44fe38f5..c2e6bedaf25873b75df79f6a0f809b4ad9070787 100644 (file)
@@ -33,6 +33,8 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
                 smp_invalidate_interrupt)
 #endif
 
+BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
+
 /*
  * every pentium local APIC has two 'local interrupts', with a
  * soft-definable vector attached to both interrupts, one of
index dca8f03da5b29574af570717c435ea3a5320f22e..63a79c77d220058f74eb81a70e70e3f3721be76d 100644 (file)
@@ -24,9 +24,6 @@
 #include <asm/kmap_types.h>
 #else
 #include <asm/vsyscall.h>
-#ifdef CONFIG_EFI
-#include <asm/efi.h>
-#endif
 #endif
 
 /*
@@ -92,13 +89,6 @@ enum fixed_addresses {
        FIX_IO_APIC_BASE_0,
        FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_EFI
-       FIX_EFI_IO_MAP_LAST_PAGE,
-       FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
-                                 + MAX_EFI_IO_PAGES - 1,
-#endif
-#endif
 #ifdef CONFIG_X86_VISWS_APIC
        FIX_CO_CPU,     /* Cobalt timer */
        FIX_CO_APIC,    /* Cobalt APIC Redirection Table */
index 176f058e715947cbd7b803b90654e14b307dc8ea..039db6aa8e0271951706ee20bd9b819098f8002a 100644 (file)
@@ -12,6 +12,7 @@ typedef struct {
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count;
 #endif
+       unsigned int generic_irqs;      /* arch dependent */
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
index 370e1c83bb49a27048d4641546bef5f76efd0faa..b762ea49bd703ab3b28958cf83b6908e825e57a4 100644 (file)
@@ -27,6 +27,7 @@
 
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
+extern void generic_interrupt(void);
 extern void error_interrupt(void);
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
index 48f0004db8c981175c0a900f659f4b41d1fa4dce..71c9e51839827dfc14ec28858467c2ea4815297c 100644 (file)
@@ -172,7 +172,13 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 
 #else  /* CONFIG_X86_32 */
 
-extern void finit(void);
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_task(struct task_struct *tsk);
+#else
+static inline void finit_task(struct task_struct *tsk)
+{
+}
+#endif
 
 static inline void tolerant_fwait(void)
 {
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
new file mode 100644 (file)
index 0000000..36fb1a6
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_INIT_32_H
+#define _ASM_X86_INIT_32_H
+
+#ifdef CONFIG_X86_32
+extern void __init early_ioremap_page_table_range_init(void);
+#endif
+
+extern unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask);
+
+
+extern unsigned long __initdata e820_table_start;
+extern unsigned long __meminitdata e820_table_end;
+extern unsigned long __meminitdata e820_table_top;
+
+#endif /* _ASM_X86_INIT_32_H */
index 107eb2196691134cd4f8bc2944e953a47e58805d..f38481bcd45538f26d640a2c460a3510246510d2 100644 (file)
@@ -36,6 +36,7 @@ static inline int irq_canonicalize(int irq)
 extern void fixup_irqs(void);
 #endif
 
+extern void (*generic_interrupt_extension)(void);
 extern void init_IRQ(void);
 extern void native_init_IRQ(void);
 extern bool handle_irq(unsigned irq, struct pt_regs *regs);
index 8a285f356f8aef9e0e64b1210303c7d82acdfa19..3cbd79bbb47c82613341fca01ee6e174319342fe 100644 (file)
  */
 #define LOCAL_PERF_VECTOR              0xee
 
+/*
+ * Generic system vector for platform specific use
+ */
+#define GENERIC_INTERRUPT_VECTOR       0xed
+
 /*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
  * start at 0x31(0x41) to spread out vectors evenly between priority
index 9320e2a8a26a46e99aa2c5eae5a6bd381cc90610..a0d70b46c27c2bbb4735ea3c02b308c6fdfb0448 100644 (file)
@@ -4,11 +4,6 @@
 #undef notrace
 #define notrace __attribute__((no_instrument_function))
 
-#ifdef CONFIG_X86_64
-#define __ALIGN .p2align 4,,15
-#define __ALIGN_STR ".p2align 4,,15"
-#endif
-
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 /*
        __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
                              "g" (arg4), "g" (arg5), "g" (arg6))
 
-#endif
+#endif /* CONFIG_X86_32 */
+
+#ifdef __ASSEMBLY__
 
 #define GLOBAL(name)   \
        .globl name;    \
        name:
 
+#ifdef CONFIG_X86_64
+#define __ALIGN .p2align 4,,15
+#define __ALIGN_STR ".p2align 4,,15"
+#endif
+
 #ifdef CONFIG_X86_ALIGNMENT_16
 #define __ALIGN .align 16,0x90
 #define __ALIGN_STR ".align 16,0x90"
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_X86_LINKAGE_H */
 
index 32c6e17b960b7aed994078dc1d5c1fb4e547b2a4..563933e06a35fa48f6c828d3884f9b35886ab4eb 100644 (file)
@@ -11,6 +11,8 @@
  */
 
 #define MCG_CTL_P       (1UL<<8)   /* MCG_CAP register available */
+#define MCG_EXT_P       (1ULL<<9)   /* Extended registers available */
+#define MCG_CMCI_P      (1ULL<<10)  /* CMCI supported */
 
 #define MCG_STATUS_RIPV  (1UL<<0)   /* restart ip valid */
 #define MCG_STATUS_EIPV  (1UL<<1)   /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;
 
 #include <asm/atomic.h>
 
+void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
 extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
+/*
+ * To support more than 128 would need to escape the predefined
+ * Linux defined extended banks first.
+ */
+#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
+
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void cmci_clear(void);
+void cmci_reenable(void);
+void cmci_rediscover(int dying);
+void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void cmci_clear(void) {}
+static inline void cmci_reenable(void) {}
+static inline void cmci_rediscover(int dying) {}
+static inline void cmci_recheck(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
 static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 #endif
 
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
+extern int mce_available(struct cpuinfo_x86 *c);
+
+void mce_log_therm_throt_event(__u64 status);
 
 extern atomic_t mce_entry;
 
 extern void do_machine_check(struct pt_regs *, long);
+
+typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
+DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
+
+enum mcp_flags {
+       MCP_TIMESTAMP = (1 << 0),       /* log time stamp */
+       MCP_UC = (1 << 1),              /* log uncorrected errors */
+};
+extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
 extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
 #define mcheck_init(c) do { } while (0)
 #endif
-extern void stop_mce(void);
-extern void restart_mce(void);
+
+extern void (*mce_threshold_vector)(void);
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
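
mce.h now exports machine_check_poll() together with a per-CPU bitmap of banks to poll (mce_poll_banks) and the MCP_* flags, so callers outside the #MC handler, such as a periodic timer or a CMCI interrupt, can scan for corrected errors. A hedged sketch of such a caller, mirroring how the polling timer is expected to use the API:

	/* Sketch: poll this CPU's banks for corrected errors. */
	static void poll_corrected_errors(void)
	{
		if (!mce_available(&current_cpu_data))
			return;
		machine_check_poll(MCP_TIMESTAMP,
				   &__get_cpu_var(mce_poll_banks));
	}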
index 358acc59ae044d421196c3beb8abcc3c6e58efc0..2dbd2314139e2426c511fe1dd7bc8b3a29392c67 100644 (file)
 #define MSR_IA32_MC0_ADDR              0x00000402
 #define MSR_IA32_MC0_MISC              0x00000403
 
+/* These are consecutive and not in the normal four-register MCE bank block */
+#define MSR_IA32_MC0_CTL2              0x00000280
+#define CMCI_EN                        (1ULL << 30)
+#define CMCI_THRESHOLD_MASK            0xffffULL
+
 #define MSR_P6_PERFCTR0                        0x000000c1
 #define MSR_P6_PERFCTR1                        0x000000c2
 #define MSR_P6_EVNTSEL0                        0x00000186
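
MSR_IA32_MC0_CTL2 and the CMCI_* masks are the new per-bank CMCI controls: bit 30 arms the corrected-error interrupt and the low 16 bits hold the error-count threshold, with one consecutive CTL2 MSR per bank starting at 0x280. A sketch of arming one bank, assuming the CPU advertised CMCI in MCG_CAP (error handling omitted):

	/* Sketch: enable CMCI on one MCA bank with a threshold of 1. */
	static void example_enable_cmci(int bank)
	{
		u64 val;

		rdmsrl(MSR_IA32_MC0_CTL2 + bank, val);
		val &= ~CMCI_THRESHOLD_MASK;
		val |= CMCI_EN | 1;	/* interrupt after one corrected error */
		wrmsrl(MSR_IA32_MC0_CTL2 + bank, val);
	}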
index 2d625da6603c36958cd51a130f6ae41ce2df0f3d..826ad37006ab1a075993ddbe69bd3281412b7f09 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-struct pgprot;
-
 extern int page_is_ram(unsigned long pagenr);
 extern int devmem_is_allowed(unsigned long pagenr);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-                      struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-                        struct pgprot vma_prot);
 
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
index b0e70056838e9d49c4e530f0396926c91abef1fb..2cd07b9422f49cbe8b87e561e140f0f31c28e77f 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_X86_PAT_H
 
 #include <linux/types.h>
+#include <asm/pgtable_types.h>
 
 #ifdef CONFIG_X86_PAT
 extern int pat_enabled;
@@ -17,5 +18,9 @@ extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
                unsigned long flag);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+                      struct pgprot vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+                        struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
index bd8df3b2fe04fc61fc0912569ebbf498969a028f..2733fad45f989bfd5a775c99e5decb35cfe8014a 100644 (file)
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
+
+#ifndef __ASSEMBLER__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
 #define VMALLOC_START  ((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
index 4d258ad76a0fc04925ac484c1fa9b0bfc47a1cb9..b8238dc8786d9f6b4291c2d7f912abbc0ea04e18 100644 (file)
@@ -273,6 +273,7 @@ typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
 extern int nx_enabled;
+extern void set_nx(void);
 
 #define pgprot_writecombine    pgprot_writecombine
 extern pgprot_t pgprot_writecombine(pgprot_t prot);
index 777327ef05c19f0887c0b9b089a6f71135395e94..9f4dfba33b2899a17ee672c19a4891b09ac816dc 100644 (file)
@@ -199,6 +199,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define SCIR_CPU_ACTIVITY      0x02    /* not idle */
 #define SCIR_CPU_HB_INTERVAL   (HZ)    /* once per second */
 
+/* Loop through all installed blades */
+#define for_each_possible_blade(bid)           \
+       for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)
+
 /*
  * Macros for converting between kernel virtual addresses, socket local physical
  * addresses, and UV global physical addresses.
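
for_each_possible_blade() is just a counted loop up to uv_num_possible_blades(); a sketch of typical use (the counting body is illustrative):

	/* Sketch: visit every possible UV blade id. */
	static int count_possible_blades(void)
	{
		int bid, n = 0;

		for_each_possible_blade(bid)
			n++;		/* e.g. to size a per-blade array */
		return n;
	}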
index 95f216bbfaf16f6b8572c40c4cdfa756758f6be5..339ce35648e60708b147ca24d09bdb44a469b269 100644 (file)
@@ -111,7 +111,7 @@ obj-$(CONFIG_SWIOTLB)                       += pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-       obj-$(CONFIG_X86_UV)            += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o
+       obj-$(CONFIG_X86_UV)            += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
        obj-$(CONFIG_X86_PM_TIMER)      += pmtimer_64.o
        obj-$(CONFIG_AUDIT)             += audit_64.o
 
index 6907b8e85d52c580083d47971e4fa9192259c7f9..4c80f15574335d6b02bc93a7ed797c088c1da6c2 100644 (file)
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
           that might execute the to be patched code.
           Other CPUs are not running. */
        stop_nmi();
-#ifdef CONFIG_X86_MCE
-       stop_mce();
-#endif
+
+       /*
+        * Don't stop machine check exceptions while patching.
+        * MCEs only happen when something got corrupted and in this
+        * case we must do something about the corruption.
+        * Ignoring it is worse than an unlikely patching race.
+        * Also machine checks tend to be broadcast and if one CPU
+        * goes into machine check the others follow quickly, so we don't
+        * expect a machine check to cause undue problems during code
+        * patching.
+        */
 
        apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
                                (unsigned long)__smp_locks_end);
 
        restart_nmi();
-#ifdef CONFIG_X86_MCE
-       restart_mce();
-#endif
 }
 
 /**
index f9cecdfd05c5523cb6298a27a3d03706e92fb21a..30909a258d0fbc92165857629f4c492499907430 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/mtrr.h>
 #include <asm/smp.h>
+#include <asm/mce.h>
 
 unsigned int num_processors;
 
@@ -842,6 +843,14 @@ void clear_local_APIC(void)
                apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
        }
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+       if (maxlvt >= 6) {
+               v = apic_read(APIC_LVTCMCI);
+               if (!(v & APIC_LVT_MASKED))
+                       apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
+       }
+#endif
+
        /*
         * Clean APIC state for other OSs:
         */
@@ -1241,6 +1250,12 @@ void __cpuinit setup_local_APIC(void)
        apic_write(APIC_LVT1, value);
 
        preempt_enable();
+
+#ifdef CONFIG_X86_MCE_INTEL
+       /* Recheck CMCI information after local APIC is up on CPU #0 */
+       if (smp_processor_id() == 0)
+               cmci_recheck();
+#endif
 }
 
 void __cpuinit end_local_APIC_setup(void)
index 25423a5b80ed28a7058bd01cf3f5f0358fb167b4..f47df59016c5e98c8214548505b1ef00895fb5aa 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -141,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
        }
 }
 
+static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       /* calling is from identify_secondary_cpu() ? */
+       if (c->cpu_index == boot_cpu_id)
+               return;
+
+       /*
+        * Certain Athlons might work (for various values of 'work') in SMP
+        * but they are not certified as MP capable.
+        */
+       /* Athlon 660/661 is valid. */
+       if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+           (c->x86_mask == 1)))
+               goto valid_k7;
+
+       /* Duron 670 is valid */
+       if ((c->x86_model == 7) && (c->x86_mask == 0))
+               goto valid_k7;
+
+       /*
+        * Athlon 662, Duron 671, and Athlon >model 7 have capability
+        * bit. It's worth noting that the A5 stepping (662) of some
+        * Athlon XP's have the MP bit set.
+        * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+        * more.
+        */
+       if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+           ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+            (c->x86_model > 7))
+               if (cpu_has_mp)
+                       goto valid_k7;
+
+       /* If we get here, not a certified SMP capable AMD system. */
+
+       /*
+        * Don't taint if we are running SMP kernel on a single non-MP
+        * approved Athlon
+        */
+       WARN_ONCE(1, "WARNING: This combination of AMD "
+               "processors is not suitable for SMP.\n");
+       if (!test_taint(TAINT_UNSAFE_SMP))
+               add_taint(TAINT_UNSAFE_SMP);
+
+valid_k7:
+       ;
+#endif
+}
+
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 {
        u32 l, h;
@@ -175,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
        }
 
        set_cpu_cap(c, X86_FEATURE_K7);
+
+       amd_k7_smp_check(c);
 }
 #endif
 
index 25c559ba8d546fd4e332720165b84b7f5aea6a8e..191117f1ad51d06475be918516e109fd31bc0d71 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/topology.h>
@@ -110,6 +111,28 @@ static void __cpuinit trap_init_f00f_bug(void)
 }
 #endif
 
+static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       /* calling is from identify_secondary_cpu() ? */
+       if (c->cpu_index == boot_cpu_id)
+               return;
+
+       /*
+        * Mask B, Pentium, but not Pentium MMX
+        */
+       if (c->x86 == 5 &&
+           c->x86_mask >= 1 && c->x86_mask <= 4 &&
+           c->x86_model <= 3) {
+               /*
+                * Remember we have B step Pentia with bugs
+                */
+               WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
+                                   "with B stepping processors.\n");
+       }
+#endif
+}
+
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
        unsigned long lo, hi;
@@ -186,6 +209,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
        numaq_tsc_disable();
 #endif
+
+       intel_smp_check(c);
 }
 #else
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
index d7d2323bbb6976ffa603246bd06845d280150371..b2f89829bbe824d2cecfd74d8c09b2923406493b 100644 (file)
@@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32)            += k7.o p4.o p5.o p6.o winchip.o
 obj-$(CONFIG_X86_MCE_INTEL)    += mce_intel_64.o
 obj-$(CONFIG_X86_MCE_AMD)      += mce_amd_64.o
 obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
+obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
index dfaebce3633e3f9b8415e79f29ad9f012276825a..3552119b091da51e65ce933c9852eee694920b9b 100644 (file)
@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
        }
 }
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-       old_cr4 = read_cr4();
-       clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-       if (old_cr4 & X86_CR4_MCE)
-               set_in_cr4(X86_CR4_MCE);
-}
-
 static int __init mcheck_disable(char *str)
 {
        mce_disabled = 1;
index fe79985ce0f2f6fb4ae23f3bc92a791994ed9f62..bfbd5323a63538bebed231801cbe22f6c5acfd26 100644 (file)
@@ -3,6 +3,8 @@
  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Rest from unknown author(s).
  * 2004 Andi Kleen. Rewrote most of it.
+ * Copyright 2008 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -24,6 +26,9 @@
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/kdebug.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -32,7 +37,6 @@
 #include <asm/idle.h>
 
 #define MISC_MCELOG_MINOR 227
-#define NR_SYSFS_BANKS 6
 
 atomic_t mce_entry;
 
@@ -47,7 +51,7 @@ static int mce_dont_init;
  */
 static int tolerant = 1;
 static int banks;
-static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
+static u64 *bank;
 static unsigned long notify_user;
 static int rip_msr;
 static int mce_bootlog = -1;
@@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL };
 
 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 
+/* MCA banks polled by the period polling timer for corrected events */
+DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
+       [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
+};
+
+/* Do initial initialization of a struct mce */
+void mce_setup(struct mce *m)
+{
+       memset(m, 0, sizeof(struct mce));
+       m->cpu = smp_processor_id();
+       rdtscll(m->tsc);
+}
+
 /*
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -119,11 +136,11 @@ static void print_mce(struct mce *m)
                        print_symbol("{%s}", m->ip);
                printk("\n");
        }
-       printk(KERN_EMERG "TSC %Lx ", m->tsc);
+       printk(KERN_EMERG "TSC %llx ", m->tsc);
        if (m->addr)
-               printk("ADDR %Lx ", m->addr);
+               printk("ADDR %llx ", m->addr);
        if (m->misc)
-               printk("MISC %Lx ", m->misc);
+               printk("MISC %llx ", m->misc);
        printk("\n");
        printk(KERN_EMERG "This is not a software problem!\n");
        printk(KERN_EMERG "Run through mcelog --ascii to decode "
@@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
        panic(msg);
 }
 
-static int mce_available(struct cpuinfo_x86 *c)
+int mce_available(struct cpuinfo_x86 *c)
 {
+       if (mce_dont_init)
+               return 0;
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
@@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 }
 
 /*
- * The actual machine check handler
+ * Poll for corrected events or events that happened before reset.
+ * Those are just logged through /dev/mcelog.
+ *
+ * This is executed in standard interrupt context.
+ */
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+{
+       struct mce m;
+       int i;
+
+       mce_setup(&m);
+
+       rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+       for (i = 0; i < banks; i++) {
+               if (!bank[i] || !test_bit(i, *b))
+                       continue;
+
+               m.misc = 0;
+               m.addr = 0;
+               m.bank = i;
+               m.tsc = 0;
+
+               barrier();
+               rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
+               if (!(m.status & MCI_STATUS_VAL))
+                       continue;
+
+               /*
+                * Uncorrected events are handled by the exception handler
+                * when it is enabled. But when the exception handler is
+                * disabled, log everything.
+                *
+                * TBD do the same check for MCI_STATUS_EN here?
+                */
+               if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
+                       continue;
+
+               if (m.status & MCI_STATUS_MISCV)
+                       rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
+               if (m.status & MCI_STATUS_ADDRV)
+                       rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
+
+               if (!(flags & MCP_TIMESTAMP))
+                       m.tsc = 0;
+               /*
+                * Don't get the IP here because it's unlikely to
+                * have anything to do with the actual error location.
+                */
+
+               mce_log(&m);
+               add_taint(TAINT_MACHINE_CHECK);
+
+               /*
+                * Clear state for this bank.
+                */
+               wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+       }
+
+       /*
+        * Don't clear MCG_STATUS here because it's only defined for
+        * exceptions.
+        */
+}
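For orientation, a minimal sketch of how the new poller is meant to be driven, condensed from the call sites added elsewhere in this patch (mce_init() sweeps all banks with MCP_UC to pick up leftovers from before the reset; the per-CPU timer and the CMCI handler poll only the banks they own with MCP_TIMESTAMP). It assumes the prototypes and mce_banks_t live in <asm/mce.h> like the other interfaces touched here; this is not part of the change itself:

#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <asm/mce.h>

static void example_log_boot_leftovers(void)
{
	mce_banks_t all_banks;

	/* Log everything left over from before the reset, including UC events. */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC, &all_banks);
}

static void example_poll_corrected_events(void)
{
	/* Periodic/CMCI path: corrected events only, with a timestamp. */
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
}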
+
+/*
+ * The actual machine check handler. This only handles real
+ * exceptions when something got corrupted coming in through int 18.
+ *
+ * This is executed in NMI context, not subject to normal locking rules. This
+ * implies that most kernel services cannot be safely used. Don't even
+ * think about putting a printk in there!
  */
 void do_machine_check(struct pt_regs * regs, long error_code)
 {
@@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
         * error.
         */
        int kill_it = 0;
+       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 
        atomic_inc(&mce_entry);
 
-       if ((regs
-            && notify_die(DIE_NMI, "machine check", regs, error_code,
+       if (notify_die(DIE_NMI, "machine check", regs, error_code,
                           18, SIGKILL) == NOTIFY_STOP)
-           || !banks)
+               goto out2;
+       if (!banks)
                goto out2;
 
-       memset(&m, 0, sizeof(struct mce));
-       m.cpu = smp_processor_id();
+       mce_setup(&m);
+
        rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
        /* if the restart IP is not valid, we're done for */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
@@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code)
        barrier();
 
        for (i = 0; i < banks; i++) {
-               if (i < NR_SYSFS_BANKS && !bank[i])
+               __clear_bit(i, toclear);
+               if (!bank[i])
                        continue;
 
                m.misc = 0;
                m.addr = 0;
                m.bank = i;
-               m.tsc = 0;
 
                rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;
 
+               /*
+                * Corrected errors (MCI_STATUS_UC clear) are handled by
+                * machine_check_poll(). Leave them alone here.
+                */
+               if ((m.status & MCI_STATUS_UC) == 0)
+                       continue;
+
+               /*
+                * Set taint even when machine check was not enabled.
+                */
+               add_taint(TAINT_MACHINE_CHECK);
+
+               __set_bit(i, toclear);
+
                if (m.status & MCI_STATUS_EN) {
                        /* if PCC was set, there's no way out */
                        no_way_out |= !!(m.status & MCI_STATUS_PCC);
@@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                                        no_way_out = 1;
                                kill_it = 1;
                        }
+               } else {
+                       /*
+                        * Machine check event was not enabled. Clear, but
+                        * ignore.
+                        */
+                       continue;
                }
 
                if (m.status & MCI_STATUS_MISCV)
@@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                        rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
 
                mce_get_rip(&m, regs);
-               if (error_code >= 0)
-                       rdtscll(m.tsc);
-               if (error_code != -2)
-                       mce_log(&m);
+               mce_log(&m);
 
                /* Did this bank cause the exception? */
                /* Assume that the bank with uncorrectable errors did it,
@@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                        panicm = m;
                        panicm_found = 1;
                }
-
-               add_taint(TAINT_MACHINE_CHECK);
        }
 
-       /* Never do anything final in the polling timer */
-       if (!regs)
-               goto out;
-
        /* If we didn't find an uncorrectable error, pick
           the last one (shouldn't happen, just being safe). */
        if (!panicm_found)
@@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);
 
- out:
        /* the last thing we do is clear state */
-       for (i = 0; i < banks; i++)
-               wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+       for (i = 0; i < banks; i++) {
+               if (test_bit(i, toclear))
+                       wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+       }
        wrmsrl(MSR_IA32_MCG_STATUS, 0);
  out2:
        atomic_dec(&mce_entry);
@@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  * and historically has been the register value of the
  * MSR_IA32_THERMAL_STATUS (Intel) msr.
  */
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
+void mce_log_therm_throt_event(__u64 status)
 {
        struct mce m;
 
-       memset(&m, 0, sizeof(m));
-       m.cpu = cpu;
+       mce_setup(&m);
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
-       rdtscll(m.tsc);
        mce_log(&m);
 }
 #endif /* CONFIG_X86_MCE_INTEL */
@@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static int next_interval; /* in jiffies */
-static void mcheck_timer(struct work_struct *work);
-static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
+static void mcheck_timer(unsigned long);
+static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mcheck_check_cpu(void *info)
+static void mcheck_timer(unsigned long data)
 {
-       if (mce_available(&current_cpu_data))
-               do_machine_check(NULL, 0);
-}
+       struct timer_list *t = &per_cpu(mce_timer, data);
 
-static void mcheck_timer(struct work_struct *work)
-{
-       on_each_cpu(mcheck_check_cpu, NULL, 1);
+       WARN_ON(smp_processor_id() != data);
+
+       if (mce_available(&current_cpu_data))
+               machine_check_poll(MCP_TIMESTAMP,
+                               &__get_cpu_var(mce_poll_banks));
 
        /*
         * Alert userspace if needed.  If we logged an MCE, reduce the
@@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work)
                                (int)round_jiffies_relative(check_interval*HZ));
        }
 
-       schedule_delayed_work(&mcheck_work, next_interval);
+       t->expires = jiffies + next_interval;
+       add_timer(t);
+}
+
+static void mce_do_trigger(struct work_struct *work)
+{
+       call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
 }
 
+static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
 /*
- * This is only called from process context.  This is where we do
- * anything we need to alert userspace about new MCEs.  This is called
- * directly from the poller and also from entry.S and idle, thanks to
- * TIF_MCE_NOTIFY.
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+ * context.
  */
 int mce_notify_user(void)
 {
+       /* Not more than two messages every minute */
+       static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
        clear_thread_flag(TIF_MCE_NOTIFY);
        if (test_and_clear_bit(0, &notify_user)) {
-               static unsigned long last_print;
-               unsigned long now = jiffies;
-
                wake_up_interruptible(&mce_wait);
-               if (trigger[0])
-                       call_usermodehelper(trigger, trigger_argv, NULL,
-                                               UMH_NO_WAIT);
 
-               if (time_after_eq(now, last_print + (check_interval*HZ))) {
-                       last_print = now;
+               /*
+                * There is no risk of missing notifications because
+                * work_pending is always cleared before the function is
+                * executed.
+                */
+               if (trigger[0] && !work_pending(&mce_trigger_work))
+                       schedule_work(&mce_trigger_work);
+
+               if (__ratelimit(&ratelimit))
                        printk(KERN_INFO "Machine check events logged\n");
-               }
 
                return 1;
        }
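The trigger program is now launched through a workqueue rather than called directly, presumably because starting a usermode helper is not safe from all the interrupt contexts mce_notify_user() may now run in, and the log message is throttled with the generic ratelimit helpers. A stripped-down sketch of that pattern (the names here are illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>

static void example_do_trigger(struct work_struct *work)
{
	/* Process context: usermode helpers may be launched from here. */
}

static DECLARE_WORK(example_trigger_work, example_do_trigger);

static int example_notify(void)
{
	/* At most two messages per minute. */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 2);

	/* work_pending() is cleared before the work runs, so nothing is lost. */
	if (!work_pending(&example_trigger_work))
		schedule_work(&example_trigger_work);

	if (__ratelimit(&ratelimit))
		printk(KERN_INFO "example: events logged\n");

	return 1;
}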
@@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = {
 
 static __init int periodic_mcheck_init(void)
 {
-       next_interval = check_interval * HZ;
-       if (next_interval)
-               schedule_delayed_work(&mcheck_work,
-                                     round_jiffies_relative(next_interval));
-       idle_notifier_register(&mce_idle_notifier);
-       return 0;
+       idle_notifier_register(&mce_idle_notifier);
+       return 0;
 }
 __initcall(periodic_mcheck_init);
 
-
 /*
  * Initialize Machine Checks for a CPU.
  */
-static void mce_init(void *dummy)
+static int mce_cap_init(void)
 {
        u64 cap;
-       int i;
+       unsigned b;
 
        rdmsrl(MSR_IA32_MCG_CAP, cap);
-       banks = cap & 0xff;
-       if (banks > MCE_EXTENDED_BANK) {
-               banks = MCE_EXTENDED_BANK;
-               printk(KERN_INFO "MCE: warning: using only %d banks\n",
-                      MCE_EXTENDED_BANK);
+       b = cap & 0xff;
+       if (b > MAX_NR_BANKS) {
+               printk(KERN_WARNING
+                      "MCE: Using only %u machine check banks out of %u\n",
+                       MAX_NR_BANKS, b);
+               b = MAX_NR_BANKS;
        }
+
+       /* Don't support asymmetric configurations today */
+       WARN_ON(banks != 0 && b != banks);
+       banks = b;
+       if (!bank) {
+               bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
+               if (!bank)
+                       return -ENOMEM;
+               memset(bank, 0xff, banks * sizeof(u64));
+       }
+
        /* Use accurate RIP reporting if available. */
        if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;
 
-       /* Log the machine checks left over from the previous reset.
-          This also clears all registers */
-       do_machine_check(NULL, mce_bootlog ? -1 : -2);
+       return 0;
+}
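mce_cap_init() is driven entirely by the IA32_MCG_CAP MSR. For reference, a small sketch (not part of the patch) of the fields this code relies on, using the same bit tests as above and as the CMCI code later in the series; MCG_CMCI_P is assumed to be defined alongside the other MCG_* flags in <asm/mce.h>:

#include <linux/kernel.h>
#include <asm/msr.h>
#include <asm/mce.h>

static void example_decode_mcg_cap(void)
{
	u64 cap;
	unsigned nr_banks;
	int have_rip_msr, have_cmci;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	/* Bits 7:0: number of reporting banks on this CPU. */
	nr_banks = cap & 0xff;

	/* Bit 9 set and extended-MSR count >= 9: accurate RIP in MCG_EIP. */
	have_rip_msr = (cap & (1 << 9)) && ((cap >> 16) & 0xff) >= 9;

	/* MCG_CMCI_P: corrected-error interrupts (CMCI) are available. */
	have_cmci = !!(cap & MCG_CMCI_P);

	printk(KERN_INFO "MCG_CAP: %u banks, rip_msr %d, cmci %d\n",
	       nr_banks, have_rip_msr, have_cmci);
}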
+
+static void mce_init(void *dummy)
+{
+       u64 cap;
+       int i;
+       mce_banks_t all_banks;
+
+       /*
+        * Log the machine checks left over from the previous reset.
+        */
+       bitmap_fill(all_banks, MAX_NR_BANKS);
+       machine_check_poll(MCP_UC, &all_banks);
 
        set_in_cr4(X86_CR4_MCE);
 
+       rdmsrl(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
        for (i = 0; i < banks; i++) {
-               if (i < NR_SYSFS_BANKS)
-                       wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
-               else
-                       wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
-
+               wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
 }
 
 /* Add per CPU specific workarounds here */
-static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
+static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD) {
-               if(c->x86 == 15)
+               if (c->x86 == 15 && banks > 4)
                        /* disable GART TBL walk error reporting, which trips off
                           incorrectly with the IOMMU & 3ware & Cerberus. */
-                       clear_bit(10, &bank[4]);
+                       clear_bit(10, (unsigned long *)&bank[4]);
                if(c->x86 <= 17 && mce_bootlog < 0)
                        /* Lots of broken BIOS around that don't clear them
                           by default and leave crap in there. Don't log. */
@@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
        }
 }
 
+static void mce_init_timer(void)
+{
+       struct timer_list *t = &__get_cpu_var(mce_timer);
+
+       /* data race harmless because everyone sets to the same value */
+       if (!next_interval)
+               next_interval = check_interval * HZ;
+       if (!next_interval)
+               return;
+       setup_timer(t, mcheck_timer, smp_processor_id());
+       t->expires = round_jiffies_relative(jiffies + next_interval);
+       add_timer(t);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-       mce_cpu_quirks(c);
+       if (!mce_available(c))
+               return;
 
-       if (mce_dont_init ||
-           !mce_available(c))
+       if (mce_cap_init() < 0) {
+               mce_dont_init = 1;
                return;
+       }
+       mce_cpu_quirks(c);
 
        mce_init(NULL);
        mce_cpu_features(c);
+       mce_init_timer();
 }
 
 /*
@@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 {
        unsigned long *cpu_tsc;
        static DEFINE_MUTEX(mce_read_mutex);
-       unsigned next;
+       unsigned prev, next;
        char __user *buf = ubuf;
        int i, err;
 
@@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
        }
 
        err = 0;
-       for (i = 0; i < next; i++) {
-               unsigned long start = jiffies;
-
-               while (!mcelog.entry[i].finished) {
-                       if (time_after_eq(jiffies, start + 2)) {
-                               memset(mcelog.entry + i,0, sizeof(struct mce));
-                               goto timeout;
+       prev = 0;
+       do {
+               for (i = prev; i < next; i++) {
+                       unsigned long start = jiffies;
+
+                       while (!mcelog.entry[i].finished) {
+                               if (time_after_eq(jiffies, start + 2)) {
+                                       memset(mcelog.entry + i, 0,
+                                              sizeof(struct mce));
+                                       goto timeout;
+                               }
+                               cpu_relax();
                        }
-                       cpu_relax();
+                       smp_rmb();
+                       err |= copy_to_user(buf, mcelog.entry + i,
+                                           sizeof(struct mce));
+                       buf += sizeof(struct mce);
+timeout:
+                       ;
                }
-               smp_rmb();
-               err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
-               buf += sizeof(struct mce);
- timeout:
-               ;
-       }
 
-       memset(mcelog.entry, 0, next * sizeof(struct mce));
-       mcelog.next = 0;
+               memset(mcelog.entry + prev, 0,
+                      (next - prev) * sizeof(struct mce));
+               prev = next;
+               next = cmpxchg(&mcelog.next, prev, 0);
+       } while (next != prev);
 
        synchronize_sched();
 
@@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = {
        &mce_chrdev_ops,
 };
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-       old_cr4 = read_cr4();
-       clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-       if (old_cr4 & X86_CR4_MCE)
-               set_in_cr4(X86_CR4_MCE);
-}
-
 /*
  * Old style boot options parsing. Only for compatibility.
  */
@@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str)
        return 1;
 }
 
-/* mce=off disables machine check. Note you can re-enable it later
-   using sysfs.
+/* mce=off disables machine check.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
    mce=nobootlog Don't log MCEs from before booting. */
@@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable);
  * Sysfs support
  */
 
+/*
+ * Disable machine checks on suspend and shutdown. We can't really handle
+ * them later.
+ */
+static int mce_disable(void)
+{
+       int i;
+
+       for (i = 0; i < banks; i++)
+               wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+       return 0;
+}
+
+static int mce_suspend(struct sys_device *dev, pm_message_t state)
+{
+       return mce_disable();
+}
+
+static int mce_shutdown(struct sys_device *dev)
+{
+       return mce_disable();
+}
+
 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
    Only one CPU is active at this time, the others get readded later using
    CPU hotplug. */
@@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev)
        return 0;
 }
 
+static void mce_cpu_restart(void *data)
+{
+       del_timer_sync(&__get_cpu_var(mce_timer));
+       if (mce_available(&current_cpu_data))
+               mce_init(NULL);
+       mce_init_timer();
+}
+
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-       if (next_interval)
-               cancel_delayed_work(&mcheck_work);
-       /* Timer race is harmless here */
-       on_each_cpu(mce_init, NULL, 1);
        next_interval = check_interval * HZ;
-       if (next_interval)
-               schedule_delayed_work(&mcheck_work,
-                                     round_jiffies_relative(next_interval));
+       on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 static struct sysdev_class mce_sysclass = {
+       .suspend = mce_suspend,
+       .shutdown = mce_shutdown,
        .resume = mce_resume,
        .name = "machinecheck",
 };
@@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit
        }                                                               \
        static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
-/*
- * TBD should generate these dynamically based on number of available banks.
- * Have only 6 contol banks in /sysfs until then.
- */
-ACCESSOR(bank0ctl,bank[0],mce_restart())
-ACCESSOR(bank1ctl,bank[1],mce_restart())
-ACCESSOR(bank2ctl,bank[2],mce_restart())
-ACCESSOR(bank3ctl,bank[3],mce_restart())
-ACCESSOR(bank4ctl,bank[4],mce_restart())
-ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute *bank_attrs;
+
+static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+                        char *buf)
+{
+       u64 b = bank[attr - bank_attrs];
+       return sprintf(buf, "%llx\n", b);
+}
+
+static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+                       const char *buf, size_t siz)
+{
+       char *end;
+       u64 new = simple_strtoull(buf, &end, 0);
+       if (end == buf)
+               return -EINVAL;
+       bank[attr - bank_attrs] = new;
+       mce_restart();
+       return end-buf;
+}
 
 static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
                                char *buf)
@@ -814,8 +986,6 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
 static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
 ACCESSOR(check_interval,check_interval,mce_restart())
 static struct sysdev_attribute *mce_attributes[] = {
-       &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
-       &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
        &attr_tolerant.attr, &attr_check_interval, &attr_trigger,
        NULL
 };
@@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu)
                if (err)
                        goto error;
        }
+       for (i = 0; i < banks; i++) {
+               err = sysdev_create_file(&per_cpu(device_mce, cpu),
+                                       &bank_attrs[i]);
+               if (err)
+                       goto error2;
+       }
        cpu_set(cpu, mce_device_initialized);
 
        return 0;
+error2:
+       while (--i >= 0) {
+               sysdev_remove_file(&per_cpu(device_mce, cpu),
+                                       &bank_attrs[i]);
+       }
 error:
-       while (i--) {
+       while (--i >= 0) {
                sysdev_remove_file(&per_cpu(device_mce,cpu),
                                   mce_attributes[i]);
        }
@@ -868,15 +1049,46 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
        for (i = 0; mce_attributes[i]; i++)
                sysdev_remove_file(&per_cpu(device_mce,cpu),
                        mce_attributes[i]);
+       for (i = 0; i < banks; i++)
+               sysdev_remove_file(&per_cpu(device_mce, cpu),
+                       &bank_attrs[i]);
        sysdev_unregister(&per_cpu(device_mce,cpu));
        cpu_clear(cpu, mce_device_initialized);
 }
 
+/* Make sure there are no machine checks on offlined CPUs. */
+static void mce_disable_cpu(void *h)
+{
+       int i;
+       unsigned long action = *(unsigned long *)h;
+
+       if (!mce_available(&current_cpu_data))
+               return;
+       if (!(action & CPU_TASKS_FROZEN))
+               cmci_clear();
+       for (i = 0; i < banks; i++)
+               wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+}
+
+static void mce_reenable_cpu(void *h)
+{
+       int i;
+       unsigned long action = *(unsigned long *)h;
+
+       if (!mce_available(&current_cpu_data))
+               return;
+       if (!(action & CPU_TASKS_FROZEN))
+               cmci_reenable();
+       for (i = 0; i < banks; i++)
+               wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
+}
+
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
 static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
+       struct timer_list *t = &per_cpu(mce_timer, cpu);
 
        switch (action) {
        case CPU_ONLINE:
@@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
                        threshold_cpu_callback(action, cpu);
                mce_remove_device(cpu);
                break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               del_timer_sync(t);
+               smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+               break;
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               t->expires = round_jiffies_relative(jiffies + next_interval);
+               add_timer_on(t, cpu);
+               smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+               break;
+       case CPU_POST_DEAD:
+               /* intentionally ignoring frozen here */
+               cmci_rediscover(cpu);
+               break;
        }
        return NOTIFY_OK;
 }
@@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = {
        .notifier_call = mce_cpu_callback,
 };
 
+static __init int mce_init_banks(void)
+{
+       int i;
+
+       bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
+                               GFP_KERNEL);
+       if (!bank_attrs)
+               return -ENOMEM;
+
+       for (i = 0; i < banks; i++) {
+               struct sysdev_attribute *a = &bank_attrs[i];
+               a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
+               if (!a->attr.name)
+                       goto nomem;
+               a->attr.mode = 0644;
+               a->show = show_bank;
+               a->store = set_bank;
+       }
+       return 0;
+
+nomem:
+       while (--i >= 0)
+               kfree(bank_attrs[i].attr.name);
+       kfree(bank_attrs);
+       bank_attrs = NULL;
+       return -ENOMEM;
+}
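Together with show_bank()/set_bank() above, this replaces the old fixed bank0ctl..bank5ctl accessors: one sysdev_attribute per detected bank is allocated here, named via kasprintf(), and registered per CPU in mce_create_device() (hunk above); the bank index is later recovered by pointer arithmetic on the attribute array. A condensed sketch of the life of one such attribute, in the context of this file (the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysdev.h>

/* Sketch: create the control file for bank 'i' on one CPU's sysdev. */
static int example_add_bank_file(struct sys_device *dev, int i)
{
	struct sysdev_attribute *a = &bank_attrs[i];

	a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
	if (!a->attr.name)
		return -ENOMEM;
	a->attr.mode = 0644;
	a->show = show_bank;	/* bank index recovered via attr - bank_attrs */
	a->store = set_bank;

	return sysdev_create_file(dev, a);
}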
+
 static __init int mce_init_device(void)
 {
        int err;
@@ -906,6 +1161,11 @@ static __init int mce_init_device(void)
 
        if (!mce_available(&boot_cpu_data))
                return -EIO;
+
+       err = mce_init_banks();
+       if (err)
+               return err;
+
        err = sysdev_class_register(&mce_sysclass);
        if (err)
                return err;
index 9817506dd4698c6578646f04100836dd6ad435c8..c5a32f92d07ecc41b55b290f16fb7a0889c07c41 100644 (file)
@@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = {
 
 static DEFINE_PER_CPU(unsigned char, bank_map);        /* see which banks are on */
 
+static void amd_threshold_interrupt(void);
+
 /*
  * CPU Initialization
  */
@@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                        tr.reset = 0;
                        tr.old_limit = 0;
                        threshold_restart_bank(&tr);
+
+                       mce_threshold_vector = amd_threshold_interrupt;
                }
        }
 }
@@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
  * the interrupt goes off when error_count reaches threshold_limit.
  * the handler will simply log mcelog w/ software defined bank number.
  */
-asmlinkage void mce_threshold_interrupt(void)
+static void amd_threshold_interrupt(void)
 {
        unsigned int bank, block;
        struct mce m;
        u32 low = 0, high = 0, address = 0;
 
-       ack_APIC_irq();
-       exit_idle();
-       irq_enter();
-
-       memset(&m, 0, sizeof(m));
-       rdtscll(m.tsc);
-       m.cpu = smp_processor_id();
+       mce_setup(&m);
 
        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
@@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void)
 
                        /* Log the machine check that caused the threshold
                           event. */
-                       do_machine_check(NULL, 0);
+                       machine_check_poll(MCP_TIMESTAMP,
+                                       &__get_cpu_var(mce_poll_banks));
 
                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
@@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void)
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
-                               goto out;
+                               return;
                        }
                }
        }
-out:
-       inc_irq_stat(irq_threshold_count);
-       irq_exit();
 }
 
 /*
index aa5e287c98e01334565a241d7b89e768c403985f..aaa7d97309387d5e99aa63cfa5e7b9e6ea3131e1 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * Intel specific MCE features.
  * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -13,6 +15,7 @@
 #include <asm/hw_irq.h>
 #include <asm/idle.h>
 #include <asm/therm_throt.h>
+#include <asm/apic.h>
 
 asmlinkage void smp_thermal_interrupt(void)
 {
@@ -25,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void)
 
        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
        if (therm_throt_process(msr_val & 1))
-               mce_log_therm_throt_event(smp_processor_id(), msr_val);
+               mce_log_therm_throt_event(msr_val);
 
        inc_irq_stat(irq_thermal_count);
        irq_exit();
@@ -85,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
        return;
 }
 
+/*
+ * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
+ * the CPU to raise an interrupt when a corrected machine check happens.
+ * Normally we pick those up using a regular polling timer.
+ * Also supports reliable discovery of shared banks.
+ */
+
+static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+
+/*
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+static DEFINE_SPINLOCK(cmci_discover_lock);
+
+#define CMCI_THRESHOLD 1
+
+static int cmci_supported(int *banks)
+{
+       u64 cap;
+
+       /*
+        * The vendor check is not strictly needed, but the MCA
+        * initialization is vendor keyed and this makes sure none of
+        * the backdoors are entered otherwise.
+        */
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return 0;
+       if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+               return 0;
+       rdmsrl(MSR_IA32_MCG_CAP, cap);
+       *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
+       return !!(cap & MCG_CMCI_P);
+}
+
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       mce_notify_user();
+}
+
+static void print_update(char *type, int *hdr, int num)
+{
+       if (*hdr == 0)
+               printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
+       *hdr = 1;
+       printk(KERN_CONT " %s:%d", type, num);
+}
+
+/*
+ * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
+ * on this CPU. Use the algorithm recommended in the SDM to discover shared
+ * banks.
+ */
+static void cmci_discover(int banks, int boot)
+{
+       unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+       int hdr = 0;
+       int i;
+
+       spin_lock(&cmci_discover_lock);
+       for (i = 0; i < banks; i++) {
+               u64 val;
+
+               if (test_bit(i, owned))
+                       continue;
+
+               rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+               /* Already owned by someone else? */
+               if (val & CMCI_EN) {
+                       if (test_and_clear_bit(i, owned) || boot)
+                               print_update("SHD", &hdr, i);
+                       __clear_bit(i, __get_cpu_var(mce_poll_banks));
+                       continue;
+               }
+
+               val |= CMCI_EN | CMCI_THRESHOLD;
+               wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+               rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+               /* Did the enable bit stick? -- the bank supports CMCI */
+               if (val & CMCI_EN) {
+                       if (!test_and_set_bit(i, owned) || boot)
+                               print_update("CMCI", &hdr, i);
+                       __clear_bit(i, __get_cpu_var(mce_poll_banks));
+               } else {
+                       WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+               }
+       }
+       spin_unlock(&cmci_discover_lock);
+       if (hdr)
+               printk(KERN_CONT "\n");
+}
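The heart of the loop above is the "does the enable bit stick?" test: the CPU tries to set CMCI_EN in MCi_CTL2 and reads it back. If another CPU already owns the bank the bit was already set and the bank is reported as shared; if the silicon does not implement CMCI for that bank the bit never latches, so the bank stays on the polling path. A minimal sketch of that test in isolation (locking and the poll-bank bookkeeping omitted; CMCI_EN is assumed to be defined with the other MCi_CTL2 flags):

#include <asm/msr.h>
#include <asm/mce.h>

/* Sketch: returns 1 if this CPU now owns CMCI for bank i, 0 otherwise. */
static int example_try_claim_cmci(int i)
{
	u64 val;

	rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
	if (val & CMCI_EN)
		return 0;			/* already owned elsewhere */

	val |= CMCI_EN | CMCI_THRESHOLD;
	wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
	rdmsrl(MSR_IA32_MC0_CTL2 + i, val);

	return !!(val & CMCI_EN);		/* stuck => supported and owned */
}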
+
+/*
+ * Just in case we missed an event during initialization, check
+ * all the CMCI owned banks.
+ */
+void cmci_recheck(void)
+{
+       unsigned long flags;
+       int banks;
+
+       if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+               return;
+       local_irq_save(flags);
+       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       local_irq_restore(flags);
+}
+
+/*
+ * Disable CMCI on this CPU for all banks it owns when it goes down.
+ * This allows other CPUs to claim the banks on rediscovery.
+ */
+void cmci_clear(void)
+{
+       int i;
+       int banks;
+       u64 val;
+
+       if (!cmci_supported(&banks))
+               return;
+       spin_lock(&cmci_discover_lock);
+       for (i = 0; i < banks; i++) {
+               if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+                       continue;
+               /* Disable CMCI */
+               rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+               val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
+               wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+               __clear_bit(i, __get_cpu_var(mce_banks_owned));
+       }
+       spin_unlock(&cmci_discover_lock);
+}
+
+/*
+ * After a CPU went offline, cycle through all the other CPUs and
+ * rediscover the banks it owned. Must run in process context.
+ */
+void cmci_rediscover(int dying)
+{
+       int banks;
+       int cpu;
+       cpumask_var_t old;
+
+       if (!cmci_supported(&banks))
+               return;
+       if (!alloc_cpumask_var(&old, GFP_KERNEL))
+               return;
+       cpumask_copy(old, &current->cpus_allowed);
+
+       for_each_online_cpu (cpu) {
+               if (cpu == dying)
+                       continue;
+               if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+                       continue;
+               /* Recheck banks in case CPUs don't all have the same number */
+               if (cmci_supported(&banks))
+                       cmci_discover(banks, 0);
+       }
+
+       set_cpus_allowed_ptr(current, old);
+       free_cpumask_var(old);
+}
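cmci_rediscover() uses the calling task as its vehicle: it saves the task's allowed-CPU mask, binds itself to each surviving online CPU in turn with set_cpus_allowed_ptr() so that cmci_discover() runs locally there, then restores the mask. A stripped-down sketch of that hop pattern with the per-CPU work factored out (names illustrative, not from the patch):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static void example_run_on_each_online_cpu(void (*fn)(void))
{
	cpumask_var_t old;
	int cpu;

	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
			continue;
		fn();			/* now executing on 'cpu' */
	}

	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}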
+
+/*
+ * Reenable CMCI on this CPU in case a CPU down failed.
+ */
+void cmci_reenable(void)
+{
+       int banks;
+       if (cmci_supported(&banks))
+               cmci_discover(banks, 0);
+}
+
+static __cpuinit void intel_init_cmci(void)
+{
+       int banks;
+
+       if (!cmci_supported(&banks))
+               return;
+
+       mce_threshold_vector = intel_threshold_interrupt;
+       cmci_discover(banks, 1);
+       /*
+        * For CPU #0 this runs with still disabled APIC, but that's
+        * For CPU #0 this runs while the APIC is still disabled, but
+        * that's ok because only the vector is set up here. We still do
+        * another check of the banks for CPU #0 later, just to make
+        * sure we don't miss any events.
+       apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
+       cmci_recheck();
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
        intel_init_thermal(c);
+       intel_init_cmci();
 }
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
new file mode 100644 (file)
index 0000000..23ee9e7
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Common corrected MCE threshold handler code:
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+
+#include <asm/irq_vectors.h>
+#include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/mce.h>
+
+static void default_threshold_interrupt(void)
+{
+       printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
+                        THRESHOLD_APIC_VECTOR);
+}
+
+void (*mce_threshold_vector)(void) = default_threshold_interrupt;
+
+asmlinkage void mce_threshold_interrupt(void)
+{
+       exit_idle();
+       irq_enter();
+       inc_irq_stat(irq_threshold_count);
+       mce_threshold_vector();
+       irq_exit();
+       /* Ack only at the end to avoid potential reentry */
+       ack_APIC_irq();
+}
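The new threshold.c provides only the shared low-level entry path; vendor code hooks in by overwriting the mce_threshold_vector pointer during feature init, exactly as the AMD hunk above and the Intel CMCI hunk below do. A minimal registration sketch (the handler name is hypothetical; the extern declaration of mce_threshold_vector is assumed to live in <asm/mce.h>):

#include <linux/init.h>
#include <asm/mce.h>

static void example_threshold_handler(void)
{
	/*
	 * Called from mce_threshold_interrupt(): exit_idle()/irq_enter()
	 * are already done and the APIC is acked afterwards, so just
	 * poll/log here.
	 */
}

static void __cpuinit example_threshold_init(void)
{
	mce_threshold_vector = example_threshold_handler;
}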
index 169a120587be7e4e6add63c38ed2e0eb139025dd..87b67e3a765ac212c2c954de3566ec487fdac8da 100644 (file)
@@ -729,7 +729,7 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 
        spin_unlock_irqrestore(&ds_lock, irq);
 
-       ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
+       ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
        ds_resume_pebs(tracer);
 
        return tracer;
@@ -1029,5 +1029,4 @@ void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
 
 void ds_exit_thread(struct task_struct *tsk)
 {
-       WARN_ON(tsk->thread.ds_ctx);
 }
index b205272ad3947e8318659720f73917df8a7bcde5..1736acc4d7aa6cfc13bc8ebd0db0e2727a8ebf5e 100644 (file)
@@ -469,7 +469,7 @@ void __init efi_enter_virtual_mode(void)
        efi_memory_desc_t *md;
        efi_status_t status;
        unsigned long size;
-       u64 end, systab, addr, npages;
+       u64 end, systab, addr, npages, end_pfn;
        void *p, *va;
 
        efi.systab = NULL;
@@ -481,7 +481,10 @@ void __init efi_enter_virtual_mode(void)
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
 
-               if (PFN_UP(end) <= max_low_pfn_mapped)
+               end_pfn = PFN_UP(end);
+               if (end_pfn <= max_low_pfn_mapped
+                   || (end_pfn > (1UL << (32 - PAGE_SHIFT))
+                       && end_pfn <= max_pfn_mapped))
                        va = __va(md->phys_addr);
                else
                        va = efi_ioremap(md->phys_addr, size);
index a4ee29127fdf24d916af929339a12faab1c7e618..22c3b7828c50fa1f0c61e17d6680cbf19d0b6a17 100644 (file)
@@ -100,24 +100,11 @@ void __init efi_call_phys_epilog(void)
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-       static unsigned pages_mapped __initdata;
-       unsigned i, pages;
-       unsigned long offset;
+       unsigned long last_map_pfn;
 
-       pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-
-       if (pages_mapped + pages > MAX_EFI_IO_PAGES)
+       last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+       if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
                return NULL;
 
-       for (i = 0; i < pages; i++) {
-               __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-                            phys_addr, PAGE_KERNEL);
-               phys_addr += PAGE_SIZE;
-               pages_mapped++;
-       }
-
-       return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-                                            (pages_mapped - pages)) + offset;
+       return (void __iomem *)__va(phys_addr);
 }
index 83d1836b9467e74069d71805a65a53dbbcbce1b2..7ba4621c0dfa018393833c3f87703c0db126e38a 100644 (file)
@@ -984,6 +984,8 @@ apicinterrupt UV_BAU_MESSAGE \
 #endif
 apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
+apicinterrupt GENERIC_INTERRUPT_VECTOR \
+       generic_interrupt smp_generic_interrupt
 
 #ifdef CONFIG_SMP
 apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
index b0f61f0dcd0a925d13a41a0500ae0f11821d51f3..f2f8540a7f3d0bd88a8ac7b27c9ba460581ba023 100644 (file)
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk)
 #ifdef CONFIG_X86_32
        if (!HAVE_HWFP) {
                memset(tsk->thread.xstate, 0, xstate_size);
-               finit();
+               finit_task(tsk);
                set_stopped_child_used_math(tsk);
                return 0;
        }
index f13ca1650aafce84b1ecf5c1e665f1bbf4a9f5c6..b864341dcc45f9025bb1a91aaa6fff954a5ca111 100644 (file)
@@ -15,6 +15,9 @@
 
 atomic_t irq_err_count;
 
+/* Function pointer for generic interrupt vector handling */
+void (*generic_interrupt_extension)(void) = NULL;
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -56,6 +59,12 @@ static int show_other_interrupts(struct seq_file *p)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");
 #endif
+       if (generic_interrupt_extension) {
+               seq_printf(p, "PLT: ");
+               for_each_online_cpu(j)
+                       seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
+               seq_printf(p, "  Platform interrupts\n");
+       }
 #ifdef CONFIG_SMP
        seq_printf(p, "RES: ");
        for_each_online_cpu(j)
@@ -163,6 +172,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 #ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
 #endif
+       if (generic_interrupt_extension)
+               sum += irq_stats(cpu)->generic_irqs;
 #ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
@@ -226,4 +237,27 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        return 1;
 }
 
+/*
+ * Handler for GENERIC_INTERRUPT_VECTOR.
+ */
+void smp_generic_interrupt(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       ack_APIC_irq();
+
+       exit_idle();
+
+       irq_enter();
+
+       inc_irq_stat(generic_irqs);
+
+       if (generic_interrupt_extension)
+               generic_interrupt_extension();
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+}
+
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
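The new vector is intentionally generic: smp_generic_interrupt() only accounts the interrupt and calls through generic_interrupt_extension when a platform has installed a handler (the SGI UV RTC code added later in this commit signals itself via GENERIC_INTERRUPT_VECTOR, presumably installing its handler through this pointer). A hedged sketch of how a platform driver would hook in; the handler name is illustrative and the extern declaration is assumed to be exported through the irq headers:

#include <linux/init.h>
#include <linux/errno.h>

extern void (*generic_interrupt_extension)(void);	/* assumed: asm/irq.h */

static void example_platform_handler(void)
{
	/* Runs from smp_generic_interrupt(); irq_enter() is already done. */
}

static int __init example_platform_hook(void)
{
	if (generic_interrupt_extension)
		return -EBUSY;		/* some other platform owns the vector */
	generic_interrupt_extension = example_platform_handler;
	return 0;
}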
index 50b8c3a3006cb6f22ae0aa0f0cf3a50071475154..bc132610544829de0f38a12dd8461d73104b39a2 100644 (file)
@@ -175,6 +175,9 @@ void __init native_init_IRQ(void)
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
+       /* generic IPI for platform specific use */
+       alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
+
        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
index da481a1e3f303f8fe3bf10995e8e218cc556f2d4..c7a49e0ffbfbc0b4b2b0cd12b01d5294ca73745c 100644 (file)
@@ -147,6 +147,9 @@ static void __init apic_intr_init(void)
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
+       /* generic IPI for platform specific use */
+       alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
+
        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
index 37cb1bda1baf98fc9fd591aeb6d9ad26049517a1..e8192401da47e28c6a60d1c7d389257b19ca054a 100644 (file)
@@ -558,6 +558,19 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 
 static struct mpf_intel *mpf_found;
 
+static unsigned long __init get_mpc_size(unsigned long physptr)
+{
+       struct mpc_table *mpc;
+       unsigned long size;
+
+       mpc = early_ioremap(physptr, PAGE_SIZE);
+       size = mpc->length;
+       early_iounmap(mpc, PAGE_SIZE);
+       apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
+
+       return size;
+}
+
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
@@ -611,12 +624,16 @@ static void __init __get_smp_config(unsigned int early)
                construct_default_ISA_mptable(mpf->feature1);
 
        } else if (mpf->physptr) {
+               struct mpc_table *mpc;
+               unsigned long size;
 
+               size = get_mpc_size(mpf->physptr);
+               mpc = early_ioremap(mpf->physptr, size);
                /*
                 * Read the physical hardware table.  Anything here will
                 * override the defaults.
                 */
-               if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
+               if (!smp_read_mpc(mpc, early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 0;
 #endif
@@ -624,8 +641,10 @@ static void __init __get_smp_config(unsigned int early)
                               "BIOS bug, MP table errors detected!...\n");
                        printk(KERN_ERR "... disabling SMP support. "
                               "(tell your hw vendor)\n");
+                       early_iounmap(mpc, size);
                        return;
                }
+               early_iounmap(mpc, size);
 
                if (early)
                        return;
@@ -697,10 +716,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 
                        if (!reserve)
                                return 1;
-                       reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
+                       reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf),
                                        BOOTMEM_DEFAULT);
                        if (mpf->physptr) {
-                               unsigned long size = PAGE_SIZE;
+                               unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
                                /*
                                 * We cannot access to MPC table to compute
index 1cc18d439bbbd377bd6941eece6fa2397508d359..2aef36d8aca2783a2cf9cb04f74fe9a72c564734 100644 (file)
@@ -216,6 +216,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
                },
        },
+       {       /* Handle problems with rebooting on Dell XPS710 */
+               .callback = set_bios_reboot,
+               .ident = "Dell XPS710",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+               },
+       },
        { }
 };
 
index 4c54bc0d8ff3cc632f10a2639628dbf0592f7715..f28c56e6bf94a4cc30617f6b39d0349f13f9ebff 100644 (file)
@@ -202,7 +202,9 @@ struct ist_info ist_info;
 #endif
 
 #else
-struct cpuinfo_x86 boot_cpu_data __read_mostly;
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
+       .x86_phys_bits = MAX_PHYSMEM_BITS,
+};
 EXPORT_SYMBOL(boot_cpu_data);
 #endif
 
@@ -770,6 +772,9 @@ void __init setup_arch(char **cmdline_p)
 
        finish_e820_parsing();
 
+       if (efi_enabled)
+               efi_init();
+
        dmi_scan_machine();
 
        dmi_check_system(bad_bios_dmi_table);
@@ -789,8 +794,6 @@ void __init setup_arch(char **cmdline_p)
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);
 
-       if (efi_enabled)
-               efi_init();
 
 #ifdef CONFIG_X86_32
        if (ppro_with_ram_bug()) {
index 249334f5080a16d2e5a6e2c4fb67a8c19be88891..ef7d10170c30d1e98291cfdb0b4d0f17eda49b28 100644 (file)
@@ -114,10 +114,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
 
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
-
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
 /* which logical CPUs are on which nodes */
@@ -271,8 +267,6 @@ static void __cpuinit smp_callin(void)
        cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
-static int __cpuinitdata unsafe_smp;
-
 /*
  * Activate a secondary processor.
  */
@@ -340,76 +334,6 @@ notrace static void __cpuinit start_secondary(void *unused)
        cpu_idle();
 }
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
-{
-       /*
-        * Mask B, Pentium, but not Pentium MMX
-        */
-       if (c->x86_vendor == X86_VENDOR_INTEL &&
-           c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
-           c->x86_model <= 3)
-               /*
-                * Remember we have B step Pentia with bugs
-                */
-               smp_b_stepping = 1;
-
-       /*
-        * Certain Athlons might work (for various values of 'work') in SMP
-        * but they are not certified as MP capable.
-        */
-       if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-               if (num_possible_cpus() == 1)
-                       goto valid_k7;
-
-               /* Athlon 660/661 is valid. */
-               if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-                   (c->x86_mask == 1)))
-                       goto valid_k7;
-
-               /* Duron 670 is valid */
-               if ((c->x86_model == 7) && (c->x86_mask == 0))
-                       goto valid_k7;
-
-               /*
-                * Athlon 662, Duron 671, and Athlon >model 7 have capability
-                * bit. It's worth noting that the A5 stepping (662) of some
-                * Athlon XP's have the MP bit set.
-                * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
-                * more.
-                */
-               if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-                   ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
-                    (c->x86_model > 7))
-                       if (cpu_has_mp)
-                               goto valid_k7;
-
-               /* If we get here, not a certified SMP capable AMD system. */
-               unsafe_smp = 1;
-       }
-
-valid_k7:
-       ;
-}
-
-static void __cpuinit smp_checks(void)
-{
-       if (smp_b_stepping)
-               printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
-                                   "with B stepping processors.\n");
-
-       /*
-        * Don't taint if we are running SMP kernel on a single non-MP
-        * approved Athlon
-        */
-       if (unsafe_smp && num_online_cpus() > 1) {
-               printk(KERN_INFO "WARNING: This combination of AMD"
-                       "processors is not suitable for SMP.\n");
-               add_taint(TAINT_UNSAFE_SMP);
-       }
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -423,7 +347,6 @@ void __cpuinit smp_store_cpu_info(int id)
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
-       smp_apply_quirks(c);
 }
 
 
@@ -1193,7 +1116,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        pr_debug("Boot done.\n");
 
        impress_friends();
-       smp_checks();
 #ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
 #endif
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
new file mode 100644 (file)
index 0000000..2ffb6c5
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * SGI RTC clock/timer routines.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2009 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Dimitri Sivanich
+ */
+#include <linux/clockchips.h>
+
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
+
+#define RTC_NAME               "sgi_rtc"
+
+static cycle_t uv_read_rtc(void);
+static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
+static void uv_rtc_timer_setup(enum clock_event_mode,
+                               struct clock_event_device *);
+
+static struct clocksource clocksource_uv = {
+       .name           = RTC_NAME,
+       .rating         = 400,
+       .read           = uv_read_rtc,
+       .mask           = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+       .shift          = 10,
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct clock_event_device clock_event_device_uv = {
+       .name           = RTC_NAME,
+       .features       = CLOCK_EVT_FEAT_ONESHOT,
+       .shift          = 20,
+       .rating         = 400,
+       .irq            = -1,
+       .set_next_event = uv_rtc_next_event,
+       .set_mode       = uv_rtc_timer_setup,
+       .event_handler  = NULL,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+
+/* There is one of these allocated per node */
+struct uv_rtc_timer_head {
+       spinlock_t      lock;
+       /* next cpu waiting for timer, local node relative: */
+       int             next_cpu;
+       /* number of cpus on this node: */
+       int             ncpus;
+       struct {
+               int     lcpu;           /* systemwide logical cpu number */
+               u64     expires;        /* next timer expiration for this cpu */
+       } cpu[1];
+};
+
+/*
+ * Access to uv_rtc_timer_head via blade id.
+ */
+static struct uv_rtc_timer_head                **blade_info __read_mostly;
+
+static int                             uv_rtc_enable;
+
+/*
+ * Hardware interface routines
+ */
+
+/* Send IPIs to another node */
+static void uv_rtc_send_IPI(int cpu)
+{
+       unsigned long apicid, val;
+       int pnode;
+
+       apicid = cpu_physical_id(cpu);
+       pnode = uv_apicid_to_pnode(apicid);
+       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+             (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+             (GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
+
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
+
+/* Check for an RTC interrupt pending */
+static int uv_intr_pending(int pnode)
+{
+       return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+               UVH_EVENT_OCCURRED0_RTC1_MASK;
+}
+
+/* Setup interrupt and return non-zero if early expiration occurred. */
+static int uv_setup_intr(int cpu, u64 expires)
+{
+       u64 val;
+       int pnode = uv_cpu_to_pnode(cpu);
+
+       uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+               UVH_RTC1_INT_CONFIG_M_MASK);
+       uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
+
+       uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+               UVH_EVENT_OCCURRED0_RTC1_MASK);
+
+       val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
+               ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
+
+       /* Set configuration */
+       uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
+       /* Initialize comparator value */
+       uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
+
+       return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
+}
+
+/*
+ * Per-cpu timer tracking routines
+ */
+
+static __init void uv_rtc_deallocate_timers(void)
+{
+       int bid;
+
+       for_each_possible_blade(bid) {
+               kfree(blade_info[bid]);
+       }
+       kfree(blade_info);
+}
+
+/* Allocate per-node list of cpu timer expiration times. */
+static __init int uv_rtc_allocate_timers(void)
+{
+       int cpu;
+
+       blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
+       if (!blade_info)
+               return -ENOMEM;
+       memset(blade_info, 0, uv_possible_blades * sizeof(void *));
+
+       for_each_present_cpu(cpu) {
+               int nid = cpu_to_node(cpu);
+               int bid = uv_cpu_to_blade_id(cpu);
+               int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+               struct uv_rtc_timer_head *head = blade_info[bid];
+
+               if (!head) {
+                       head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
+                               (uv_blade_nr_possible_cpus(bid) *
+                                       2 * sizeof(u64)),
+                               GFP_KERNEL, nid);
+                       if (!head) {
+                               uv_rtc_deallocate_timers();
+                               return -ENOMEM;
+                       }
+                       spin_lock_init(&head->lock);
+                       head->ncpus = uv_blade_nr_possible_cpus(bid);
+                       head->next_cpu = -1;
+                       blade_info[bid] = head;
+               }
+
+               head->cpu[bcpu].lcpu = cpu;
+               head->cpu[bcpu].expires = ULLONG_MAX;
+       }
+
+       return 0;
+}
+
+/* Find and set the next expiring timer.  */
+static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
+{
+       u64 lowest = ULLONG_MAX;
+       int c, bcpu = -1;
+
+       head->next_cpu = -1;
+       for (c = 0; c < head->ncpus; c++) {
+               u64 exp = head->cpu[c].expires;
+               if (exp < lowest) {
+                       bcpu = c;
+                       lowest = exp;
+               }
+       }
+       if (bcpu >= 0) {
+               head->next_cpu = bcpu;
+               c = head->cpu[bcpu].lcpu;
+               if (uv_setup_intr(c, lowest))
+                       /* If we didn't set it up in time, trigger */
+                       uv_rtc_send_IPI(c);
+       } else {
+               uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+                       UVH_RTC1_INT_CONFIG_M_MASK);
+       }
+}
+
+/*
+ * Set expiration time for current cpu.
+ *
+ * Returns 1 if we missed the expiration time.
+ */
+static int uv_rtc_set_timer(int cpu, u64 expires)
+{
+       int pnode = uv_cpu_to_pnode(cpu);
+       int bid = uv_cpu_to_blade_id(cpu);
+       struct uv_rtc_timer_head *head = blade_info[bid];
+       int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+       u64 *t = &head->cpu[bcpu].expires;
+       unsigned long flags;
+       int next_cpu;
+
+       spin_lock_irqsave(&head->lock, flags);
+
+       next_cpu = head->next_cpu;
+       *t = expires;
+       /* Will this one be next to go off? */
+       if (next_cpu < 0 || bcpu == next_cpu ||
+                       expires < head->cpu[next_cpu].expires) {
+               head->next_cpu = bcpu;
+               if (uv_setup_intr(cpu, expires)) {
+                       *t = ULLONG_MAX;
+                       uv_rtc_find_next_timer(head, pnode);
+                       spin_unlock_irqrestore(&head->lock, flags);
+                       return 1;
+               }
+       }
+
+       spin_unlock_irqrestore(&head->lock, flags);
+       return 0;
+}
+
+/*
+ * Unset expiration time for current cpu.
+ *
+ * Returns 1 if this timer was pending.
+ */
+static int uv_rtc_unset_timer(int cpu)
+{
+       int pnode = uv_cpu_to_pnode(cpu);
+       int bid = uv_cpu_to_blade_id(cpu);
+       struct uv_rtc_timer_head *head = blade_info[bid];
+       int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+       u64 *t = &head->cpu[bcpu].expires;
+       unsigned long flags;
+       int rc = 0;
+
+       spin_lock_irqsave(&head->lock, flags);
+
+       if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
+               rc = 1;
+
+       *t = ULLONG_MAX;
+
+       /* Was the hardware setup for this timer? */
+       if (head->next_cpu == bcpu)
+               uv_rtc_find_next_timer(head, pnode);
+
+       spin_unlock_irqrestore(&head->lock, flags);
+
+       return rc;
+}
+
+
+/*
+ * Kernel interface routines.
+ */
+
+/*
+ * Read the RTC.
+ */
+static cycle_t uv_read_rtc(void)
+{
+       return (cycle_t)uv_read_local_mmr(UVH_RTC);
+}
+
+/*
+ * Program the next event, relative to now
+ */
+static int uv_rtc_next_event(unsigned long delta,
+                            struct clock_event_device *ced)
+{
+       int ced_cpu = cpumask_first(ced->cpumask);
+
+       return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
+}
+
+/*
+ * Setup the RTC timer in oneshot mode
+ */
+static void uv_rtc_timer_setup(enum clock_event_mode mode,
+                              struct clock_event_device *evt)
+{
+       int ced_cpu = cpumask_first(evt->cpumask);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+       case CLOCK_EVT_MODE_ONESHOT:
+       case CLOCK_EVT_MODE_RESUME:
+               /* Nothing to do here yet */
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               uv_rtc_unset_timer(ced_cpu);
+               break;
+       }
+}
+
+static void uv_rtc_interrupt(void)
+{
+       struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+       int cpu = smp_processor_id();
+
+       if (!ced || !ced->event_handler)
+               return;
+
+       if (uv_rtc_unset_timer(cpu) != 1)
+               return;
+
+       ced->event_handler(ced);
+}
+
+static int __init uv_enable_rtc(char *str)
+{
+       uv_rtc_enable = 1;
+
+       return 1;
+}
+__setup("uvrtc", uv_enable_rtc);
+
+static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
+{
+       struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+
+       *ced = clock_event_device_uv;
+       ced->cpumask = cpumask_of(smp_processor_id());
+       clockevents_register_device(ced);
+}
+
+static __init int uv_rtc_setup_clock(void)
+{
+       int rc;
+
+       if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension)
+               return -ENODEV;
+
+       generic_interrupt_extension = uv_rtc_interrupt;
+
+       clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
+                               clocksource_uv.shift);
+
+       rc = clocksource_register(&clocksource_uv);
+       if (rc) {
+               generic_interrupt_extension = NULL;
+               return rc;
+       }
+
+       /* Setup and register clockevents */
+       rc = uv_rtc_allocate_timers();
+       if (rc) {
+               clocksource_unregister(&clocksource_uv);
+               generic_interrupt_extension = NULL;
+               return rc;
+       }
+
+       clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
+                               NSEC_PER_SEC, clock_event_device_uv.shift);
+
+       clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
+                                               sn_rtc_cycles_per_second;
+
+       clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
+                               (NSEC_PER_SEC / sn_rtc_cycles_per_second);
+
+       rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
+       if (rc) {
+               clocksource_unregister(&clocksource_uv);
+               generic_interrupt_extension = NULL;
+               uv_rtc_deallocate_timers();
+       }
+
+       return rc;
+}
+arch_initcall(uv_rtc_setup_clock);
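
uv_rtc_setup_clock() above derives the clocksource and clockevent scaling factors from sn_rtc_cycles_per_second via clocksource_hz2mult() and div_sc(). A minimal standalone sketch of that fixed-point arithmetic, with a placeholder rate hz standing in for sn_rtc_cycles_per_second (the sketch is illustrative, not code from the patch):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* mult = (NSEC_PER_SEC << shift) / hz, rounded to nearest -- the relation
 * behind clocksource_hz2mult(); a larger shift trades range for precision. */
static uint32_t hz_to_mult(uint64_t hz, unsigned int shift)
{
        uint64_t tmp = NSEC_PER_SEC << shift;

        tmp += hz / 2;                  /* round to nearest */
        return (uint32_t)(tmp / hz);
}

/* Read path: nanoseconds represented by a raw cycle count (the 64-bit
 * overflow handling done by the timekeeping core is left out here). */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, unsigned int shift)
{
        return (cycles * mult) >> shift;
}

With shift = 10, a hypothetical 100 MHz rate gives mult = (10^9 << 10) / 10^8 = 10240, so (cycles * mult) >> shift comes out to the expected 10 ns per cycle; min_delta_ns in the patch is simply one such period.
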
index 491e737ce547c236830f8ecb4ef45c5415c53c1e..aa0987088774613ccc36ece2c33ee35e9f382866 100644 (file)
@@ -30,20 +30,29 @@ static void fclex(void)
 }
 
 /* Needs to be externally visible */
-void finit(void)
+void finit_task(struct task_struct *tsk)
 {
-       control_word = 0x037f;
-       partial_status = 0;
-       top = 0;                /* We don't keep top in the status word internally. */
-       fpu_tag_word = 0xffff;
+       struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
+       struct address *oaddr, *iaddr;
+       soft->cwd = 0x037f;
+       soft->swd = 0;
+       soft->ftop = 0; /* We don't keep top in the status word internally. */
+       soft->twd = 0xffff;
        /* The behaviour is different from that detailed in
           Section 15.1.6 of the Intel manual */
-       operand_address.offset = 0;
-       operand_address.selector = 0;
-       instruction_address.offset = 0;
-       instruction_address.selector = 0;
-       instruction_address.opcode = 0;
-       no_ip_update = 1;
+       oaddr = (struct address *)&soft->foo;
+       oaddr->offset = 0;
+       oaddr->selector = 0;
+       iaddr = (struct address *)&soft->fip;
+       iaddr->offset = 0;
+       iaddr->selector = 0;
+       iaddr->opcode = 0;
+       soft->no_update = 1;
+}
+
+void finit(void)
+{
+       finit_task(current);
 }
 
 /*
index 00f127c80b0e38fa50eec531e8e85c8d1f18fd6d..d11745334a674a08caf8109be2d970ce5969600d 100644 (file)
@@ -158,7 +158,6 @@ EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
 
-#ifdef CONFIG_NUMA
 void __init set_highmem_pages_init(void)
 {
        struct zone *zone;
@@ -182,11 +181,3 @@ void __init set_highmem_pages_init(void)
        }
        totalram_pages += totalhigh_pages;
 }
-#else
-void __init set_highmem_pages_init(void)
-{
-       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
-
-       totalram_pages += totalhigh_pages;
-}
-#endif /* CONFIG_NUMA */
index ce6a722587d8fa6cfd8343c3931866ce93eda13b..15219e0d1243479790e6cd809b68489da41b17f1 100644 (file)
@@ -1,8 +1,345 @@
+#include <linux/ioport.h>
 #include <linux/swap.h>
+
 #include <asm/cacheflush.h>
+#include <asm/e820.h>
+#include <asm/init.h>
 #include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/sections.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+unsigned long __initdata e820_table_start;
+unsigned long __meminitdata e820_table_end;
+unsigned long __meminitdata e820_table_top;
+
+int after_bootmem;
+
+int direct_gbpages
+#ifdef CONFIG_DIRECT_GBPAGES
+                               = 1
+#endif
+;
+
+static void __init find_early_table_space(unsigned long end, int use_pse,
+                                         int use_gbpages)
+{
+       unsigned long puds, pmds, ptes, tables, start;
+
+       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
+       if (use_gbpages) {
+               unsigned long extra;
+
+               extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+               pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+       } else
+               pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
+       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+       if (use_pse) {
+               unsigned long extra;
+
+               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
+               extra += PMD_SIZE;
+#endif
+               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       } else
+               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+
+#ifdef CONFIG_X86_32
+       /* for fixmap */
+       tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
+
+       /*
+        * RED-PEN putting page tables only on node 0 could
+        * cause a hotspot and fill up ZONE_DMA. The page tables
+        * need roughly 0.5KB per GB.
+        */
+#ifdef CONFIG_X86_32
+       start = 0x7000;
+       e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+                                       tables, PAGE_SIZE);
+#else /* CONFIG_X86_64 */
+       start = 0x8000;
+       e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
+#endif
+       if (e820_table_start == -1UL)
+               panic("Cannot find space for the kernel page tables");
+
+       e820_table_start >>= PAGE_SHIFT;
+       e820_table_end = e820_table_start;
+       e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
+
+       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+               end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
+}
+
+struct map_range {
+       unsigned long start;
+       unsigned long end;
+       unsigned page_size_mask;
+};
+
+#ifdef CONFIG_X86_32
+#define NR_RANGE_MR 3
+#else /* CONFIG_X86_64 */
+#define NR_RANGE_MR 5
+#endif
+
+static int save_mr(struct map_range *mr, int nr_range,
+                  unsigned long start_pfn, unsigned long end_pfn,
+                  unsigned long page_size_mask)
+{
+       if (start_pfn < end_pfn) {
+               if (nr_range >= NR_RANGE_MR)
+                       panic("run out of range for init_memory_mapping\n");
+               mr[nr_range].start = start_pfn<<PAGE_SHIFT;
+               mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
+               mr[nr_range].page_size_mask = page_size_mask;
+               nr_range++;
+       }
+
+       return nr_range;
+}
+
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+       if (direct_gbpages && cpu_has_gbpages)
+               printk(KERN_INFO "Using GB pages for direct mapping\n");
+       else
+               direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+                                              unsigned long end)
+{
+       unsigned long page_size_mask = 0;
+       unsigned long start_pfn, end_pfn;
+       unsigned long ret = 0;
+       unsigned long pos;
+
+       struct map_range mr[NR_RANGE_MR];
+       int nr_range, i;
+       int use_pse, use_gbpages;
+
+       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+
+       if (!after_bootmem)
+               init_gbpages();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /*
+        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+        * This will simplify cpa(), which otherwise needs to support splitting
+        * large pages into small in interrupt context, etc.
+        */
+       use_pse = use_gbpages = 0;
+#else
+       use_pse = cpu_has_pse;
+       use_gbpages = direct_gbpages;
+#endif
+
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_PAE
+       set_nx();
+       if (nx_enabled)
+               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+#endif
+
+       /* Enable PSE if available */
+       if (cpu_has_pse)
+               set_in_cr4(X86_CR4_PSE);
+
+       /* Enable PGE if available */
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __supported_pte_mask |= _PAGE_GLOBAL;
+       }
+#endif
+
+       if (use_gbpages)
+               page_size_mask |= 1 << PG_LEVEL_1G;
+       if (use_pse)
+               page_size_mask |= 1 << PG_LEVEL_2M;
+
+       memset(mr, 0, sizeof(mr));
+       nr_range = 0;
+
+       /* head if not big page alignment ? */
+       start_pfn = start >> PAGE_SHIFT;
+       pos = start_pfn << PAGE_SHIFT;
+#ifdef CONFIG_X86_32
+       /*
+        * Don't use a large page for the first 2/4MB of memory
+        * because there are often fixed size MTRRs in there
+        * and overlapping MTRRs into large pages can cause
+        * slowdowns.
+        */
+       if (pos == 0)
+               end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
+       else
+               end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                                << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+       end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
+                       << (PMD_SHIFT - PAGE_SHIFT);
+#endif
+       if (end_pfn > (end >> PAGE_SHIFT))
+               end_pfn = end >> PAGE_SHIFT;
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+               pos = end_pfn << PAGE_SHIFT;
+       }
+
+       /* big page (2M) range */
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+#ifdef CONFIG_X86_32
+       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+       end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+       if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+               end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+#endif
+
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
+
+#ifdef CONFIG_X86_64
+       /* big page (1G) range */
+       start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask &
+                                ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+               pos = end_pfn << PAGE_SHIFT;
+       }
+
+       /* tail is not big page (1G) alignment */
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
+#endif
+
+       /* tail is not big page (2M) alignment */
+       start_pfn = pos>>PAGE_SHIFT;
+       end_pfn = end>>PAGE_SHIFT;
+       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+
+       /* try to merge same page size and continuous */
+       for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
+               unsigned long old_start;
+               if (mr[i].end != mr[i+1].start ||
+                   mr[i].page_size_mask != mr[i+1].page_size_mask)
+                       continue;
+               /* move it */
+               old_start = mr[i].start;
+               memmove(&mr[i], &mr[i+1],
+                       (nr_range - 1 - i) * sizeof(struct map_range));
+               mr[i--].start = old_start;
+               nr_range--;
+       }
+
+       for (i = 0; i < nr_range; i++)
+               printk(KERN_DEBUG " %010lx - %010lx page %s\n",
+                               mr[i].start, mr[i].end,
+                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
+                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+
+       /*
+        * Find space for the kernel direct mapping tables.
+        *
+        * Later we should allocate these tables in the local node of the
+        * memory mapped. Unfortunately this is done currently before the
+        * nodes are discovered.
+        */
+       if (!after_bootmem)
+               find_early_table_space(end, use_pse, use_gbpages);
+
+#ifdef CONFIG_X86_32
+       for (i = 0; i < nr_range; i++)
+               kernel_physical_mapping_init(mr[i].start, mr[i].end,
+                                            mr[i].page_size_mask);
+       ret = end;
+#else /* CONFIG_X86_64 */
+       for (i = 0; i < nr_range; i++)
+               ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
+                                                  mr[i].page_size_mask);
+#endif
+
+#ifdef CONFIG_X86_32
+       early_ioremap_page_table_range_init();
+
+       load_cr3(swapper_pg_dir);
+#endif
+
+#ifdef CONFIG_X86_64
+       if (!after_bootmem)
+               mmu_cr4_features = read_cr4();
+#endif
+       __flush_tlb_all();
+
+       if (!after_bootmem && e820_table_end > e820_table_start)
+               reserve_early(e820_table_start << PAGE_SHIFT,
+                                e820_table_end << PAGE_SHIFT, "PGTABLE");
+
+       if (!after_bootmem)
+               early_memtest(start, end);
+
+       return ret >> PAGE_SHIFT;
+}
+
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ *
+ * On x86, access has to be given to the first megabyte of ram because that area
+ * contains bios code and data regions used by X and dosemu and similar apps.
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+       if (pagenr <= 256)
+               return 1;
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+               return 0;
+       if (!page_is_ram(pagenr))
+               return 1;
+       return 0;
+}
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
@@ -47,3 +384,10 @@ void free_initmem(void)
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
 }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       free_init_pages("initrd memory", start, end);
+}
+#endif
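
init_memory_mapping() above first splits [start, end) into head, 2M, 1G and tail ranges, then collapses neighbouring entries that ended up contiguous with the same page_size_mask. That merge step can be sketched on its own over a simplified range struct (the names below are illustrative, not the kernel's map_range):

#include <string.h>

struct range {
        unsigned long start;
        unsigned long end;
        unsigned long mask;     /* stands in for page_size_mask */
};

/* Collapse adjacent, contiguous ranges that use the same page size;
 * returns the new number of ranges. */
static int merge_ranges(struct range *r, int n)
{
        int i;

        for (i = 0; n > 1 && i < n - 1; i++) {
                if (r[i].end != r[i + 1].start || r[i].mask != r[i + 1].mask)
                        continue;
                r[i].end = r[i + 1].end;                /* absorb the neighbour */
                memmove(&r[i + 1], &r[i + 2],
                        (n - 2 - i) * sizeof(*r));      /* close the gap */
                n--;
                i--;                                    /* re-check this slot */
        }
        return n;
}

The kernel loop reaches the same result the other way around, moving mr[i+1..] down over mr[i] and restoring the saved start; either way, after the loop no two adjacent ranges share both a boundary and a page size.
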
index 47df0e1bbeb9aca5a30c9b2d858431518263ad11..db81e9a8556b3b0bba854aeba5740bd0cc039039 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>
 
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
@@ -58,19 +59,14 @@ unsigned long highstart_pfn, highend_pfn;
 
 static noinline int do_test_wp_bit(void);
 
-
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
-static int __initdata after_init_bootmem;
+bool __read_mostly __vmalloc_start_set = false;
 
 static __init void *alloc_low_page(void)
 {
-       unsigned long pfn = table_end++;
+       unsigned long pfn = e820_table_end++;
        void *adr;
 
-       if (pfn >= table_top)
+       if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");
 
        adr = __va(pfn * PAGE_SIZE);
@@ -90,7 +86,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-               if (after_init_bootmem)
+               if (after_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
@@ -117,7 +113,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;
 
-               if (after_init_bootmem) {
+               if (after_bootmem) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
@@ -168,12 +164,12 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-           && ((__pa(pte) >> PAGE_SHIFT) < table_start
-               || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+           && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
+               || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
                pte_t *newpte;
                int i;
 
-               BUG_ON(after_init_bootmem);
+               BUG_ON(after_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
@@ -242,11 +238,14 @@ static inline int is_kernel_text(unsigned long addr)
  * of max_low_pfn pages, by creating page tables starting from address
  * PAGE_OFFSET:
  */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-                                               unsigned long start_pfn,
-                                               unsigned long end_pfn,
-                                               int use_pse)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask)
 {
+       int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+       unsigned long start_pfn, end_pfn;
+       pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
@@ -255,6 +254,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
        unsigned pages_2m, pages_4k;
        int mapping_iter;
 
+       start_pfn = start >> PAGE_SHIFT;
+       end_pfn = end >> PAGE_SHIFT;
+
        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
@@ -369,26 +371,6 @@ repeat:
                mapping_iter = 2;
                goto repeat;
        }
-}
-
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-       if (pagenr <= 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-               return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
        return 0;
 }
 
@@ -545,8 +527,9 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
+void __init early_ioremap_page_table_range_init(void)
 {
+       pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;
 
        /*
@@ -641,7 +624,7 @@ static int __init noexec_setup(char *str)
 }
 early_param("noexec", noexec_setup);
 
-static void __init set_nx(void)
+void __init set_nx(void)
 {
        unsigned int v[4], l, h;
 
@@ -793,6 +776,8 @@ void __init initmem_init(unsigned long start_pfn,
 #ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
 #endif
+       __vmalloc_start_set = true;
+
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));
 
@@ -814,176 +799,66 @@ static void __init zone_sizes_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
 
+static unsigned long __init setup_node_bootmem(int nodeid,
+                                unsigned long start_pfn,
+                                unsigned long end_pfn,
+                                unsigned long bootmap)
+{
+       unsigned long bootmap_size;
+
+       /* don't touch min_low_pfn */
+       bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
+                                        bootmap >> PAGE_SHIFT,
+                                        start_pfn, end_pfn);
+       printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
+               nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+       printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
+                nodeid, bootmap, bootmap + bootmap_size);
+       free_bootmem_with_active_regions(nodeid, end_pfn);
+       early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+
+       return bootmap + bootmap_size;
+}
+
 void __init setup_bootmem_allocator(void)
 {
-       int i;
+       int nodeid;
        unsigned long bootmap_size, bootmap;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
-       bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
-                                max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
+       bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 
-       /* don't touch min_low_pfn */
-       bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
-                                        min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
-       printk(KERN_INFO "  low ram: %08lx - %08lx\n",
-                min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
-       printk(KERN_INFO "  bootmap %08lx - %08lx\n",
-                bootmap, bootmap + bootmap_size);
-       for_each_online_node(i)
-               free_bootmem_with_active_regions(i, max_low_pfn);
-       early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
-
-       after_init_bootmem = 1;
-}
-
-static void __init find_early_table_space(unsigned long end, int use_pse)
-{
-       unsigned long puds, pmds, ptes, tables, start;
+       printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
 
-       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+       for_each_online_node(nodeid) {
+                unsigned long start_pfn, end_pfn;
 
-       pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-       if (use_pse) {
-               unsigned long extra;
-
-               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-               extra += PMD_SIZE;
-               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       } else
-               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-       /* for fixmap */
-       tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-
-       /*
-        * RED-PEN putting page tables only on node 0 could
-        * cause a hotspot and fill up ZONE_DMA. The page tables
-        * need roughly 0.5KB per GB.
-        */
-       start = 0x7000;
-       table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
-                                       tables, PAGE_SIZE);
-       if (table_start == -1UL)
-               panic("Cannot find space for the kernel page tables");
-
-       table_start >>= PAGE_SHIFT;
-       table_end = table_start;
-       table_top = table_start + (tables>>PAGE_SHIFT);
-
-       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, table_start << PAGE_SHIFT,
-               (table_start << PAGE_SHIFT) + tables);
-}
-
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-                                               unsigned long end)
-{
-       pgd_t *pgd_base = swapper_pg_dir;
-       unsigned long start_pfn, end_pfn;
-       unsigned long big_page_start;
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /*
-        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-        * This will simplify cpa(), which otherwise needs to support splitting
-        * large pages into small in interrupt context, etc.
-        */
-       int use_pse = 0;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+               start_pfn = node_start_pfn[nodeid];
+               end_pfn = node_end_pfn[nodeid];
+               if (start_pfn > max_low_pfn)
+                       continue;
+               if (end_pfn > max_low_pfn)
+                       end_pfn = max_low_pfn;
 #else
-       int use_pse = cpu_has_pse;
+               start_pfn = 0;
+               end_pfn = max_low_pfn;
 #endif
-
-       /*
-        * Find space for the kernel direct mapping tables.
-        */
-       if (!after_init_bootmem)
-               find_early_table_space(end, use_pse);
-
-#ifdef CONFIG_X86_PAE
-       set_nx();
-       if (nx_enabled)
-               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
-
-       /* Enable PSE if available */
-       if (cpu_has_pse)
-               set_in_cr4(X86_CR4_PSE);
-
-       /* Enable PGE if available */
-       if (cpu_has_pge) {
-               set_in_cr4(X86_CR4_PGE);
-               __supported_pte_mask |= _PAGE_GLOBAL;
-       }
-
-       /*
-        * Don't use a large page for the first 2/4MB of memory
-        * because there are often fixed size MTRRs in there
-        * and overlapping MTRRs into large pages can cause
-        * slowdowns.
-        */
-       big_page_start = PMD_SIZE;
-
-       if (start < big_page_start) {
-               start_pfn = start >> PAGE_SHIFT;
-               end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
-       } else {
-               /* head is not big page alignment ? */
-               start_pfn = start >> PAGE_SHIFT;
-               end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
-                                << (PMD_SHIFT - PAGE_SHIFT);
+               bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
+                                                bootmap);
        }
-       if (start_pfn < end_pfn)
-               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
-
-       /* big page range */
-       start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < (big_page_start >> PAGE_SHIFT))
-               start_pfn =  big_page_start >> PAGE_SHIFT;
-       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < end_pfn)
-               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-                                            use_pse);
-
-       /* tail is not big page alignment ? */
-       start_pfn = end_pfn;
-       if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
-               end_pfn = end >> PAGE_SHIFT;
-               if (start_pfn < end_pfn)
-                       kernel_physical_mapping_init(pgd_base, start_pfn,
-                                                        end_pfn, 0);
-       }
-
-       early_ioremap_page_table_range_init(pgd_base);
 
-       load_cr3(swapper_pg_dir);
-
-       __flush_tlb_all();
-
-       if (!after_init_bootmem)
-               reserve_early(table_start << PAGE_SHIFT,
-                                table_end << PAGE_SHIFT, "PGTABLE");
-
-       if (!after_init_bootmem)
-               early_memtest(start, end);
-
-       return end >> PAGE_SHIFT;
+       after_bootmem = 1;
 }
 
-
 /*
  * paging_init() sets up the page tables - note that the first 8MB are
  * already mapped by head.S.
@@ -1217,13 +1092,6 @@ void mark_rodata_ro(void)
 }
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {
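
setup_bootmem_allocator() above sizes the boot-time bitmap with bootmem_bootmap_pages(max_low_pfn) before asking find_e820_area() for room, then threads the returned bootmap address through setup_node_bootmem() one node at a time. The sizing rule is one bit per low-memory page, rounded up to whole pages; a rough standalone sketch (the 4 KiB page constants are assumed for illustration):

#define PAGE_SIZE  4096UL               /* illustrative x86 value */
#define PAGE_SHIFT 12

/* One bit per managed page, rounded up to a whole number of pages --
 * the sizing rule behind bootmem_bootmap_pages(). */
static unsigned long bootmap_pages(unsigned long pages)
{
        unsigned long bytes = (pages + 7) / 8;

        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

For 1 GiB of low memory (262144 pages) this works out to a 32 KiB bitmap, i.e. 8 pages, which the caller converts back to bytes with << PAGE_SHIFT before reserving it as "BOOTMAP".
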
index 07f44d491df16d9b392fdaa09e3bdf4143f3c236..54efa57d1c039b648d6eb77bef1878c10e9cdb0b 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/kdebug.h>
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -61,12 +62,6 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-                               = 1
-#endif
-;
-
 static int __init parse_direct_gbpages_off(char *arg)
 {
        direct_gbpages = 0;
@@ -87,12 +82,10 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-int after_bootmem;
-
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
-static int do_not_nx __cpuinitdata;
+static int disable_nx __cpuinitdata;
 
 /*
  * noexec=on|off
@@ -107,9 +100,9 @@ static int __init nonx_setup(char *str)
                return -EINVAL;
        if (!strncmp(str, "on", 2)) {
                __supported_pte_mask |= _PAGE_NX;
-               do_not_nx = 0;
+               disable_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
-               do_not_nx = 1;
+               disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 0;
@@ -121,7 +114,7 @@ void __cpuinit check_efer(void)
        unsigned long efer;
 
        rdmsrl(MSR_EFER, efer);
-       if (!(efer & EFER_NX) || do_not_nx)
+       if (!(efer & EFER_NX) || disable_nx)
                __supported_pte_mask &= ~_PAGE_NX;
 }
 
@@ -325,13 +318,9 @@ void __init cleanup_highmap(void)
        }
 }
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-       unsigned long pfn = table_end++;
+       unsigned long pfn = e820_table_end++;
        void *adr;
 
        if (after_bootmem) {
@@ -341,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                return adr;
        }
 
-       if (pfn >= table_top)
+       if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");
 
        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
@@ -581,58 +570,10 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
        return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-                                         int use_gbpages)
-{
-       unsigned long puds, pmds, ptes, tables, start;
-
-       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-       if (use_gbpages) {
-               unsigned long extra;
-               extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-               pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-       } else
-               pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-       if (use_pse) {
-               unsigned long extra;
-               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       } else
-               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-       /*
-        * RED-PEN putting page tables only on node 0 could
-        * cause a hotspot and fill up ZONE_DMA. The page tables
-        * need roughly 0.5KB per GB.
-        */
-       start = 0x8000;
-       table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-       if (table_start == -1UL)
-               panic("Cannot find space for the kernel page tables");
-
-       table_start >>= PAGE_SHIFT;
-       table_end = table_start;
-       table_top = table_start + (tables >> PAGE_SHIFT);
-
-       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-static void __init init_gbpages(void)
-{
-       if (direct_gbpages && cpu_has_gbpages)
-               printk(KERN_INFO "Using GB pages for direct mapping\n");
-       else
-               direct_gbpages = 0;
-}
-
-static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
-                                               unsigned long end,
-                                               unsigned long page_size_mask)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask)
 {
 
        unsigned long next, last_map_addr = end;
@@ -669,176 +610,6 @@ static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
        return last_map_addr;
 }
 
-struct map_range {
-       unsigned long start;
-       unsigned long end;
-       unsigned page_size_mask;
-};
-
-#define NR_RANGE_MR 5
-
-static int save_mr(struct map_range *mr, int nr_range,
-                  unsigned long start_pfn, unsigned long end_pfn,
-                  unsigned long page_size_mask)
-{
-
-       if (start_pfn < end_pfn) {
-               if (nr_range >= NR_RANGE_MR)
-                       panic("run out of range for init_memory_mapping\n");
-               mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-               mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
-               mr[nr_range].page_size_mask = page_size_mask;
-               nr_range++;
-       }
-
-       return nr_range;
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-                                              unsigned long end)
-{
-       unsigned long last_map_addr = 0;
-       unsigned long page_size_mask = 0;
-       unsigned long start_pfn, end_pfn;
-       unsigned long pos;
-
-       struct map_range mr[NR_RANGE_MR];
-       int nr_range, i;
-       int use_pse, use_gbpages;
-
-       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-       /*
-        * Find space for the kernel direct mapping tables.
-        *
-        * Later we should allocate these tables in the local node of the
-        * memory mapped. Unfortunately this is done currently before the
-        * nodes are discovered.
-        */
-       if (!after_bootmem)
-               init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /*
-        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-        * This will simplify cpa(), which otherwise needs to support splitting
-        * large pages into small in interrupt context, etc.
-        */
-       use_pse = use_gbpages = 0;
-#else
-       use_pse = cpu_has_pse;
-       use_gbpages = direct_gbpages;
-#endif
-
-       if (use_gbpages)
-               page_size_mask |= 1 << PG_LEVEL_1G;
-       if (use_pse)
-               page_size_mask |= 1 << PG_LEVEL_2M;
-
-       memset(mr, 0, sizeof(mr));
-       nr_range = 0;
-
-       /* head if not big page alignment ?*/
-       start_pfn = start >> PAGE_SHIFT;
-       pos = start_pfn << PAGE_SHIFT;
-       end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-                       << (PMD_SHIFT - PAGE_SHIFT);
-       if (end_pfn > (end >> PAGE_SHIFT))
-               end_pfn = end >> PAGE_SHIFT;
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* big page (2M) range*/
-       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-       end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                        << (PUD_SHIFT - PAGE_SHIFT);
-       if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-               end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                               page_size_mask & (1<<PG_LEVEL_2M));
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* big page (1G) range */
-       start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                        << (PUD_SHIFT - PAGE_SHIFT);
-       end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                               page_size_mask &
-                                ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* tail is not big page (1G) alignment */
-       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-       end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                               page_size_mask & (1<<PG_LEVEL_2M));
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* tail is not big page (2M) alignment */
-       start_pfn = pos>>PAGE_SHIFT;
-       end_pfn = end>>PAGE_SHIFT;
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-       /* try to merge same page size and continuous */
-       for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-               unsigned long old_start;
-               if (mr[i].end != mr[i+1].start ||
-                   mr[i].page_size_mask != mr[i+1].page_size_mask)
-                       continue;
-               /* move it */
-               old_start = mr[i].start;
-               memmove(&mr[i], &mr[i+1],
-                        (nr_range - 1 - i) * sizeof (struct map_range));
-               mr[i--].start = old_start;
-               nr_range--;
-       }
-
-       for (i = 0; i < nr_range; i++)
-               printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-                               mr[i].start, mr[i].end,
-                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-       if (!after_bootmem)
-               find_early_table_space(end, use_pse, use_gbpages);
-
-       for (i = 0; i < nr_range; i++)
-               last_map_addr = kernel_physical_mapping_init(
-                                       mr[i].start, mr[i].end,
-                                       mr[i].page_size_mask);
-
-       if (!after_bootmem)
-               mmu_cr4_features = read_cr4();
-       __flush_tlb_all();
-
-       if (!after_bootmem && table_end > table_start)
-               reserve_early(table_start << PAGE_SHIFT,
-                                table_end << PAGE_SHIFT, "PGTABLE");
-
-       printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
-                        last_map_addr, end);
-
-       if (!after_bootmem)
-               early_memtest(start, end);
-
-       return last_map_addr >> PAGE_SHIFT;
-}
-
 #ifndef CONFIG_NUMA
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -910,28 +681,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-       if (pagenr <= 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-               return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
-}
-
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;
 
@@ -1019,13 +768,6 @@ void mark_rodata_ro(void)
 
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {
index 433f7bd4648af4206fbc4afb0b2fd6b6dab59b6e..aca924a30ee6da36146acf1d86da367df04e7cc1 100644 (file)
@@ -38,8 +38,7 @@ unsigned long __phys_addr(unsigned long x)
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
-               VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
-                                       !phys_addr_valid(x));
+               VIRTUAL_BUG_ON(!phys_addr_valid(x));
        }
        return x;
 }
@@ -56,10 +55,8 @@ bool __virt_addr_valid(unsigned long x)
                if (x < PAGE_OFFSET)
                        return false;
                x -= PAGE_OFFSET;
-               if (system_state == SYSTEM_BOOTING ?
-                               x > MAXMEM : !phys_addr_valid(x)) {
+               if (!phys_addr_valid(x))
                        return false;
-               }
        }
 
        return pfn_valid(x >> PAGE_SHIFT);
@@ -76,10 +73,9 @@ static inline int phys_addr_valid(unsigned long addr)
 #ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
-       /* VMALLOC_* aren't constants; not available at the boot time */
+       /* VMALLOC_* aren't constants  */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-       VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
-               is_vmalloc_addr((void *) x));
+       VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
        return x - PAGE_OFFSET;
 }
 EXPORT_SYMBOL(__phys_addr);
@@ -89,7 +85,9 @@ bool __virt_addr_valid(unsigned long x)
 {
        if (x < PAGE_OFFSET)
                return false;
-       if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
+       if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
+               return false;
+       if (x >= FIXADDR_START)
                return false;
        return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
 }
@@ -508,13 +506,19 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
        return &bm_pte[pte_index(addr)];
 }
 
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
 void __init early_ioremap_init(void)
 {
        pmd_t *pmd;
+       int i;
 
        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");
 
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+               slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
@@ -581,6 +585,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 
 static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+
 static int __init check_early_ioremap_leak(void)
 {
        int count = 0;
@@ -602,7 +607,8 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *
+__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
        unsigned long offset, last_addr;
        unsigned int nrpages;
@@ -668,9 +674,9 @@ static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned lo
                --nrpages;
        }
        if (early_ioremap_debug)
-               printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+               printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
 
-       prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
+       prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
 }
 
@@ -738,8 +744,3 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
        }
        prev_map[slot] = NULL;
 }
-
-void __this_fixmap_does_not_exist(void)
-{
-       WARN_ON(1);
-}
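
The slot_virt[] table introduced above gives every FIX_BTMAPS slot a precomputed virtual base, so __early_ioremap() only needs to turn a phys/size request into a page-aligned base, an in-page offset and a pte count, and then hand back slot_virt[slot] + offset. A standalone sketch of that bookkeeping (function and parameter names are assumptions, not the kernel's):

#define PAGE_SIZE 4096UL                /* illustrative x86 value */
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Split an early-ioremap request into the pieces a fixmap slot needs:
 * the page-aligned physical base, the offset of the first byte within
 * that page, and how many fixmap ptes have to be programmed. */
static void split_request(unsigned long phys, unsigned long size,
                          unsigned long *base, unsigned long *offset,
                          unsigned long *nrpages)
{
        unsigned long last = phys + size - 1;

        *offset  = phys & ~PAGE_MASK;
        *base    = phys & PAGE_MASK;
        *nrpages = ((last & PAGE_MASK) + PAGE_SIZE - *base) / PAGE_SIZE;
}

Mapping a 5-byte register block that starts 4 bytes into a page, for instance, still programs a single pte and returns slot_virt[slot] + 4.
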
index 93d82038af4b541853420a48502df0fc4c9c2407..6a518dd08a36511661df2842815781c94a680fbd 100644 (file)
@@ -32,11 +32,14 @@ struct kmmio_fault_page {
        struct list_head list;
        struct kmmio_fault_page *release_next;
        unsigned long page; /* location of the fault page */
+       bool old_presence; /* page presence prior to arming */
+       bool armed;
 
        /*
         * Number of times this page has been registered as a part
         * of a probe. If zero, page is disarmed and this may be freed.
-        * Used only by writers (RCU).
+        * Used only by writers (RCU) and post_kmmio_handler().
+        * Protected by kmmio_lock, when linked into kmmio_page_table.
         */
        int count;
 };
@@ -105,57 +108,85 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
        return NULL;
 }
 
-static void set_page_present(unsigned long addr, bool present,
-                                                       unsigned int *pglevel)
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+       pmdval_t v = pmd_val(*pmd);
+       *old = !!(v & _PAGE_PRESENT);
+       v &= ~_PAGE_PRESENT;
+       if (present)
+               v |= _PAGE_PRESENT;
+       set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+       pteval_t v = pte_val(*pte);
+       *old = !!(v & _PAGE_PRESENT);
+       v &= ~_PAGE_PRESENT;
+       if (present)
+               v |= _PAGE_PRESENT;
+       set_pte_atomic(pte, __pte(v));
+}
+
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
-       pteval_t pteval;
-       pmdval_t pmdval;
        unsigned int level;
-       pmd_t *pmd;
        pte_t *pte = lookup_address(addr, &level);
 
        if (!pte) {
                pr_err("kmmio: no pte for page 0x%08lx\n", addr);
-               return;
+               return -1;
        }
 
-       if (pglevel)
-               *pglevel = level;
-
        switch (level) {
        case PG_LEVEL_2M:
-               pmd = (pmd_t *)pte;
-               pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
-               if (present)
-                       pmdval |= _PAGE_PRESENT;
-               set_pmd(pmd, __pmd(pmdval));
+               set_pmd_presence((pmd_t *)pte, present, old);
                break;
-
        case PG_LEVEL_4K:
-               pteval = pte_val(*pte) & ~_PAGE_PRESENT;
-               if (present)
-                       pteval |= _PAGE_PRESENT;
-               set_pte_atomic(pte, __pte(pteval));
+               set_pte_presence(pte, present, old);
                break;
-
        default:
                pr_err("kmmio: unexpected page level 0x%x.\n", level);
-               return;
+               return -1;
        }
 
        __flush_tlb_one(addr);
+       return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-       set_page_present(page & PAGE_MASK, false, pglevel);
+       int ret;
+       WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+       if (f->armed) {
+               pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+                                       f->page, f->count, f->old_presence);
+       }
+       ret = set_page_presence(f->page, false, &f->old_presence);
+       WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+       f->armed = true;
+       return ret;
 }
 
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-       set_page_present(page & PAGE_MASK, true, pglevel);
+       bool tmp;
+       int ret = set_page_presence(f->page, f->old_presence, &tmp);
+       WARN_ONCE(ret < 0,
+                       KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+       f->armed = false;
 }
 
 /*
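The two new helpers above share the same present-bit bookkeeping: remember whether _PAGE_PRESENT was set, clear it, then set it again only when restoring or arming in the "present" direction. A minimal user-space sketch of that sequence, using a plain uint64_t and a made-up MY_PAGE_PRESENT bit in place of a real pte/pmd (illustration only, not the kernel helpers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_PRESENT (1ULL << 0)       /* stand-in for _PAGE_PRESENT */

/* Mirror of the helper's logic: save the old state, then force the bit. */
static void set_presence(uint64_t *entry, bool present, bool *old)
{
        uint64_t v = *entry;

        *old = !!(v & MY_PAGE_PRESENT);
        v &= ~MY_PAGE_PRESENT;
        if (present)
                v |= MY_PAGE_PRESENT;
        *entry = v;
}

int main(void)
{
        uint64_t pte = 0x1000 | MY_PAGE_PRESENT;  /* hypothetical mapped entry */
        bool old;

        set_presence(&pte, false, &old);          /* "arm": hide the page */
        printf("armed:    pte=%#llx old_presence=%d\n",
               (unsigned long long)pte, old);

        set_presence(&pte, old, &old);            /* "disarm": restore old state */
        printf("disarmed: pte=%#llx\n", (unsigned long long)pte);
        return 0;
}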
@@ -202,28 +233,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
        ctx = &get_cpu_var(kmmio_ctx);
        if (ctx->active) {
-               disarm_kmmio_fault_page(faultpage->page, NULL);
                if (addr == ctx->addr) {
                        /*
-                        * On SMP we sometimes get recursive probe hits on the
-                        * same address. Context is already saved, fall out.
+                        * A second fault on the same page means some other
+                        * condition needs handling by do_page_fault(); the
+                        * page really not being present is the most common.
                         */
-                       pr_debug("kmmio: duplicate probe hit on CPU %d, for "
-                                               "address 0x%08lx.\n",
-                                               smp_processor_id(), addr);
-                       ret = 1;
-                       goto no_kmmio_ctx;
-               }
-               /*
-                * Prevent overwriting already in-flight context.
-                * This should not happen, let's hope disarming at least
-                * prevents a panic.
-                */
-               pr_emerg("kmmio: recursive probe hit on CPU %d, "
+                       pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+                                       addr, smp_processor_id());
+
+                       if (!faultpage->old_presence)
+                               pr_info("kmmio: unexpected secondary hit for "
+                                       "address 0x%08lx on CPU %d.\n", addr,
+                                       smp_processor_id());
+               } else {
+                       /*
+                        * Prevent overwriting already in-flight context.
+                        * This should not happen, let's hope disarming at
+                        * least prevents a panic.
+                        */
+                       pr_emerg("kmmio: recursive probe hit on CPU %d, "
                                        "for address 0x%08lx. Ignoring.\n",
                                        smp_processor_id(), addr);
-               pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
-                                       ctx->addr);
+                       pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+                                               ctx->addr);
+                       disarm_kmmio_fault_page(faultpage);
+               }
                goto no_kmmio_ctx;
        }
        ctx->active++;
@@ -244,7 +279,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
        regs->flags &= ~X86_EFLAGS_IF;
 
        /* Now we set present bit in PTE and single step. */
-       disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+       disarm_kmmio_fault_page(ctx->fpage);
 
        /*
         * If another cpu accesses the same page while we are stepping,
@@ -275,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
        if (!ctx->active) {
-               pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+               pr_warning("kmmio: spurious debug trap on CPU %d.\n",
                                                        smp_processor_id());
                goto out;
        }
@@ -283,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);
 
-       arm_kmmio_fault_page(ctx->fpage->page, NULL);
+       /* Prevent racing against release_kmmio_fault_page(). */
+       spin_lock(&kmmio_lock);
+       if (ctx->fpage->count)
+               arm_kmmio_fault_page(ctx->fpage);
+       spin_unlock(&kmmio_lock);
 
        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;
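The re-arm in post_kmmio_handler() is now taken under kmmio_lock and skipped once the page's registration count has dropped to zero, so a page being torn down by release_kmmio_fault_page() is never re-armed behind its back. A rough pthread-based sketch of that guard, with a toy fault_page type standing in for struct kmmio_fault_page (assumption: names here are illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fault_page {               /* toy stand-in for struct kmmio_fault_page */
        int count;                /* registrations; 0 means being released   */
        bool armed;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Re-arm only while the page is still registered, mirroring the new guard. */
static void post_handler_rearm(struct fault_page *f)
{
        pthread_mutex_lock(&table_lock);
        if (f->count)
                f->armed = true;  /* the real code calls arm_kmmio_fault_page() */
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        struct fault_page live = { .count = 1 }, dying = { .count = 0 };

        post_handler_rearm(&live);
        post_handler_rearm(&dying);
        printf("live armed=%d, dying armed=%d\n", live.armed, dying.armed);
        return 0;
}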
@@ -315,20 +354,24 @@ static int add_kmmio_fault_page(unsigned long page)
        f = get_kmmio_fault_page(page);
        if (f) {
                if (!f->count)
-                       arm_kmmio_fault_page(f->page, NULL);
+                       arm_kmmio_fault_page(f);
                f->count++;
                return 0;
        }
 
-       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;
 
        f->count = 1;
        f->page = page;
-       list_add_rcu(&f->list, kmmio_page_list(f->page));
 
-       arm_kmmio_fault_page(f->page, NULL);
+       if (arm_kmmio_fault_page(f)) {
+               kfree(f);
+               return -1;
+       }
+
+       list_add_rcu(&f->list, kmmio_page_list(f->page));
 
        return 0;
 }
@@ -347,7 +390,7 @@ static void release_kmmio_fault_page(unsigned long page,
        f->count--;
        BUG_ON(f->count < 0);
        if (!f->count) {
-               disarm_kmmio_fault_page(f->page, NULL);
+               disarm_kmmio_fault_page(f);
                f->release_next = *release_list;
                *release_list = f;
        }
@@ -408,23 +451,24 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
 
 static void remove_kmmio_fault_pages(struct rcu_head *head)
 {
-       struct kmmio_delayed_release *dr = container_of(
-                                               head,
-                                               struct kmmio_delayed_release,
-                                               rcu);
+       struct kmmio_delayed_release *dr =
+               container_of(head, struct kmmio_delayed_release, rcu);
        struct kmmio_fault_page *p = dr->release_list;
        struct kmmio_fault_page **prevp = &dr->release_list;
        unsigned long flags;
+
        spin_lock_irqsave(&kmmio_lock, flags);
        while (p) {
-               if (!p->count)
+               if (!p->count) {
                        list_del_rcu(&p->list);
-               else
+                       prevp = &p->release_next;
+               } else {
                        *prevp = p->release_next;
-               prevp = &p->release_next;
+               }
                p = p->release_next;
        }
        spin_unlock_irqrestore(&kmmio_lock, flags);
+
        /* This is the real RCU destroy call. */
        call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
 }
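The rewritten loop walks the release list once, keeping entries whose count is still zero (those also get unlinked from the RCU page table list via list_del_rcu()) and splicing out entries that were re-registered in the meantime; prevp is only advanced when the current node stays on the list. A user-space sketch of just the singly-linked filtering, with a hypothetical node type:

#include <stdio.h>

struct node {                         /* toy stand-in for kmmio_fault_page */
        int count;                    /* non-zero: re-registered, keep alive */
        struct node *release_next;
};

/* Keep only nodes with count == 0 on the release list; drop the others.
 * prevp advances only when the current node stays linked. */
static void filter_release_list(struct node **release_list)
{
        struct node **prevp = release_list;
        struct node *p = *release_list;

        while (p) {
                if (!p->count)
                        prevp = &p->release_next;   /* node stays, move on    */
                else
                        *prevp = p->release_next;   /* unlink resurrected one */
                p = p->release_next;
        }
}

int main(void)
{
        struct node c = { .count = 0, .release_next = NULL };
        struct node b = { .count = 1, .release_next = &c };
        struct node a = { .count = 0, .release_next = &b };
        struct node *list = &a;

        filter_release_list(&list);
        for (struct node *p = list; p; p = p->release_next)
                printf("kept node with count=%d\n", p->count);
        return 0;
}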
index 0bcd7883d036d68e1943f49529c3e96c7a70fd62..605c8be06217b0da36344abd5f23d06326bcd87a 100644 (file)
@@ -100,6 +100,9 @@ static int __init parse_memtest(char *arg)
 {
        if (arg)
                memtest_pattern = simple_strtoul(arg, NULL, 0);
+       else
+               memtest_pattern = ARRAY_SIZE(patterns);
+
        return 0;
 }
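With this change a bare memtest= parameter (no value) selects every available pattern instead of none. A hedged user-space sketch of the parsing behaviour, using strtoul and a hypothetical pattern table (the kernel's real table lives elsewhere in memtest.c and differs from the values below):

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Hypothetical pattern table, for illustration only. */
static const unsigned long patterns[] = { 0x0, 0xffffffff, 0x55555555, 0xaaaaaaaa };

static unsigned long memtest_pattern;

/* Mirrors the parse logic: a bare "memtest" selects every pattern. */
static void parse_memtest(const char *arg)
{
        if (arg)
                memtest_pattern = strtoul(arg, NULL, 0);
        else
                memtest_pattern = ARRAY_SIZE(patterns);
}

int main(void)
{
        parse_memtest("2");
        printf("memtest=2 -> %lu pattern(s)\n", memtest_pattern);
        parse_memtest(NULL);
        printf("memtest   -> %lu pattern(s)\n", memtest_pattern);
        return 0;
}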
 
index 451fe95a0352b3d63634231ea31c4eea016b2550..3daefa04ace535f693b07e5700e0470e17ca93b8 100644 (file)
@@ -416,10 +416,11 @@ void __init initmem_init(unsigned long start_pfn,
        for_each_online_node(nid)
                propagate_e820_map_node(nid);
 
-       for_each_online_node(nid)
+       for_each_online_node(nid) {
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+               NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+       }
 
-       NODE_DATA(0)->bdata = &bootmem_node_data[0];
        setup_bootmem_allocator();
 }
 
index ab50a8d7402c8c7f4320f0fd803a22a2d9abf84a..427fd1b56df5540f6a871f31c99c92af4f3edacd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
  */
 #include <linux/module.h>
 #include <linux/io.h>
@@ -9,35 +9,74 @@
 
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+                               "(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+                               "(default: 0x400100).");
+
+static unsigned v16(unsigned i)
+{
+       return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+       return i * 212371 + 13;
+}
 
 static void do_write_test(void __iomem *p)
 {
        unsigned int i;
+       pr_info(MODULE_NAME ": write test.\n");
        mmiotrace_printk("Write test.\n");
+
        for (i = 0; i < 256; i++)
                iowrite8(i, p + i);
+
        for (i = 1024; i < (5 * 1024); i += 2)
-               iowrite16(i * 12 + 7, p + i);
+               iowrite16(v16(i), p + i);
+
        for (i = (5 * 1024); i < (16 * 1024); i += 4)
-               iowrite32(i * 212371 + 13, p + i);
+               iowrite32(v32(i), p + i);
 }
 
 static void do_read_test(void __iomem *p)
 {
        unsigned int i;
+       unsigned errs[3] = { 0 };
+       pr_info(MODULE_NAME ": read test.\n");
        mmiotrace_printk("Read test.\n");
+
        for (i = 0; i < 256; i++)
-               ioread8(p + i);
+               if (ioread8(p + i) != i)
+                       ++errs[0];
+
        for (i = 1024; i < (5 * 1024); i += 2)
-               ioread16(p + i);
+               if (ioread16(p + i) != v16(i))
+                       ++errs[1];
+
        for (i = (5 * 1024); i < (16 * 1024); i += 4)
-               ioread32(p + i);
+               if (ioread32(p + i) != v32(i))
+                       ++errs[2];
+
+       mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+                                               errs[0], errs[1], errs[2]);
 }
 
-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
 {
-       void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+       pr_info(MODULE_NAME ": read far test.\n");
+       mmiotrace_printk("Read far test.\n");
+
+       ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
+{
+       void __iomem *p = ioremap_nocache(mmio_address, size);
        if (!p) {
                pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
                return;
@@ -45,11 +84,15 @@ static void do_test(void)
        mmiotrace_printk("ioremap returned %p.\n", p);
        do_write_test(p);
        do_read_test(p);
+       if (read_far && read_far < size - 4)
+               do_read_far_test(p);
        iounmap(p);
 }
 
 static int __init init(void)
 {
+       unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
        if (mmio_address == 0) {
                pr_err(MODULE_NAME ": you have to use the module argument "
                                                        "mmio_address.\n");
@@ -58,10 +101,11 @@ static int __init init(void)
                return -ENXIO;
        }
 
-       pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
-                                       "in PCI address space, and writing "
-                                       "rubbish in there.\n", mmio_address);
-       do_test();
+       pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+               "address space, and writing 16 kB of rubbish in there.\n",
+                size >> 10, mmio_address);
+       do_test(size);
+       pr_info(MODULE_NAME ": All done.\n");
        return 0;
 }
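The reworked test module writes deterministic values (v16()/v32()) and then reads them back, counting mismatches per access width. The same scheme can be sketched in user space against a malloc'd buffer standing in for the ioremap'd region; there is no iowrite/ioread here, so this only illustrates the pattern and verification, not MMIO itself:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TEST_SIZE (16 * 1024)

static unsigned v16(unsigned i) { return i * 12 + 7; }
static unsigned v32(unsigned i) { return i * 212371 + 13; }

int main(void)
{
        uint8_t *p = calloc(1, TEST_SIZE);   /* stands in for the ioremap'd area */
        unsigned i, errs[3] = { 0 };

        if (!p)
                return 1;

        /* Write phase: 8-, 16- and 32-bit regions filled with known patterns. */
        for (i = 0; i < 256; i++)
                p[i] = (uint8_t)i;
        for (i = 1024; i < 5 * 1024; i += 2) {
                uint16_t w = (uint16_t)v16(i);
                memcpy(p + i, &w, 2);
        }
        for (i = 5 * 1024; i < 16 * 1024; i += 4) {
                uint32_t w = (uint32_t)v32(i);
                memcpy(p + i, &w, 4);
        }

        /* Read-back phase: count mismatches per access width. */
        for (i = 0; i < 256; i++)
                errs[0] += (p[i] != (uint8_t)i);
        for (i = 1024; i < 5 * 1024; i += 2) {
                uint16_t w;
                memcpy(&w, p + i, 2);
                errs[1] += (w != (uint16_t)v16(i));
        }
        for (i = 5 * 1024; i < 16 * 1024; i += 4) {
                uint32_t w;
                memcpy(&w, p + i, 4);
                errs[2] += (w != (uint32_t)v32(i));
        }

        printf("Read errors: 8-bit %u, 16-bit %u, 32-bit %u.\n",
               errs[0], errs[1], errs[2]);
        free(p);
        return 0;
}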
 
index a104593e70c38cdfdaebc1cf1214934e37ce3284..5a244f05360f5ca67592e29a7270f3874bf16c37 100644 (file)
@@ -39,14 +39,13 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 }
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-                                            struct bio *bio,
-                                            unsigned int *seg_size_ptr)
+                                            struct bio *bio)
 {
        unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
-       struct bio *fbio;
+       struct bio *fbio, *bbio;
 
        if (!bio)
                return 0;
@@ -87,26 +86,20 @@ new_segment:
                        seg_size = bv->bv_len;
                        highprv = high;
                }
+               bbio = bio;
        }
 
-       if (seg_size_ptr)
-               *seg_size_ptr = seg_size;
+       if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+               fbio->bi_seg_front_size = seg_size;
+       if (seg_size > bbio->bi_seg_back_size)
+               bbio->bi_seg_back_size = seg_size;
 
        return nr_phys_segs;
 }
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-       unsigned int seg_size = 0, phys_segs;
-
-       phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
-
-       if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-               rq->bio->bi_seg_front_size = seg_size;
-       if (seg_size > rq->biotail->bi_seg_back_size)
-               rq->biotail->bi_seg_back_size = seg_size;
-
-       rq->nr_phys_segments = phys_segs;
+       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -114,7 +107,7 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
        struct bio *nxt = bio->bi_next;
 
        bio->bi_next = NULL;
-       bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
+       bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
index efe77df6863f4378f4f0da414591f940870b76ea..38a2bc02a98c7f6648f7a7a3388c6182b88c07f9 100644 (file)
@@ -215,8 +215,19 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
        mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
        type &= mask;
 
-       alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
-                                     name);
+       alg = crypto_alg_lookup(name, type, mask);
+       if (!alg) {
+               char tmp[CRYPTO_MAX_ALG_NAME];
+
+               request_module(name);
+
+               if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+                   snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
+                       request_module(tmp);
+
+               alg = crypto_alg_lookup(name, type, mask);
+       }
+
        if (alg)
                return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
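When the plain lookup misses, the code now also requests a "<name>-all" fallback module (guarded by the CRYPTO_ALG_NEED_FALLBACK test), building the alias with a length-checked snprintf(); this pairs with the MODULE_ALIAS("aes-all") and "sha*-all" changes in the padlock drivers further down. A small user-space sketch of just the name construction, assuming the usual CRYPTO_MAX_ALG_NAME bound of 64:

#include <stdio.h>
#include <string.h>

#define CRYPTO_MAX_ALG_NAME 64   /* assumed bound for algorithm names */

/* Build the "<name>-all" fallback alias, refusing truncated names the same
 * way the snprintf() < sizeof(tmp) test in crypto_larval_lookup() does. */
static int build_fallback_alias(const char *name, char *tmp, size_t len)
{
        return snprintf(tmp, len, "%s-all", name) < (int)len ? 0 : -1;
}

int main(void)
{
        char tmp[CRYPTO_MAX_ALG_NAME];

        if (build_fallback_alias("aes", tmp, sizeof(tmp)) == 0)
                printf("would request_module(\"%s\")\n", tmp);

        char long_name[CRYPTO_MAX_ALG_NAME + 8];
        memset(long_name, 'x', sizeof(long_name) - 1);
        long_name[sizeof(long_name) - 1] = '\0';
        if (build_fallback_alias(long_name, tmp, sizeof(tmp)) != 0)
                printf("overlong name: alias skipped\n");
        return 0;
}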
 
index a603bbf9b1b706caf37a15b94eb8b6ecb4a3e8dc..66e012cd3271f98cc8541c95ca13fb327b4dd7c7 100644 (file)
@@ -582,18 +582,18 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },            /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },            /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },            /* MCP79 */
-       { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci },            /* MCP7B */
-       { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci },            /* MCP7B */
+       { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci },            /* MCP89 */
+       { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci },            /* MCP89 */
 
        /* SiS */
        { PCI_VDEVICE(SI, 0x1184), board_ahci },                /* SiS 966 */
index 9fbf0595f3d44fd3f51d1339c9bbe127db7fa698..060bcd601f5758d63ae95e017fe7f501c8dd0607 100644 (file)
@@ -1322,14 +1322,16 @@ static u64 ata_id_n_sectors(const u16 *id)
 {
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
-                       return ata_id_u64(id, 100);
+                       return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
                else
-                       return ata_id_u32(id, 60);
+                       return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        } else {
                if (ata_id_current_chs_valid(id))
-                       return ata_id_u32(id, 57);
+                       return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
+                              id[ATA_ID_CUR_SECTORS];
                else
-                       return id[1] * id[3] * id[6];
+                       return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
+                              id[ATA_ID_SECTORS];
        }
 }
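The capacity arithmetic itself is unchanged by this cleanup; only the bare word numbers are replaced with ATA_ID_* names. A user-space sketch with a hypothetical IDENTIFY buffer shows the three cases, e.g. the classic default CHS geometry 16383 x 16 x 63 works out to 16,514,064 sectors (roughly 8.4 GB); the word numbers below come from the old code in the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Word numbers from the old code: 1 = cylinders, 3 = heads, 6 = sectors/track,
 * 60-61 = LBA capacity, 100-103 = LBA48 capacity. */
static uint32_t id_u32(const uint16_t *id, int w)
{
        return ((uint32_t)id[w + 1] << 16) | id[w];
}

static uint64_t id_u64(const uint16_t *id, int w)
{
        return ((uint64_t)id_u32(id, w + 2) << 32) | id_u32(id, w);
}

int main(void)
{
        uint16_t id[256] = { 0 };     /* hypothetical IDENTIFY DEVICE data */

        id[1] = 16383;                /* default cylinders   */
        id[3] = 16;                   /* default heads       */
        id[6] = 63;                   /* sectors per track   */
        id[60] = 0x5500;              /* made-up LBA capacity, low word  */
        id[61] = 0x00ff;              /* made-up LBA capacity, high word */

        printf("CHS capacity  : %u sectors\n",
               (unsigned)id[1] * id[3] * id[6]);          /* 16514064 */
        printf("LBA capacity  : %u sectors\n", id_u32(id, 60));
        printf("LBA48 capacity: %llu sectors\n",
               (unsigned long long)id_u64(id, 100));
        return 0;
}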
 
@@ -4612,7 +4614,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
        VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
        if (qc->n_elem)
-               dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
+               dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
 
        qc->flags &= ~ATA_QCFLAG_DMAMAP;
        qc->sg = NULL;
@@ -4727,7 +4729,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
                return -1;
 
        DPRINTK("%d sg elements mapped\n", n_elem);
-
+       qc->orig_n_elem = qc->n_elem;
        qc->n_elem = n_elem;
        qc->flags |= ATA_QCFLAG_DMAMAP;
 
index ce2ef04753390b2c95dacd591662efd3c1800ebb..ea890911d4fa3f67863b8f52e7aebc258b4ce7e7 100644 (file)
@@ -2423,11 +2423,14 @@ int ata_eh_reset(struct ata_link *link, int classify,
                }
 
                /* prereset() might have cleared ATA_EH_RESET.  If so,
-                * bang classes and return.
+                * bang classes, thaw and return.
                 */
                if (reset && !(ehc->i.action & ATA_EH_RESET)) {
                        ata_for_each_dev(dev, link, ALL)
                                classes[dev->devno] = ATA_DEV_NONE;
+                       if ((ap->pflags & ATA_PFLAG_FROZEN) &&
+                           ata_is_host_link(link))
+                               ata_eh_thaw_port(ap);
                        rc = 0;
                        goto out;
                }
@@ -2901,7 +2904,7 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
        int i;
 
        for (i = 0; i < ATA_EH_UA_TRIES; i++) {
-               u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+               u8 *sense_buffer = dev->link->ap->sector_buf;
                u8 sense_key = 0;
                unsigned int err_mask;
 
index 55a8eed3f3a35cb9fa64b9e67800a40d72d5d062..f65b53785a8f93f304cfddc4261491ab7c37a090 100644 (file)
@@ -2523,7 +2523,7 @@ static void __exit nv_exit(void)
 module_init(nv_init);
 module_exit(nv_exit);
 module_param_named(adma, adma_enabled, bool, 0444);
-MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
 module_param_named(swncq, swncq_enabled, bool, 0444);
 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
 
index b5a06111463018b5d93f3d2b944caf9c60654281..4f9b6d7920173d19f85dc0cb822c3e33d87b1db9 100644 (file)
@@ -3606,11 +3606,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
                if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
                        return -ENODEV;
 
-               /* Some devices (notably the HP Smart Array 5i Controller)
-                  need a little pause here */
-               schedule_timeout_uninterruptible(30*HZ);
-
-               /* Now try to get the controller to respond to a no-op */
+               /* Now try to get the controller to respond to a no-op. Some
+                  devices (notably the HP Smart Array 5i Controller) need
+                  up to 30 seconds to respond. */
                for (i=0; i<30; i++) {
                        if (cciss_noop(pdev) == 0)
                                break;
index edbaac6c05739ab183e69f94c1dc230acea27176..bf034557767243ecbb0fd3c56538f7877e222345 100644 (file)
@@ -392,8 +392,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        struct loop_device *lo = p->lo;
        struct page *page = buf->page;
        sector_t IV;
-       size_t size;
-       int ret;
+       int size, ret;
 
        ret = buf->ops->confirm(pipe, buf);
        if (unlikely(ret))
index b6c8ce25435994558885e9e4b9c4ac6aafaa9357..8f905089b72b7e49c2434d1b6abba9fa76321051 100644 (file)
@@ -977,6 +977,8 @@ static void backend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosing:
+               if (info->gd == NULL)
+                       xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
                bd = bdget_disk(info->gd, 0);
                if (bd == NULL)
                        xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
index 2d637e0fbc038df28dbfbff2d342b89edf6db4a4..d9e751be8c5fb120e5f0d06ef27684c8f33fa549 100644 (file)
@@ -457,10 +457,12 @@ static int init_ixp_crypto(void)
        if (!ctx_pool) {
                goto err;
        }
-       ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
+       ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+                                "ixp_crypto:out", NULL);
        if (ret)
                goto err;
-       ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
+       ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+                                "ixp_crypto:in", NULL);
        if (ret) {
                qmgr_release_queue(SEND_QID);
                goto err;
index 856b3cc2558387b7b239923c37c54f9d47b250ff..3f0fdd18255db9febb61836d44b3d7382385140d 100644 (file)
@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
index a7fbadebf62330864734e5302be660d2f39ab51a..a2c8e8514b6340ac85915f158d7780491ce8b4a8 100644 (file)
@@ -304,7 +304,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("sha1");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
 MODULE_ALIAS("sha1-padlock");
 MODULE_ALIAS("sha256-padlock");
index 33bd7534751820841d19732a5d5e79b4dea9adbf..25b743abfb59442b52ccec0057573716249bdbbf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
index 732fa1ec36ab3ae885b784ca1ea5bb863cc3c58f..e190d8b30700c40858fceb83afa05f49d9ce8d2e 100644 (file)
@@ -430,13 +430,15 @@ late_initcall(dmatest_init);
 static void __exit dmatest_exit(void)
 {
        struct dmatest_chan *dtc, *_dtc;
+       struct dma_chan *chan;
 
        list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
                list_del(&dtc->node);
+               chan = dtc->chan;
                dmatest_cleanup_channel(dtc);
                pr_debug("dmatest: dropped channel %s\n",
-                        dma_chan_name(dtc->chan));
-               dma_release_channel(dtc->chan);
+                        dma_chan_name(chan));
+               dma_release_channel(chan);
        }
 }
 module_exit(dmatest_exit);
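The channel pointer is now taken from dtc before dmatest_cleanup_channel() releases the container, so dma_chan_name() and dma_release_channel() no longer touch freed memory. The same save-before-free idiom in a self-contained user-space sketch (toy types, not the dmaengine API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct chan { char name[16]; };
struct test_chan {                /* freed by cleanup, like dmatest_chan */
        struct chan *chan;
};

static void cleanup_channel(struct test_chan *tc)
{
        free(tc);                 /* after this, tc->chan must not be touched */
}

int main(void)
{
        struct chan *c = malloc(sizeof(*c));
        struct test_chan *tc = malloc(sizeof(*tc));

        if (!c || !tc)
                return 1;
        strcpy(c->name, "dma0chan0");
        tc->chan = c;

        /* Take the pointer we still need *before* the container is freed. */
        struct chan *chan = tc->chan;
        cleanup_channel(tc);
        printf("dropped channel %s\n", chan->name);
        free(c);
        return 0;
}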
index 70126a60623939dddc6af7a59aa7b1eac8a51637..86d6da47f558765736149344b2c595b13a289f3f 100644 (file)
@@ -158,7 +158,8 @@ static void dma_start(struct fsl_dma_chan *fsl_chan)
 
 static void dma_halt(struct fsl_dma_chan *fsl_chan)
 {
-       int i = 0;
+       int i;
+
        DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
                DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
                32);
@@ -166,8 +167,11 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
                DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
                | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
 
-       while (!dma_is_idle(fsl_chan) && (i++ < 100))
+       for (i = 0; i < 100; i++) {
+               if (dma_is_idle(fsl_chan))
+                       break;
                udelay(10);
+       }
        if (i >= 100 && !dma_is_idle(fsl_chan))
                dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
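dma_halt() now polls with an explicitly bounded for loop instead of a while loop with a side-effecting counter, which makes the 100-try / 10 us budget obvious. A generic user-space sketch of the idiom, with a hypothetical dma_is_idle() predicate in place of a real status-register read:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical hardware predicate; a real driver reads a status register. */
static int fake_idle_after = 5;
static int dma_is_idle(void)
{
        return fake_idle_after-- <= 0;
}

int main(void)
{
        int i;

        /* Bounded poll: at most 100 tries, 10 us apart, mirroring dma_halt(). */
        for (i = 0; i < 100; i++) {
                if (dma_is_idle())
                        break;
                usleep(10);
        }
        if (i >= 100)
                fprintf(stderr, "DMA halt timeout!\n");
        else
                printf("idle after %d polls\n", i + 1);
        return 0;
}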
index 4105d6575b645dc7674c35ea5bca0c60bde89d9f..ed83dd9df192a5a4b65d5f1f15b3e1e2349178b3 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 Intel Corporation.
+ * Copyright(c) 2007 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 6cf622da0286481fccb32a73285c72c83fb6a57b..c012a1e150431972649b10a502cf658f9a71320b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 Intel Corporation.
+ * Copyright(c) 2007 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 
 #define DCA_TAG_MAP_MASK 0xDF
 
+/* expected tag map bytes for I/OAT ver.2 */
+#define DCA2_TAG_MAP_BYTE0 0x80
+#define DCA2_TAG_MAP_BYTE1 0x0
+#define DCA2_TAG_MAP_BYTE2 0x81
+#define DCA2_TAG_MAP_BYTE3 0x82
+#define DCA2_TAG_MAP_BYTE4 0x82
+
+/* verify if tag map matches expected values */
+static inline int dca2_tag_map_valid(u8 *tag_map)
+{
+       return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
+               (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
+               (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
+               (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
+               (tag_map[4] == DCA2_TAG_MAP_BYTE4));
+}
+
 /*
  * "Legacy" DCA systems do not implement the DCA register set in the
  * I/OAT device.  Software needs direct support for their tag mappings.
@@ -452,6 +469,13 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
                        ioatdca->tag_map[i] = 0;
        }
 
+       if (!dca2_tag_map_valid(ioatdca->tag_map)) {
+               dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
+                       "disabling DCA\n");
+               free_dca_provider(dca);
+               return NULL;
+       }
+
        err = register_dca_provider(dca, &pdev->dev);
        if (err) {
                free_dca_provider(dca);
index b3759c4b65360c0c3b72664c0a675887474f041e..5905cd36bcd23b43b86dcc532a1a70cc5ce5ce58 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2007 Intel Corporation.
+ * Copyright(c) 2004 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -189,11 +189,13 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
                ioat_chan->xfercap = xfercap;
                ioat_chan->desccount = 0;
                INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-               if (ioat_chan->device->version != IOAT_VER_1_2) {
-                       writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
-                                       | IOAT_DMA_DCA_ANY_CPU,
-                               ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-               }
+               if (ioat_chan->device->version == IOAT_VER_2_0)
+                       writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
+                              IOAT_DMA_DCA_ANY_CPU,
+                              ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+               else if (ioat_chan->device->version == IOAT_VER_3_0)
+                       writel(IOAT_DMA_DCA_ANY_CPU,
+                              ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -1169,9 +1171,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
                                 * up if the client is done with the descriptor
                                 */
                                if (async_tx_test_ack(&desc->async_tx)) {
-                                       list_del(&desc->node);
-                                       list_add_tail(&desc->node,
-                                                     &ioat_chan->free_desc);
+                                       list_move_tail(&desc->node,
+                                                      &ioat_chan->free_desc);
                                } else
                                        desc->async_tx.cookie = 0;
                        } else {
@@ -1362,6 +1363,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        dma_cookie_t cookie;
        int err = 0;
        struct completion cmp;
+       unsigned long tmo;
 
        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
@@ -1413,9 +1415,10 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        }
        device->common.device_issue_pending(dma_chan);
 
-       wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+       tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+       if (tmo == 0 ||
+           device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
                                        != DMA_SUCCESS) {
                dev_err(&device->pdev->dev,
                        "Self-test copy timed out, disabling\n");
@@ -1657,6 +1660,13 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                " %d channels, device version 0x%02x, driver version %s\n",
                device->common.chancnt, device->version, IOAT_DMA_VERSION);
 
+       if (!device->common.chancnt) {
+               dev_err(&device->pdev->dev,
+                       "Intel(R) I/OAT DMA Engine problem found: "
+                       "zero channels detected\n");
+               goto err_setup_interrupts;
+       }
+
        err = ioat_dma_setup_interrupts(device);
        if (err)
                goto err_setup_interrupts;
@@ -1696,6 +1706,9 @@ void ioat_dma_remove(struct ioatdma_device *device)
        struct dma_chan *chan, *_chan;
        struct ioat_dma_chan *ioat_chan;
 
+       if (device->version != IOAT_VER_3_0)
+               cancel_delayed_work(&device->work);
+
        ioat_dma_remove_interrupts(device);
 
        dma_async_device_unregister(&device->common);
@@ -1707,10 +1720,6 @@ void ioat_dma_remove(struct ioatdma_device *device)
        pci_release_regions(device->pdev);
        pci_disable_device(device->pdev);
 
-       if (device->version != IOAT_VER_3_0) {
-               cancel_delayed_work(&device->work);
-       }
-
        list_for_each_entry_safe(chan, _chan,
                                 &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);
index a3306d0e1372a44b2950bc06d220d9b716f0bb00..a52ff4bd4601f78dbfe3fe472bb0b26c568e422d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -29,7 +29,7 @@
 #include <linux/pci_ids.h>
 #include <net/tcp.h>
 
-#define IOAT_DMA_VERSION  "3.30"
+#define IOAT_DMA_VERSION  "3.64"
 
 enum ioat_interrupt {
        none = 0,
@@ -135,12 +135,14 @@ static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
        #ifdef CONFIG_NET_DMA
        switch (dev->version) {
        case IOAT_VER_1_2:
-       case IOAT_VER_3_0:
                sysctl_tcp_dma_copybreak = 4096;
                break;
        case IOAT_VER_2_0:
                sysctl_tcp_dma_copybreak = 2048;
                break;
+       case IOAT_VER_3_0:
+               sysctl_tcp_dma_copybreak = 262144;
+               break;
        }
        #endif
 }
index f1ae2c776f7487b40e695395e3562679f420b29e..afa57eef86c946e685d13b8e2e7554ec66a7ff6b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
index 827cb503cac6979a0fa98e3fbd25a113e14fda27..49bc277424f8c319b3a46369fe13e9339dc6cce6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
index ea5440dd10dc1ffb56dda5393d51f388e02ca87b..16adbe61cfb2cd60efd1a01246be3f9c9f4eac43 100644 (file)
@@ -928,19 +928,19 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-               if (!xor_srcs[src_idx])
-                       while (src_idx--) {
+               if (!xor_srcs[src_idx]) {
+                       while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
-                               return -ENOMEM;
-                       }
+                       return -ENOMEM;
+               }
        }
 
        dest = alloc_page(GFP_KERNEL);
-       if (!dest)
-               while (src_idx--) {
+       if (!dest) {
+               while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
-                       return -ENOMEM;
-               }
+               return -ENOMEM;
+       }
 
        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
@@ -1401,7 +1401,7 @@ MODULE_ALIAS("platform:iop-adma");
 
 static struct platform_driver iop_adma_driver = {
        .probe          = iop_adma_probe,
-       .remove         = iop_adma_remove,
+       .remove         = __devexit_p(iop_adma_remove),
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "iop-adma",
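The old error paths in these self-tests had the return statement inside the unwind loop, so on allocation failure they freed at most one page, and when src_idx was 0 they did not return at all. The fix frees everything already allocated and only then returns. The corrected pattern, in user-space terms with malloc standing in for alloc_page:

#include <stdio.h>
#include <stdlib.h>

#define NUM_SRC 8

/* Allocate NUM_SRC buffers; on failure, free the ones already allocated,
 * then return -- the unwind loop and the return are now separate. */
static int alloc_sources(void *srcs[NUM_SRC], size_t len)
{
        int idx;

        for (idx = 0; idx < NUM_SRC; idx++) {
                srcs[idx] = malloc(len);
                if (!srcs[idx]) {
                        while (idx--)
                                free(srcs[idx]);
                        return -1;        /* -ENOMEM in the kernel code */
                }
        }
        return 0;
}

int main(void)
{
        void *srcs[NUM_SRC];

        if (alloc_sources(srcs, 4096) == 0) {
                printf("allocated %d source buffers\n", NUM_SRC);
                for (int i = 0; i < NUM_SRC; i++)
                        free(srcs[i]);
        }
        return 0;
}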
index 1f154d08e98f88cfbff23c192bf473dcdfa5cad0..ae50a9d1a4e683ae4665debfdb8a1294bbde05cc 100644 (file)
@@ -729,7 +729,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
 
        ichan->status = IPU_CHANNEL_READY;
 
-       spin_unlock_irqrestore(ipu->lock, flags);
+       spin_unlock_irqrestore(&ipu->lock, flags);
 
        return 0;
 }
index d35cbd1ff0b31e6635dd803eeae708cf75d68772..cb7f26fb9f188ce594081069bd61c6e99c957930 100644 (file)
@@ -1019,19 +1019,19 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 
        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-               if (!xor_srcs[src_idx])
-                       while (src_idx--) {
+               if (!xor_srcs[src_idx]) {
+                       while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
-                               return -ENOMEM;
-                       }
+                       return -ENOMEM;
+               }
        }
 
        dest = alloc_page(GFP_KERNEL);
-       if (!dest)
-               while (src_idx--) {
+       if (!dest) {
+               while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
-                       return -ENOMEM;
-               }
+               return -ENOMEM;
+       }
 
        /* Fill in src buffers */
        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
@@ -1287,7 +1287,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 
 static struct platform_driver mv_xor_driver = {
        .probe          = mv_xor_probe,
-       .remove         = mv_xor_remove,
+       .remove         = __devexit_p(mv_xor_remove),
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = MV_XOR_NAME,
index 096e2a37446d3e8fb333f45c342bc2a975607b34..7c8b15b22bf2e089fd9356debc9f967031723cbe 100644 (file)
@@ -168,7 +168,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
            file_priv->minor->master != file_priv->master) {
                mutex_lock(&dev->struct_mutex);
                file_priv->minor->master = drm_master_get(file_priv->master);
-               mutex_lock(&dev->struct_mutex);
+               mutex_unlock(&dev->struct_mutex);
        }
 
        return 0;
index eeda276f8f164021a9f59c12e1c7612ac60d94b4..7f186bbcb99d965bc69cdc8df5bd1cd83b8e89da 100644 (file)
@@ -482,7 +482,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
        return 0;
 }
 
-static void __devexit
+static void
 mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
 {
        if (drv_data->reg_base) {
@@ -577,7 +577,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
 
 static struct platform_driver mv64xxx_i2c_driver = {
        .probe  = mv64xxx_i2c_probe,
-       .remove = mv64xxx_i2c_remove,
+       .remove = __devexit_p(mv64xxx_i2c_remove),
        .driver = {
                .owner  = THIS_MODULE,
                .name   = MV64XXX_I2C_CTLR_NAME,
index e072903b12f016aab1aa0d72e0939907253ff033..5ea3bfad172a02a1c27100920fec28c5d60b1e4f 100644 (file)
@@ -721,6 +721,11 @@ config BLK_DEV_IDE_TX4939
        depends on SOC_TX4939
        select BLK_DEV_IDEDMA_SFF
 
+config BLK_DEV_IDE_AT91
+       tristate "Atmel AT91 (SAM9, CAP9, AT572D940HF) IDE support"
+       depends on ARM && ARCH_AT91 && !ARCH_AT91RM9200 && !ARCH_AT91X40
+       select IDE_TIMINGS
+
 config IDE_ARM
        tristate "ARM IDE support"
        depends on ARM && (ARCH_RPC || ARCH_SHARK)
index d0e3d7d5b4672bf9874a1832b7fff2aaf5239007..1c326d94aa6d932021476f041801ddf4f57e9e8c 100644 (file)
@@ -116,3 +116,4 @@ obj-$(CONFIG_BLK_DEV_IDE_AU1XXX)    += au1xxx-ide.o
 
 obj-$(CONFIG_BLK_DEV_IDE_TX4938)       += tx4938ide.o
 obj-$(CONFIG_BLK_DEV_IDE_TX4939)       += tx4939ide.o
+obj-$(CONFIG_BLK_DEV_IDE_AT91)         += at91_ide.o
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
new file mode 100644 (file)
index 0000000..1bb50f4
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+ * IDE host driver for AT91 (SAM9, CAP9, AT572D940HF) Static Memory Controller
+ * with Compact Flash True IDE logic
+ *
+ * Copyright (c) 2008, 2009 Kelvatek Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/ide.h>
+#include <linux/platform_device.h>
+
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/at91sam9263.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/at91sam9263_matrix.h>
+
+#define DRV_NAME "at91_ide"
+
+#define perr(fmt, args...) pr_err(DRV_NAME ": " fmt, ##args)
+#define pdbg(fmt, args...) pr_debug("%s " fmt, __func__, ##args)
+
+/*
+ * Access to the IDE device is possible through the EBI Static Memory Controller
+ * with Compact Flash logic. For details see the EBI and SMC sections in the
+ * datasheet of any microcontroller from the AT91SAM9 family.
+ *
+ * Within the SMC chip select address space, lines A[23:21] distinguish the
+ * Compact Flash modes (I/O, common memory, attribute memory, True IDE).
+ * The IDE modes are:
+ *   0x00c00000 - True IDE
+ *   0x00e00000 - Alternate True IDE (Alt Status Register)
+ *
+ * In True IDE mode the Task File and the Data Register are mapped at the same
+ * address. To distinguish between the two, a different bus data width is used:
+ * 8-bit for the Task File, 16-bit for Data I/O.
+ *
+ * After initialization we do the 8/16-bit flipping (changes in the SMC MODE
+ * register) only inside IDE callback routines, which are serialized by the
+ * IDE layer, so no additional locking is needed.
+ */
+
+#define TASK_FILE      0x00c00000
+#define ALT_MODE       0x00e00000
+#define REGS_SIZE      8
+
+#define enter_16bit(cs, mode) do {                                     \
+       mode = at91_sys_read(AT91_SMC_MODE(cs));                        \
+       at91_sys_write(AT91_SMC_MODE(cs), mode | AT91_SMC_DBW_16);      \
+} while (0)
+
+#define leave_16bit(cs, mode) at91_sys_write(AT91_SMC_MODE(cs), mode);
+
+static void set_smc_timings(const u8 chipselect, const u16 cycle,
+                           const u16 setup, const u16 pulse,
+                           const u16 data_float, int use_iordy)
+{
+       unsigned long mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
+                            AT91_SMC_BAT_SELECT;
+
+       /* disable or enable waiting for IORDY signal */
+       if (use_iordy)
+               mode |= AT91_SMC_EXNWMODE_READY;
+
+       /* add data float cycles if needed */
+       if (data_float)
+               mode |= AT91_SMC_TDF_(data_float);
+
+       at91_sys_write(AT91_SMC_MODE(chipselect), mode);
+
+       /* setup timings in SMC */
+       at91_sys_write(AT91_SMC_SETUP(chipselect), AT91_SMC_NWESETUP_(setup) |
+                                                  AT91_SMC_NCS_WRSETUP_(0) |
+                                                  AT91_SMC_NRDSETUP_(setup) |
+                                                  AT91_SMC_NCS_RDSETUP_(0));
+       at91_sys_write(AT91_SMC_PULSE(chipselect), AT91_SMC_NWEPULSE_(pulse) |
+                                                  AT91_SMC_NCS_WRPULSE_(cycle) |
+                                                  AT91_SMC_NRDPULSE_(pulse) |
+                                                  AT91_SMC_NCS_RDPULSE_(cycle));
+       at91_sys_write(AT91_SMC_CYCLE(chipselect), AT91_SMC_NWECYCLE_(cycle) |
+                                                  AT91_SMC_NRDCYCLE_(cycle));
+}
+
+static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
+{
+       u64 tmp = ns;
+
+       tmp *= mck_hz;
+       tmp += 1000*1000*1000 - 1; /* round up */
+       do_div(tmp, 1000*1000*1000);
+       return (unsigned int) tmp;
+}
+
+static void apply_timings(const u8 chipselect, const u8 pio,
+                         const struct ide_timing *timing, int use_iordy)
+{
+       unsigned int t0, t1, t2, t6z;
+       unsigned int cycle, setup, pulse, data_float;
+       unsigned int mck_hz;
+       struct clk *mck;
+
+       /* See table 22 of the Compact Flash standard 4.1 for the meaning;
+        * we do not stretch the active (t2) time, so setup (t1) + hold time (th)
+        * must assure at least the minimal recovery (t2i) time. */
+       t0 = timing->cyc8b;
+       t1 = timing->setup;
+       t2 = timing->act8b;
+       t6z = (pio < 5) ? 30 : 20;
+
+       pdbg("t0=%u t1=%u t2=%u t6z=%u\n", t0, t1, t2, t6z);
+
+       mck = clk_get(NULL, "mck");
+       BUG_ON(IS_ERR(mck));
+       mck_hz = clk_get_rate(mck);
+       pdbg("mck_hz=%u\n", mck_hz);
+
+       cycle = calc_mck_cycles(t0, mck_hz);
+       setup = calc_mck_cycles(t1, mck_hz);
+       pulse = calc_mck_cycles(t2, mck_hz);
+       data_float = calc_mck_cycles(t6z, mck_hz);
+
+       pdbg("cycle=%u setup=%u pulse=%u data_float=%u\n",
+            cycle, setup, pulse, data_float);
+
+       set_smc_timings(chipselect, cycle, setup, pulse, data_float, use_iordy);
+}
+
+static void at91_ide_input_data(ide_drive_t *drive, struct request *rq,
+                               void *buf, unsigned int len)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       u8 chipselect = hwif->select_data;
+       unsigned long mode;
+
+       pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
+
+       len++;
+
+       enter_16bit(chipselect, mode);
+       __ide_mm_insw((void __iomem *) io_ports->data_addr, buf, len / 2);
+       leave_16bit(chipselect, mode);
+}
+
+static void at91_ide_output_data(ide_drive_t *drive, struct request *rq,
+                                void *buf, unsigned int len)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       u8 chipselect = hwif->select_data;
+       unsigned long mode;
+
+       pdbg("cs %u buf %p len %d\n", chipselect,  buf, len);
+
+       enter_16bit(chipselect, mode);
+       __ide_mm_outsw((void __iomem *) io_ports->data_addr, buf, len / 2);
+       leave_16bit(chipselect, mode);
+}
+
+static u8 ide_mm_inb(unsigned long port)
+{
+       return readb((void __iomem *) port);
+}
+
+static void ide_mm_outb(u8 value, unsigned long port)
+{
+       writeb(value, (void __iomem *) port);
+}
+
+static void at91_ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+       u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+       if (task->tf_flags & IDE_TFLAG_FLAGGED)
+               HIHI = 0xFF;
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
+               u16 data = (tf->hob_data << 8) | tf->data;
+
+               at91_ide_output_data(drive, NULL, &data, 2);
+       }
+
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+               ide_mm_outb(tf->hob_feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+               ide_mm_outb(tf->hob_nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+               ide_mm_outb(tf->hob_lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+               ide_mm_outb(tf->hob_lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+               ide_mm_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+               ide_mm_outb(tf->feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+               ide_mm_outb(tf->nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+               ide_mm_outb(tf->lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+               ide_mm_outb(tf->lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+               ide_mm_outb(tf->lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+               ide_mm_outb((tf->device & HIHI) | drive->select, io_ports->device_addr);
+}
+
+static void at91_ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+
+       if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+               u16 data;
+
+               at91_ide_input_data(drive, NULL, &data, 2);
+               tf->data = data & 0xff;
+               tf->hob_data = (data >> 8) & 0xff;
+       }
+
+       /* be sure we're looking at the low order bits */
+       ide_mm_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+
+       if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+               tf->feature = ide_mm_inb(io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+               tf->nsect  = ide_mm_inb(io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+               tf->lbal   = ide_mm_inb(io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+               tf->lbam   = ide_mm_inb(io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+               tf->lbah   = ide_mm_inb(io_ports->lbah_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+               tf->device = ide_mm_inb(io_ports->device_addr);
+
+       if (task->tf_flags & IDE_TFLAG_LBA48) {
+               ide_mm_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
+
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+                       tf->hob_feature = ide_mm_inb(io_ports->feature_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+                       tf->hob_nsect   = ide_mm_inb(io_ports->nsect_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+                       tf->hob_lbal    = ide_mm_inb(io_ports->lbal_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+                       tf->hob_lbam    = ide_mm_inb(io_ports->lbam_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+                       tf->hob_lbah    = ide_mm_inb(io_ports->lbah_addr);
+       }
+}
+
+static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+{
+       struct ide_timing *timing;
+       u8 chipselect = drive->hwif->select_data;
+       int use_iordy = 0;
+
+       pdbg("chipselect %u pio %u\n", chipselect, pio);
+
+       timing = ide_timing_find_mode(XFER_PIO_0 + pio);
+       BUG_ON(!timing);
+
+       if ((pio > 2 || ata_id_has_iordy(drive->id)) &&
+           !(ata_id_is_cfa(drive->id) && pio > 4))
+               use_iordy = 1;
+
+       apply_timings(chipselect, pio, timing, use_iordy);
+}
+
+static const struct ide_tp_ops at91_ide_tp_ops = {
+       .exec_command   = ide_exec_command,
+       .read_status    = ide_read_status,
+       .read_altstatus = ide_read_altstatus,
+       .set_irq        = ide_set_irq,
+
+       .tf_load        = at91_ide_tf_load,
+       .tf_read        = at91_ide_tf_read,
+
+       .input_data     = at91_ide_input_data,
+       .output_data    = at91_ide_output_data,
+};
+
+static const struct ide_port_ops at91_ide_port_ops = {
+       .set_pio_mode   = at91_ide_set_pio_mode,
+};
+
+static const struct ide_port_info at91_ide_port_info __initdata = {
+       .port_ops       = &at91_ide_port_ops,
+       .tp_ops         = &at91_ide_tp_ops,
+       .host_flags     = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
+                         IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
+       .pio_mask       = ATA_PIO5,
+};
+
+/*
+ * If the interrupt is delivered through a GPIO, IRQs are triggered on both the
+ * falling and the rising edge of the signal, whereas the IDE device requests an
+ * interrupt on high level (the rising edge in our case). This means we get
+ * spurious interrupts, so we need to check the interrupt pin and return from
+ * the ISR immediately when the line is low.
+ */
+
+irqreturn_t at91_irq_handler(int irq, void *dev_id)
+{
+       int ntries = 8;
+       int pin_val1, pin_val2;
+
+       /* additional deglitch, line can be noisy in badly designed PCB */
+       do {
+               pin_val1 = at91_get_gpio_value(irq);
+               pin_val2 = at91_get_gpio_value(irq);
+       } while (pin_val1 != pin_val2 && --ntries > 0);
+
+       if (pin_val1 == 0 || ntries <= 0)
+               return IRQ_HANDLED;
+
+       return ide_intr(irq, dev_id);
+}
+
+static int __init at91_ide_probe(struct platform_device *pdev)
+{
+       int ret;
+       hw_regs_t hw;
+       hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
+       struct ide_host *host;
+       struct resource *res;
+       unsigned long tf_base = 0, ctl_base = 0;
+       struct at91_cf_data *board = pdev->dev.platform_data;
+
+       if (!board)
+               return -ENODEV;
+
+       if (board->det_pin && at91_get_gpio_value(board->det_pin) != 0) {
+               perr("no device detected\n");
+               return -ENODEV;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               perr("can't get memory resource\n");
+               return -ENODEV;
+       }
+
+       if (!devm_request_mem_region(&pdev->dev, res->start + TASK_FILE,
+                                    REGS_SIZE, "ide") ||
+           !devm_request_mem_region(&pdev->dev, res->start + ALT_MODE,
+                                    REGS_SIZE, "alt")) {
+               perr("memory resources in use\n");
+               return -EBUSY;
+       }
+
+       pdbg("chipselect %u irq %u res %08lx\n", board->chipselect,
+            board->irq_pin, (unsigned long) res->start);
+
+       tf_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + TASK_FILE,
+                                              REGS_SIZE);
+       ctl_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + ALT_MODE,
+                                               REGS_SIZE);
+       if (!tf_base || !ctl_base) {
+               perr("can't map memory regions\n");
+               return -EBUSY;
+       }
+
+       memset(&hw, 0, sizeof(hw));
+
+       if (board->flags & AT91_IDE_SWAP_A0_A2) {
+               /* workaround for stupid hardware bug */
+               hw.io_ports.data_addr   = tf_base + 0;
+               hw.io_ports.error_addr  = tf_base + 4;
+               hw.io_ports.nsect_addr  = tf_base + 2;
+               hw.io_ports.lbal_addr   = tf_base + 6;
+               hw.io_ports.lbam_addr   = tf_base + 1;
+               hw.io_ports.lbah_addr   = tf_base + 5;
+               hw.io_ports.device_addr = tf_base + 3;
+               hw.io_ports.command_addr = tf_base + 7;
+               hw.io_ports.ctl_addr    = ctl_base + 3;
+       } else
+               ide_std_init_ports(&hw, tf_base, ctl_base + 6);
+
+       hw.irq = board->irq_pin;
+       hw.chipset = ide_generic;
+       hw.dev = &pdev->dev;
+
+       host = ide_host_alloc(&at91_ide_port_info, hws);
+       if (!host) {
+               perr("failed to allocate ide host\n");
+               return -ENOMEM;
+       }
+
+       /* setup Static Memory Controller - PIO 0 as default */
+       apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
+
+       /* with GPIO interrupt we have to do quirks in handler */
+       if (board->irq_pin >= PIN_BASE)
+               host->irq_handler = at91_irq_handler;
+
+       host->ports[0]->select_data = board->chipselect;
+
+       ret = ide_host_register(host, &at91_ide_port_info, hws);
+       if (ret) {
+               perr("failed to register ide host\n");
+               goto err_free_host;
+       }
+       platform_set_drvdata(pdev, host);
+       return 0;
+
+err_free_host:
+       ide_host_free(host);
+       return ret;
+}
+
+static int __exit at91_ide_remove(struct platform_device *pdev)
+{
+       struct ide_host *host = platform_get_drvdata(pdev);
+
+       ide_host_remove(host);
+       return 0;
+}
+
+static struct platform_driver at91_ide_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+       },
+       .remove = __exit_p(at91_ide_remove),
+};
+
+static int __init at91_ide_init(void)
+{
+       return platform_driver_probe(&at91_ide_driver, at91_ide_probe);
+}
+
+static void __exit at91_ide_exit(void)
+{
+       platform_driver_unregister(&at91_ide_driver);
+}
+
+module_init(at91_ide_init);
+module_exit(at91_ide_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanislaw Gruszka <stf_xl@wp.pl>");
+
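Illustration only, not part of the patch above: the AT91_IDE_SWAP_A0_A2 table in at91_ide_probe() is just the standard task-file layout with offset bits 0 and 2 exchanged, i.e. the board wires address lines A0 and A2 swapped. A small user-space C sketch (assumed example, not driver code) reproduces the table:

#include <stdio.h>

/* exchange bits 0 and 2 of a task-file register offset */
static unsigned int swap_a0_a2(unsigned int off)
{
        unsigned int b0 = off & 1, b2 = (off >> 2) & 1;

        off &= ~5u;                     /* clear bits 0 and 2 */
        return off | (b0 << 2) | b2;
}

int main(void)
{
        static const char *name[8] = {
                "data", "error", "nsect", "lbal",
                "lbam", "lbah", "device", "command"
        };
        unsigned int i;

        for (i = 0; i < 8; i++)
                printf("%-8s std +%u -> swapped +%u\n", name[i], i, swap_a0_a2(i));
        printf("%-8s std +6 -> swapped +%u\n", "ctl", swap_a0_a2(6));
        return 0;
}

Running it yields error at +4, lbal at +6, lbam at +1, device at +3 and ctl at +3, matching the assignments in the hunk.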
index 1146f4204c6e4499ac93f75bfc409d14642c25a1..1f86dcbd2b1c83c00eb68ccb6f8ca62164445c63 100644 (file)
@@ -125,5 +125,5 @@ const struct ide_proc_devset ide_disk_settings[] = {
        IDE_PROC_DEVSET(multcount,      0,    16),
        IDE_PROC_DEVSET(nowerr,         0,     1),
        IDE_PROC_DEVSET(wcache,         0,     1),
-       { 0 },
+       { NULL },
 };
index 3ec762cb60abfcceae5e0f5ede250adbc2ec2f7c..fcd4d8153df567f06f00a5cf5b642dc25306af87 100644 (file)
@@ -29,5 +29,5 @@ const struct ide_proc_devset ide_floppy_settings[] = {
        IDE_PROC_DEVSET(bios_head, 0,  255),
        IDE_PROC_DEVSET(bios_sect, 0,   63),
        IDE_PROC_DEVSET(ticks,     0,  255),
-       { 0 },
+       { NULL },
 };
index 9ee51adf567faefd6ade2873a41444a1d44ca961..a9a6c208288a27173970b705de620fea3e076294 100644 (file)
@@ -908,7 +908,7 @@ void ide_timer_expiry (unsigned long data)
        ide_drive_t     *uninitialized_var(drive);
        ide_handler_t   *handler;
        unsigned long   flags;
-       unsigned long   wait = -1;
+       int             wait = -1;
        int             plug_device = 0;
 
        spin_lock_irqsave(&hwif->lock, flags);
@@ -1162,6 +1162,7 @@ out_early:
 
        return irq_ret;
 }
+EXPORT_SYMBOL_GPL(ide_intr);
 
 /**
  *     ide_do_drive_cmd        -       issue IDE special command
index 753b92ebe0ae5e72416d78cf3fe4baf0a94aabd6..b1892bd95c6fe5e7d270e3b92e2fd825b6847cb3 100644 (file)
@@ -315,6 +315,8 @@ void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
        u8 io_32bit = drive->io_32bit;
        u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
 
+       len++;
+
        if (io_32bit) {
                unsigned long uninitialized_var(flags);
 
index ce0818a993f68a42bf445bc79dea4eaa0ccc94da..ee8e3e7cad51e6b66ce1737d263633e947e6e29c 100644 (file)
@@ -950,6 +950,7 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
 static int init_irq (ide_hwif_t *hwif)
 {
        struct ide_io_ports *io_ports = &hwif->io_ports;
+       irq_handler_t irq_handler;
        int sa = 0;
 
        mutex_lock(&ide_cfg_mtx);
@@ -959,6 +960,10 @@ static int init_irq (ide_hwif_t *hwif)
        hwif->timer.function = &ide_timer_expiry;
        hwif->timer.data = (unsigned long)hwif;
 
+       irq_handler = hwif->host->irq_handler;
+       if (irq_handler == NULL)
+               irq_handler = ide_intr;
+
 #if defined(__mc68000__)
        sa = IRQF_SHARED;
 #endif /* __mc68000__ */
@@ -969,7 +974,7 @@ static int init_irq (ide_hwif_t *hwif)
        if (io_ports->ctl_addr)
                hwif->tp_ops->set_irq(hwif, 1);
 
-       if (request_irq(hwif->irq, &ide_intr, sa, hwif->name, hwif))
+       if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
                goto out_up;
 
        if (!hwif->rqsize) {
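Illustration only, not part of the patch: the init_irq() hunk above adds a plain override-with-fallback hook -- register hwif->host->irq_handler if the host driver set one, otherwise the generic ide_intr(). That is what lets at91_ide install its GPIO-debouncing wrapper and still chain to ide_intr() (now exported above). A minimal user-space mock of the pattern, with hypothetical names:

#include <stdio.h>

typedef void (*irq_handler_t)(void *cookie);

struct host {
        irq_handler_t irq_handler;      /* optional override, may be NULL */
};

static void default_intr(void *cookie)
{
        printf("generic handler for %s\n", (const char *)cookie);
}

static void board_intr(void *cookie)
{
        printf("board-specific quirks for %s\n", (const char *)cookie);
        default_intr(cookie);           /* chain to the generic handler */
}

/* stands in for init_irq(): pick the override if present, else the default */
static void register_irq(struct host *host, void *cookie)
{
        irq_handler_t handler = host->irq_handler;

        if (handler == NULL)
                handler = default_intr;
        handler(cookie);                /* pretend the IRQ just fired */
}

int main(void)
{
        struct host plain = { NULL };
        struct host at91  = { board_intr };

        register_irq(&plain, "ide0");
        register_irq(&at91, "ide1");
        return 0;
}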
index 1d8978b3314a93bfb6d5bf8e05a52dcd11a94757..a7b9287ee0d4309bc30a9fe5d97df93f6f74eb3a 100644 (file)
@@ -231,7 +231,7 @@ static const struct ide_proc_devset ide_generic_settings[] = {
        IDE_PROC_DEVSET(pio_mode, 0, 255),
        IDE_PROC_DEVSET(unmaskirq, 0, 1),
        IDE_PROC_DEVSET(using_dma, 0, 1),
-       { 0 },
+       { NULL },
 };
 
 static void proc_ide_settings_warn(void)
index bb450a7608c2fe9a8ef08f19b03d86ba980f3fde..4e6181c7bbdaf613e18a6fdd461cdfdef60afa83 100644 (file)
@@ -2166,7 +2166,7 @@ static const struct ide_proc_devset idetape_settings[] = {
        __IDE_PROC_DEVSET(speed,        0, 0xffff, NULL, NULL),
        __IDE_PROC_DEVSET(tdsc,         IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX,
                                        mulf_tdsc, divf_tdsc),
-       { 0 },
+       { NULL },
 };
 #endif
 
index 9c50e6f1c23649d75ba0a2522f5e52b04ff4740e..34ce2703d29a4522dbf52ca1b681571a14e0c42a 100644 (file)
@@ -248,12 +248,15 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 
        sg_init_one(&sg, data_buf, len);
 
-       /*
-        * The spec states that CSR and CID accesses have a timeout
-        * of 64 clock cycles.
-        */
-       data.timeout_ns = 0;
-       data.timeout_clks = 64;
+       if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
+               /*
+                * The spec states that CSR and CID accesses have a timeout
+                * of 64 clock cycles.
+                */
+               data.timeout_ns = 0;
+               data.timeout_clks = 64;
+       } else
+               mmc_set_data_timeout(&data, card);
 
        mmc_wait_for_req(host, &mrq);
 
index 917cf8d3ae9561c5e4e1559758b46efb89d466d2..c2dfd3ea353d61315a8ed4bfc8c056201c30f67e 100644 (file)
@@ -149,7 +149,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
 
 static struct platform_driver orion_nand_driver = {
        .probe          = orion_nand_probe,
-       .remove         = orion_nand_remove,
+       .remove         = __devexit_p(orion_nand_remove),
        .driver         = {
                .name   = "orion_nand",
                .owner  = THIS_MODULE,
index c69c0cdba4a26b1cc254f1ac112cce46171c5f04..811a3ccd14c107c56136af8884022f7cd9f4498f 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_ARM_AM79C961A)    += am79c961a.o
-obj-$(CONFIG_ARM_ETHERH)       += etherh.o ../8390.o
+obj-$(CONFIG_ARM_ETHERH)       += etherh.o
 obj-$(CONFIG_ARM_ETHER3)       += ether3.o
 obj-$(CONFIG_ARM_ETHER1)       += ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER)   += at91_ether.o
index 54b52e5b1821bbcd196b3f2f70cea19c51a5ec37..f52f668c49bfae5fcf526d299f9368661dd0d32c 100644 (file)
@@ -641,15 +641,15 @@ static const struct net_device_ops etherh_netdev_ops = {
        .ndo_open               = etherh_open,
        .ndo_stop               = etherh_close,
        .ndo_set_config         = etherh_set_config,
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
index 48ff701d3a72b9cd1d5a6cb7b345a89e9678e50d..2552b9f325ee9b4c33eea202329e23e47b544c3e 100644 (file)
@@ -2230,7 +2230,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
 
 static struct platform_driver pxafb_driver = {
        .probe          = pxafb_probe,
-       .remove         = pxafb_remove,
+       .remove         = __devexit_p(pxafb_remove),
        .suspend        = pxafb_suspend,
        .resume         = pxafb_resume,
        .driver         = {
index f0c2b7a1a175b0d23bbaec7878f8da9e5d8b2768..734d9806a872bc300ad756ab7978192025a58f37 100644 (file)
@@ -269,7 +269,7 @@ static int __devinit gef_wdt_probe(struct of_device *dev,
        bus_clk = 133; /* in MHz */
 
        freq = fsl_get_sys_freq();
-       if (freq > 0)
+       if (freq != -1)
                bus_clk = freq;
 
        /* Map devices registers into memory */
index 0b798fdaa378eccd53e39c5ab1fa538e19388b03..74c92d38411285b1c64c8e87febead6658b9da5e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/watchdog.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <mach/timex.h>
 #include <mach/regs-timer.h>
 
 #define WDT_DEFAULT_TIME       5       /* seconds */
index 14a339f58b6a7627c3a4f82665a1fe70ecd8c907..b64ae1a17832c0e6dd53b045af77d805db005b86 100644 (file)
@@ -29,6 +29,7 @@
 #define  WDT_EN                        0x0010
 #define WDT_VAL                        (TIMER_VIRT_BASE + 0x0024)
 
+#define ORION5X_TCLK           166666667
 #define WDT_MAX_DURATION       (0xffffffff / ORION5X_TCLK)
 #define WDT_IN_USE             0
 #define WDT_OK_TO_CLOSE                1
index 57027f4653ce7c56b5baf783b9c90816f50e084f..f3553fa40b17f912d223220c163343c2e4895597 100644 (file)
 #include <asm/time.h>
 #include <asm/mach-rc32434/integ.h>
 
-#define MAX_TIMEOUT                    20
-#define RC32434_WDT_INTERVAL           (15 * HZ)
-
-#define VERSION "0.2"
+#define VERSION "0.4"
 
 static struct {
-       struct completion stop;
-       int running;
-       struct timer_list timer;
-       int queue;
-       int default_ticks;
        unsigned long inuse;
 } rc32434_wdt_device;
 
 static struct integ __iomem *wdt_reg;
-static int ticks = 100 * HZ;
 
 static int expect_close;
-static int timeout;
+
+/* Internal board clock speed in Hz
+ * that the watchdog timer ticks at. */
+extern unsigned int idt_cpu_freq;
+
+/* translate wtcompare value to seconds and vice versa */
+#define WTCOMP2SEC(x)  (x / idt_cpu_freq)
+#define SEC2WTCOMP(x)  (x * idt_cpu_freq)
+
+/* Use a default timeout of 20s. This should be
+ * safe for CPU clock speeds up to 400MHz, as
+ * ((2 ^ 32) - 1) / (400MHz / 2) = 21s.  */
+#define WATCHDOG_TIMEOUT 20
+
+static int timeout = WATCHDOG_TIMEOUT;
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
        __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+/* apply or and nand masks to data read from addr and write back */
+#define SET_BITS(addr, or, nand) \
+       writel((readl(&addr) | or) & ~nand, &addr)
 
 static void rc32434_wdt_start(void)
 {
-       u32 val;
-
-       if (!rc32434_wdt_device.inuse) {
-               writel(0, &wdt_reg->wtcount);
+       u32 or, nand;
 
-               val = RC32434_ERR_WRE;
-               writel(readl(&wdt_reg->errcs) | val, &wdt_reg->errcs);
+       /* zero the counter before enabling */
+       writel(0, &wdt_reg->wtcount);
 
-               val = RC32434_WTC_EN;
-               writel(readl(&wdt_reg->wtc) | val, &wdt_reg->wtc);
-       }
-       rc32434_wdt_device.running++;
-}
+       /* don't generate a non-maskable interrupt,
+        * do a warm reset instead */
+       nand = 1 << RC32434_ERR_WNE;
+       or = 1 << RC32434_ERR_WRE;
 
-static void rc32434_wdt_stop(void)
-{
-       u32 val;
+       /* reset the ERRCS timeout bit in case it's set */
+       nand |= 1 << RC32434_ERR_WTO;
 
-       if (rc32434_wdt_device.running) {
+       SET_BITS(wdt_reg->errcs, or, nand);
 
-               val = ~RC32434_WTC_EN;
-               writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
+       /* reset WTC timeout bit and enable WDT */
+       nand = 1 << RC32434_WTC_TO;
+       or = 1 << RC32434_WTC_EN;
 
-               val = ~RC32434_ERR_WRE;
-               writel(readl(&wdt_reg->errcs) & val, &wdt_reg->errcs);
+       SET_BITS(wdt_reg->wtc, or, nand);
+}
 
-               rc32434_wdt_device.running = 0;
-       }
+static void rc32434_wdt_stop(void)
+{
+       /* Disable WDT */
+       SET_BITS(wdt_reg->wtc, 0, 1 << RC32434_WTC_EN);
 }
 
-static void rc32434_wdt_set(int new_timeout)
+static int rc32434_wdt_set(int new_timeout)
 {
-       u32 cmp = new_timeout * HZ;
-       u32 state, val;
+       int max_to = WTCOMP2SEC((u32)-1);
 
+       if (new_timeout < 0 || new_timeout > max_to) {
+               printk(KERN_ERR KBUILD_MODNAME
+                       ": timeout value must be between 0 and %d",
+                       max_to);
+               return -EINVAL;
+       }
        timeout = new_timeout;
-       /*
-        * store and disable WTC
-        */
-       state = (u32)(readl(&wdt_reg->wtc) & RC32434_WTC_EN);
-       val = ~RC32434_WTC_EN;
-       writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
-
-       writel(0, &wdt_reg->wtcount);
-       writel(cmp, &wdt_reg->wtcompare);
-
-       /*
-        * restore WTC
-        */
-
-       writel(readl(&wdt_reg->wtc) | state, &wdt_reg);
-}
+       writel(SEC2WTCOMP(timeout), &wdt_reg->wtcompare);
 
-static void rc32434_wdt_reset(void)
-{
-       ticks = rc32434_wdt_device.default_ticks;
+       return 0;
 }
 
-static void rc32434_wdt_update(unsigned long unused)
+static void rc32434_wdt_ping(void)
 {
-       if (rc32434_wdt_device.running)
-               ticks--;
-
        writel(0, &wdt_reg->wtcount);
-
-       if (rc32434_wdt_device.queue && ticks)
-               mod_timer(&rc32434_wdt_device.timer,
-                       jiffies + RC32434_WDT_INTERVAL);
-       else
-               complete(&rc32434_wdt_device.stop);
 }
 
 static int rc32434_wdt_open(struct inode *inode, struct file *file)
@@ -142,19 +127,23 @@ static int rc32434_wdt_open(struct inode *inode, struct file *file)
        if (nowayout)
                __module_get(THIS_MODULE);
 
+       rc32434_wdt_start();
+       rc32434_wdt_ping();
+
        return nonseekable_open(inode, file);
 }
 
 static int rc32434_wdt_release(struct inode *inode, struct file *file)
 {
-       if (expect_close && nowayout == 0) {
+       if (expect_close == 42) {
                rc32434_wdt_stop();
                printk(KERN_INFO KBUILD_MODNAME ": disabling watchdog timer\n");
                module_put(THIS_MODULE);
-       } else
+       } else {
                printk(KERN_CRIT KBUILD_MODNAME
                        ": device closed unexpectedly. WDT will not stop !\n");
-
+               rc32434_wdt_ping();
+       }
        clear_bit(0, &rc32434_wdt_device.inuse);
        return 0;
 }
@@ -174,10 +163,10 @@ static ssize_t rc32434_wdt_write(struct file *file, const char *data,
                                if (get_user(c, data + i))
                                        return -EFAULT;
                                if (c == 'V')
-                                       expect_close = 1;
+                                       expect_close = 42;
                        }
                }
-               rc32434_wdt_update(0);
+               rc32434_wdt_ping();
                return len;
        }
        return 0;
@@ -197,11 +186,11 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
        };
        switch (cmd) {
        case WDIOC_KEEPALIVE:
-               rc32434_wdt_reset();
+               rc32434_wdt_ping();
                break;
        case WDIOC_GETSTATUS:
        case WDIOC_GETBOOTSTATUS:
-               value = readl(&wdt_reg->wtcount);
+               value = 0;
                if (copy_to_user(argp, &value, sizeof(int)))
                        return -EFAULT;
                break;
@@ -218,6 +207,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
                        break;
                case WDIOS_DISABLECARD:
                        rc32434_wdt_stop();
+                       break;
                default:
                        return -EINVAL;
                }
@@ -225,11 +215,9 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
        case WDIOC_SETTIMEOUT:
                if (copy_from_user(&new_timeout, argp, sizeof(int)))
                        return -EFAULT;
-               if (new_timeout < 1)
+               if (rc32434_wdt_set(new_timeout))
                        return -EINVAL;
-               if (new_timeout > MAX_TIMEOUT)
-                       return -EINVAL;
-               rc32434_wdt_set(new_timeout);
+               /* Fall through */
        case WDIOC_GETTIMEOUT:
                return copy_to_user(argp, &timeout, sizeof(int));
        default:
@@ -254,15 +242,15 @@ static struct miscdevice rc32434_wdt_miscdev = {
        .fops   = &rc32434_wdt_fops,
 };
 
-static char banner[] = KERN_INFO KBUILD_MODNAME
+static char banner[] __devinitdata = KERN_INFO KBUILD_MODNAME
                ": Watchdog Timer version " VERSION ", timer margin: %d sec\n";
 
-static int rc32434_wdt_probe(struct platform_device *pdev)
+static int __devinit rc32434_wdt_probe(struct platform_device *pdev)
 {
        int ret;
        struct resource *r;
 
-       r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb500_wdt_res");
+       r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb532_wdt_res");
        if (!r) {
                printk(KERN_ERR KBUILD_MODNAME
                        "failed to retrieve resources\n");
@@ -277,24 +265,12 @@ static int rc32434_wdt_probe(struct platform_device *pdev)
        }
 
        ret = misc_register(&rc32434_wdt_miscdev);
-
        if (ret < 0) {
                printk(KERN_ERR KBUILD_MODNAME
                        "failed to register watchdog device\n");
                goto unmap;
        }
 
-       init_completion(&rc32434_wdt_device.stop);
-       rc32434_wdt_device.queue = 0;
-
-       clear_bit(0, &rc32434_wdt_device.inuse);
-
-       setup_timer(&rc32434_wdt_device.timer, rc32434_wdt_update, 0L);
-
-       rc32434_wdt_device.default_ticks = ticks;
-
-       rc32434_wdt_start();
-
        printk(banner, timeout);
 
        return 0;
@@ -304,23 +280,17 @@ unmap:
        return ret;
 }
 
-static int rc32434_wdt_remove(struct platform_device *pdev)
+static int __devexit rc32434_wdt_remove(struct platform_device *pdev)
 {
-       if (rc32434_wdt_device.queue) {
-               rc32434_wdt_device.queue = 0;
-               wait_for_completion(&rc32434_wdt_device.stop);
-       }
        misc_deregister(&rc32434_wdt_miscdev);
-
        iounmap(wdt_reg);
-
        return 0;
 }
 
 static struct platform_driver rc32434_wdt = {
        .probe  = rc32434_wdt_probe,
-       .remove = rc32434_wdt_remove,
-       .driver = {
+       .remove = __devexit_p(rc32434_wdt_remove),
+       .driver = {
                .name = "rc32434_wdt",
        }
 };
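Illustration only, not part of the patch: the rewritten rc32434_wdt driver converts between seconds and wtcompare ticks with WTCOMP2SEC()/SEC2WTCOMP() and pokes registers through the SET_BITS() read-modify-write macro. The sketch below checks the arithmetic from the comment above; the 200 MHz value (a 400 MHz CPU clock divided by 2) is an assumed example, not something the patch hard-codes:

#include <stdio.h>

static unsigned int idt_cpu_freq = 200000000;   /* assumed watchdog clock, Hz */

#define WTCOMP2SEC(x)   ((x) / idt_cpu_freq)
#define SEC2WTCOMP(x)   ((x) * idt_cpu_freq)

/* same read-modify-write idea as the driver's SET_BITS(), on a variable */
#define SET_BITS(reg, or, nand) ((reg) = ((reg) | (or)) & ~(nand))

int main(void)
{
        unsigned int wtc = 0;

        printf("max timeout at %u Hz: %u s\n",
               idt_cpu_freq, WTCOMP2SEC(0xffffffffu));
        printf("20 s -> wtcompare = %u ticks\n", SEC2WTCOMP(20u));

        SET_BITS(wtc, 1u << 7, 1u << 5);        /* set bit 7, clear bit 5 */
        printf("wtc after SET_BITS: 0x%08x\n", wtc);
        return 0;
}

The first line prints 21 s, which is why the default timeout of 20 s in the comment is safe for CPU clocks up to 400 MHz.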
index f18a919be70becf286c74364345a464e4d6e75e6..627f8c3337a3a24314624683ceac71ed30c72e95 100644 (file)
@@ -188,7 +188,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
        struct ext4_group_desc *gdp;
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi;
-       int fatal = 0, err, count;
+       int fatal = 0, err, count, cleared;
        ext4_group_t flex_group;
 
        if (atomic_read(&inode->i_count) > 1) {
@@ -248,8 +248,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                goto error_return;
 
        /* Ok, now we can actually update the inode bitmaps.. */
-       if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-                                       bit, bitmap_bh->b_data))
+       spin_lock(sb_bgl_lock(sbi, block_group));
+       cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+       spin_unlock(sb_bgl_lock(sbi, block_group));
+       if (!cleared)
                ext4_error(sb, "ext4_free_inode",
                           "bit already cleared for inode %lu", ino);
        else {
index c837dfc2b3c613799a228744e5de396ca206b1ee..321728f48f2d0c38204a312fbc67685428ee318e 100644 (file)
@@ -80,7 +80,7 @@ static struct buffer_head *get_block_length(struct super_block *sb,
  * generated a larger block - this does occasionally happen with zlib).
  */
 int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
-                       int length, u64 *next_index, int srclength)
+                       int length, u64 *next_index, int srclength, int pages)
 {
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        struct buffer_head **bh;
@@ -185,6 +185,14 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
                        }
 
                        if (msblk->stream.avail_out == 0) {
+                               if (page == pages) {
+                                       ERROR("zlib_inflate tried to "
+                                               "decompress too much data, "
+                                               "expected %d bytes.  Zlib "
+                                               "data probably corrupt\n",
+                                               srclength);
+                                       goto release_mutex;
+                               }
                                msblk->stream.next_out = buffer[page++];
                                msblk->stream.avail_out = PAGE_CACHE_SIZE;
                        }
@@ -268,7 +276,8 @@ block_release:
                put_bh(bh[k]);
 
 read_failure:
-       ERROR("sb_bread failed reading block 0x%llx\n", cur_index);
+       ERROR("squashfs_read_data failed to read block 0x%llx\n",
+                                       (unsigned long long) index);
        kfree(bh);
        return -EIO;
 }
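Illustration only, not part of the patch: the added "page == pages" check stops zlib_inflate() from being handed more output pages than the caller allocated, so a corrupt block produces an error instead of an overrun. A minimal user-space mock of the same guard:

#include <stdio.h>

#define PAGE_SZ 8

/* fill an array of fixed-size output pages, refusing to touch more than
 * 'pages' of them -- the analogue of the new page == pages check above */
static int bounded_fill(char **buffer, int pages, const char *src, int srclen)
{
        int page = 0, avail = 0, out = 0;
        char *dst = NULL;

        while (out < srclen) {
                if (avail == 0) {
                        if (page == pages) {
                                fprintf(stderr, "expected at most %d pages, data corrupt?\n",
                                        pages);
                                return -1;
                        }
                        dst = buffer[page++];
                        avail = PAGE_SZ;
                }
                *dst++ = src[out++];
                avail--;
        }
        return out;
}

int main(void)
{
        char p0[PAGE_SZ], p1[PAGE_SZ];
        char *buffer[] = { p0, p1 };

        printf("fits:     %d\n", bounded_fill(buffer, 2, "0123456789ABCDEF", 16));
        printf("rejected: %d\n", bounded_fill(buffer, 2, "0123456789ABCDEF!", 17));
        return 0;
}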
index f29eda16d25ebd6c9bee029b872ac97e5a31fc29..1c4739e33af638bf819278c755e2d656ec64288d 100644 (file)
@@ -119,7 +119,7 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
 
                        entry->length = squashfs_read_data(sb, entry->data,
                                block, length, &entry->next_index,
-                               cache->block_size);
+                               cache->block_size, cache->pages);
 
                        spin_lock(&cache->lock);
 
@@ -406,7 +406,7 @@ int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
        for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
                data[i] = buffer;
        res = squashfs_read_data(sb, data, block, length |
-               SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length);
+               SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
        kfree(data);
        return res;
 }
index 7a63398bb855ab6982810ae366b577139e6abe92..9101dbde39ece6a9ca5d8b47d3344c1f3bdc6879 100644 (file)
@@ -133,7 +133,8 @@ int squashfs_read_inode(struct inode *inode, long long ino)
        type = le16_to_cpu(sqshb_ino->inode_type);
        switch (type) {
        case SQUASHFS_REG_TYPE: {
-               unsigned int frag_offset, frag_size, frag;
+               unsigned int frag_offset, frag;
+               int frag_size;
                u64 frag_blk;
                struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
 
@@ -175,7 +176,8 @@ int squashfs_read_inode(struct inode *inode, long long ino)
                break;
        }
        case SQUASHFS_LREG_TYPE: {
-               unsigned int frag_offset, frag_size, frag;
+               unsigned int frag_offset, frag;
+               int frag_size;
                u64 frag_blk;
                struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
 
index 6b2515d027d5b0545b0ac490baf1df9a1acd138c..0e9feb6adf7e120ccad9efd9815717fd44dbc83c 100644 (file)
@@ -34,7 +34,7 @@ static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
 
 /* block.c */
 extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
-                               int);
+                               int, int);
 
 /* cache.c */
 extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
index 071df5b5b49184a28d354f217b78844002e4053b..681ec0d83799cc22595f44aadad42ca5d63e0538 100644 (file)
@@ -389,7 +389,7 @@ static int __init init_squashfs_fs(void)
                return err;
        }
 
-       printk(KERN_INFO "squashfs: version 4.0 (2009/01/03) "
+       printk(KERN_INFO "squashfs: version 4.0 (2009/01/31) "
                "Phillip Lougher\n");
 
        return 0;
index 08a86d5cdf1b7ea2c6c995f1073aaa59092d9a8c..9a061accd8b8de95f7dcb282d2488f0613418c9e 100644 (file)
@@ -89,6 +89,8 @@ enum {
        ATA_ID_DLF              = 128,
        ATA_ID_CSFO             = 129,
        ATA_ID_CFA_POWER        = 160,
+       ATA_ID_CFA_KEY_MGMT     = 162,
+       ATA_ID_CFA_MODES        = 163,
        ATA_ID_ROT_SPEED        = 217,
        ATA_ID_PIO4             = (1 << 1),
 
index f0413845f20ee75481543cb97c570d6dbda142b4..1956c8d46d326ea98c2286a6f3f47e8e428518a1 100644 (file)
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
  * @bytes_transferred: byte counter
  */
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_terminate_all: terminate all pending operations
+ * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
index c37e9241fae785ce24930473b59f3029c6e99ff0..ed21bd3dbd2552a9322a45c943e078d0e6a4e2fc 100644 (file)
@@ -511,7 +511,6 @@ struct hd_driveid {
        unsigned short  words69_70[2];  /* reserved words 69-70
                                         * future command overlap and queuing
                                         */
-       /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
        unsigned short  words71_74[4];  /* reserved words 71-74
                                         * for IDENTIFY PACKET DEVICE command
                                         */
index fe235b65207ee74d43e74818d94bdaf6759816d5..e0cedfe9fad46b63d13fad9ce0a006d5e75fecd3 100644 (file)
@@ -866,6 +866,7 @@ struct ide_host {
        unsigned int    n_ports;
        struct device   *dev[2];
        unsigned int    (*init_chipset)(struct pci_dev *);
+       irq_handler_t   irq_handler;
        unsigned long   host_flags;
        void            *host_priv;
        ide_hwif_t      *cur_port;      /* for hosts requiring serialization */
index 5d87bc09a1f5bcef65f97a4c06599c8e9f7295b1..dc18b87ed72244e90e898f43885f5bdca3c806b2 100644 (file)
@@ -275,7 +275,7 @@ enum {
         * advised to wait only for the following duration before
         * doing SRST.
         */
-       ATA_TMOUT_PMP_SRST_WAIT = 1000,
+       ATA_TMOUT_PMP_SRST_WAIT = 5000,
 
        /* ATA bus states */
        BUS_UNKNOWN             = 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
        unsigned long           flags;          /* ATA_QCFLAG_xxx */
        unsigned int            tag;
        unsigned int            n_elem;
+       unsigned int            orig_n_elem;
 
        int                     dma_dir;
 
@@ -750,7 +751,8 @@ struct ata_port {
        acpi_handle             acpi_handle;
        struct ata_acpi_gtm     __acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
-       u8                      sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+       /* owned by EH */
+       u8                      sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };
 
 /* The following initializer overrides a method to NULL whether one of
index f3f697df1d71b439b677efbc0da952ae6d25d066..80044a4f3ab9e1c890021fc0ff9dca533c00f71e 100644 (file)
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()       do { } while (0)
 #define rcu_exit_nohz()                do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+       return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
index 921340a7b71cf4c88638e2f905aa0fdc1385f958..528343e6da51a7c19cda8797c55cd6640612cb58 100644 (file)
@@ -52,6 +52,9 @@ struct rcu_head {
        void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
index 3e05c09b54a22408db83e0f0a87a5a8bf9a40e8f..74304b4538d833ec7bcd26be28bdec8f436d73f7 100644 (file)
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()                do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+       return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
index d4368b7975c3d09d5b1c08d2a822adebb06e62ac..a722fb67bb2d5a2500852c4fd5593f0e91777af0 100644 (file)
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+       return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
index f0a50b20e8a03c2f98dec1cd859aa6e06853f7e2..a7c7698583bb15bae4de5b0da9b8aeda61019c5f 100644 (file)
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
                                      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+                                       struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
index 1bcb357a01a1d00781b97fcad67eaa45dd82fba7..e0417e4d3f15ad3fdd66ef845c7ef8854f72aaae 100644 (file)
@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
 #define SERIO_FUJITSU  0x35
 #define SERIO_ZHENHUA  0x36
 #define SERIO_INEXIO   0x37
-#define SERIO_TOUCHIT213       0x37
+#define SERIO_TOUCHIT213       0x38
 #define SERIO_W8001    0x39
 
 #endif
index 6441083f827355108980280d7c7dd08f1ec16f5e..6bf83afd654da44b5b52383a4697d4d98e0ff6fa 100644 (file)
@@ -98,7 +98,7 @@ static inline void mark_rodata_ro(void) { }
 extern void tc_init(void);
 #endif
 
-enum system_states system_state;
+enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
 /*
@@ -464,6 +464,7 @@ static noinline void __init_refok rest_init(void)
         * at least once to get things moving:
         */
        init_idle_bootup_task(current);
+       rcu_scheduler_starting();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
index bd5a9003497c200d7ef7a44ec252b2d96b2ac562..654c640a6b9c137b6a44c1e57159fbdeeac48b61 100644 (file)
@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
 void rcu_check_callbacks(int cpu, int user)
 {
        if (user ||
-           (idle_cpu(cpu) && !in_softirq() &&
-                               hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+           (idle_cpu(cpu) && rcu_scheduler_active &&
+            !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
                /*
                 * Get here if this CPU took its interrupt from user
index d92a76a881aa47a9aa059c3221910834f6c50cf8..cae8a059cf47f4fab142b001c5f4d25ec8e95006 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/kernel_stat.h>
 
 enum rcu_barrier {
        RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head  *head)
 void synchronize_rcu(void)
 {
        struct rcu_synchronize rcu;
+
+       if (rcu_blocking_is_gp())
+               return;
+
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
        __rcu_init();
 }
 
+void rcu_scheduler_starting(void)
+{
+       WARN_ON(num_online_cpus() != 1);
+       WARN_ON(nr_context_switches() > 0);
+       rcu_scheduler_active = 1;
+}
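Illustration only, not part of the patch: during early boot there is a single CPU and the scheduler has not run, so a blocking grace period is trivially complete. rcu_blocking_is_gp() encodes that, synchronize_rcu() returns immediately on that fast path, and rcu_scheduler_starting() ends it once the scheduler is live. A user-space mock of the control flow, with the single-CPU condition stubbed out:

#include <stdio.h>

static int rcu_scheduler_active;                /* set once the scheduler runs */

static int num_online_cpus(void)
{
        return 1;                               /* pretend we are in early boot */
}

/* the rcupreempt flavour of the check added by the patch */
static int rcu_blocking_is_gp(void)
{
        return num_online_cpus() == 1 && !rcu_scheduler_active;
}

static void synchronize_rcu(void)
{
        if (rcu_blocking_is_gp())
                return;                         /* no reader can be in a critical section */
        printf("would wait for a real grace period\n");
}

int main(void)
{
        synchronize_rcu();                      /* returns immediately: one CPU, pre-scheduler */

        rcu_scheduler_active = 1;               /* what rcu_scheduler_starting() does */
        synchronize_rcu();                      /* now takes the slow path */
        return 0;
}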
index 33cfc50781f9968d06e79edcb93049441a2a4231..5d59e850fb71f6f58b854cdbd0a1f1792f59c8ad 100644 (file)
@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
 {
        struct rcu_synchronize rcu;
 
+       if (num_online_cpus() == 1)
+               return;  /* blocking is gp if only one CPU! */
+
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
index b2fd602a6f6f0433553a36a79e3a24172a8a2a2f..97ce31579ec0664682e1b2efb792cd77e7c0030e 100644 (file)
@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_check_callbacks(int cpu, int user)
 {
        if (user ||
-           (idle_cpu(cpu) && !in_softirq() &&
-                               hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+           (idle_cpu(cpu) && rcu_scheduler_active &&
+            !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
                /*
                 * Get here if this CPU took its interrupt from user
index 0e5c38e1c8b5cdad3e2ab022fa8db8f79b88316c..0a76d0b6f2151e587a1ce04f8c1eba56da95d773 100644 (file)
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
        ktime_t now;
 
-       if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+       if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;
 
        if (hrtimer_active(&rt_b->rt_period_timer))
@@ -9219,6 +9219,16 @@ static int sched_rt_global_constraints(void)
 
        return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+       /* Don't accept realtime tasks when there is no way for them to run */
+       if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+               return 0;
+
+       return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9312,8 +9322,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-       /* Don't accept realtime tasks when there is no way for them to run */
-       if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+       if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
                return -EINVAL;
 #else
        /* We don't support RT-tasks being in separate groups */
index 0365b4899a3d37e17c9f7fe4042e7ed3b4d0617b..57d3f67f6f38af7fdfb0fad66ff1cdbef790951e 100644 (file)
@@ -626,6 +626,7 @@ static int ksoftirqd(void * __bind_cpu)
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
+                       rcu_qsctr_inc((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
index f145c415bc160e62b6d888cf6fa9d3198c00b4a7..37f458e6882adbd1f2b0697e5077c92e4252e03f 100644 (file)
@@ -559,7 +559,7 @@ error:
        abort_creds(new);
        return retval;
 }
-  
+
 /*
  * change the user struct in a credentials set to match the new UID
  */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
        if (!new_user)
                return -EAGAIN;
 
+       if (!task_can_switch_user(new_user, current)) {
+               free_uid(new_user);
+               return -EINVAL;
+       }
+
        if (atomic_read(&new_user->processes) >=
                                current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
                        new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
                        goto error;
        }
 
-       retval = -EAGAIN;
-       if (new->uid != old->uid && set_user(new) < 0)
-               goto error;
-
+       if (new->uid != old->uid) {
+               retval = set_user(new);
+               if (retval < 0)
+                       goto error;
+       }
        if (ruid != (uid_t) -1 ||
            (euid != (uid_t) -1 && euid != old->uid))
                new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
        retval = -EPERM;
        if (capable(CAP_SETUID)) {
                new->suid = new->uid = uid;
-               if (uid != old->uid && set_user(new) < 0) {
-                       retval = -EAGAIN;
-                       goto error;
+               if (uid != old->uid) {
+                       retval = set_user(new);
+                       if (retval < 0)
+                               goto error;
                }
        } else if (uid != old->uid && uid != new->suid) {
                goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
                        goto error;
        }
 
-       retval = -EAGAIN;
        if (ruid != (uid_t) -1) {
                new->uid = ruid;
-               if (ruid != old->uid && set_user(new) < 0)
-                       goto error;
+               if (ruid != old->uid) {
+                       retval = set_user(new);
+                       if (retval < 0)
+                               goto error;
+               }
        }
        if (euid != (uid_t) -1)
                new->euid = euid;
index 3551ac742395c40af9edeb7d94d0d0b936caba9b..6a9b696128c855f9ddacfc9e3a7a128141379e49 100644 (file)
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
 
 #endif
 
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+       return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+       return 1;
+}
+#endif
+
 /*
  * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
  * caller must undo that ref with free_uid().
index 0278bc08304440df4af600670312e7bd3bce764c..e7ded1326b0ff835cec48f4e448b0c69f8df1d04 100644 (file)
@@ -1498,58 +1498,31 @@ static int smack_socket_post_create(struct socket *sock, int family,
  * looks for host based access restrictions
  *
  * This version will only be appropriate for really small
- * sets of single label hosts. Because of the masking
- * it cannot shortcut out on the first match. There are
- * numerious ways to address the problem, but none of them
- * have been applied here.
+ * sets of single label hosts.
  *
  * Returns the label of the far end or NULL if it's not special.
  */
 static char *smack_host_label(struct sockaddr_in *sip)
 {
        struct smk_netlbladdr *snp;
-       char *bestlabel = NULL;
        struct in_addr *siap = &sip->sin_addr;
-       struct in_addr *liap;
-       struct in_addr *miap;
-       struct in_addr bestmask;
 
        if (siap->s_addr == 0)
                return NULL;
 
-       bestmask.s_addr = 0;
-
        for (snp = smack_netlbladdrs; snp != NULL; snp = snp->smk_next) {
-               liap = &snp->smk_host.sin_addr;
-               miap = &snp->smk_mask;
-               /*
-                * If the addresses match after applying the list entry mask
-                * the entry matches the address. If it doesn't move along to
-                * the next entry.
-                */
-               if ((liap->s_addr & miap->s_addr) !=
-                   (siap->s_addr & miap->s_addr))
-                       continue;
                /*
-                * If the list entry mask identifies a single address
-                * it can't get any more specific.
+                * we break after finding the first match because
+                * the list is sorted from longest to shortest mask
+                * so we have found the most specific match
                 */
-               if (miap->s_addr == 0xffffffff)
+               if ((&snp->smk_host.sin_addr)->s_addr  ==
+                       (siap->s_addr & (&snp->smk_mask)->s_addr)) {
                        return snp->smk_label;
-               /*
-                * If the list entry mask is less specific than the best
-                * already found this entry is uninteresting.
-                */
-               if ((miap->s_addr | bestmask.s_addr) == bestmask.s_addr)
-                       continue;
-               /*
-                * This is better than any entry found so far.
-                */
-               bestmask.s_addr = miap->s_addr;
-               bestlabel = snp->smk_label;
+               }
        }
 
-       return bestlabel;
+       return NULL;
 }
 
 /**
index 8e42800878f468912c56cece9336e7c8c6bfa727..51f0efc50dab46a9200f98b6d22db0039a9a0653 100644 (file)
@@ -650,10 +650,6 @@ static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
 
        return skp;
 }
-/*
-#define BEMASK 0x80000000
-*/
-#define BEMASK 0x00000001
 #define BEBITS (sizeof(__be32) * 8)
 
 /*
@@ -663,12 +659,10 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
 {
        struct smk_netlbladdr *skp = (struct smk_netlbladdr *) v;
        unsigned char *hp = (char *) &skp->smk_host.sin_addr.s_addr;
-       __be32 bebits;
-       int maskn = 0;
+       int maskn;
+       u32 temp_mask = be32_to_cpu(skp->smk_mask.s_addr);
 
-       for (bebits = BEMASK; bebits != 0; maskn++, bebits <<= 1)
-               if ((skp->smk_mask.s_addr & bebits) == 0)
-                       break;
+       for (maskn = 0; temp_mask; temp_mask <<= 1, maskn++);
 
        seq_printf(s, "%u.%u.%u.%u/%d %s\n",
                hp[0], hp[1], hp[2], hp[3], maskn, skp->smk_label);
@@ -701,6 +695,42 @@ static int smk_open_netlbladdr(struct inode *inode, struct file *file)
        return seq_open(file, &netlbladdr_seq_ops);
 }
 
+/**
+ * smk_netlbladdr_insert
+ * @new : netlabel to insert
+ *
+ * This helper inserts a netlabel address entry into the smack_netlbladdrs
+ * list, sorted by netmask length (longest to shortest)
+ */
+static void smk_netlbladdr_insert(struct smk_netlbladdr *new)
+{
+       struct smk_netlbladdr *m;
+
+       if (smack_netlbladdrs == NULL) {
+               smack_netlbladdrs = new;
+               return;
+       }
+
+       /* the comparison '>' is a bit hacky, but works */
+       if (new->smk_mask.s_addr > smack_netlbladdrs->smk_mask.s_addr) {
+               new->smk_next = smack_netlbladdrs;
+               smack_netlbladdrs = new;
+               return;
+       }
+       for (m = smack_netlbladdrs; m != NULL; m = m->smk_next) {
+               if (m->smk_next == NULL) {
+                       m->smk_next = new;
+                       return;
+               }
+               if (new->smk_mask.s_addr > m->smk_next->smk_mask.s_addr) {
+                       new->smk_next = m->smk_next;
+                       m->smk_next = new;
+                       return;
+               }
+       }
+}
+
+
 /**
  * smk_write_netlbladdr - write() for /smack/netlabel
  * @filp: file pointer, not actually used
@@ -724,8 +754,9 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
        struct netlbl_audit audit_info;
        struct in_addr mask;
        unsigned int m;
-       __be32 bebits = BEMASK;
+       u32 mask_bits = (1<<31);
        __be32 nsa;
+       u32 temp_mask;
 
        /*
         * Must have privilege.
@@ -761,10 +792,13 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
        if (sp == NULL)
                return -EINVAL;
 
-       for (mask.s_addr = 0; m > 0; m--) {
-               mask.s_addr |= bebits;
-               bebits <<= 1;
+       for (temp_mask = 0; m > 0; m--) {
+               temp_mask |= mask_bits;
+               mask_bits >>= 1;
        }
+       mask.s_addr = cpu_to_be32(temp_mask);
+
+       newname.sin_addr.s_addr &= mask.s_addr;
        /*
         * Only allow one writer at a time. Writes should be
         * quite rare and small in any case.
@@ -772,6 +806,7 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
        mutex_lock(&smk_netlbladdr_lock);
 
        nsa = newname.sin_addr.s_addr;
+       /* try to find if the prefix is already in the list */
        for (skp = smack_netlbladdrs; skp != NULL; skp = skp->smk_next)
                if (skp->smk_host.sin_addr.s_addr == nsa &&
                    skp->smk_mask.s_addr == mask.s_addr)
@@ -787,9 +822,8 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
                        rc = 0;
                        skp->smk_host.sin_addr.s_addr = newname.sin_addr.s_addr;
                        skp->smk_mask.s_addr = mask.s_addr;
-                       skp->smk_next = smack_netlbladdrs;
                        skp->smk_label = sp;
-                       smack_netlbladdrs = skp;
+                       smk_netlbladdr_insert(skp);
                }
        } else {
                rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
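Illustration only, not part of the patch: smk_netlbladdr_insert() keeps /smack/netlabel entries ordered from longest to shortest netmask, so the rewritten smack_host_label() can stop at the first match, which is necessarily the most specific one. A user-space mock with host-order addresses (the kernel compares big-endian s_addr values directly, hence the "a bit hacky" comment above):

#include <stdio.h>
#include <stdint.h>

struct entry {
        uint32_t addr, mask;
        const char *label;
        struct entry *next;
};

static struct entry *list;

/* keep the list ordered from longest (most specific) mask to shortest */
static void insert_sorted(struct entry *new)
{
        struct entry **pp = &list;

        while (*pp && (*pp)->mask >= new->mask)
                pp = &(*pp)->next;
        new->next = *pp;
        *pp = new;
}

/* first hit is the most specific match, so the scan can stop there */
static const char *host_label(uint32_t ip)
{
        struct entry *e;

        for (e = list; e; e = e->next)
                if (e->addr == (ip & e->mask))
                        return e->label;
        return "unlabeled";             /* the kernel returns NULL here */
}

int main(void)
{
        static struct entry wide = { 0x0a000000, 0xff000000, "net10" }; /* 10.0.0.0/8  */
        static struct entry lab  = { 0x0a010200, 0xffffff00, "lab" };   /* 10.1.2.0/24 */

        insert_sorted(&wide);
        insert_sorted(&lab);
        printf("10.1.2.3 -> %s\n", host_label(0x0a010203));     /* lab   */
        printf("10.9.9.9 -> %s\n", host_label(0x0a090909));     /* net10 */
        return 0;
}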
index 3bc427645da8ef1411400990b67c9fc181e4cece..6094344fb223312bb668ad92b1cab3bcc626c068 100644 (file)
@@ -1207,7 +1207,7 @@ static const char *slave_vols[] = {
        "LFE Playback Volume",
        "Side Playback Volume",
        "Headphone Playback Volume",
-       "Headphone Playback Volume",
+       "Headphone2 Playback Volume",
        "Speaker Playback Volume",
        "External Speaker Playback Volume",
        "Speaker2 Playback Volume",
@@ -1221,7 +1221,7 @@ static const char *slave_sws[] = {
        "LFE Playback Switch",
        "Side Playback Switch",
        "Headphone Playback Switch",
-       "Headphone Playback Switch",
+       "Headphone2 Playback Switch",
        "Speaker Playback Switch",
        "External Speaker Playback Switch",
        "Speaker2 Playback Switch",
@@ -3516,6 +3516,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
        if (! spec->autocfg.line_outs)
                return 0; /* can't find valid pin config */
 
+#if 0 /* FIXME: temporarily disabled */
        /* If we have no real line-out pin and multiple hp-outs, HPs should
         * be set up as multi-channel outputs.
         */
@@ -3535,6 +3536,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
                spec->autocfg.line_out_type = AUTO_PIN_HP_OUT;
                spec->autocfg.hp_outs = 0;
        }
+#endif /* FIXME: temporarily disabled */
        if (spec->autocfg.mono_out_pin) {
                int dir = get_wcaps(codec, spec->autocfg.mono_out_pin) &
                        (AC_WCAP_OUT_AMP | AC_WCAP_IN_AMP);