Merge branch 'dma-debug' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux...
author		Ingo Molnar <mingo@elte.hu>
		Fri, 3 Apr 2009 14:35:09 +0000 (16:35 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 3 Apr 2009 14:35:09 +0000 (16:35 +0200)
arch/x86/boot/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/signal.c
arch/x86/mm/highmem_32.c
arch/x86/mm/iomap_32.c

diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index fb737ce5888dfbd7213c171bf07ea43f88e66f6f..6633b6e7505a68cc32ebc198f9ec6cd46cd0d9c3 100644
@@ -57,6 +57,7 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
 # How to compile the 16-bit code.  Note we always compile for -march=i386,
 # that way we can complain to the user if the CPU is insufficient.
 KBUILD_CFLAGS  := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+                  -DDISABLE_BRANCH_PROFILING \
                   -Wall -Wstrict-prototypes \
                   -march=i386 -mregparm=3 \
                   -include $(srctree)/$(src)/code16gcc.h \
@@ -66,7 +67,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
                        $(call cc-option, -fno-unit-at-a-time)) \
                   $(call cc-option, -fno-stack-protector) \
                   $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS +=   $(call cc-option,-m32)
+KBUILD_CFLAGS  += $(call cc-option, -m32)
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3ca4c194b8e588217dabef4dc5da54bcb2489edd..65551c9f85718baf762d203329b6be42082a2b9c 100644
@@ -8,6 +8,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma h
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_64) := -mcmodel=small
 KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 400331b50a53f0838c407a8b688bcb8d6f686087..3a97a4cf187245462f3890f08313544762642f28 100644
@@ -153,7 +153,6 @@ static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
 static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
        static struct vm_struct vm;
-       pg_data_t *last;
        size_t ptrs_size, dyn_size;
        unsigned int cpu;
        ssize_t ret;
@@ -162,22 +161,9 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, on non-NUMA, embedding is better.
         */
-       if (!cpu_has_pse || pcpu_need_numa())
+       if (!cpu_has_pse || !pcpu_need_numa())
                return -EINVAL;
 
-       last = NULL;
-       for_each_possible_cpu(cpu) {
-               int node = early_cpu_to_node(cpu);
-
-               if (node_online(node) && NODE_DATA(node) &&
-                   last && last != NODE_DATA(node))
-                       goto proceed;
-
-               last = NODE_DATA(node);
-       }
-       return -EINVAL;
-
-proceed:
        /*
         * Currently supports only single page.  Supporting multiple
         * pages won't be too difficult if it ever becomes necessary.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index dfcc74ab0ab64f922c12717ada6a248408593a75..14425166b8e3f4838073ce6052aef507aa62510a 100644
@@ -221,7 +221,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
        if (!onsigstack) {
                /* This is the X/Open sanctioned signal stack switching.  */
                if (ka->sa.sa_flags & SA_ONSTACK) {
-                       if (sas_ss_flags(sp) == 0)
+                       if (current->sas_ss_size)
                                sp = current->sas_ss_sp + current->sas_ss_size;
                } else {
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5bc5d1688c1c771730ba6fa1ecdea1bafe959767..8126e8d1a2a4a789509cb49af563b6cbb76395ae 100644
@@ -40,7 +40,6 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
        debug_kmap_atomic(type);
 
-       debug_kmap_atomic(type);
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index bff0c9032f8c6f7045518d9fdd2bd417a3be7634..e331f77348a787608c0c6cfe82bd022d81eb42aa 100644
@@ -39,6 +39,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 
        pagefault_disable();
 
+       debug_kmap_atomic(type);
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,7 +73,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-       debug_kmap_atomic(type);
        /*
         * Force other mappings to Oops if they'll try to access this pte
         * without first remap it.  Keeping stale mappings around is a bad idea