Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
author     Linus Torvalds <torvalds@g5.osdl.org>
           Sun, 5 Feb 2006 19:10:54 +0000 (11:10 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Sun, 5 Feb 2006 19:10:54 +0000 (11:10 -0800)
67 files changed:
CREDITS
MAINTAINERS
arch/i386/Kconfig
arch/i386/kernel/cpu/amd.c
arch/i386/kernel/cpu/centaur.c
arch/i386/kernel/cpu/common.c
arch/i386/kernel/cpu/cyrix.c
arch/i386/kernel/cpu/intel_cacheinfo.c
arch/i386/kernel/cpu/nexgen.c
arch/i386/kernel/cpu/rise.c
arch/i386/kernel/cpu/transmeta.c
arch/i386/kernel/cpu/umc.c
arch/i386/kernel/nmi.c
arch/i386/kernel/process.c
arch/i386/kernel/traps.c
arch/x86_64/kernel/smpboot.c
block/elevator.c
block/ll_rw_blk.c
drivers/block/Kconfig
drivers/block/pktcdvd.c
drivers/message/i2o/i2o_scsi.c
drivers/scsi/scsi.c
fs/file.c
fs/fuse/dev.c
fs/jbd/transaction.c
fs/namei.c
include/asm-i386/system.h
include/asm-x86_64/numa.h
include/asm-x86_64/system.h
include/linux/jbd.h
include/linux/netfilter_ipv4/ipt_connbytes.h
include/linux/netfilter_ipv4/ipt_policy.h
include/linux/netfilter_ipv6/ip6t_policy.h
include/linux/pktcdvd.h
include/linux/reiserfs_acl.h
include/net/netfilter/nf_conntrack_l3proto.h
kernel/intermodule.c
kernel/sched.c
mm/hugetlb.c
mm/page_alloc.c
mm/slab.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/utils.c
net/ipv4/icmp.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_conntrack_netlink.c
net/ipv4/netfilter/ip_conntrack_tftp.c
net/ipv4/netfilter/ip_nat_standalone.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/ipt_policy.c
net/ipv4/proc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_policy.c
net/ipv6/proc.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/socket.c
scripts/kconfig/Makefile
security/selinux/Kconfig
security/selinux/Makefile
security/selinux/hooks.c

diff --git a/CREDITS b/CREDITS
index 8e577ce4abeb4f44ffcb83a894c66130aebb6a58..6957ef4efab3a2fd60732456e5c515bee68bc57e 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3101,7 +3101,7 @@ S: Minto, NSW, 2566
 S: Australia
 
 N: Stephen Smalley
-E: sds@epoch.ncsc.mil
+E: sds@tycho.nsa.gov
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Chris Smith
diff --git a/MAINTAINERS b/MAINTAINERS
index b6cbac5dbfd5ddf972857d85946428751ebe9065..11d44daa60259112c104b9109ca7c960ae6fff4d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2298,7 +2298,7 @@ S:        Supported
 
 SELINUX SECURITY MODULE
 P:     Stephen Smalley
-M:     sds@epoch.ncsc.mil
+M:     sds@tycho.nsa.gov
 P:     James Morris
 M:     jmorris@namei.org
 L:     linux-kernel@vger.kernel.org (kernel issues)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d86c865a7cd22b1d1892e03d3473b45767539677..0afec8566e7bd750743910580c06093481e29daa 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -442,6 +442,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
        bool "64GB"
+       depends on X86_CMPXCHG64
        help
          Select this if you have a 32-bit processor and more than 4
          gigabytes of physical RAM.
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a4e91afcf768a5dc35e10188a76c06591f..0810f81f2a05d154cf0bc8bcd4885129eef831d8 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
 }
 
 //early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_AMD] = NULL;
+       return 0;
+}
+
+late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 0dd92a23d62206e9d0fe0e59976c39872dee8ada..f52669ecb93fd549f40878971f0c16c860a43ed2 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -470,3 +470,11 @@ int __init centaur_init_cpu(void)
 }
 
 //early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+       return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26ec2b6308be3d9e9f7e0e47f120be11fe1..7eb9213734a321614ea6da6837ccd1bbbdf1dd85 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
 
 static struct cpu_dev default_cpu = {
        .c_init = default_init,
+       .c_vendor = "Unknown",
 };
 static struct cpu_dev * this_cpu = &default_cpu;
 
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
        char *v = c->x86_vendor_id;
        int i;
+       static int printed;
 
        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
                                c->x86_vendor = i;
                                if (!early)
                                        this_cpu = cpu_devs[i];
-                               break;
+                               return;
                        }
                }
        }
+       if (!printed) {
+               printed++;
+               printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+               printk(KERN_ERR "CPU: Your system may be unstable.\n");
+       }
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       this_cpu = &default_cpu;
 }
 
 
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 75015975d0386a47cd48439d4eefa66cb2bcb75c..00f2e058797cffd8019b08ecfb1fdbdc3215e7a1 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
 /*
  * Handle National Semiconductor branded processors
  */
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
 {
        /* There may be GX1 processors in the wild that are branded
         * NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
 
 //early_arch_initcall(cyrix_init_cpu);
 
+static int __init cyrix_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_CYRIX] = NULL;
+       return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
 static struct cpu_dev nsc_cpu_dev __initdata = {
        .c_vendor       = "NSC",
        .c_ident        = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
 }
 
 //early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_NSC] = NULL;
+       return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index af591c73345fa4120e6716b37e262b35e12754af..ffe58cee0c488241b3a03a838064eba3df182395 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -152,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
        return 0;
 }
 
+/* will only be called once; __init is safe here */
 static int __init find_num_cache_leaves(void)
 {
        unsigned int            eax, ebx, ecx, edx;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a260a5cd8c86c2b2b339d43bf0f0f2bfd81..ad87fa58058d574c7eaa431b2497f82ff60156a8 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
 }
 
 //early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+       return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425628ca7b0f5066fb16ad2e0713421e7829..d08d5a2811c83cb85d05c697293985b86c86de42 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
 }
 
 //early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_RISE] = NULL;
+       return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc426380366bcbbd131e7e2d7f508f2f2ca1349c..bdbeb77f4e22fa9e7f6636cf03c03e8bb59c4dd6 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
 {
        u32 xlvl;
        generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
 }
 
 //early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+       return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad559d5ccecba710dbaaee5376913964b52..2cd988f6dc556c4bd5f415f93de77d12c1b5e8cf 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
 }
 
 //early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+       cpu_devs[X86_VENDOR_UMC] = NULL;
+       return 0;
+}
+
+late_initcall(umc_exit_cpu);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1cb713db7327de628e4f7f5b09efcb1..63f39a7e2c96b0e3db1ce48ab73696e95db60a0c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); // wait 10 ticks
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2185377fdde118424aaa36b3b3d8c36498373161..0480454ebffa6808d09735ecb3bf85232c1400ac 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,8 +297,10 @@ void show_regs(struct pt_regs * regs)
 
        if (user_mode(regs))
                printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-       printk(" EFLAGS: %08lx    %s  (%s)\n",
-              regs->eflags, print_tainted(), system_utsname.release);
+       printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
+              regs->eflags, print_tainted(), system_utsname.release,
+              (int)strcspn(system_utsname.version, " "),
+              system_utsname.version);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0aaebf3e1cfa321a6b20f6ade450063f3554e0f7..b814dbdcc91e58b06c140ff1a099f223c2212768 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -166,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task,
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
-               printk(KERN_EMERG " =======================\n");
+               printk(log_lvl);
+               printk(" =======================\n");
        }
 }
 
@@ -239,9 +240,11 @@ void show_registers(struct pt_regs *regs)
        }
        print_modules();
        printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
-                       "EFLAGS: %08lx   (%s) \n",
+                       "EFLAGS: %08lx   (%s %.*s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-               print_tainted(), regs->eflags, system_utsname.release);
+               print_tainted(), regs->eflags, system_utsname.release,
+               (int)strcspn(system_utsname.version, " "),
+               system_utsname.version);
        print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
        printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 67e4e28f4df8d79d9b97d76b7e2ec0fbf794f5e1..a28756ef7cef972dcfd8fff90b1f8a7a19ff5e4a 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -59,7 +59,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
-#include <asm/numa.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -891,7 +890,6 @@ do_rest:
        if (boot_error) {
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-               clear_node_cpumask(cpu); /* was set by numa_add_cpu */
                cpu_clear(cpu, cpu_present_map);
                cpu_clear(cpu, cpu_possible_map);
                x86_cpu_to_apicid[cpu] = BAD_APICID;
@@ -1189,7 +1187,6 @@ void remove_cpu_from_maps(void)
        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-       clear_node_cpumask(cpu);
 }
 
 int __cpu_disable(void)
diff --git a/block/elevator.c b/block/elevator.c
index 96a61e029ce5e7858d2e024c08eff817b5a998d6..2fc269f69726d67e7a8ab7bcf6a58e2a10af009b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -323,7 +323,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                /*
                 * toggle ordered color
                 */
-               q->ordcolor ^= 1;
+               if (blk_barrier_rq(rq))
+                       q->ordcolor ^= 1;
 
                /*
                 * barriers implicitly indicate back insertion
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07efd2da995d933fda955895175ec1e5012d..ee5ed98db4cd15ff2ab7f5ae5b3a893466691abe 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-       struct request *rq = *rqp, *allowed_rq;
+       struct request *rq = *rqp;
        int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
                }
        }
 
+       /*
+        * Ordered sequence in progress
+        */
+
+       /* Special requests are not subject to ordering rules. */
+       if (!blk_fs_request(rq) &&
+           rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+               return 1;
+
        if (q->ordered & QUEUE_ORDERED_TAG) {
+               /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
-               return 1;
-       }
-
-       switch (blk_ordered_cur_seq(q)) {
-       case QUEUE_ORDSEQ_PREFLUSH:
-               allowed_rq = &q->pre_flush_rq;
-               break;
-       case QUEUE_ORDSEQ_BAR:
-               allowed_rq = &q->bar_rq;
-               break;
-       case QUEUE_ORDSEQ_POSTFLUSH:
-               allowed_rq = &q->post_flush_rq;
-               break;
-       default:
-               allowed_rq = NULL;
-               break;
+       } else {
+               /* Ordered by draining.  Wait for turn. */
+               WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+               if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+                       *rqp = NULL;
        }
 
-       if (rq != allowed_rq &&
-           (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-            rq == &q->post_flush_rq))
-               *rqp = NULL;
-
        return 1;
 }
 
@@ -3453,7 +3447,7 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 139cbba76180bcba6076c40bc30fce7c7d1deb41..8b133167740768c6ac96071a356a7c8fd1046ac6 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -433,12 +433,12 @@ config CDROM_PKTCDVD_BUFFERS
          This controls the maximum number of active concurrent packets. More
          concurrent packets can increase write performance, but also require
          more memory. Each concurrent packet will require approximately 64Kb
-         of non-swappable kernel memory, memory which will be allocated at
-         pktsetup time.
+         of non-swappable kernel memory, memory which will be allocated when
+         a disc is opened for writing.
 
 config CDROM_PKTCDVD_WCACHE
-       bool "Enable write caching"
-       depends on CDROM_PKTCDVD
+       bool "Enable write caching (EXPERIMENTAL)"
+       depends on CDROM_PKTCDVD && EXPERIMENTAL
        help
          If enabled, write caching will be set for the CD-R/W device. For now
          this option is dangerous unless the CD-RW media is known good, as we
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 93affeeef7bd3675009f1edc885684824bad633f..4e7dbcc425ff69d7f1f3c851e47d7ed5c60351b6 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -43,8 +43,6 @@
  *
  *************************************************************************/
 
-#define VERSION_CODE   "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
-
 #include <linux/pktcdvd.h>
 #include <linux/config.h>
 #include <linux/module.h>
@@ -131,7 +129,7 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
 /*
  * Allocate a packet_data struct
  */
-static struct packet_data *pkt_alloc_packet_data(void)
+static struct packet_data *pkt_alloc_packet_data(int frames)
 {
        int i;
        struct packet_data *pkt;
@@ -140,11 +138,12 @@ static struct packet_data *pkt_alloc_packet_data(void)
        if (!pkt)
                goto no_pkt;
 
-       pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
+       pkt->frames = frames;
+       pkt->w_bio = pkt_bio_alloc(frames);
        if (!pkt->w_bio)
                goto no_bio;
 
-       for (i = 0; i < PAGES_PER_PACKET; i++) {
+       for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
                pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
                if (!pkt->pages[i])
                        goto no_page;
@@ -152,7 +151,7 @@ static struct packet_data *pkt_alloc_packet_data(void)
 
        spin_lock_init(&pkt->lock);
 
-       for (i = 0; i < PACKET_MAX_SIZE; i++) {
+       for (i = 0; i < frames; i++) {
                struct bio *bio = pkt_bio_alloc(1);
                if (!bio)
                        goto no_rd_bio;
@@ -162,14 +161,14 @@ static struct packet_data *pkt_alloc_packet_data(void)
        return pkt;
 
 no_rd_bio:
-       for (i = 0; i < PACKET_MAX_SIZE; i++) {
+       for (i = 0; i < frames; i++) {
                struct bio *bio = pkt->r_bios[i];
                if (bio)
                        bio_put(bio);
        }
 
 no_page:
-       for (i = 0; i < PAGES_PER_PACKET; i++)
+       for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
                if (pkt->pages[i])
                        __free_page(pkt->pages[i]);
        bio_put(pkt->w_bio);
@@ -186,12 +185,12 @@ static void pkt_free_packet_data(struct packet_data *pkt)
 {
        int i;
 
-       for (i = 0; i < PACKET_MAX_SIZE; i++) {
+       for (i = 0; i < pkt->frames; i++) {
                struct bio *bio = pkt->r_bios[i];
                if (bio)
                        bio_put(bio);
        }
-       for (i = 0; i < PAGES_PER_PACKET; i++)
+       for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
                __free_page(pkt->pages[i]);
        bio_put(pkt->w_bio);
        kfree(pkt);
@@ -206,17 +205,17 @@ static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
        list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
                pkt_free_packet_data(pkt);
        }
+       INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
 }
 
 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
 {
        struct packet_data *pkt;
 
-       INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
-       INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
-       spin_lock_init(&pd->cdrw.active_list_lock);
+       BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
        while (nr_packets > 0) {
-               pkt = pkt_alloc_packet_data();
+               pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
                if (!pkt) {
                        pkt_shrink_pktlist(pd);
                        return 0;
@@ -951,7 +950,7 @@ try_next_bio:
 
        pd->current_sector = zone + pd->settings.size;
        pkt->sector = zone;
-       pkt->frames = pd->settings.size >> 2;
+       BUG_ON(pkt->frames != pd->settings.size >> 2);
        pkt->write_size = 0;
 
        /*
@@ -1639,7 +1638,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
        pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
        if (pd->settings.size == 0) {
                printk("pktcdvd: detected zero packet size!\n");
-               pd->settings.size = 128;
+               return -ENXIO;
        }
        if (pd->settings.size > PACKET_MAX_SECTORS) {
                printk("pktcdvd: packet size is too big\n");
@@ -1987,8 +1986,14 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
        if ((ret = pkt_set_segment_merging(pd, q)))
                goto out_unclaim;
 
-       if (write)
+       if (write) {
+               if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+                       printk("pktcdvd: not enough memory for buffers\n");
+                       ret = -ENOMEM;
+                       goto out_unclaim;
+               }
                printk("pktcdvd: %lukB available on disc\n", lba << 1);
+       }
 
        return 0;
 
@@ -2014,6 +2019,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
        pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
        bd_release(pd->bdev);
        blkdev_put(pd->bdev);
+
+       pkt_shrink_pktlist(pd);
 }
 
 static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
@@ -2379,12 +2386,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);
 
-       if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
-               printk("pktcdvd: not enough memory for buffers\n");
-               ret = -ENOMEM;
-               goto out_mem;
-       }
-
        pd->bdev = bdev;
        set_blocksize(bdev, CD_FRAMESIZE);
 
@@ -2395,7 +2396,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        if (IS_ERR(pd->cdrw.thread)) {
                printk("pktcdvd: can't start kernel thread\n");
                ret = -ENOMEM;
-               goto out_thread;
+               goto out_mem;
        }
 
        proc = create_proc_entry(pd->name, 0, pkt_proc);
@@ -2406,8 +2407,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
        return 0;
 
-out_thread:
-       pkt_shrink_pktlist(pd);
 out_mem:
        blkdev_put(bdev);
        /* This is safe: open() is still holding a reference. */
@@ -2503,6 +2502,10 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
                goto out_mem;
        pd->disk = disk;
 
+       INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+       INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+       spin_lock_init(&pd->cdrw.active_list_lock);
+
        spin_lock_init(&pd->lock);
        spin_lock_init(&pd->iosched.lock);
        sprintf(pd->name, "pktcdvd%d", idx);
@@ -2567,8 +2570,6 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
 
        blkdev_put(pd->bdev);
 
-       pkt_shrink_pktlist(pd);
-
        remove_proc_entry(pd->name, pkt_proc);
        DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
 
@@ -2678,7 +2679,6 @@ static int __init pkt_init(void)
 
        pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
 
-       DPRINTK("pktcdvd: %s\n", VERSION_CODE);
        return 0;
 
 out:
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index f9e5a23697a1f8ae54ae28b0d1388c492800ef25..c08ddac3717d8fb129da204b8f955fee38ea890d 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -732,7 +732,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
            cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
        msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
 
-       if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
+       if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
                status = SUCCESS;
 
        return status;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 245ca99a641eb6432ee14434500531d36675fb22..c551bb84dbfb6c46351624dcf640d3ac9be44ca4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
        if (error)
                goto cleanup_sysctl;
 
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
        devfs_mk_dir("scsi");
diff --git a/fs/file.c b/fs/file.c
index fd066b261c751875de1c4871974b54d335eafcd7..cea7cbea11d0d5fca944ab11a331568fe0949fb7 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
        int i;
-       /* Really early - can't use for_each_cpu */
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                fdtable_defer_list_init(i);
 }
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 4526da8907c6d384fefee03b18bd6894558bf459..f556a0d5c0d31010b86552ff958dcb55b20947f5 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -120,9 +120,9 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
        return do_get_request(fc);
 }
 
+/* Must be called with fuse_lock held */
 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 {
-       spin_lock(&fuse_lock);
        if (req->preallocated) {
                atomic_dec(&fc->num_waiting);
                list_add(&req->list, &fc->unused_list);
@@ -134,10 +134,18 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
-       spin_unlock(&fuse_lock);
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
+{
+       if (atomic_dec_and_test(&req->count)) {
+               spin_lock(&fuse_lock);
+               fuse_putback_request(fc, req);
+               spin_unlock(&fuse_lock);
+       }
+}
+
+static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
@@ -163,26 +171,36 @@ void fuse_release_background(struct fuse_req *req)
  * still waiting), the 'end' callback is called if given, else the
  * reference to the request is released
  *
+ * Releasing extra reference for foreground requests must be done
+ * within the same locked region as setting state to finished.  This
+ * is because fuse_reset_request() may be called after request is
+ * finished and it must be the sole possessor.  If request is
+ * interrupted and put in the background, it will return with an error
+ * and hence never be reset and reused.
+ *
  * Called with fuse_lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-       void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-       req->end = NULL;
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
-       spin_unlock(&fuse_lock);
-       if (req->background) {
+       if (!req->background) {
+               wake_up(&req->waitq);
+               fuse_put_request_locked(fc, req);
+               spin_unlock(&fuse_lock);
+       } else {
+               void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+               req->end = NULL;
+               spin_unlock(&fuse_lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
+               if (end)
+                       end(fc, req);
+               else
+                       fuse_put_request(fc, req);
        }
-       wake_up(&req->waitq);
-       if (end)
-               end(fc, req);
-       else
-               fuse_put_request(fc, req);
 }
 
 /*
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 429f4b263cf1198bf179af4377ff8b1c21af3d5e..ca917973c2c06d1fe1035a4d61ef21f505e5b435 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1308,6 +1308,7 @@ int journal_stop(handle_t *handle)
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int old_handle_count, err;
+       pid_t pid;
 
        J_ASSERT(transaction->t_updates > 0);
        J_ASSERT(journal_current_handle() == handle);
@@ -1333,8 +1334,15 @@ int journal_stop(handle_t *handle)
         * It doesn't cost much - we're about to run a commit and sleep
         * on IO anyway.  Speeds up many-threaded, many-dir operations
         * by 30x or more...
+        *
+        * But don't do this if this process was the most recent one to
+        * perform a synchronous write.  We do this to detect the case where a
+        * single process is doing a stream of sync writes.  No point in waiting
+        * for joiners in that case.
         */
-       if (handle->h_sync) {
+       pid = current->pid;
+       if (handle->h_sync && journal->j_last_sync_writer != pid) {
+               journal->j_last_sync_writer = pid;
                do {
                        old_handle_count = transaction->t_handle_count;
                        schedule_timeout_uninterruptible(1);
diff --git a/fs/namei.c b/fs/namei.c
index 7ac9fb4acb2c7d265fc48dff00810861f65cc4ce..faf61c35308cb023474f5859c606b8ef164d35c3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -790,7 +790,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
 
        inode = nd->dentry->d_inode;
        if (nd->depth)
-               lookup_flags = LOOKUP_FOLLOW;
+               lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
 
        /* At this point we know we have a real path component. */
        for(;;) {
@@ -885,7 +885,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
 last_with_slashes:
                lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 last_component:
-               nd->flags &= ~LOOKUP_CONTINUE;
+               /* Clear LOOKUP_CONTINUE iff it was previously unset */
+               nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
                if (lookup_flags & LOOKUP_PARENT)
                        goto lookup_parent;
                if (this.name[0] == '.') switch (this.len) {
@@ -1069,6 +1070,8 @@ static int fastcall do_path_lookup(int dfd, const char *name,
                                unsigned int flags, struct nameidata *nd)
 {
        int retval = 0;
+       int fput_needed;
+       struct file *file;
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags;
@@ -1090,29 +1093,22 @@ static int fastcall do_path_lookup(int dfd, const char *name,
                nd->mnt = mntget(current->fs->pwdmnt);
                nd->dentry = dget(current->fs->pwd);
        } else {
-               struct file *file;
-               int fput_needed;
                struct dentry *dentry;
 
                file = fget_light(dfd, &fput_needed);
-               if (!file) {
-                       retval = -EBADF;
-                       goto out_fail;
-               }
+               retval = -EBADF;
+               if (!file)
+                       goto unlock_fail;
 
                dentry = file->f_dentry;
 
-               if (!S_ISDIR(dentry->d_inode->i_mode)) {
-                       retval = -ENOTDIR;
-                       fput_light(file, fput_needed);
-                       goto out_fail;
-               }
+               retval = -ENOTDIR;
+               if (!S_ISDIR(dentry->d_inode->i_mode))
+                       goto fput_unlock_fail;
 
                retval = file_permission(file, MAY_EXEC);
-               if (retval) {
-                       fput_light(file, fput_needed);
-                       goto out_fail;
-               }
+               if (retval)
+                       goto fput_unlock_fail;
 
                nd->mnt = mntget(file->f_vfsmnt);
                nd->dentry = dget(dentry);
@@ -1126,7 +1122,12 @@ out:
        if (unlikely(current->audit_context
                     && nd && nd->dentry && nd->dentry->d_inode))
                audit_inode(name, nd->dentry->d_inode, flags);
-out_fail:
+       return retval;
+
+fput_unlock_fail:
+       fput_light(file, fput_needed);
+unlock_fail:
+       read_unlock(&current->fs->lock);
        return retval;
 }
 
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 36a92ed6a9d0d7a916c60f33313bec9d4fb0c494..399145a247f290580c53576a81c0f7ba94ee61e8 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -507,7 +507,7 @@ struct alt_instr {
 #define smp_rmb()      rmb()
 #define smp_wmb()      wmb()
 #define smp_read_barrier_depends()     read_barrier_depends()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #else
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index dffe276ca2df49d55044dad35b124373f64e6bf8..34e434ce3268409b7cc90f5fb1c6c925d051086c 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -22,15 +22,8 @@ extern void numa_set_node(int cpu, int node);
 extern unsigned char apicid_to_node[256];
 #ifdef CONFIG_NUMA
 extern void __init init_cpu_to_node(void);
-
-static inline void clear_node_cpumask(int cpu)
-{
-       clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
-}
-
 #else
 #define init_cpu_to_node() do {} while (0)
-#define clear_node_cpumask(cpu) do {} while (0)
 #endif
 
 #define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index a73f0c789d8b994b3459771ec90a51e93af0eb08..b7f66034ae7ac5e4d9ba115cf6db9d651bb2c41b 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -327,7 +327,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define wmb()  asm volatile("" ::: "memory")
 #endif
 #define read_barrier_depends() do {} while(0)
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 558cb4c26ec9ee6c1d6f8daab784f8bc497fba84..751bb3849467e3ce3b08188d69787efbda74a070 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -23,6 +23,7 @@
 #define jfs_debug jbd_debug
 #else
 
+#include <linux/types.h>
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
@@ -618,6 +619,7 @@ struct transaction_s
  * @j_wbuf: array of buffer_heads for journal_commit_transaction
  * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
  *     number that will fit in j_blocksize
+ * @j_last_sync_writer: most recent pid which did a synchronous write
  * @j_private: An opaque pointer to fs-private information.
  */
 
@@ -807,6 +809,8 @@ struct journal_s
        struct buffer_head      **j_wbuf;
        int                     j_wbufsize;
 
+       pid_t                   j_last_sync_writer;
+
        /*
         * An opaque pointer to fs-private information.  ext3 puts its
         * superblock pointer here
diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h b/include/linux/netfilter_ipv4/ipt_connbytes.h
index b04dfa3083c9527d59ceec1e6fe095ed63ebecde..f63e6ee911130b9b30204980459ccddd2d005279 100644
--- a/include/linux/netfilter_ipv4/ipt_connbytes.h
+++ b/include/linux/netfilter_ipv4/ipt_connbytes.h
@@ -1,10 +1,10 @@
 #ifndef _IPT_CONNBYTES_H
 #define _IPT_CONNBYTES_H
 
-#include <net/netfilter/xt_connbytes.h>
+#include <linux/netfilter/xt_connbytes.h>
 #define ipt_connbytes_what xt_connbytes_what
 
-#define IPT_CONNBYTES_PKTS     XT_CONNBYTES_PACKETS
+#define IPT_CONNBYTES_PKTS     XT_CONNBYTES_PKTS
 #define IPT_CONNBYTES_BYTES    XT_CONNBYTES_BYTES
 #define IPT_CONNBYTES_AVGPKT   XT_CONNBYTES_AVGPKT
 
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index 7fd1bec453f181130f0a92caf8ac6b3d3d5e43ce..a3f6eff39d335c71abc26cbb227528c7d9bd119a 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -27,16 +27,22 @@ struct ipt_policy_spec
                        reqid:1;
 };
 
+union ipt_policy_addr
+{
+       struct in_addr  a4;
+       struct in6_addr a6;
+};
+
 struct ipt_policy_elem
 {
-       u_int32_t       saddr;
-       u_int32_t       smask;
-       u_int32_t       daddr;
-       u_int32_t       dmask;
-       u_int32_t       spi;
-       u_int32_t       reqid;
-       u_int8_t        proto;
-       u_int8_t        mode;
+       union ipt_policy_addr   saddr;
+       union ipt_policy_addr   smask;
+       union ipt_policy_addr   daddr;
+       union ipt_policy_addr   dmask;
+       u_int32_t               spi;
+       u_int32_t               reqid;
+       u_int8_t                proto;
+       u_int8_t                mode;
 
        struct ipt_policy_spec  match;
        struct ipt_policy_spec  invert;
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 5a93afcd2ff1ea7f3d15424cef7eec67795572a7..671bd818300fa167e8f12b6d584fba7fc292cab6 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -27,16 +27,22 @@ struct ip6t_policy_spec
                        reqid:1;
 };
 
+union ip6t_policy_addr
+{
+       struct in_addr  a4;
+       struct in6_addr a6;
+};
+
 struct ip6t_policy_elem
 {
-       struct in6_addr saddr;
-       struct in6_addr smask;
-       struct in6_addr daddr;
-       struct in6_addr dmask;
-       u_int32_t       spi;
-       u_int32_t       reqid;
-       u_int8_t        proto;
-       u_int8_t        mode;
+       union ip6t_policy_addr  saddr;
+       union ip6t_policy_addr  smask;
+       union ip6t_policy_addr  daddr;
+       union ip6t_policy_addr  dmask;
+       u_int32_t               spi;
+       u_int32_t               reqid;
+       u_int8_t                proto;
+       u_int8_t                mode;
 
        struct ip6t_policy_spec match;
        struct ip6t_policy_spec invert;
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 2c177e4c8f226d50803162dcb421beb629d4cbd3..8a94c717c26636fb6e21f54db6c1c63e1a066f2d 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -114,7 +114,7 @@ struct pkt_ctrl_command {
 
 struct packet_settings
 {
-       __u8                    size;           /* packet size in (512 byte) sectors */
+       __u32                   size;           /* packet size in (512 byte) sectors */
        __u8                    fp;             /* fixed packets */
        __u8                    link_loss;      /* the rest is specified
                                                 * as per Mt Fuji */
@@ -169,8 +169,8 @@ struct packet_iosched
 #if (PAGE_SIZE % CD_FRAMESIZE) != 0
 #error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
 #endif
-#define PACKET_MAX_SIZE                32
-#define PAGES_PER_PACKET       (PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE)
+#define PACKET_MAX_SIZE                128
+#define FRAMES_PER_PAGE                (PAGE_SIZE / CD_FRAMESIZE)
 #define PACKET_MAX_SECTORS     (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
 
 enum packet_data_state {
@@ -219,7 +219,7 @@ struct packet_data
        atomic_t                io_errors;      /* Number of read/write errors during IO */
 
        struct bio              *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
-       struct page             *pages[PAGES_PER_PACKET];
+       struct page             *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
 
        int                     cache_valid;    /* If non-zero, the data for the zone defined */
                                                /* by the sector variable is completely cached */
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index 0a3605099c444c2617a5ef43232c0382e4bfb75d..806ec5b067075554792d5631996b24595157d697 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -58,9 +58,13 @@ extern struct reiserfs_xattr_handler posix_acl_default_handler;
 extern struct reiserfs_xattr_handler posix_acl_access_handler;
 #else
 
-#define reiserfs_get_acl NULL
 #define reiserfs_cache_default_acl(inode) 0
 
+static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
+{
+       return NULL;
+}
+
 static inline int reiserfs_xattr_posix_acl_init(void)
 {
        return 0;
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 67856eb93b435093807db606e220ce575988571f..dac43b15a5b04bf6b5c90fc07aabf033a717d1cb 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -88,12 +88,6 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
 extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
-static inline struct nf_conntrack_l3proto *
-__nf_ct_l3proto_find(u_int16_t l3proto)
-{
-       return nf_ct_l3protos[l3proto];
-}
-
 extern struct nf_conntrack_l3proto *
 nf_ct_l3proto_find_get(u_int16_t l3proto);
 
@@ -103,4 +97,13 @@ extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
+
+static inline struct nf_conntrack_l3proto *
+__nf_ct_l3proto_find(u_int16_t l3proto)
+{
+       if (unlikely(l3proto >= AF_MAX))
+               return &nf_conntrack_generic_l3proto;
+       return nf_ct_l3protos[l3proto];
+}
+
 #endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
index 0cbe633420fb9b6d46376debb0c68d201a5966d5..55b1e5b85db97fef9b361ba86daee3dab0211f7f 100644
--- a/kernel/intermodule.c
+++ b/kernel/intermodule.c
@@ -179,3 +179,6 @@ EXPORT_SYMBOL(inter_module_register);
 EXPORT_SYMBOL(inter_module_unregister);
 EXPORT_SYMBOL(inter_module_get_request);
 EXPORT_SYMBOL(inter_module_put);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/sched.c b/kernel/sched.c
index f77f23f8f479c898666082f3c9da0d52bb5a5bab..bc38804e40ddf72954695470ae50fb3a5c0b82a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5551,13 +5551,15 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
                        -1
 #endif
                );
-       printk("migration_cost=");
-       for (distance = 0; distance <= max_distance; distance++) {
-               if (distance)
-                       printk(",");
-               printk("%ld", (long)migration_cost[distance] / 1000);
+       if (system_state == SYSTEM_BOOTING) {
+               printk("migration_cost=");
+               for (distance = 0; distance <= max_distance; distance++) {
+                       if (distance)
+                               printk(",");
+                       printk("%ld", (long)migration_cost[distance] / 1000);
+               }
+               printk("\n");
        }
-       printk("\n");
        j1 = jiffies;
        if (migration_debug)
                printk("migration: %ld seconds\n", (j1-j0)/HZ);
@@ -6109,7 +6111,7 @@ void __init sched_init(void)
        runqueue_t *rq;
        int i, j, k;
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                prio_array_t *array;
 
                rq = cpu_rq(i);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b21d78c941b527b9c8021d23a6d63bf3dd892523..ceb3ebb3c399e9e346c12b15a8411616f871d5db 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -444,6 +444,15 @@ retry:
                page = alloc_huge_page(vma, address);
                if (!page) {
                        hugetlb_put_quota(mapping);
+                       /*
+                        * No huge pages available. So this is an OOM
+                        * condition but we do not want to trigger the OOM
+                        * killer, so we return VM_FAULT_SIGBUS.
+                        *
+                        * A program using hugepages may fault with Bus Error
+                        * because no huge pages are available in the cpuset, per
+                        * memory policy or because all are in use!
+                        */
                        goto out;
                }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4202d91e33acc45384f1468a4823ff5442..dde04ff4be31873b88c38efbf94d30fd06ca5329 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
        int cpu = 0;
 
-       memset(ret, 0, sizeof(*ret));
+       memset(ret, 0, nr * sizeof(unsigned long));
        cpus_and(*cpumask, *cpumask, cpu_online_map);
 
        cpu = first_cpu(*cpumask);
        while (cpu < NR_CPUS) {
                unsigned long *in, *out, off;
 
+               if (!cpu_isset(cpu, *cpumask))
+                       continue;
+
                in = (unsigned long *)&per_cpu(page_states, cpu);
 
                cpu = next_cpu(cpu, *cpumask);
 
-               if (cpu < NR_CPUS)
+               if (likely(cpu < NR_CPUS))
                        prefetch(&per_cpu(page_states, cpu));
 
                out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-       boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
  * Dynamically allocate memory for the
diff --git a/mm/slab.c b/mm/slab.c
index 71370256a7eb11e8b6eeb0c88cbd0e23955ab163..9cc049a942c6b6dcfcdc7dacd5211c66f799b120 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -294,6 +294,7 @@ struct kmem_list3 {
        unsigned long next_reap;
        int free_touched;
        unsigned int free_limit;
+       unsigned int colour_next;       /* Per-node cache coloring */
        spinlock_t list_lock;
        struct array_cache *shared;     /* shared per node */
        struct array_cache **alien;     /* on other nodes */
@@ -344,6 +345,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
        INIT_LIST_HEAD(&parent->slabs_free);
        parent->shared = NULL;
        parent->alien = NULL;
+       parent->colour_next = 0;
        spin_lock_init(&parent->list_lock);
        parent->free_objects = 0;
        parent->free_touched = 0;
@@ -390,7 +392,6 @@ struct kmem_cache {
 
        size_t colour;          /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
-       unsigned int colour_next;       /* cache colouring */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;
        unsigned int dflags;    /* dynamic flags */
@@ -883,14 +884,14 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
        }
 }
 
-static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
 {
        int i = 0;
        struct array_cache *ac;
        unsigned long flags;
 
        for_each_online_node(i) {
-               ac = l3->alien[i];
+               ac = alien[i];
                if (ac) {
                        spin_lock_irqsave(&ac->lock, flags);
                        __drain_alien_cache(cachep, ac, i);
@@ -900,8 +901,11 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 }
 #else
 #define alloc_alien_cache(node, limit) do { } while (0)
-#define free_alien_cache(ac_ptr) do { } while (0)
-#define drain_alien_cache(cachep, l3) do { } while (0)
+#define drain_alien_cache(cachep, alien) do { } while (0)
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
 #endif
 
 static int __devinit cpuup_callback(struct notifier_block *nfb,
@@ -935,6 +939,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
+                               /*
+                                * The l3s don't come and go as CPUs come and
+                                * go.  cache_chain_mutex is sufficient
+                                * protection here.
+                                */
                                cachep->nodelists[node] = l3;
                        }
 
@@ -949,26 +958,47 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                   & array cache's */
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
+                       struct array_cache *shared;
+                       struct array_cache **alien;
 
                        nc = alloc_arraycache(node, cachep->limit,
-                                             cachep->batchcount);
+                                               cachep->batchcount);
                        if (!nc)
                                goto bad;
+                       shared = alloc_arraycache(node,
+                                       cachep->shared * cachep->batchcount,
+                                       0xbaadf00d);
+                       if (!shared)
+                               goto bad;
+#ifdef CONFIG_NUMA
+                       alien = alloc_alien_cache(node, cachep->limit);
+                       if (!alien)
+                               goto bad;
+#endif
                        cachep->array[cpu] = nc;
 
                        l3 = cachep->nodelists[node];
                        BUG_ON(!l3);
-                       if (!l3->shared) {
-                               if (!(nc = alloc_arraycache(node,
-                                                           cachep->shared *
-                                                           cachep->batchcount,
-                                                           0xbaadf00d)))
-                                       goto bad;
 
-                               /* we are serialised from CPU_DEAD or
-                                  CPU_UP_CANCELLED by the cpucontrol lock */
-                               l3->shared = nc;
+                       spin_lock_irq(&l3->list_lock);
+                       if (!l3->shared) {
+                               /*
+                                * We are serialised from CPU_DEAD or
+                                * CPU_UP_CANCELLED by the cpucontrol lock
+                                */
+                               l3->shared = shared;
+                               shared = NULL;
+                       }
+#ifdef CONFIG_NUMA
+                       if (!l3->alien) {
+                               l3->alien = alien;
+                               alien = NULL;
                        }
+#endif
+                       spin_unlock_irq(&l3->list_lock);
+
+                       kfree(shared);
+                       free_alien_cache(alien);
                }
                mutex_unlock(&cache_chain_mutex);
                break;
@@ -977,25 +1007,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
+               /*
+                * Even if all the cpus of a node are down, we don't free the
+                * kmem_list3 of any cache. This to avoid a race between
+                * cpu_down, and a kmalloc allocation from another cpu for
+                * memory from the node of the cpu going down.  The list3
+                * structure is usually allocated from kmem_cache_create() and
+                * gets destroyed at kmem_cache_destroy().
+                */
                /* fall thru */
        case CPU_UP_CANCELED:
                mutex_lock(&cache_chain_mutex);
 
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
+                       struct array_cache *shared;
+                       struct array_cache **alien;
                        cpumask_t mask;
 
                        mask = node_to_cpumask(node);
-                       spin_lock_irq(&cachep->spinlock);
                        /* cpu is dead; no one can alloc from it. */
                        nc = cachep->array[cpu];
                        cachep->array[cpu] = NULL;
                        l3 = cachep->nodelists[node];
 
                        if (!l3)
-                               goto unlock_cache;
+                               goto free_array_cache;
 
-                       spin_lock(&l3->list_lock);
+                       spin_lock_irq(&l3->list_lock);
 
                        /* Free limit for this kmem_list3 */
                        l3->free_limit -= cachep->batchcount;
@@ -1003,34 +1042,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                free_block(cachep, nc->entry, nc->avail, node);
 
                        if (!cpus_empty(mask)) {
-                               spin_unlock(&l3->list_lock);
-                               goto unlock_cache;
+                               spin_unlock_irq(&l3->list_lock);
+                               goto free_array_cache;
                        }
 
-                       if (l3->shared) {
+                       shared = l3->shared;
+                       if (shared) {
                                free_block(cachep, l3->shared->entry,
                                           l3->shared->avail, node);
-                               kfree(l3->shared);
                                l3->shared = NULL;
                        }
-                       if (l3->alien) {
-                               drain_alien_cache(cachep, l3);
-                               free_alien_cache(l3->alien);
-                               l3->alien = NULL;
-                       }
 
-                       /* free slabs belonging to this node */
-                       if (__node_shrink(cachep, node)) {
-                               cachep->nodelists[node] = NULL;
-                               spin_unlock(&l3->list_lock);
-                               kfree(l3);
-                       } else {
-                               spin_unlock(&l3->list_lock);
+                       alien = l3->alien;
+                       l3->alien = NULL;
+
+                       spin_unlock_irq(&l3->list_lock);
+
+                       kfree(shared);
+                       if (alien) {
+                               drain_alien_cache(cachep, alien);
+                               free_alien_cache(alien);
                        }
-                     unlock_cache:
-                       spin_unlock_irq(&cachep->spinlock);
+free_array_cache:
                        kfree(nc);
                }
+               /*
+                * In the previous loop, all the objects were freed to
+                * the respective cache's slabs,  now we can go ahead and
+                * shrink each nodelist to its limit.
+                */
+               list_for_each_entry(cachep, &cache_chain, next) {
+                       l3 = cachep->nodelists[node];
+                       if (!l3)
+                               continue;
+                       spin_lock_irq(&l3->list_lock);
+                       /* free slabs belonging to this node */
+                       __node_shrink(cachep, node);
+                       spin_unlock_irq(&l3->list_lock);
+               }
                mutex_unlock(&cache_chain_mutex);
                break;
 #endif
@@ -1119,7 +1168,6 @@ void __init kmem_cache_init(void)
                BUG();
 
        cache_cache.colour = left_over / cache_cache.colour_off;
-       cache_cache.colour_next = 0;
        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
                                      sizeof(struct slab), cache_line_size());
 
@@ -2011,18 +2059,16 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 
        smp_call_function_all_cpus(do_drain, cachep);
        check_irq_on();
-       spin_lock_irq(&cachep->spinlock);
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (l3) {
-                       spin_lock(&l3->list_lock);
+                       spin_lock_irq(&l3->list_lock);
                        drain_array_locked(cachep, l3->shared, 1, node);
-                       spin_unlock(&l3->list_lock);
+                       spin_unlock_irq(&l3->list_lock);
                        if (l3->alien)
-                               drain_alien_cache(cachep, l3);
+                               drain_alien_cache(cachep, l3->alien);
                }
        }
-       spin_unlock_irq(&cachep->spinlock);
 }
 
 static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2324,20 +2370,20 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
                 */
                ctor_flags |= SLAB_CTOR_ATOMIC;
 
-       /* About to mess with non-constant members - lock. */
+       /* Take the l3 list lock to change the colour_next on this node */
        check_irq_off();
-       spin_lock(&cachep->spinlock);
+       l3 = cachep->nodelists[nodeid];
+       spin_lock(&l3->list_lock);
 
        /* Get colour for the slab, and cal the next value. */
-       offset = cachep->colour_next;
-       cachep->colour_next++;
-       if (cachep->colour_next >= cachep->colour)
-               cachep->colour_next = 0;
-       offset *= cachep->colour_off;
+       offset = l3->colour_next;
+       l3->colour_next++;
+       if (l3->colour_next >= cachep->colour)
+               l3->colour_next = 0;
+       spin_unlock(&l3->list_lock);
 
-       spin_unlock(&cachep->spinlock);
+       offset *= cachep->colour_off;
 
-       check_irq_off();
        if (local_flags & __GFP_WAIT)
                local_irq_enable();
 
@@ -2367,7 +2413,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
        if (local_flags & __GFP_WAIT)
                local_irq_disable();
        check_irq_off();
-       l3 = cachep->nodelists[nodeid];
        spin_lock(&l3->list_lock);
 
        /* Make slab active. */
@@ -2725,6 +2770,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
        BUG_ON(!l3);
 
       retry:
+       check_irq_off();
        spin_lock(&l3->list_lock);
        entry = l3->slabs_partial.next;
        if (entry == &l3->slabs_partial) {
@@ -3304,11 +3350,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount
        smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 
        check_irq_on();
-       spin_lock_irq(&cachep->spinlock);
+       spin_lock(&cachep->spinlock);
        cachep->batchcount = batchcount;
        cachep->limit = limit;
        cachep->shared = shared;
-       spin_unlock_irq(&cachep->spinlock);
+       spin_unlock(&cachep->spinlock);
 
        for_each_online_cpu(i) {
                struct array_cache *ccold = new.new[i];
@@ -3440,7 +3486,7 @@ static void cache_reap(void *unused)
 
                l3 = searchp->nodelists[numa_node_id()];
                if (l3->alien)
-                       drain_alien_cache(searchp, l3);
+                       drain_alien_cache(searchp, l3->alien);
                spin_lock_irq(&l3->list_lock);
 
                drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3564,8 +3610,7 @@ static int s_show(struct seq_file *m, void *p)
        int node;
        struct kmem_list3 *l3;
 
-       check_irq_on();
-       spin_lock_irq(&cachep->spinlock);
+       spin_lock(&cachep->spinlock);
        active_objs = 0;
        num_slabs = 0;
        for_each_online_node(node) {
@@ -3573,7 +3618,8 @@ static int s_show(struct seq_file *m, void *p)
                if (!l3)
                        continue;
 
-               spin_lock(&l3->list_lock);
+               check_irq_on();
+               spin_lock_irq(&l3->list_lock);
 
                list_for_each(q, &l3->slabs_full) {
                        slabp = list_entry(q, struct slab, list);
@@ -3598,9 +3644,10 @@ static int s_show(struct seq_file *m, void *p)
                        num_slabs++;
                }
                free_objects += l3->free_objects;
-               shared_avail += l3->shared->avail;
+               if (l3->shared)
+                       shared_avail += l3->shared->avail;
 
-               spin_unlock(&l3->list_lock);
+               spin_unlock_irq(&l3->list_lock);
        }
        num_slabs += active_slabs;
        num_objs = num_slabs * cachep->num;
@@ -3644,7 +3691,7 @@ static int s_show(struct seq_file *m, void *p)
        }
 #endif
        seq_putc(m, '\n');
-       spin_unlock_irq(&cachep->spinlock);
+       spin_unlock(&cachep->spinlock);
        return 0;
 }
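
One detail in the /proc/slabinfo hunks above: a node's shared array cache may not have been allocated yet, so s_show() now guards the dereference instead of assuming it exists; roughly:

        if (l3->shared)
                shared_avail += l3->shared->avail;      /* skip nodes without a shared array */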
 
index ce617b3dbbb8dbb4c9a77cfcfe70c25c5748f304..802baf755ef465c392b757642999c3e40b20e2d4 100644 (file)
@@ -46,7 +46,7 @@
 #define PRINTR(format, args...) do { if (net_ratelimit()) \
                                 printk(format , ## args); } while (0)
 
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
 module_param(nlbufsiz, uint, 0600);
 MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
                            "(defaults to 4096)");
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data)
 static struct sk_buff *ulog_alloc_skb(unsigned int size)
 {
        struct sk_buff *skb;
+       unsigned int n;
 
-       skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+       n = max(size, nlbufsiz);
+       skb = alloc_skb(n, GFP_ATOMIC);
        if (!skb) {
                PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
-                      "of size %ub!\n", nlbufsiz);
-               if (size < nlbufsiz) {
+                      "of size %ub!\n", n);
+               if (n > size) {
                        /* try to allocate only as much as we need for
                         * current packet */
                        skb = alloc_skb(size, GFP_ATOMIC);
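
The allocation strategy in ulog_alloc_skb() above is: ask for the larger of the configured buffer size and the current packet's size, and only if that fails (and was genuinely larger) retry with just what this packet needs. Stripped of the printks, the pattern is roughly:

        n = max(size, nlbufsiz);
        skb = alloc_skb(n, GFP_ATOMIC);         /* whole multipart buffer if possible */
        if (!skb && n > size)
                skb = alloc_skb(size, GFP_ATOMIC);      /* fall back to this packet only */
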
index 00729b3604f8b540e507f74e0aabf4cf6e06fa96..cbd4020cc84d6a142c0128496a80de3909b1f58e 100644 (file)
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len)
                BUGPRINT("Entries_size never zero\n");
                return -EINVAL;
        }
+       /* overflow check */
+       if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
+                       SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+               return -ENOMEM;
+       if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+               return -ENOMEM;
+
        countersize = COUNTER_OFFSET(tmp.nentries) * 
                                        (highest_possible_processor_id()+1);
        newinfo = (struct ebt_table_info *)
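
The overflow check above is the later allocation size solved for tmp.nentries: the table info is followed by one cache-aligned counter block per possible CPU, and the bound keeps that total below INT_MAX so the multiplication in COUNTER_OFFSET() cannot wrap. As an illustration (this helper variable is not in the patch), the quantity being kept below INT_MAX is roughly:

        /* illustrative only: worst-case size implied by the user-supplied counts */
        unsigned long worst = sizeof(struct ebt_table_info) +
                (unsigned long)NR_CPUS *
                (tmp.nentries * sizeof(struct ebt_counter) + SMP_CACHE_BYTES);
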
index ffb82073056e761267dcbd480603d01645cfd7cf..2afb0de953291c3dfb2e16b25ba764a43e3cd860 100644 (file)
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
         *      Initialise the packet receive queues.
         */
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                struct softnet_data *queue;
 
                queue = &per_cpu(softnet_data, i);
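
Several per-CPU loops in this commit (here, and in the fold_prot_inuse() and socket_seq_show() hunks below) switch from indexing 0..NR_CPUS-1 to for_each_cpu(), which in this kernel walks only cpu_possible_map (it was later renamed for_each_possible_cpu()). On a kernel built with a large NR_CPUS but few possible CPUs that skips many dead per-CPU slots. The typical shape of the converted loops:

        int cpu, res = 0;

        for_each_cpu(cpu)                       /* possible CPUs only */
                res += proto->stats[cpu].inuse;
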
index ac1d1fcf8673f63158691ee0004952b803206f84..fdc4f38bc46ccfbcc86c4a36698489cfcd0ba08b 100644 (file)
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, i+jiffies);
        }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
        unsigned long seed[NR_CPUS];
 
        get_random_bytes(seed, sizeof(seed));
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, seed[i]);
        }
index 6bc0887b0834ff9ee85dbe21e3aaf940b76f6fa1..4d1c40972a4bcb7b2d32e08b0c21fa79eb69c02c 100644 (file)
@@ -524,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
                                          iph->tos;
 
        if (ip_options_echo(&icmp_param.replyopts, skb_in))
-               goto ende;
+               goto out_unlock;
 
 
        /*
index afe3d8f8177d7df83df0264e20f3072077ff1317..dd1048be8a0115b4cd08591fbd3b29b19e9bda36 100644 (file)
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len)
        if (len != sizeof(tmp) + tmp.size)
                return -ENOPROTOOPT;
 
+       /* overflow check */
+       if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+                       SMP_CACHE_BYTES)
+               return -ENOMEM;
+       if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+               return -ENOMEM;
+
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;
index c9ebbe0d2d9cc6ad8bca99b0b3f94ab699695d52..e0b5926c76f94d0adfee4fa2a26f10bdbce33bf6 100644 (file)
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
 
        b = skb->tail;
 
-       type |= NFNL_SUBSYS_CTNETLINK << 8;
+       type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
        nlh   = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
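
Background for the two ctnetlink fixes in this commit: nfnetlink carries the subsystem id in the high byte of the message type, so expectation events sent with the conntrack subsystem id would be demultiplexed to the wrong subsystem by receivers. The MODULE_ALIAS added below likewise lets requests for the expectation subsystem autoload this module. Roughly:

        /* high byte = subsystem id, low byte = message type within it;
         * receivers recover the subsystem as type >> 8 */
        type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;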
 
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
 };
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
 static int __init ctnetlink_init(void)
 {
index d3c5a371f993e390a5f6c521b649050be8d950b9..4ba4463cec280ee816d814afc25cae8ec197cf55 100644 (file)
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb,
 
                exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
                exp->mask.src.ip = 0xffffffff;
+               exp->mask.src.u.udp.port = 0;
                exp->mask.dst.ip = 0xffffffff;
                exp->mask.dst.u.udp.port = 0xffff;
                exp->mask.dst.protonum = 0xff;
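
The TFTP fix above is about expectation masks: a set bit means the corresponding tuple bit must match, a zero bit means don't care. The server sends its data replies from a fresh ephemeral port, so the expectation has to wildcard the reply's source port while still pinning the destination port and addresses; the new line makes that explicit instead of leaving the field uninitialized:

        exp->mask.src.u.udp.port = 0;           /* server may reply from any port */
        exp->mask.dst.u.udp.port = 0xffff;      /* client's port must match exactly */
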
index ad438fb185b8943dfafde63c13fcbb405a5caf46..92c54999a19d023d049af354123b096839757aad 100644 (file)
@@ -209,8 +209,8 @@ ip_nat_in(unsigned int hooknum,
            && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
-               if (ct->tuplehash[dir].tuple.src.ip !=
-                   ct->tuplehash[!dir].tuple.dst.ip) {
+               if (ct->tuplehash[dir].tuple.dst.ip !=
+                   ct->tuplehash[!dir].tuple.src.ip) {
                        dst_release((*pskb)->dst);
                        (*pskb)->dst = NULL;
                }
index 2371b2062c2d812468ad62f4fe19d4360e748f41..16f47c675fefd8304d86174f6d4beeebdec6539b 100644 (file)
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len)
        if (len != sizeof(tmp) + tmp.size)
                return -ENOPROTOOPT;
 
+       /* overflow check */
+       if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+                       SMP_CACHE_BYTES)
+               return -ENOMEM;
+       if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+               return -ENOMEM;
+
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;
index 641dbc477650f6059e9577386a1664d6c9ea0206..180a9ea57b69fb4e05a7c8b171f1bc1628ae6f8c 100644 (file)
  * each nlgroup you are using, so the total kernel memory usage increases
  * by that factor.
  *
+ * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
+ * nlbufsiz is used with alloc_skb, which adds another
+ * sizeof(struct skb_shared_info).  Use NLMSG_GOODSIZE instead.
+ *
  * flushtimeout:
  *   Specify, after how many hundredths of a second the queue should be
  *   flushed even if it is not full yet.
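
The intent of NLMSG_GOODSIZE (used here and in the ebt_ulog and nfnetlink_log changes in this commit) is exactly what the comment above describes: a default just under a page, so that alloc_skb(), after adding struct skb_shared_info, still comes out of an order-0 allocation. Purely as an assumption for illustration, the value is on the order of:

        /* illustrative only - see <linux/netlink.h> for the real definition */
        n = PAGE_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
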
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
 
 #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
 
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
 module_param(nlbufsiz, uint, 0400);
 MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
 
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data)
 static struct sk_buff *ulog_alloc_skb(unsigned int size)
 {
        struct sk_buff *skb;
+       unsigned int n;
 
        /* alloc skb which should be big enough for a whole
         * multipart message. WARNING: has to be <= 131000
         * due to slab allocator restrictions */
 
-       skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+       n = max(size, nlbufsiz);
+       skb = alloc_skb(n, GFP_ATOMIC);
        if (!skb) {
-               PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n",
-                       nlbufsiz);
+               PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
 
-               /* try to allocate only as much as we need for 
-                * current packet */
+               if (n > size) {
+                       /* try to allocate only as much as we need for 
+                        * current packet */
 
-               skb = alloc_skb(size, GFP_ATOMIC);
-               if (!skb)
-                       PRINTR("ipt_ULOG: can't even allocate %ub\n", size);
+                       skb = alloc_skb(size, GFP_ATOMIC);
+                       if (!skb)
+                               PRINTR("ipt_ULOG: can't even allocate %ub\n",
+                                      size);
+               }
        }
 
        return skb;
index 18ca8258a1c597c170fd718f5c8556a1eaefb382..5a7a265280f927a4ccac41140e7b682c8d3f3fe9 100644 (file)
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL");
 static inline int
 match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
 {
-#define MATCH(x,y)     (!e->match.x || ((e->x == (y)) ^ e->invert.x))
+#define MATCH_ADDR(x,y,z)      (!e->match.x ||                              \
+                                ((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \
+                                 ^ e->invert.x))
+#define MATCH(x,y)             (!e->match.x || ((e->x == (y)) ^ e->invert.x))
 
-       return MATCH(saddr, x->props.saddr.a4 & e->smask) &&
-              MATCH(daddr, x->id.daddr.a4 & e->dmask) &&
+       return MATCH_ADDR(saddr, smask, x->props.saddr.a4) &&
+              MATCH_ADDR(daddr, dmask, x->id.daddr.a4) &&
               MATCH(proto, x->id.proto) &&
               MATCH(mode, x->props.mode) &&
               MATCH(spi, x->id.spi) &&
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
                        return 0;
        }
 
-       return strict ? 1 : 0;
+       return strict ? i == info->len : 0;
 }
 
 static int match(const struct sk_buff *skb,
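
The behavioural part of the policy-match fixes (the same change appears in the IPv6 variant further down) is the strict-mode return: previously a strict match could succeed as long as no configured element mismatched, even when the packet carried fewer transforms than the rule lists. Requiring the loop counter to reach info->len means every configured element was actually consumed:

        return strict ? i == info->len : 0;     /* all elements matched, not merely none failed */
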
index 39d49dc333a7f0dc47e1bbd1f8d7a7c02c3f2cfb..1b167c4bb3beb0254f446a6ab21ca41fd87a0455 100644 (file)
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
        int res = 0;
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
index 847068fd33676cfd3b6bc510be992baf4ab8d06d..74ff56c322f47ee45b3b873e4672d0bc9c13511a 100644 (file)
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len)
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;
 
+       /* overflow check */
+       if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+                       SMP_CACHE_BYTES)
+               return -ENOMEM;
+       if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+               return -ENOMEM;
+
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;
index afe1cc4c18a5bffc734d6011b1dfddd395007158..3d39ec924041a4883fb86f7e90a3db9c912f8976 100644 (file)
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
 static inline int
 match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
 {
-#define MATCH_ADDR(x,y,z)      (!e->match.x || \
-                                ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x)
+#define MATCH_ADDR(x,y,z)      (!e->match.x ||                                \
+                                ((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \
+                                 ^ e->invert.x))
 #define MATCH(x,y)             (!e->match.x || ((e->x == (y)) ^ e->invert.x))
        
        return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
                        return 0;
        }
 
-       return strict ? 1 : 0;
+       return strict ? i == info->len : 0;
 }
 
 static int match(const struct sk_buff *skb,
index 50a13e75d70ec5fecd20269c8c4e90bc523164cc..4238b1ed886012a331b1c5dd16ddffa1ea2eaaae 100644 (file)
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
        int res = 0;
        int cpu;
 
-       for (cpu=0; cpu<NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
index 62bb509f05d4fc4bf32a7b0eb9db0ff0c8e972c5..0ce337a1d974da68634b351c84b09e815a968244 100644 (file)
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
 struct nf_conntrack_protocol *
 __nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
 {
-       if (unlikely(nf_ct_protos[l3proto] == NULL))
+       if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
                return &nf_conntrack_generic_protocol;
 
        return nf_ct_protos[l3proto][protocol];
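
nf_ct_protos[] is indexed by layer-3 protocol family and sized by the number of address families, so an l3proto value derived from a tuple has to be range-checked before it is used as an index; out-of-range families now fall back to the generic protocol handler just like unregistered ones:

        if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
                return &nf_conntrack_generic_protocol; /* unknown family or no l4 table */
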
index ab0c920f0d30bbf840d731f8c6baddaab7c3f089..6f210f399762d8196630e2c8a087dc0f548a759d 100644 (file)
@@ -657,8 +657,6 @@ static int __init init(void)
        /* FIXME should be configurable whether IPv4 and IPv6 FTP connections
                 are tracked or not - YK */
        for (i = 0; i < ports_c; i++) {
-               memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
-
                ftp[i][0].tuple.src.l3num = PF_INET;
                ftp[i][1].tuple.src.l3num = PF_INET6;
                for (j = 0; j < 2; j++) {
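
The removed memset in the FTP helper init loop was both redundant and undersized: ftp[] is apparently a static two-column array of helpers, so it is already zero-filled when the module loads, and the memset only covered the first helper of each IPv4/IPv6 pair anyway. Roughly what the storage looks like (assumed declaration, suggested by the ftp[i][0]/ftp[i][1] accesses above):

        static struct nf_conntrack_helper ftp[MAX_PORTS][2];   /* zeroed static storage */
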
index 73ab16bc7d4052d155257686b09cd103da3a960d..9ff3463037e1d0b080820c4457d488a1dd7e6bdc 100644 (file)
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
 
        b = skb->tail;
 
-       type |= NFNL_SUBSYS_CTNETLINK << 8;
+       type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
        nlh   = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
 
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
 };
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
 static int __init ctnetlink_init(void)
 {
index e10512e229b60e2c3cc216f01a5550a9ca7ba006..3b3c781b40c067f62b19f47744297c4a6cd747f9 100644 (file)
@@ -37,7 +37,7 @@
 #include "../bridge/br_private.h"
 #endif
 
-#define NFULNL_NLBUFSIZ_DEFAULT        4096
+#define NFULNL_NLBUFSIZ_DEFAULT        NLMSG_GOODSIZE
 #define NFULNL_TIMEOUT_DEFAULT         100     /* every second */
 #define NFULNL_QTHRESH_DEFAULT         100     /* 100 packets */
 
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
                                        unsigned int pkt_size)
 {
        struct sk_buff *skb;
+       unsigned int n;
 
        UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
 
        /* alloc skb which should be big enough for a whole multipart
         * message.  WARNING: has to be <= 128k due to slab restrictions */
 
-       skb = alloc_skb(inst_size, GFP_ATOMIC);
+       n = max(inst_size, pkt_size);
+       skb = alloc_skb(n, GFP_ATOMIC);
        if (!skb) {
                PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
                        inst_size);
 
-               /* try to allocate only as much as we need for current
-                * packet */
+               if (n > pkt_size) {
+                       /* try to allocate only as much as we need for current
+                        * packet */
 
-               skb = alloc_skb(pkt_size, GFP_ATOMIC);
-               if (!skb)
-                       PRINTR("nfnetlink_log: can't even alloc %u bytes\n",
-                               pkt_size);
+                       skb = alloc_skb(pkt_size, GFP_ATOMIC);
+                       if (!skb)
+                               PRINTR("nfnetlink_log: can't even alloc %u "
+                                      "bytes\n", pkt_size);
+               }
        }
 
        return skb;
index 18ed9c5d209ca8e13b1cfc7330862fc3534cd275..cac38b2e147aec5cf1aa10707753150585dc9d9f 100644 (file)
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
        }
 
        if (nfqa[NFQA_MARK-1])
-               skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1]));
+               entry->skb->nfmark = ntohl(*(u_int32_t *)
+                                          NFA_DATA(nfqa[NFQA_MARK-1]));
                
        issue_verdict(entry, verdict);
        instance_put(queue);
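
Two sk_buffs are live in nfqnl_recv_verdict(): skb is the netlink message carrying the verdict, while entry->skb is the queued packet the verdict applies to. The NFQA_MARK attribute belongs on the latter, which is what the fix changes:

        if (nfqa[NFQA_MARK-1])
                entry->skb->nfmark = ntohl(*(u_int32_t *)      /* mark the queued packet, */
                                           NFA_DATA(nfqa[NFQA_MARK-1])); /* not the netlink skb */
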
index b38a263853c32038b07fe7dcb9dae457d4456722..a00851f981dbfcb17b26f64d7bfc2a5e64e31a5d 100644 (file)
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
        int cpu;
        int counter = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_cpu(cpu)
                counter += per_cpu(sockets_in_use, cpu);
 
        /* It can be negative, by the way. 8) */
index 5760e057ecbac174679bedfe60de520645ec659a..d64aae85c3788e10036b772baed84217d08d1bd2 100644 (file)
@@ -123,7 +123,17 @@ KBUILD_HAVE_NLS := $(shell \
      then echo yes ; \
      else echo no ; fi)
 ifeq ($(KBUILD_HAVE_NLS),no)
-HOSTCFLAGS     += -DKBUILD_NO_NLS
+  HOSTCFLAGS   += -DKBUILD_NO_NLS
+else
+  KBUILD_NEED_LINTL := $(shell \
+    if echo -e "\#include <libintl.h>\nint main(int a, char** b) { gettext(\"\"); return 0; }\n" | \
+      $(HOSTCC) $(HOSTCFLAGS) -x c - -o /dev/null> /dev/null 2>&1 ; \
+    then echo no ; \
+    else echo yes ; fi)
+  ifeq ($(KBUILD_NEED_LINTL),yes)
+    HOSTLOADLIBES_conf += -lintl
+    HOSTLOADLIBES_mconf        += -lintl
+  endif
 endif
 
 # generated files seem to need this to find local include files
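
The new probe compiles, with $(HOSTCC), roughly the throwaway program below; if it links without -lintl the host libc provides gettext(), otherwise -lintl is appended to the conf/mconf host link flags. The equivalent standalone test program:

        #include <libintl.h>

        int main(void)
        {
                gettext("");    /* link test only; the result is ignored */
                return 0;
        }
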
index b59582b92283eb667fe7cab218da01af215a3628..502f78f13f5f762d84267c3ec45feea3c7e33887 100644 (file)
@@ -1,6 +1,6 @@
 config SECURITY_SELINUX
        bool "NSA SELinux Support"
-       depends on SECURITY && NET && INET
+       depends on SECURITY_NETWORK && NET && INET
        default n
        help
          This selects NSA Security-Enhanced Linux (SELinux).
index 06d54d9d20a5049e25911e33668dc3ed1a2dd9c4..688c0a267b62060de2f0b4172b8fe09fe5ff7361 100644 (file)
@@ -4,9 +4,7 @@
 
 obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
 
-selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o
-
-selinux-$(CONFIG_SECURITY_NETWORK) += netif.o
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o
 
 selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
 
index 4ae834d89bce9e81a98c8be4a0056341603ffded..b7773bf68efa8fb104c45312b8a385cbb701d499 100644 (file)
@@ -232,7 +232,6 @@ static void superblock_free_security(struct super_block *sb)
        kfree(sbsec);
 }
 
-#ifdef CONFIG_SECURITY_NETWORK
 static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
 {
        struct sk_security_struct *ssec;
@@ -261,7 +260,6 @@ static void sk_free_security(struct sock *sk)
        sk->sk_security = NULL;
        kfree(ssec);
 }
-#endif /* CONFIG_SECURITY_NETWORK */
 
 /* The security server must be initialized before
    any labeling or access decisions can be provided. */
@@ -2736,8 +2734,6 @@ static void selinux_task_to_inode(struct task_struct *p,
        return;
 }
 
-#ifdef CONFIG_SECURITY_NETWORK
-
 /* Returns error only if unable to parse addresses */
 static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
 {
@@ -3556,15 +3552,6 @@ static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
 
 #endif /* CONFIG_NETFILTER */
 
-#else
-
-static inline int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
-{
-       return 0;
-}
-
-#endif /* CONFIG_SECURITY_NETWORK */
-
 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
        struct task_security_struct *tsec;
@@ -4340,7 +4327,6 @@ static struct security_operations selinux_ops = {
        .getprocattr =                  selinux_getprocattr,
        .setprocattr =                  selinux_setprocattr,
 
-#ifdef CONFIG_SECURITY_NETWORK
         .unix_stream_connect =         selinux_socket_unix_stream_connect,
        .unix_may_send =                selinux_socket_unix_may_send,
 
@@ -4362,7 +4348,6 @@ static struct security_operations selinux_ops = {
        .sk_alloc_security =            selinux_sk_alloc_security,
        .sk_free_security =             selinux_sk_free_security,
        .sk_getsid =                    selinux_sk_getsid_security,
-#endif
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
        .xfrm_policy_alloc_security =   selinux_xfrm_policy_alloc,
@@ -4440,7 +4425,7 @@ next_sb:
    all processes and objects when they are created. */
 security_initcall(selinux_init);
 
-#if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_NETFILTER)
+#if defined(CONFIG_NETFILTER)
 
 static struct nf_hook_ops selinux_ipv4_op = {
        .hook =         selinux_ipv4_postroute_last,
@@ -4501,13 +4486,13 @@ static void selinux_nf_ip_exit(void)
 }
 #endif
 
-#else /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */
+#else /* CONFIG_NETFILTER */
 
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 #define selinux_nf_ip_exit()
 #endif
 
-#endif /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */
+#endif /* CONFIG_NETFILTER */
 
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 int selinux_disable(void)