www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'upstream-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/linvil...
author    Jeff Garzik <jeff@garzik.org>
          Tue, 28 Feb 2006 23:04:30 +0000 (18:04 -0500)
committer Jeff Garzik <jeff@garzik.org>
          Tue, 28 Feb 2006 23:04:30 +0000 (18:04 -0500)
42 files changed:
arch/i386/kernel/acpi/boot.c
arch/mips/lib/iomap.c
arch/mips/mm/c-r4k.c
arch/powerpc/kernel/vdso64/gettimeofday.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/eeh_driver.c
arch/powerpc/platforms/pseries/pci_dlpar.c
arch/x86_64/kernel/apic.c
block/cfq-iosched.c
drivers/net/8139cp.c
drivers/net/via-velocity.c
drivers/scsi/libata-core.c
drivers/scsi/libata-scsi.c
drivers/scsi/libata.h
drivers/serial/sunsu.c
drivers/usb/gadget/lh7a40x_udc.c
drivers/usb/gadget/rndis.c
drivers/usb/host/pci-quirks.c
drivers/usb/input/hid-core.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/visor.c
drivers/usb/serial/visor.h
drivers/usb/storage/unusual_devs.h
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/quota/xfs_qm.c
fs/xfs/xfs_rtalloc.c
include/asm-mips/io.h
include/asm-powerpc/eeh.h
include/asm-x86_64/acpi.h
include/linux/netfilter_bridge/ebt_log.h
include/linux/netfilter_ipv4/ipt_LOG.h
include/linux/netfilter_ipv6/ip6t_LOG.h
include/net/xfrm.h
net/bridge/netfilter/ebt_log.c
net/core/request_sock.c
net/ipv4/esp4.c
net/ipv4/netfilter/ipt_LOG.c
net/ipv6/netfilter/ip6t_LOG.c
net/netfilter/nf_queue.c
net/xfrm/xfrm_policy.c

index 8309a7b2cd63fc7640200e2f156ff119baabfc70..79577f0ace98847ee39a4f7ad4c9842f3ea88ec6 100644 (file)
@@ -44,6 +44,9 @@ extern void __init clustered_apic_check(void);
 extern int gsi_irq_sharing(int gsi);
 #include <asm/proto.h>
 
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
+
+
 #else                          /* X86 */
 
 #ifdef CONFIG_X86_LOCAL_APIC
index 7e2ced715cfbdc4e1f925898a5df1c177b8bf005..f4ac5bbcd81f17441a559c471d4615497514f5f8 100644 (file)
@@ -63,7 +63,7 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
                return ioport_map(start, len);
        if (flags & IORESOURCE_MEM) {
                if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap_cacheable_cow(start, len);
+                       return ioremap_cachable(start, len);
                return ioremap_nocache(start, len);
        }
 
index 1b71d91e82689f9f0effd43b5bbafdff5c96a565..0668e9bfce413c97fa23f102708ec6f9c002452c 100644 (file)
@@ -235,7 +235,9 @@ static inline void r4k_blast_scache_page_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
-       if (sc_lsize == 16)
+       if (scache_size == 0)
+               r4k_blast_scache_page = (void *)no_sc_noop;
+       else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
@@ -251,7 +253,9 @@ static inline void r4k_blast_scache_page_indexed_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
-       if (sc_lsize == 16)
+       if (scache_size == 0)
+               r4k_blast_scache_page_indexed = (void *)no_sc_noop;
+       else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
@@ -267,7 +271,9 @@ static inline void r4k_blast_scache_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
-       if (sc_lsize == 16)
+       if (scache_size == 0)
+               r4k_blast_scache = (void *)no_sc_noop;
+       else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
@@ -482,7 +488,7 @@ static inline void local_r4k_flush_icache_range(void *args)
                        protected_blast_dcache_range(start, end);
                }
 
-               if (!cpu_icache_snoops_remote_store) {
+               if (!cpu_icache_snoops_remote_store && scache_size) {
                        if (end - start > scache_size)
                                r4k_blast_scache();
                        else
@@ -651,7 +657,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
        R4600_HIT_CACHEOP_WAR_IMPL;
        protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
-       if (!cpu_icache_snoops_remote_store)
+       if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
index ccaeda5136d17e63a71c6d1d64209e2d8d515694..4ee871f1cadbc776f6c5a512f74a86b27220b2d6 100644 (file)
@@ -225,9 +225,9 @@ V_FUNCTION_BEGIN(__do_get_xsec)
   .cfi_startproc
        /* check for update count & load values */
 1:     ld      r8,CFG_TB_UPDATE_COUNT(r3)
-       andi.   r0,r4,1                 /* pending update ? loop */
+       andi.   r0,r8,1                 /* pending update ? loop */
        bne-    1b
-       xor     r0,r4,r4                /* create dependency */
+       xor     r0,r8,r8                /* create dependency */
        add     r3,r3,r0
 
        /* Get TB & offset it */
index b1f614c612dd136963cf4c480b90f306fb848b06..e9d589eefc14ce07aa5f670c036b35bfc3124328 100644 (file)
@@ -169,7 +169,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #ifdef CONFIG_PPC_ISERIES
                if (_machine == PLATFORM_ISERIES_LPAR)
                        ret = iSeries_hpte_insert(hpteg, va,
-                                                 virt_to_abs(paddr),
+                                                 __pa(vaddr),
                                                  tmp_mode,
                                                  HPTE_V_BOLTED,
                                                  psize);
index 83578313ee7e7e635fb07663df1e64ab080dff94..2ab9dcdfb41579704c55d8c1a4ddc918e832d6ee 100644 (file)
@@ -893,6 +893,20 @@ void eeh_add_device_tree_early(struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
 
+void eeh_add_device_tree_late(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               eeh_add_device_late(dev);
+               if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+                       struct pci_bus *subbus = dev->subordinate;
+                       if (subbus)
+                               eeh_add_device_tree_late(subbus);
+               }
+       }
+}
+
 /**
  * eeh_add_device_late - perform EEH initialization for the indicated pci device
  * @dev: pci device for which to set up EEH
index e3cbba49fd6e83466be3074294e543ccc576e768..b811d5ff92feea02696adabf0313c9c45a7c40f2 100644 (file)
@@ -37,7 +37,7 @@
 
 static inline const char * pcid_name (struct pci_dev *pdev)
 {
-       if (pdev->dev.driver)
+       if (pdev && pdev->dev.driver)
                return pdev->dev.driver->name;
        return "";
 }
index bdaa8aabdaa64c4388c5ca4d933464a0293aa6f0..f3bad900bbcf81a236b3f9c7f081ef97477bc16e 100644 (file)
@@ -106,6 +106,8 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
                        }
                }
        }
+
+       eeh_add_device_tree_late(bus);
 }
 EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
 
@@ -114,7 +116,6 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
 {
        u8 sec_busno;
        struct pci_bus *child_bus;
-       struct pci_dev *child_dev;
 
        /* Get busno of downstream bus */
        pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);
@@ -129,10 +130,6 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
 
        pci_scan_child_bus(child_bus);
 
-       list_for_each_entry(child_dev, &child_bus->devices, bus_list) {
-               eeh_add_device_late(child_dev);
-       }
-
        /* Fixup new pci devices without touching bus struct */
        pcibios_fixup_new_pci_devices(child_bus, 0);
 
@@ -160,18 +157,25 @@ pcibios_add_pci_devices(struct pci_bus * bus)
 
        eeh_add_device_tree_early(dn);
 
-       /* pci_scan_slot should find all children */
-       slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
-       num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
-       if (num) {
-               pcibios_fixup_new_pci_devices(bus, 1);
-               pci_bus_add_devices(bus);
-       }
+       if (_machine == PLATFORM_PSERIES_LPAR) {
+               /* use ofdt-based probe */
+               of_scan_bus(dn, bus);
+               if (!list_empty(&bus->devices)) {
+                       pcibios_fixup_new_pci_devices(bus, 0);
+                       pci_bus_add_devices(bus);
+               }
+       } else {
+               /* use legacy probe */
+               slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
+               num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
+               if (num) {
+                       pcibios_fixup_new_pci_devices(bus, 1);
+                       pci_bus_add_devices(bus);
+               }
 
-       list_for_each_entry(dev, &bus->devices, bus_list) {
-               eeh_add_device_late (dev);
-               if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
-                       pcibios_pci_config_bridge(dev);
+               list_for_each_entry(dev, &bus->devices, bus_list)
+                       if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+                               pcibios_pci_config_bridge(dev);
        }
 }
 EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
index d70605eda3339904b2349edb57e61cab4ce6228c..e5b14c57eaa0644fa8e4106a445b58e3d1f51e1d 100644 (file)
@@ -962,14 +962,12 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
        irq_exit();
 }
 
-int __initdata unsync_tsc_on_multicluster;
-
 /*
  * oem_force_hpet_timer -- force HPET mode for some boxes.
  *
  * Thus far, the major user of this is IBM's Summit2 series:
  *
- * Some clustered boxes may have unsynced TSC problems if they are
+ * Clustered boxes may have unsynced TSC problems if they are
  * multi-chassis. Use available data to take a good guess.
  * If in doubt, go HPET.
  */
@@ -979,11 +977,6 @@ __cpuinit int oem_force_hpet_timer(void)
        unsigned id;
        DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
 
-       /* Only do this check on IBM machines - big Unisys systems
-          use multiple clusters too, but have synchronized TSC */
-       if (!unsync_tsc_on_multicluster)
-               return 0;
-
        bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
        for (i = 0; i < NR_CPUS; i++) {
index 74fae2daf87e177679a64bd9810965c57ea89514..c8dbe38c81c80bf1544901bb7b16ac6cd73337e0 100644 (file)
@@ -239,7 +239,6 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_fifo_expire,
        CFQ_CFQQ_FLAG_idle_window,
        CFQ_CFQQ_FLAG_prio_changed,
-       CFQ_CFQQ_FLAG_expired,
 };
 
 #define CFQ_CFQQ_FNS(name)                                             \
@@ -264,7 +263,6 @@ CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(expired);
 #undef CFQ_CFQQ_FNS
 
 enum cfq_rq_state_flags {
@@ -336,7 +334,7 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-       if (!cfqd->rq_in_driver && cfqd->busy_queues)
+       if (cfqd->busy_queues)
                kblockd_schedule_work(&cfqd->unplug_work);
 }
 
@@ -736,12 +734,62 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                cfqq->slice_left = 0;
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
-               cfq_clear_cfqq_expired(cfqq);
        }
 
        cfqd->active_queue = cfqq;
 }
 
+/*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                   int preempted)
+{
+       unsigned long now = jiffies;
+
+       if (cfq_cfqq_wait_request(cfqq))
+               del_timer(&cfqd->idle_slice_timer);
+
+       if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
+               cfqq->service_last = now;
+               cfq_schedule_dispatch(cfqd);
+       }
+
+       cfq_clear_cfqq_must_dispatch(cfqq);
+       cfq_clear_cfqq_wait_request(cfqq);
+
+       /*
+        * store what was left of this slice, if the queue idled out
+        * or was preempted
+        */
+       if (time_after(cfqq->slice_end, now))
+               cfqq->slice_left = cfqq->slice_end - now;
+       else
+               cfqq->slice_left = 0;
+
+       if (cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, preempted);
+
+       if (cfqq == cfqd->active_queue)
+               cfqd->active_queue = NULL;
+
+       if (cfqd->active_cic) {
+               put_io_context(cfqd->active_cic->ioc);
+               cfqd->active_cic = NULL;
+       }
+
+       cfqd->dispatch_slice = 0;
+}
+
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
+{
+       struct cfq_queue *cfqq = cfqd->active_queue;
+
+       if (cfqq)
+               __cfq_slice_expired(cfqd, cfqq, preempted);
+}
+
 /*
  * 0
  * 0,1
@@ -801,16 +849,7 @@ static int cfq_get_next_prio_level(struct cfq_data *cfqd)
 
 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 {
-       struct cfq_queue *cfqq;
-
-       /*
-        * if current queue is expired but not done with its requests yet,
-        * wait for that to happen
-        */
-       if ((cfqq = cfqd->active_queue) != NULL) {
-               if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
-                       return NULL;
-       }
+       struct cfq_queue *cfqq = NULL;
 
        /*
         * if current list is non-empty, grab first entry. if it is empty,
@@ -837,66 +876,11 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
        return cfqq;
 }
 
-/*
- * current cfqq expired its slice (or was too idle), select new one
- */
-static void
-__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                   int preempted)
-{
-       unsigned long now = jiffies;
-
-       if (cfq_cfqq_wait_request(cfqq))
-               del_timer(&cfqd->idle_slice_timer);
-
-       if (!preempted && !cfq_cfqq_dispatched(cfqq))
-               cfqq->service_last = now;
-
-       cfq_clear_cfqq_must_dispatch(cfqq);
-       cfq_clear_cfqq_wait_request(cfqq);
-
-       /*
-        * store what was left of this slice, if the queue idled out
-        * or was preempted
-        */
-       if (time_after(cfqq->slice_end, now))
-               cfqq->slice_left = cfqq->slice_end - now;
-       else
-               cfqq->slice_left = 0;
-
-       if (cfq_cfqq_on_rr(cfqq))
-               cfq_resort_rr_list(cfqq, preempted);
-
-       if (cfqq == cfqd->active_queue)
-               cfqd->active_queue = NULL;
-
-       if (cfqd->active_cic) {
-               put_io_context(cfqd->active_cic->ioc);
-               cfqd->active_cic = NULL;
-       }
-
-       cfqd->dispatch_slice = 0;
-}
-
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
-{
-       struct cfq_queue *cfqq = cfqd->active_queue;
-
-       if (cfqq) {
-               /*
-                * use deferred expiry, if there are requests in progress as
-                * not to disturb the slice of the next queue
-                */
-               if (cfq_cfqq_dispatched(cfqq))
-                       cfq_mark_cfqq_expired(cfqq);
-               else
-                       __cfq_slice_expired(cfqd, cfqq, preempted);
-       }
-}
-
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 {
+       unsigned long sl;
+
        WARN_ON(!RB_EMPTY(&cfqq->sort_list));
        WARN_ON(cfqq != cfqd->active_queue);
 
@@ -916,13 +900,8 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);
 
-       if (!timer_pending(&cfqd->idle_slice_timer)) {
-               unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
-
-               cfqd->idle_slice_timer.expires = jiffies + slice_left;
-               add_timer(&cfqd->idle_slice_timer);
-       }
-
+       sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
+       mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
        return 1;
 }
 
@@ -1006,9 +985,6 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
        if (!cfqq)
                goto new_queue;
 
-       if (cfq_cfqq_expired(cfqq))
-               goto new_queue;
-
        /*
         * slice has expired
         */
@@ -1181,10 +1157,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
        BUG_ON(cfq_cfqq_on_rr(cfqq));
 
-       if (unlikely(cfqd->active_queue == cfqq)) {
+       if (unlikely(cfqd->active_queue == cfqq))
                __cfq_slice_expired(cfqd, cfqq, 0);
-               cfq_schedule_dispatch(cfqd);
-       }
 
        cfq_put_cfqd(cfqq->cfqd);
 
@@ -1245,10 +1219,8 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 
        spin_lock(q->queue_lock);
 
-       if (unlikely(cic->cfqq == cfqd->active_queue)) {
+       if (unlikely(cic->cfqq == cfqd->active_queue))
                __cfq_slice_expired(cfqd, cic->cfqq, 0);
-               cfq_schedule_dispatch(cfqd);
-       }
 
        cfq_put_queue(cic->cfqq);
        cic->cfqq = NULL;
@@ -1715,10 +1687,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
                        cfqq->service_last = now;
                        cfq_resort_rr_list(cfqq, 0);
                }
-               if (cfq_cfqq_expired(cfqq)) {
-                       __cfq_slice_expired(cfqd, cfqq, 0);
-                       cfq_schedule_dispatch(cfqd);
-               }
+               cfq_schedule_dispatch(cfqd);
        }
 
        if (cfq_crq_is_sync(crq))
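
The cfq_arm_slice_timer() hunk above replaces the timer_pending()/add_timer() pair with a single mod_timer() call, which (re)arms the timer whether or not it is already pending. As a rough illustration only (not part of this commit), arming a timer that way looks like the sketch below; demo_timer, demo_fn and demo_arm are hypothetical names.

/*
 * Illustrative sketch: arm an idle timer with mod_timer().
 * mod_timer() activates an inactive timer or updates the expiry of a
 * pending one, so no timer_pending() check is needed.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_fn(unsigned long data)
{
	/* timer expired: kick whatever deferred work is pending */
}

static void demo_init(void)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_fn;
	demo_timer.data = 0;
}

static void demo_arm(unsigned long delay_jiffies)
{
	mod_timer(&demo_timer, jiffies + delay_jiffies);
}
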
index f822cd3025ff07b154e48e7cd2d226e21f4f6898..dd410496aadbbe83fbb7ad6a99a25b414a38bc21 100644 (file)
@@ -1118,13 +1118,18 @@ err_out:
        return -ENOMEM;
 }
 
+static void cp_init_rings_index (struct cp_private *cp)
+{
+       cp->rx_tail = 0;
+       cp->tx_head = cp->tx_tail = 0;
+}
+
 static int cp_init_rings (struct cp_private *cp)
 {
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
 
-       cp->rx_tail = 0;
-       cp->tx_head = cp->tx_tail = 0;
+       cp_init_rings_index(cp);
 
        return cp_refill_rx (cp);
 }
@@ -1886,30 +1891,30 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
 
        spin_unlock_irqrestore (&cp->lock, flags);
 
-       if (cp->pdev && cp->wol_enabled) {
-               pci_save_state (cp->pdev);
-               cp_set_d3_state (cp);
-       }
+       pci_save_state(pdev);
+       pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
 }
 
 static int cp_resume (struct pci_dev *pdev)
 {
-       struct net_device *dev;
-       struct cp_private *cp;
+       struct net_device *dev = pci_get_drvdata (pdev);
+       struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
 
-       dev = pci_get_drvdata (pdev);
-       cp  = netdev_priv(dev);
+       if (!netif_running(dev))
+               return 0;
 
        netif_device_attach (dev);
-       
-       if (cp->pdev && cp->wol_enabled) {
-               pci_set_power_state (cp->pdev, PCI_D0);
-               pci_restore_state (cp->pdev);
-       }
-       
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_enable_wake(pdev, PCI_D0, 0);
+
+       /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
+       cp_init_rings_index (cp);
        cp_init_hw (cp);
        netif_start_queue (dev);
 
index c2d5907dc8e054edd6ecab18bdc954cfbfd1554b..ed1f837c8fda2274a3874855550f1d39e81f1bcd 100644 (file)
@@ -1106,6 +1106,9 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
        for (i = 0; i < vptr->options.numrx; i++) {
                struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
+               struct rx_desc *rd = vptr->rd_ring + i;
+
+               memset(rd, 0, sizeof(*rd));
 
                if (!rd_info->skb)
                        continue;
index 5f1d7580218dde548ccd0d8ba4f71589ed5efcc6..4f91b0dc572bb5b1aea66a6aed56df874808eac8 100644 (file)
@@ -82,6 +82,10 @@ int atapi_enabled = 0;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
 
+int libata_fua = 0;
+module_param_named(fua, libata_fua, int, 0444);
+MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
index 07b1e7cc61dfcbc0ab5ce762be6d83ecb4f1f1db..59503c9ccac9ff794a66d29c358f4d98c4b33ed0 100644 (file)
@@ -1708,6 +1708,8 @@ static int ata_dev_supports_fua(u16 *id)
 {
        unsigned char model[41], fw[9];
 
+       if (!libata_fua)
+               return 0;
        if (!ata_id_has_fua(id))
                return 0;
 
index e03ce48b7b4b633c8752319470cdb56105054659..fddaf479a5440ff08c12d6e22c49f9e373814731 100644 (file)
@@ -41,6 +41,7 @@ struct ata_scsi_args {
 
 /* libata-core.c */
 extern int atapi_enabled;
+extern int libata_fua;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
                                      struct ata_device *dev);
 extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
index 308704566948ea1a89a43fdad26810c6f7d09432..4e453fa966ae23956a370ffc24dc19a2f241a0fe 100644 (file)
@@ -299,13 +299,10 @@ static void sunsu_start_tx(struct uart_port *port)
 static void sunsu_stop_rx(struct uart_port *port)
 {
        struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
-       unsigned long flags;
 
-       spin_lock_irqsave(&up->port.lock, flags);
        up->ier &= ~UART_IER_RLSI;
        up->port.read_status_mask &= ~UART_LSR_DR;
        serial_out(up, UART_IER, up->ier);
-       spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static void sunsu_enable_ms(struct uart_port *port)
index e02fea5a54339da5de3351927f880a724e7a58e4..1a362c5e7f3d05c88fdfc66af743c30dfc067a91 100644 (file)
@@ -1062,11 +1062,11 @@ static int lh7a40x_ep_enable(struct usb_ep *_ep,
        ep->pio_irqs = 0;
        ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
 
+       spin_unlock_irqrestore(&ep->dev->lock, flags);
+
        /* Reset halt state (does flush) */
        lh7a40x_set_halt(_ep, 0);
 
-       spin_unlock_irqrestore(&ep->dev->lock, flags);
-
        DEBUG("%s: enabled %s\n", __FUNCTION__, _ep->name);
        return 0;
 }
@@ -1775,6 +1775,7 @@ static void lh7a40x_ep0_setup(struct lh7a40x_udc *dev, u32 csr)
                                        break;
 
                                qep = &dev->ep[ep_num];
+                               spin_unlock(&dev->lock);
                                if (ctrl.bRequest == USB_REQ_SET_FEATURE) {
                                        DEBUG_SETUP("SET_FEATURE (%d)\n",
                                                    ep_num);
@@ -1784,6 +1785,7 @@ static void lh7a40x_ep0_setup(struct lh7a40x_udc *dev, u32 csr)
                                                    ep_num);
                                        lh7a40x_set_halt(&qep->ep, 0);
                                }
+                               spin_lock(&dev->lock);
                                usb_set_index(0);
 
                                /* Reply with a ZLP on next IN token */
index 9689efeb364c20ab38a499d5106a0c122bb43d4c..6d6eaad73968caa6af45521858b4b1287231041e 100644 (file)
@@ -853,11 +853,14 @@ static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
        // DEBUG("%s: OID = %08X\n", __FUNCTION__, cpu_to_le32(buf->OID));
        if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
        
-       /* 
-        * we need more memory: 
-        * oid_supported_list is the largest answer 
+       /*
+        * we need more memory:
+        * gen_ndis_query_resp expects enough space for
+        * rndis_query_cmplt_type followed by data.
+        * oid_supported_list is the largest data reply
         */
-       r = rndis_add_response (configNr, sizeof (oid_supported_list));
+       r = rndis_add_response (configNr,
+               sizeof (oid_supported_list) + sizeof(rndis_query_cmplt_type));
        if (!r)
                return -ENOMEM;
        resp = (rndis_query_cmplt_type *) r->buf;
index 118288d944233bf4b54dd9209562836654f084d1..9e81c26313f91e90a0b7a06a75b9bd0649041ec2 100644 (file)
@@ -260,12 +260,13 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
                                                offset + EHCI_USBLEGCTLSTS,
                                                val | EHCI_USBLEGCTLSTS_SOOE);
 #endif
-                       }
 
-                       /* always say Linux will own the hardware
-                        * by setting EHCI_USBLEGSUP_OS.
-                        */
-                       pci_write_config_byte(pdev, offset + 3, 1);
+                               /* some systems get upset if this semaphore is
+                                * set for any other reason than forcing a BIOS
+                                * handoff..
+                                */
+                               pci_write_config_byte(pdev, offset + 3, 1);
+                       }
 
                        /* if boot firmware now owns EHCI, spin till
                         * it hands it over.
index 772478086bd3237a29cc6562d49ce5866e07ae56..07a012f88772f128ad7b38a027bcedd9167077b6 100644 (file)
@@ -1407,6 +1407,7 @@ void hid_init_reports(struct hid_device *hid)
 #define USB_VENDOR_ID_WISEGROUP                0x0925
 #define USB_DEVICE_ID_1_PHIDGETSERVO_20        0x8101
 #define USB_DEVICE_ID_4_PHIDGETSERVO_20        0x8104
+#define USB_DEVICE_ID_DUAL_USB_JOYPAD   0x8866
 
 #define USB_VENDOR_ID_CODEMERCS                0x07c0
 #define USB_DEVICE_ID_CODEMERCS_IOW40  0x1500
@@ -1577,6 +1578,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_USBHUB_KB, HID_QUIRK_NOGET},
        { USB_VENDOR_ID_HP, USB_DEVICE_ID_HP_USBHUB_KB, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_TANGTOP, USB_DEVICE_ID_TANGTOP_USBPS2, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 
        { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_POWERMOUSE, HID_QUIRK_2WHEEL_POWERMOUSE },
        { USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU, HID_QUIRK_2WHEEL_MOUSE_HACK_7 },
index f2b4ca8692d822c65bdc215a164fadd3d8bf9a73..c145e1ed8429d6bdf813bb534b441c2db1d149e0 100644 (file)
@@ -469,8 +469,14 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
        { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_MHAM_YS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y6_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y8_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_MHAM_IC_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_MHAM_DB9_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_MHAM_RS232_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y9_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_VCP_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_D2XX_PID) },
        { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
index ca40f16370f1106bb612e0dd87af0b0401d8c3b9..bdef3b8c731f1c22c5bcb5aa516bbd39d760ad15 100644 (file)
 
 /*
  * microHAM product IDs (http://www.microham.com).
- * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>.
+ * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
+ * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
+ * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
  */
+#define FTDI_MHAM_KW_PID 0xEEE8                /* USB-KW interface */
+#define FTDI_MHAM_YS_PID 0xEEE9                /* USB-YS interface */
 #define FTDI_MHAM_Y6_PID 0xEEEA                /* USB-Y6 interface */
 #define FTDI_MHAM_Y8_PID 0xEEEB                /* USB-Y8 interface */
+#define FTDI_MHAM_IC_PID 0xEEEC                /* USB-IC interface */
+#define FTDI_MHAM_DB9_PID 0xEEED       /* USB-DB9 interface */
+#define FTDI_MHAM_RS232_PID 0xEEEE     /* USB-RS232 interface */
+#define FTDI_MHAM_Y9_PID 0xEEEF                /* USB-Y9 interface */
 
 /*
  * Active Robots product ids.
index bce3d55affd8d1d410647191d754f1752ea4849d..11a48d8747526a31355eb306fdac9cbefa6103dd 100644 (file)
@@ -69,6 +69,8 @@ static struct usb_device_id id_table [] = {
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
        { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID),
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
+       { USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID),
+               .driver_info = (kernel_ulong_t)&palm_os_4_probe },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID),
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID),
@@ -139,6 +141,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
        { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID) },
        { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID) },
+       { USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID) },
index b84d1cb4c69393b40ede2929d9fd83746f533565..765118d83fb6c4a8ca693062a9d7e955348d38c6 100644 (file)
@@ -36,6 +36,9 @@
 #define PALM_ZIRE_ID                   0x0070
 #define PALM_M100_ID                   0x0080
 
+#define GSPDA_VENDOR_ID                0x115e
+#define GSPDA_XPLORE_M68_ID            0xf100
+
 #define SONY_VENDOR_ID                 0x054C
 #define SONY_CLIE_3_5_ID               0x0038
 #define SONY_CLIE_4_0_ID               0x0066
index e71c5ca1a07b6824c1a56dd4a54744dc80b9efa3..31ca92056c272840f61df0ae448eed26e4ab0d06 100644 (file)
@@ -753,6 +753,13 @@ UNUSUAL_DEV(  0x0693, 0x0005, 0x0100, 0x0100,
                "Flashgate",
                US_SC_SCSI, US_PR_BULK, NULL, 0 ), 
 
+/* Reported by David Hamilton <niftimusmaximus@lycos.com> */
+UNUSUAL_DEV(  0x069b, 0x3004, 0x0001, 0x0001,
+               "Thomson Multimedia Inc.",
+               "RCA RD1080 MP3 Player",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY ),
+
 UNUSUAL_DEV(  0x0781, 0x0001, 0x0200, 0x0200, 
                "Sandisk",
                "ImageMate SDDR-05a",
index 8f2beec526cfb58257dbf77b258384ce2310970b..74d8be87f983d4995c32bc87ebcc89ff33b577bb 100644 (file)
@@ -540,7 +540,7 @@ xfs_probe_cluster(
 
        /* First sum forwards in this page */
        do {
-               if (mapped != buffer_mapped(bh))
+               if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
index 53a00fb217fa53d1be2cf2a3676439c52a4e204c..7c0e39dc618983a356c551a71a6c44fd39eaf23f 100644 (file)
@@ -68,6 +68,9 @@ kmem_zone_t   *qm_dqzone;
 kmem_zone_t    *qm_dqtrxzone;
 STATIC kmem_shaker_t   xfs_qm_shaker;
 
+STATIC cred_t  xfs_zerocr;
+STATIC xfs_inode_t     xfs_zeroino;
+
 STATIC void    xfs_qm_list_init(xfs_dqlist_t *, char *, int);
 STATIC void    xfs_qm_list_destroy(xfs_dqlist_t *);
 
@@ -1393,8 +1396,6 @@ xfs_qm_qino_alloc(
        xfs_trans_t     *tp;
        int             error;
        unsigned long   s;
-       cred_t          zerocr;
-       xfs_inode_t     zeroino;
        int             committed;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
@@ -1406,11 +1407,9 @@ xfs_qm_qino_alloc(
                xfs_trans_cancel(tp, 0);
                return error;
        }
-       memset(&zerocr, 0, sizeof(zerocr));
-       memset(&zeroino, 0, sizeof(zeroino));
 
-       if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0,
-                                  &zerocr, 0, 1, ip, &committed))) {
+       if ((error = xfs_dir_ialloc(&tp, &xfs_zeroino, S_IFREG, 1, 0,
+                                  &xfs_zerocr, 0, 1, ip, &committed))) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT);
                return error;
index 06fc061c50fc99d1b6ef71685e53bd334aa76281..5b413946b1c5dad1e93c4cbbfed467f10938795d 100644 (file)
@@ -130,7 +130,8 @@ xfs_growfs_rt_alloc(
                /*
                 * Lock the inode.
                 */
-               if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip)))
+               if ((error = xfs_trans_iget(mp, tp, ino, 0,
+                                               XFS_ILOCK_EXCL, &ip)))
                        goto error_exit;
                XFS_BMAP_INIT(&flist, &firstblock);
                /*
@@ -170,8 +171,8 @@ xfs_growfs_rt_alloc(
                        /*
                         * Lock the bitmap inode.
                         */
-                       if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL,
-                                       &ip)))
+                       if ((error = xfs_trans_iget(mp, tp, ino, 0,
+                                                       XFS_ILOCK_EXCL, &ip)))
                                goto error_exit;
                        /*
                         * Get a buffer for the block.
@@ -2023,8 +2024,8 @@ xfs_growfs_rt(
                /*
                 * Lock out other callers by grabbing the bitmap inode lock.
                 */
-               if ((error = xfs_trans_iget(mp, tp, 0, mp->m_sb.sb_rbmino,
-                               XFS_ILOCK_EXCL, &ip)))
+               if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
+                                               XFS_ILOCK_EXCL, &ip)))
                        goto error_exit;
                ASSERT(ip == mp->m_rbmip);
                /*
@@ -2037,8 +2038,8 @@ xfs_growfs_rt(
                /*
                 * Get the summary inode into the transaction.
                 */
-               if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino,
-                               0, XFS_ILOCK_EXCL, &ip)))
+               if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0,
+                                               XFS_ILOCK_EXCL, &ip)))
                        goto error_exit;
                ASSERT(ip == mp->m_rsumip);
                /*
@@ -2158,10 +2159,9 @@ xfs_rtallocate_extent(
        /*
         * Lock out other callers by grabbing the bitmap inode lock.
         */
-       error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
-       if (error) {
+       if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
+                                       XFS_ILOCK_EXCL, &ip)))
                return error;
-       }
        sumbp = NULL;
        /*
         * Allocate by size, or near another block, or exactly at some block.
@@ -2221,10 +2221,9 @@ xfs_rtfree_extent(
        /*
         * Synchronize by locking the bitmap inode.
         */
-       error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
-       if (error) {
+       if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
+                                       XFS_ILOCK_EXCL, &ip)))
                return error;
-       }
 #if defined(__KERNEL__) && defined(DEBUG)
        /*
         * Check to see that this whole range is currently allocated.
@@ -2365,8 +2364,8 @@ xfs_rtpick_extent(
        __uint64_t      seq;            /* sequence number of file creation */
        __uint64_t      *seqp;          /* pointer to seqno in inode */
 
-       error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
-       if (error)
+       if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
+                                       XFS_ILOCK_EXCL, &ip)))
                return error;
        ASSERT(ip == mp->m_rbmip);
        seqp = (__uint64_t *)&ip->i_d.di_atime;
index 5a4c8a54b8f43801755e43b9b8a7b92cb35fb79a..8c011aa61afaa1711311cd09282f9e8f75730238 100644 (file)
@@ -282,6 +282,24 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
 #define ioremap_nocache(offset, size)                                  \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)
 
+/*
+ * ioremap_cachable -   map bus memory into CPU space
+ * @offset:         bus address of the memory
+ * @size:           size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cachable by
+ * the CPU.  Also enables full write-combining.  Useful for some
+ * memory-like regions on I/O busses.
+ */
+#define ioremap_cachable(offset, size)                                 \
+       __ioremap_mode((offset), (size), PAGE_CACHABLE_DEFAULT)
+
 /*
  * These two are MIPS specific ioremap variant.  ioremap_cacheable_cow
  * requests a cachable mapping, ioremap_uncached_accelerated requests a
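
The include/asm-mips/io.h hunk above adds an ioremap_cachable() helper built on __ioremap_mode(). As a rough illustration only (not part of this commit), a hypothetical MIPS driver could map a memory-like region with it as in the sketch below; DEMO_MEM_BASE and DEMO_MEM_SIZE are made-up values.

/*
 * Illustrative sketch: map a memory-like bus region cachable and
 * unmap it again.  Addresses and sizes here are hypothetical.
 */
#include <linux/errno.h>
#include <asm/io.h>

#define DEMO_MEM_BASE	0x1f000000UL	/* hypothetical bus address */
#define DEMO_MEM_SIZE	0x10000UL

static void __iomem *demo_base;

static int demo_map(void)
{
	demo_base = ioremap_cachable(DEMO_MEM_BASE, DEMO_MEM_SIZE);
	if (!demo_base)
		return -ENOMEM;
	return 0;
}

static void demo_unmap(void)
{
	iounmap(demo_base);
}
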
index b263fb2fa6e4376312677746cc00a10911f676df..7dfb408fe2cac07830c3ed552aeeee3a8cafd8e2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/string.h>
 
 struct pci_dev;
+struct pci_bus;
 struct device_node;
 
 #ifdef CONFIG_EEH
@@ -61,7 +62,7 @@ void __init pci_addr_cache_build(void);
  */
 void eeh_add_device_early(struct device_node *);
 void eeh_add_device_tree_early(struct device_node *);
-void eeh_add_device_late(struct pci_dev *);
+void eeh_add_device_tree_late(struct pci_bus *);
 
 /**
  * eeh_remove_device - undo EEH setup for the indicated pci device
@@ -116,12 +117,12 @@ static inline void pci_addr_cache_build(void) { }
 
 static inline void eeh_add_device_early(struct device_node *dn) { }
 
-static inline void eeh_add_device_late(struct pci_dev *dev) { }
-
 static inline void eeh_remove_device(struct pci_dev *dev) { }
 
 static inline void eeh_add_device_tree_early(struct device_node *dn) { }
 
+static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
 static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
 #define EEH_POSSIBLE_ERROR(val, type) (0)
 #define EEH_IO_ERROR_VALUE(size) (-1UL)
index e2b9923189a0fc578a202f6d7b45ad10161ca80d..aa1c7b2e438cb36d3c9756b153d37a04c2570e9a 100644 (file)
@@ -164,20 +164,6 @@ extern u8 x86_acpiid_to_apicid[];
 
 extern int acpi_skip_timer_override;
 
-extern int unsync_tsc_on_multicluster;
-
-static inline int acpi_madt_oem_check(char *oem, char *productid) 
-{ 
-       /* Copied from i386. Probably has too many entries. */
-       if (!strncmp(oem, "IBM ENSW", 8) && 
-               (!strncmp(productid, "VIGIL SMP", 9) 
-                       || !strncmp(productid, "EXA", 3)
-                       || !strncmp(productid, "RUTHLESS SMP", 12))) {
-               unsync_tsc_on_multicluster = 1;
-        }
-        return 0;
-}
-
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
index 358fbc84fb59c52b391aaa4b831a297f6a4db0d3..96e231ae75548a3188b77b5708567cb99c75e0e4 100644 (file)
@@ -3,6 +3,7 @@
 
 #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
 #define EBT_LOG_ARP 0x02
+#define EBT_LOG_NFLOG 0x04
 #define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP)
 #define EBT_LOG_PREFIX_SIZE 30
 #define EBT_LOG_WATCHER "log"
index 22d16177319b9d46a335d11ede61f86c9fb28b8d..892f9a33fea88eff56fcb94ae9247705a3a44814 100644 (file)
@@ -6,7 +6,8 @@
 #define IPT_LOG_TCPOPT         0x02    /* Log TCP options */
 #define IPT_LOG_IPOPT          0x04    /* Log IP options */
 #define IPT_LOG_UID            0x08    /* Log UID owning local socket */
-#define IPT_LOG_MASK           0x0f
+#define IPT_LOG_NFLOG          0x10    /* Log using nf_log backend */
+#define IPT_LOG_MASK           0x1f
 
 struct ipt_log_info {
        unsigned char level;
index 9008ff5c40aec70e209f8f9efd036e34db1621ab..060c1a1c6c603ab19231b34d0e7325912d8f4b77 100644 (file)
@@ -6,7 +6,8 @@
 #define IP6T_LOG_TCPOPT                0x02    /* Log TCP options */
 #define IP6T_LOG_IPOPT         0x04    /* Log IP options */
 #define IP6T_LOG_UID           0x08    /* Log UID owning local socket */
-#define IP6T_LOG_MASK          0x0f
+#define IP6T_LOG_NFLOG         0x10    /* Log using nf_log backend */
+#define IP6T_LOG_MASK          0x1f
 
 struct ip6t_log_info {
        unsigned char level;
index 004e645f3e1829a81b39ed67918a243f66ef049e..8d362c49b8a9ecb9f00539a0ed1d292f0068bb3a 100644 (file)
@@ -233,7 +233,6 @@ struct xfrm_type
        int                     (*init_state)(struct xfrm_state *x);
        void                    (*destructor)(struct xfrm_state *);
        int                     (*input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb);
-       int                     (*post_input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb);
        int                     (*output)(struct xfrm_state *, struct sk_buff *pskb);
        /* Estimate maximal size of result of transformation of a dgram */
        u32                     (*get_max_size)(struct xfrm_state *, int size);
index 0128fbbe23281241d2929ab9ffd6eb74d71d67d9..288ff1d4ccc47fc453ba1cb6b605581da8722358 100644 (file)
@@ -166,7 +166,12 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
        li.u.log.level = info->loglevel;
        li.u.log.logflags = info->bitmask;
 
-       nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, info->prefix);
+       if (info->bitmask & EBT_LOG_NFLOG)
+               nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
+                             info->prefix);
+       else
+               ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
+                              info->prefix);
 }
 
 static struct ebt_watcher log =
index b8203de5ff073c4e3bda5166ef1542b793141a0c..98f0fc923f9195326ed9d0fe538e58fd7528d8d7 100644 (file)
@@ -52,7 +52,6 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
        rwlock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = queue->rskq_accept_head = NULL;
-       queue->rskq_defer_accept = 0;
        lopt->nr_table_entries = nr_table_entries;
 
        write_lock_bh(&queue->syn_wait_lock);
index 73bfcae8af9c367b3d88ffdf9f8e14afa476e927..09590f3560866758e0db00ff43580ca7b9ab798d 100644 (file)
 #include <net/protocol.h>
 #include <net/udp.h>
 
-/* decapsulation data for use when post-processing */
-struct esp_decap_data {
-       xfrm_address_t  saddr;
-       __u16           sport;
-       __u8            proto;
-};
-
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
@@ -150,6 +143,10 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
        int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
        int nfrags;
        int encap_len = 0;
+       u8 nexthdr[2];
+       struct scatterlist *sg;
+       u8 workbuf[60];
+       int padlen;
 
        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
                goto out;
@@ -185,122 +182,82 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
        if (esp->conf.ivlen)
                crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
 
-        {
-               u8 nexthdr[2];
-               struct scatterlist *sg = &esp->sgbuf[0];
-               u8 workbuf[60];
-               int padlen;
-
-               if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-                       sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-                       if (!sg)
-                               goto out;
-               }
-               skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
-               crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
-               if (unlikely(sg != &esp->sgbuf[0]))
-                       kfree(sg);
-
-               if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
-                       BUG();
+       sg = &esp->sgbuf[0];
 
-               padlen = nexthdr[0];
-               if (padlen+2 >= elen)
+       if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
+               sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
+               if (!sg)
                        goto out;
-
-               /* ... check padding bits here. Silly. :-) */ 
-
-               if (x->encap && decap && decap->decap_type) {
-                       struct esp_decap_data *encap_data;
-                       struct udphdr *uh = (struct udphdr *) (iph+1);
-
-                       encap_data = (struct esp_decap_data *) (decap->decap_data);
-                       encap_data->proto = 0;
-
-                       switch (decap->decap_type) {
-                       case UDP_ENCAP_ESPINUDP:
-                       case UDP_ENCAP_ESPINUDP_NON_IKE:
-                               encap_data->proto = AF_INET;
-                               encap_data->saddr.a4 = iph->saddr;
-                               encap_data->sport = uh->source;
-                               encap_len = (void*)esph - (void*)uh;
-                               break;
-
-                       default:
-                               goto out;
-                       }
-               }
-
-               iph->protocol = nexthdr[1];
-               pskb_trim(skb, skb->len - alen - padlen - 2);
-               memcpy(workbuf, skb->nh.raw, iph->ihl*4);
-               skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
-               skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
-               memcpy(skb->nh.raw, workbuf, iph->ihl*4);
-               skb->nh.iph->tot_len = htons(skb->len);
        }
+       skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
+       crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+       if (unlikely(sg != &esp->sgbuf[0]))
+               kfree(sg);
 
-       return 0;
+       if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
+               BUG();
 
-out:
-       return -EINVAL;
-}
+       padlen = nexthdr[0];
+       if (padlen+2 >= elen)
+               goto out;
 
-static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
-{
-  
-       if (x->encap) {
-               struct xfrm_encap_tmpl *encap;
-               struct esp_decap_data *decap_data;
+       /* ... check padding bits here. Silly. :-) */ 
 
-               encap = x->encap;
-               decap_data = (struct esp_decap_data *)(decap->decap_data);
+       if (x->encap) {
+               struct xfrm_encap_tmpl *encap = x->encap;
+               struct udphdr *uh;
 
-               /* first, make sure that the decap type == the encap type */
                if (encap->encap_type != decap->decap_type)
-                       return -EINVAL;
+                       goto out;
 
-               switch (encap->encap_type) {
-               default:
-               case UDP_ENCAP_ESPINUDP:
-               case UDP_ENCAP_ESPINUDP_NON_IKE:
-                       /*
-                        * 1) if the NAT-T peer's IP or port changed then
-                        *    advertize the change to the keying daemon.
-                        *    This is an inbound SA, so just compare
-                        *    SRC ports.
-                        */
-                       if (decap_data->proto == AF_INET &&
-                           (decap_data->saddr.a4 != x->props.saddr.a4 ||
-                            decap_data->sport != encap->encap_sport)) {
-                               xfrm_address_t ipaddr;
-
-                               ipaddr.a4 = decap_data->saddr.a4;
-                               km_new_mapping(x, &ipaddr, decap_data->sport);
-                                       
-                               /* XXX: perhaps add an extra
-                                * policy check here, to see
-                                * if we should allow or
-                                * reject a packet from a
-                                * different source
-                                * address/port.
-                                */
-                       }
-               
-                       /*
-                        * 2) ignore UDP/TCP checksums in case
-                        *    of NAT-T in Transport Mode, or
-                        *    perform other post-processing fixes
-                        *    as per * draft-ietf-ipsec-udp-encaps-06,
-                        *    section 3.1.2
+               uh = (struct udphdr *)(iph + 1);
+               encap_len = (void*)esph - (void*)uh;
+
+               /*
+                * 1) if the NAT-T peer's IP or port changed then
+                *    advertize the change to the keying daemon.
+                *    This is an inbound SA, so just compare
+                *    SRC ports.
+                */
+               if (iph->saddr != x->props.saddr.a4 ||
+                   uh->source != encap->encap_sport) {
+                       xfrm_address_t ipaddr;
+
+                       ipaddr.a4 = iph->saddr;
+                       km_new_mapping(x, &ipaddr, uh->source);
+                               
+                       /* XXX: perhaps add an extra
+                        * policy check here, to see
+                        * if we should allow or
+                        * reject a packet from a
+                        * different source
+                        * address/port.
                         */
-                       if (!x->props.mode)
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-                       break;
                }
+       
+               /*
+                * 2) ignore UDP/TCP checksums in case
+                *    of NAT-T in Transport Mode, or
+                *    perform other post-processing fixes
+                *    as per draft-ietf-ipsec-udp-encaps-06,
+                *    section 3.1.2
+                */
+               if (!x->props.mode)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
+
+       iph->protocol = nexthdr[1];
+       pskb_trim(skb, skb->len - alen - padlen - 2);
+       memcpy(workbuf, skb->nh.raw, iph->ihl*4);
+       skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
+       skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+       memcpy(skb->nh.raw, workbuf, iph->ihl*4);
+       skb->nh.iph->tot_len = htons(skb->len);
+
        return 0;
+
+out:
+       return -EINVAL;
 }
 
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
@@ -458,7 +415,6 @@ static struct xfrm_type esp_type =
        .destructor     = esp_destroy,
        .get_max_size   = esp4_get_max_size,
        .input          = esp_input,
-       .post_input     = esp_post_input,
        .output         = esp_output
 };
 
@@ -470,15 +426,6 @@ static struct net_protocol esp4_protocol = {
 
 static int __init esp4_init(void)
 {
-       struct xfrm_decap_state decap;
-
-       if (sizeof(struct esp_decap_data)  >
-           sizeof(decap.decap_data)) {
-               extern void decap_data_too_small(void);
-
-               decap_data_too_small();
-       }
-
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                printk(KERN_INFO "ip esp init: can't add xfrm type\n");
                return -EAGAIN;
index 6606ddb66a29e6ea0044c24ce596bb9767aae424..cc27545ff97f67b3d60dbea1b3f32bce4f8d3001 100644 (file)
@@ -425,7 +425,12 @@ ipt_log_target(struct sk_buff **pskb,
        li.u.log.level = loginfo->level;
        li.u.log.logflags = loginfo->logflags;
 
-       nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, loginfo->prefix);
+       if (loginfo->logflags & IPT_LOG_NFLOG)
+               nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
+                             loginfo->prefix);
+       else
+               ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
+                              loginfo->prefix);
 
        return IPT_CONTINUE;
 }
index 77c725832decdbfc28b1108f9912cae8b09965dd..6b930efa9fb9965bc78bcb64009ffb81a51e8b0d 100644 (file)
@@ -436,7 +436,12 @@ ip6t_log_target(struct sk_buff **pskb,
        li.u.log.level = loginfo->level;
        li.u.log.logflags = loginfo->logflags;
 
-       nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, loginfo->prefix);
+       if (loginfo->logflags & IP6T_LOG_NFLOG)
+               nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
+                             loginfo->prefix);
+       else
+               ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
+                               loginfo->prefix);
 
        return IP6T_CONTINUE;
 }
index d3a4f30a7f2247f0bac16cdc9466c31b12409e39..d9f0d7ef103b61f8209eaa9fd9aaa29306438309 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
 #include <linux/seq_file.h>
+#include <linux/rcupdate.h>
 #include <net/protocol.h>
 
 #include "nf_internals.h"
@@ -16,7 +17,7 @@
  * for queueing and must reinject all packets it receives, no matter what.
  */
 static struct nf_queue_handler *queue_handler[NPROTO];
-static struct nf_queue_rerouter *queue_rerouter;
+static struct nf_queue_rerouter *queue_rerouter[NPROTO];
 
 static DEFINE_RWLOCK(queue_handler_lock);
 
@@ -64,7 +65,7 @@ int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
                return -EINVAL;
 
        write_lock_bh(&queue_handler_lock);
-       memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
+       rcu_assign_pointer(queue_rerouter[pf], rer);
        write_unlock_bh(&queue_handler_lock);
 
        return 0;
@@ -77,8 +78,9 @@ int nf_unregister_queue_rerouter(int pf)
                return -EINVAL;
 
        write_lock_bh(&queue_handler_lock);
-       memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
+       rcu_assign_pointer(queue_rerouter[pf], NULL);
        write_unlock_bh(&queue_handler_lock);
+       synchronize_rcu();
        return 0;
 }
 EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
@@ -114,16 +116,17 @@ int nf_queue(struct sk_buff **skb,
        struct net_device *physindev = NULL;
        struct net_device *physoutdev = NULL;
 #endif
+       struct nf_queue_rerouter *rerouter;
 
        /* QUEUE == DROP if noone is waiting, to be safe. */
        read_lock(&queue_handler_lock);
-       if (!queue_handler[pf] || !queue_handler[pf]->outfn) {
+       if (!queue_handler[pf]) {
                read_unlock(&queue_handler_lock);
                kfree_skb(*skb);
                return 1;
        }
 
-       info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
+       info = kmalloc(sizeof(*info)+queue_rerouter[pf]->rer_size, GFP_ATOMIC);
        if (!info) {
                if (net_ratelimit())
                        printk(KERN_ERR "OOM queueing packet %p\n",
@@ -155,15 +158,13 @@ int nf_queue(struct sk_buff **skb,
                if (physoutdev) dev_hold(physoutdev);
        }
 #endif
-       if (queue_rerouter[pf].save)
-               queue_rerouter[pf].save(*skb, info);
+       rerouter = rcu_dereference(queue_rerouter[pf]);
+       if (rerouter)
+               rerouter->save(*skb, info);
 
        status = queue_handler[pf]->outfn(*skb, info, queuenum,
                                          queue_handler[pf]->data);
 
-       if (status >= 0 && queue_rerouter[pf].reroute)
-               status = queue_rerouter[pf].reroute(skb, info);
-
        read_unlock(&queue_handler_lock);
 
        if (status < 0) {
@@ -189,6 +190,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
 {
        struct list_head *elem = &info->elem->list;
        struct list_head *i;
+       struct nf_queue_rerouter *rerouter;
 
        rcu_read_lock();
 
@@ -212,7 +214,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
                        break;
        }
   
-       if (elem == &nf_hooks[info->pf][info->hook]) {
+       if (i == &nf_hooks[info->pf][info->hook]) {
                /* The module which sent it to userspace is gone. */
                NFDEBUG("%s: module disappeared, dropping packet.\n",
                        __FUNCTION__);
@@ -225,6 +227,12 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
                verdict = NF_ACCEPT;
        }
 
+       if (verdict == NF_ACCEPT) {
+               rerouter = rcu_dereference(queue_rerouter[info->pf]);
+               if (rerouter && rerouter->reroute(&skb, info) < 0)
+                       verdict = NF_DROP;
+       }
+
        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
@@ -322,22 +330,12 @@ int __init netfilter_queue_init(void)
 {
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry *pde;
-#endif
-       queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
-                                GFP_KERNEL);
-       if (!queue_rerouter)
-               return -ENOMEM;
 
-#ifdef CONFIG_PROC_FS
        pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
-       if (!pde) {
-               kfree(queue_rerouter);
+       if (!pde)
                return -1;
-       }
        pde->proc_fops = &nfqueue_file_ops;
 #endif
-       memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));
-
        return 0;
 }
 
index 8206025d8e46297c786e58fc19fd1725c0bd5ff5..ae62054a9fc4c22bb41678f2c8fd64ce87e7d427 100644 (file)
@@ -996,13 +996,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        struct sec_decap_state *xvec = &(skb->sp->x[i]);
                        if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
                                return 0;
-
-                       /* If there is a post_input processor, try running it */
-                       if (xvec->xvec->type->post_input &&
-                           (xvec->xvec->type->post_input)(xvec->xvec,
-                                                          &(xvec->decap),
-                                                          skb) != 0)
-                               return 0;
                }
        }