www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'x86/oprofile' into oprofile
author    Ingo Molnar <mingo@elte.hu>
          Tue, 19 Aug 2008 01:34:07 +0000 (03:34 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Tue, 19 Aug 2008 01:34:07 +0000 (03:34 +0200)
arch/Kconfig
arch/x86/kernel/apic_32.c
arch/x86/kernel/apic_64.c
arch/x86/oprofile/op_model_p4.c
include/linux/pci_ids.h

diff --combined arch/Kconfig
index 2651af48b2e508cf39492ca1dcd4462ff7004d0d,364c6dadde0a6b6ed988f15294f799240b292a68..0267babe5eb9dc6a6c79f2347234b7d78aafc2e8
@@@ -13,20 -13,6 +13,20 @@@ config OPROFILE
  
          If unsure, say N.
  
 +config OPROFILE_IBS
 +      bool "OProfile AMD IBS support (EXPERIMENTAL)"
 +      default n
 +      depends on OPROFILE && SMP && X86
 +      help
 +          Instruction-Based Sampling (IBS) is a new profiling
 +          technique that provides rich, precise program performance
 +          information. IBS is introduced by AMD Family10h processors
 +          (AMD Opteron Quad-Core processor “Barcelona”) to overcome
 +          the limitations of conventional performance counter
 +          sampling.
 +
 +        If unsure, say N.
 +
  config HAVE_OPROFILE
        def_bool n
  
@@@ -73,6 -59,24 +73,24 @@@ config HAVE_KPROBES
  config HAVE_KRETPROBES
        def_bool n
  
+ #
+ # An arch should select this if it provides all these things:
+ #
+ #     task_pt_regs()          in asm/processor.h or asm/ptrace.h
+ #     arch_has_single_step()  if there is hardware single-step support
+ #     arch_has_block_step()   if there is hardware block-step support
+ #     arch_ptrace()           and not #define __ARCH_SYS_PTRACE
+ #     compat_arch_ptrace()    and #define __ARCH_WANT_COMPAT_SYS_PTRACE
+ #     asm/syscall.h           supplying asm-generic/syscall.h interface
+ #     linux/regset.h          user_regset interfaces
+ #     CORE_DUMP_USE_REGSET    #define'd in linux/elf.h
+ #     TIF_SYSCALL_TRACE       calls tracehook_report_syscall_{entry,exit}
+ #     TIF_NOTIFY_RESUME       calls tracehook_notify_resume()
+ #     signal delivery         calls tracehook_signal_handler()
+ #
+ config HAVE_ARCH_TRACEHOOK
+       def_bool n
  config HAVE_DMA_ATTRS
        def_bool n
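
The HAVE_ARCH_TRACEHOOK checklist above spells out what an architecture must wire up before selecting the symbol. As a rough illustration of the TIF_SYSCALL_TRACE item, and not code from this commit, an arch's syscall-entry hook ends up looking something like the sketch below; it assumes the tracehook_report_syscall_entry() interface from <linux/tracehook.h> and x86-style register naming (regs->orig_ax).

/* Illustrative sketch only, not part of this diff: the kind of syscall-entry
 * hook the HAVE_ARCH_TRACEHOOK checklist expects, loosely modelled on the
 * x86 code of the period. */
#include <linux/sched.h>
#include <linux/tracehook.h>
#include <asm/ptrace.h>

asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* let the tracer see the syscall; a non-zero return means "abort it" */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	return ret ?: regs->orig_ax;	/* syscall number on x86 */
}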
  
index 0059e7a8a9e6eaa607d24b70a18c4221efc1d607,f88bd0d982b08540889e991c42a019344002d3d3..0ff576d026a4870ae0933933f95262334a4ea544
@@@ -646,9 -646,6 +646,9 @@@ int setup_profiling_timer(unsigned int 
   *
   * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
   * MCE interrupts are supported. Thus MCE offset must be set to 0.
 + *
 + * If mask=1, the LVT entry does not generate interrupts while mask=0
 + * enables the vector. See also the BKDGs.
   */
  
  #define APIC_EILVT_LVTOFF_MCE 0
@@@ -672,7 -669,6 +672,7 @@@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 m
        setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
        return APIC_EILVT_LVTOFF_IBS;
  }
 +EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
  
  /*
   * Local APIC start and shutdown
@@@ -1458,8 -1454,6 +1458,6 @@@ void disconnect_bsp_APIC(int virt_wire_
        }
  }
  
- unsigned int __cpuinitdata maxcpus = NR_CPUS;
  void __cpuinit generic_processor_info(int apicid, int version)
  {
        int cpu;
                return;
        }
  
-       if (num_processors >= maxcpus) {
-               printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-                       " Processor ignored.\n", maxcpus);
-               return;
-       }
        num_processors++;
        cpus_complement(tmp_map, cpu_present_map);
        cpu = first_cpu(tmp_map);
@@@ -1724,15 -1712,19 +1716,19 @@@ static int __init parse_lapic_timer_c2_
  }
  early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
  
- static int __init apic_set_verbosity(char *str)
+ static int __init apic_set_verbosity(char *arg)
  {
-       if (strcmp("debug", str) == 0)
+       if (!arg)
+               return -EINVAL;
+       if (strcmp(arg, "debug") == 0)
                apic_verbosity = APIC_DEBUG;
-       else if (strcmp("verbose", str) == 0)
+       else if (strcmp(arg, "verbose") == 0)
                apic_verbosity = APIC_VERBOSE;
-       return 1;
+       return 0;
  }
__setup("apic=", apic_set_verbosity);
early_param("apic", apic_set_verbosity);
  
  static int __init lapic_insert_resource(void)
  {
index e571351f2a93edb715a593b488a9710e41c09fd2,446c062e831cf46ce8ca276f38ee95a216049a93..57744f4a75b4f3ebe9216d1eb7f6a287a304de50
@@@ -90,7 -90,6 +90,6 @@@ static unsigned long apic_phys
  
  unsigned long mp_lapic_addr;
  
- unsigned int __cpuinitdata maxcpus = NR_CPUS;
  /*
   * Get the LAPIC version
   */
@@@ -205,9 -204,6 +204,9 @@@ static void __setup_APIC_LVTT(unsigned 
   *
   * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
   * MCE interrupts are supported. Thus MCE offset must be set to 0.
 + *
 + * If mask=1, the LVT entry does not generate interrupts while mask=0
 + * enables the vector. See also the BKDGs.
   */
  
  #define APIC_EILVT_LVTOFF_MCE 0
@@@ -232,7 -228,6 +231,7 @@@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 m
        setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
        return APIC_EILVT_LVTOFF_IBS;
  }
 +EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
  
  /*
   * Program the next event, relative to now
@@@ -1066,12 -1061,6 +1065,6 @@@ void __cpuinit generic_processor_info(i
                return;
        }
  
-       if (num_processors >= maxcpus) {
-               printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-                      " Processor ignored.\n", maxcpus);
-               return;
-       }
        num_processors++;
        cpus_complement(tmp_map, cpu_present_map);
        cpu = first_cpu(tmp_map);
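
setup_APIC_eilvt_ibs(), newly exported above in both apic_32.c and apic_64.c, is the hook an IBS-capable profiling driver uses to claim the extended LVT offset. A hedged usage sketch, not taken from this commit, assuming the APIC_EILVT_MSG_FIX/NMI message-type constants from <asm/apicdef.h>:

/* Illustrative only, not part of this diff: arming and quiescing the IBS
 * extended-LVT entry through the newly exported helper.  mask=0 enables the
 * vector, mask=1 programs it but generates no interrupts (see the comment
 * added above). */
#include <asm/apic.h>

static u8 ibs_eilvt_off;

static void ibs_lvt_arm(void)
{
	/* deliver IBS samples as NMIs; the helper returns the LVT offset */
	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static void ibs_lvt_quiesce(void)
{
	/* keep the entry programmed but masked */
	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}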
index e641545d47963de015f71b72e0f2609ef404cc12,43ac5af338d8c910c2295a7484453ab6b8a01b2a..cacba61ffbac86672d07a66e517a12e6ba616371
  
  #include <linux/oprofile.h>
  #include <linux/smp.h>
+ #include <linux/ptrace.h>
+ #include <linux/nmi.h>
  #include <asm/msr.h>
- #include <asm/ptrace.h>
  #include <asm/fixmap.h>
  #include <asm/apic.h>
- #include <asm/nmi.h>
  
  #include "op_x86_model.h"
  #include "op_counter.h"
@@@ -40,7 -41,7 +41,7 @@@ static unsigned int num_controls = NUM_
  static inline void setup_num_counters(void)
  {
  #ifdef CONFIG_SMP
-       if (smp_num_siblings == 2){
+       if (smp_num_siblings == 2) {
                num_counters = NUM_COUNTERS_HT2;
                num_controls = NUM_CONTROLS_HT2;
        }
@@@ -86,7 -87,7 +87,7 @@@ struct p4_event_binding 
  #define CTR_FLAME_2    (1 << 6)
  #define CTR_IQ_5       (1 << 7)
  
- static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = {
+ static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
        { CTR_BPU_0,   MSR_P4_BPU_PERFCTR0,   MSR_P4_BPU_CCCR0 },
        { CTR_MS_0,    MSR_P4_MS_PERFCTR0,    MSR_P4_MS_CCCR0 },
        { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
        { CTR_IQ_5,    MSR_P4_IQ_PERFCTR5,    MSR_P4_IQ_CCCR5 }
  };
  
- #define NUM_UNUSED_CCCRS      NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT
+ #define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
  
  /* p4 event codes in libop/op_event.h are indices into this table. */
  
  static struct p4_event_binding p4_events[NUM_EVENTS] = {
-       
        { /* BRANCH_RETIRED */
-               0x05, 0x06, 
+               0x05, 0x06,
                { {CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },
-       
        { /* MISPRED_BRANCH_RETIRED */
-               0x04, 0x03, 
+               0x04, 0x03,
                { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
        },
-       
        { /* TC_DELIVER_MODE */
                0x01, 0x01,
-               { { CTR_MS_0, MSR_P4_TC_ESCR0},  
+               { { CTR_MS_0, MSR_P4_TC_ESCR0},
                  { CTR_MS_2, MSR_P4_TC_ESCR1} }
        },
-       
        { /* BPU_FETCH_REQUEST */
-               0x00, 0x03, 
+               0x00, 0x03,
                { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
                  { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
        },
        },
  
        { /* LOAD_PORT_REPLAY */
-               0x02, 0x04, 
+               0x02, 0x04,
                { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
                  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
        },
        },
  
        { /* BSQ_CACHE_REFERENCE */
-               0x07, 0x0c, 
+               0x07, 0x0c,
                { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
                  { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
        },
  
        { /* IOQ_ALLOCATION */
-               0x06, 0x03, 
+               0x06, 0x03,
                { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
                  { 0, 0 } }
        },
  
        { /* IOQ_ACTIVE_ENTRIES */
-               0x06, 0x1a, 
+               0x06, 0x1a,
                { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
                  { 0, 0 } }
        },
  
        { /* FSB_DATA_ACTIVITY */
-               0x06, 0x17, 
+               0x06, 0x17,
                { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
                  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
        },
  
        { /* BSQ_ALLOCATION */
-               0x07, 0x05, 
+               0x07, 0x05,
                { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
                  { 0, 0 } }
        },
  
        { /* BSQ_ACTIVE_ENTRIES */
                0x07, 0x06,
-               { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},  
+               { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
                  { 0, 0 } }
        },
  
        { /* X87_ASSIST */
-               0x05, 0x03, 
+               0x05, 0x03,
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
-   
        { /* PACKED_SP_UOP */
-               0x01, 0x08, 
+               0x01, 0x08,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
-   
        { /* PACKED_DP_UOP */
-               0x01, 0x0c, 
+               0x01, 0x0c,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
  
        { /* SCALAR_SP_UOP */
-               0x01, 0x0a, 
+               0x01, 0x0a,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
        },
  
        { /* 64BIT_MMX_UOP */
-               0x01, 0x02, 
+               0x01, 0x02,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
-   
        { /* 128BIT_MMX_UOP */
-               0x01, 0x1a, 
+               0x01, 0x1a,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
  
        { /* X87_FP_UOP */
-               0x01, 0x04, 
+               0x01, 0x04,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
-   
        { /* X87_SIMD_MOVES_UOP */
-               0x01, 0x2e, 
+               0x01, 0x2e,
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },
-   
        { /* MACHINE_CLEAR */
-               0x05, 0x02, 
+               0x05, 0x02,
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },
                { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
                  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
        },
-   
        { /* TC_MS_XFER */
-               0x00, 0x05, 
+               0x00, 0x05,
                { { CTR_MS_0, MSR_P4_MS_ESCR0},
                  { CTR_MS_2, MSR_P4_MS_ESCR1} }
        },
        },
  
        { /* INSTR_RETIRED */
-               0x04, 0x02, 
+               0x04, 0x02,
                { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
        },
                  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
        },
  
-       { /* UOP_TYPE */    
-               0x02, 0x02, 
+       { /* UOP_TYPE */
+               0x02, 0x02,
                { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
                  { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
        },
  
        { /* RETIRED_MISPRED_BRANCH_TYPE */
-               0x02, 0x05, 
+               0x02, 0x05,
                { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
                  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
        },
  #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
  #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
  #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
- #define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
- #define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
+ #define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
+ #define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
  
  #define CCCR_RESERVED_BITS 0x38030FFF
  #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
  #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
  #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
  #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
- #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
- #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
+ #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
+ #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
  #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
  #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
  
- #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0)
- #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0)
- #define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0)
- #define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0)
+ #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
+ #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
+ #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
+ #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
  #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
  
  
@@@ -380,7 -381,7 +381,7 @@@ static unsigned int get_stagger(void
  #ifdef CONFIG_SMP
        int cpu = smp_processor_id();
        return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
- #endif        
+ #endif
        return 0;
  }
  
@@@ -395,25 -396,23 +396,23 @@@ static unsigned long reset_value[NUM_CO
  
  static void p4_fill_in_addresses(struct op_msrs * const msrs)
  {
-       unsigned int i; 
+       unsigned int i;
        unsigned int addr, cccraddr, stag;
  
        setup_num_counters();
        stag = get_stagger();
  
        /* initialize some registers */
-       for (i = 0; i < num_counters; ++i) {
+       for (i = 0; i < num_counters; ++i)
                msrs->counters[i].addr = 0;
-       }
-       for (i = 0; i < num_controls; ++i) {
+       for (i = 0; i < num_controls; ++i)
                msrs->controls[i].addr = 0;
-       }
-       
        /* the counter & cccr registers we pay attention to */
        for (i = 0; i < num_counters; ++i) {
                addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
                cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
-               if (reserve_perfctr_nmi(addr)){
+               if (reserve_perfctr_nmi(addr)) {
                        msrs->counters[i].addr = addr;
                        msrs->controls[i].addr = cccraddr;
                }
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }
-       
        for (addr = MSR_P4_MS_ESCR0 + stag;
-            addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { 
+            addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }
-       
        for (addr = MSR_P4_IX_ESCR0 + stag;
-            addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { 
+            addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }
  
        /* there are 2 remaining non-contiguously located ESCRs */
  
-       if (num_counters == NUM_COUNTERS_NON_HT) {              
+       if (num_counters == NUM_COUNTERS_NON_HT) {
                /* standard non-HT CPUs handle both remaining ESCRs*/
                if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
                        msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
@@@ -498,20 -497,20 +497,20 @@@ static void pmc_setup_one_p4_counter(un
        unsigned int stag;
  
        stag = get_stagger();
-       
        /* convert from counter *number* to counter *bit* */
        counter_bit = 1 << VIRT_CTR(stag, ctr);
-       
        /* find our event binding structure. */
        if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
-               printk(KERN_ERR 
-                      "oprofile: P4 event code 0x%lx out of range\n", 
+               printk(KERN_ERR
+                      "oprofile: P4 event code 0x%lx out of range\n",
                       counter_config[ctr].event);
                return;
        }
-       
        ev = &(p4_events[counter_config[ctr].event - 1]);
-       
        for (i = 0; i < maxbind; i++) {
                if (ev->bindings[i].virt_counter & counter_bit) {
  
                                ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
                        }
                        ESCR_SET_EVENT_SELECT(escr, ev->event_select);
-                       ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);                       
+                       ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
                        ESCR_WRITE(escr, high, ev, i);
-                      
                        /* modify CCCR */
                        CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
                        CCCR_CLEAR(cccr);
                        CCCR_SET_REQUIRED_BITS(cccr);
                        CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
-                       if (stag == 0) {
+                       if (stag == 0)
                                CCCR_SET_PMI_OVF_0(cccr);
-                       } else {
+                       else
                                CCCR_SET_PMI_OVF_1(cccr);
-                       }
                        CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
                        return;
                }
        }
  
-       printk(KERN_ERR 
+       printk(KERN_ERR
               "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
               counter_config[ctr].event, stag, ctr);
  }
@@@ -559,14 -557,14 +557,14 @@@ static void p4_setup_ctrs(struct op_msr
        stag = get_stagger();
  
        rdmsr(MSR_IA32_MISC_ENABLE, low, high);
-       if (! MISC_PMC_ENABLED_P(low)) {
+       if (!MISC_PMC_ENABLED_P(low)) {
                printk(KERN_ERR "oprofile: P4 PMC not available\n");
                return;
        }
  
        /* clear the cccrs we will use */
        for (i = 0 ; i < num_counters ; i++) {
-               if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
+               if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
                        continue;
                rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
                CCCR_CLEAR(low);
  
        /* clear all escrs (including those outside our concern) */
        for (i = num_counters; i < num_controls; i++) {
-               if (unlikely(!CTRL_IS_RESERVED(msrs,i)))
+               if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
                        continue;
                wrmsr(msrs->controls[i].addr, 0, 0);
        }
  
        /* setup all counters */
        for (i = 0 ; i < num_counters ; ++i) {
-               if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) {
+               if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
                        reset_value[i] = counter_config[i].count;
                        pmc_setup_one_p4_counter(i);
                        CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
@@@ -603,11 -601,11 +601,11 @@@ static int p4_check_ctrs(struct pt_reg
        stag = get_stagger();
  
        for (i = 0; i < num_counters; ++i) {
-               
-               if (!reset_value[i]) 
+               if (!reset_value[i])
                        continue;
  
-               /* 
+               /*
                 * there is some eccentricity in the hardware which
                 * requires that we perform 2 extra corrections:
                 *
                 *
                 * - write the counter back twice to ensure it gets
                 *   updated properly.
-                * 
+                *
                 * the former seems to be related to extra NMIs happening
                 * during the current NMI; the latter is reported as errata
                 * N15 in intel doc 249199-029, pentium 4 specification
                 * update, though their suggested work-around does not
                 * appear to solve the problem.
                 */
-               
                real = VIRT_CTR(stag, i);
  
                CCCR_READ(low, high, real);
-               CTR_READ(ctr, high, real);
+               CTR_READ(ctr, high, real);
                if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
                        oprofile_add_sample(regs, i);
-                       CTR_WRITE(reset_value[i], real);
+                       CTR_WRITE(reset_value[i], real);
                        CCCR_CLEAR_OVF(low);
                        CCCR_WRITE(low, high, real);
-                       CTR_WRITE(reset_value[i], real);
+                       CTR_WRITE(reset_value[i], real);
                }
        }
  
@@@ -683,15 -681,16 +681,16 @@@ static void p4_shutdown(struct op_msrs 
        int i;
  
        for (i = 0 ; i < num_counters ; ++i) {
-               if (CTR_IS_RESERVED(msrs,i))
+               if (CTR_IS_RESERVED(msrs, i))
                        release_perfctr_nmi(msrs->counters[i].addr);
        }
-       /* some of the control registers are specially reserved in
+       /*
+        * some of the control registers are specially reserved in
         * conjunction with the counter registers (hence the starting offset).
         * This saves a few bits.
         */
        for (i = num_counters ; i < num_controls ; ++i) {
-               if (CTRL_IS_RESERVED(msrs,i))
+               if (CTRL_IS_RESERVED(msrs, i))
                        release_evntsel_nmi(msrs->controls[i].addr);
        }
  }
  struct op_x86_model_spec const op_p4_ht2_spec = {
        .num_counters = NUM_COUNTERS_HT2,
        .num_controls = NUM_CONTROLS_HT2,
 +      .num_hardware_counters = NUM_COUNTERS_HT2,
 +      .num_hardware_controls = NUM_CONTROLS_HT2,
        .fill_in_addresses = &p4_fill_in_addresses,
        .setup_ctrs = &p4_setup_ctrs,
        .check_ctrs = &p4_check_ctrs,
  struct op_x86_model_spec const op_p4_spec = {
        .num_counters = NUM_COUNTERS_NON_HT,
        .num_controls = NUM_CONTROLS_NON_HT,
 +      .num_hardware_counters = NUM_COUNTERS_NON_HT,
 +      .num_hardware_controls = NUM_CONTROLS_NON_HT,
        .fill_in_addresses = &p4_fill_in_addresses,
        .setup_ctrs = &p4_setup_ctrs,
        .check_ctrs = &p4_check_ctrs,
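
One detail of the macros in this file worth spelling out: CTR_WRITE programs -(u32)(l), the two's complement of the requested sample period, because the P4 counters count upward; starting that far below the wrap point makes the counter overflow after exactly "count" events, at which point the low word's top bit goes clear (the CTR_OVERFLOW_P test) and the CCCR OVF bit is set. A small stand-alone arithmetic sketch in ordinary user-space C, not kernel code from this commit:

/* Demonstrates the two's-complement arming used by CTR_WRITE: starting at
 * -(u32)count, the low 32 bits reach 0 after exactly `count` increments,
 * flipping the top bit from set to clear. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t count = 100000;		/* requested sample period */
	uint32_t start = -(uint32_t)count;	/* what CTR_WRITE programs (low word) */
	uint32_t after = start + count;		/* counter after `count` events */

	printf("start=0x%08x after=0x%08x overflowed=%d\n",
	       (unsigned)start, (unsigned)after, !(after & 0x80000000u));
	return 0;
}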
diff --combined include/linux/pci_ids.h
index 917d48e5ea06aa7dff61a9add906160d90df22c1,9ec2bcce8e8385d3e192f8b7c835a2062c592314..4463ca5b893454b54425cdf5f51e4efc2123546f
  #define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP       0x1101
  #define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL        0x1102
  #define PCI_DEVICE_ID_AMD_K8_NB_MISC  0x1103
 +#define PCI_DEVICE_ID_AMD_10H_NB_HT   0x1200
 +#define PCI_DEVICE_ID_AMD_10H_NB_MAP  0x1201
 +#define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202
 +#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203
 +#define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204
  #define PCI_DEVICE_ID_AMD_LANCE               0x2000
  #define PCI_DEVICE_ID_AMD_LANCE_HOME  0x2001
  #define PCI_DEVICE_ID_AMD_SCSI                0x2020
  #define PCI_VENDOR_ID_TI              0x104c
  #define PCI_DEVICE_ID_TI_TVP4020      0x3d07
  #define PCI_DEVICE_ID_TI_4450         0x8011
+ #define PCI_DEVICE_ID_TI_TSB43AB22    0x8023
  #define PCI_DEVICE_ID_TI_XX21_XX11    0x8031
  #define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
  #define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
  #define PCI_DEVICE_ID_MOXA_C320               0x3200
  
  #define PCI_VENDOR_ID_CCD             0x1397
+ #define PCI_DEVICE_ID_CCD_HFC4S               0x08B4
+ #define PCI_SUBDEVICE_ID_CCD_PMX2S    0x1234
+ #define PCI_DEVICE_ID_CCD_HFC8S               0x16B8
  #define PCI_DEVICE_ID_CCD_2BD0                0x2bd0
+ #define PCI_DEVICE_ID_CCD_HFCE1               0x30B1
+ #define PCI_SUBDEVICE_ID_CCD_SPD4S    0x3136
+ #define PCI_SUBDEVICE_ID_CCD_SPDE1    0x3137
  #define PCI_DEVICE_ID_CCD_B000                0xb000
  #define PCI_DEVICE_ID_CCD_B006                0xb006
  #define PCI_DEVICE_ID_CCD_B007                0xb007
  #define PCI_DEVICE_ID_CCD_B00B                0xb00b
  #define PCI_DEVICE_ID_CCD_B00C                0xb00c
  #define PCI_DEVICE_ID_CCD_B100                0xb100
+ #define PCI_SUBDEVICE_ID_CCD_IOB4ST   0xB520
+ #define PCI_SUBDEVICE_ID_CCD_IOB8STR  0xB521
+ #define PCI_SUBDEVICE_ID_CCD_IOB8ST   0xB522
+ #define PCI_SUBDEVICE_ID_CCD_IOB1E1   0xB523
+ #define PCI_SUBDEVICE_ID_CCD_SWYX4S   0xB540
+ #define PCI_SUBDEVICE_ID_CCD_JH4S20   0xB550
+ #define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552
+ #define PCI_SUBDEVICE_ID_CCD_BN4S     0xB560
+ #define PCI_SUBDEVICE_ID_CCD_BN8S     0xB562
+ #define PCI_SUBDEVICE_ID_CCD_BNE1     0xB563
+ #define PCI_SUBDEVICE_ID_CCD_BNE1D    0xB564
+ #define PCI_SUBDEVICE_ID_CCD_BNE1DP   0xB565
+ #define PCI_SUBDEVICE_ID_CCD_BN2S     0xB566
+ #define PCI_SUBDEVICE_ID_CCD_BN1SM    0xB567
+ #define PCI_SUBDEVICE_ID_CCD_BN4SM    0xB568
+ #define PCI_SUBDEVICE_ID_CCD_BN2SM    0xB569
+ #define PCI_SUBDEVICE_ID_CCD_BNE1M    0xB56A
+ #define PCI_SUBDEVICE_ID_CCD_BN8SP    0xB56B
+ #define PCI_SUBDEVICE_ID_CCD_HFC4S    0xB620
+ #define PCI_SUBDEVICE_ID_CCD_HFC8S    0xB622
  #define PCI_DEVICE_ID_CCD_B700                0xb700
  #define PCI_DEVICE_ID_CCD_B701                0xb701
+ #define PCI_SUBDEVICE_ID_CCD_HFCE1    0xC523
+ #define PCI_SUBDEVICE_ID_CCD_OV2S     0xE884
+ #define PCI_SUBDEVICE_ID_CCD_OV4S     0xE888
+ #define PCI_SUBDEVICE_ID_CCD_OV8S     0xE998
  
  #define PCI_VENDOR_ID_EXAR            0x13a8
  #define PCI_DEVICE_ID_EXAR_XR17C152   0x0152
  #define PCI_DEVICE_ID_HERC_WIN                0x5732
  #define PCI_DEVICE_ID_HERC_UNI                0x5832
  
- #define PCI_VENDOR_ID_RDC             0x17f3
  #define PCI_VENDOR_ID_SITECOM         0x182d
  #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069
  
  
  #define PCI_VENDOR_ID_3COM_2          0xa727
  
+ #define PCI_VENDOR_ID_DIGIUM          0xd161
+ #define PCI_DEVICE_ID_DIGIUM_HFC4S    0xb410
  #define PCI_SUBVENDOR_ID_EXSYS                0xd84d
  #define PCI_SUBDEVICE_ID_EXSYS_4014   0x4014
  #define PCI_SUBDEVICE_ID_EXSYS_4055   0x4055