Merge branch 'linus' into timers/hrtimers
author Ingo Molnar <mingo@elte.hu>
Mon, 29 Dec 2008 08:42:58 +0000 (09:42 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 29 Dec 2008 09:37:07 +0000 (10:37 +0100)
Conflicts:
sound/drivers/pcsp/pcsp.c

Semantic conflict:

        sound/core/hrtimer.c
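
The semantic conflict comes from the timers/hrtimers side dropping the hrtimer cb_mode field (the HRTIMER_CB_IRQSAFE_* callback modes), while the newly merged sound/core/hrtimer.c from 'linus' still assigned it. The resolution, visible in the combined diff below, is simply to remove that assignment. As a minimal sketch, the resulting timer setup in snd_hrtimer_open() reduces to:

        /* resolved setup: hrtimer_init() plus the callback pointer;
         * the old cb_mode assignment no longer exists */
        hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        stime->timer = t;
        stime->hrt.function = snd_hrtimer_callback;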

arch/x86/Kconfig
arch/x86/kernel/hpet.c
fs/exec.c
kernel/sched.c
kernel/trace/trace_sysprof.c
sound/core/hrtimer.c
sound/drivers/pcsp/pcsp.c

diff --combined arch/x86/Kconfig
index 19f0d97829ee6fcca82cdd8dd69002529f2c978f,98a0ed52b5c39ec9d728d89c557353f97f986a32..66c14961a9b58492119d3b66ee428fa969b78db6
@@@ -19,6 -19,8 +19,8 @@@ config X86_64
  config X86
        def_bool y
        select HAVE_AOUT if X86_32
+       select HAVE_READQ
+       select HAVE_WRITEQ
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       select USER_STACKTRACE_SUPPORT
  
  config ARCH_DEFCONFIG
        string
@@@ -87,6 -92,10 +92,10 @@@ config GENERIC_IOMAP
  config GENERIC_BUG
        def_bool y
        depends on BUG
+       select GENERIC_BUG_RELATIVE_POINTERS if X86_64
+ config GENERIC_BUG_RELATIVE_POINTERS
+       bool
  
  config GENERIC_HWEIGHT
        def_bool y
@@@ -242,21 -251,13 +251,13 @@@ config X86_FIND_SMP_CONFIG
        def_bool y
        depends on X86_MPPARSE || X86_VOYAGER
  
- if ACPI
  config X86_MPPARSE
-       def_bool y
-       bool "Enable MPS table"
+       bool "Enable MPS table" if ACPI
+       default y
        depends on X86_LOCAL_APIC
        help
          For old smp systems that do not have proper acpi support. Newer systems
          (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
- endif
- if !ACPI
- config X86_MPPARSE
-       def_bool y
-       depends on X86_LOCAL_APIC
- endif
  
  choice
        prompt "Subarchitecture Type"
@@@ -367,10 -368,10 +368,10 @@@ config X86_RDC321X
          as R-8610-(G).
          If you don't have one of these chips, you should say N here.
  
- config SCHED_NO_NO_OMIT_FRAME_POINTER
+ config SCHED_OMIT_FRAME_POINTER
        def_bool y
        prompt "Single-depth WCHAN output"
-       depends on X86_32
+       depends on X86
        help
          Calculate simpler /proc/<PID>/wchan values. If this option
          is disabled then wchan values will recurse back to the
@@@ -465,10 -466,6 +466,6 @@@ config X86_CYCLONE_TIMER
        def_bool y
        depends on X86_GENERICARCH
  
- config ES7000_CLUSTERED_APIC
-       def_bool y
-       depends on SMP && X86_ES7000 && MPENTIUMIII
  source "arch/x86/Kconfig.cpu"
  
  config HPET_TIMER
           The HPET provides a stable time base on SMP
           systems, unlike the TSC, but it is more expensive to access,
           as it is off-chip.  You can find the HPET spec at
 -         <http://www.intel.com/hardwaredesign/hpetspec.htm>.
 +         <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
  
           You can safely choose Y here.  However, HPET will only be
           activated if the platform and the BIOS support this feature.
@@@ -569,7 -566,7 +566,7 @@@ config AMD_IOMMU
  
  # need this always selected by IOMMU for the VIA workaround
  config SWIOTLB
-       bool
+       def_bool y if X86_64
        help
          Support for software bounce buffers used on x86-64 systems
          which don't have a hardware IOMMU (e.g. the current generation
@@@ -660,6 -657,30 +657,30 @@@ config X86_VISWS_APIC
        def_bool y
        depends on X86_32 && X86_VISWS
  
+ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
+       bool "Reroute for broken boot IRQs"
+       default n
+       depends on X86_IO_APIC
+       help
+         This option enables a workaround that fixes a source of
+         spurious interrupts. This is recommended when threaded
+         interrupt handling is used on systems where the generation of
+         superfluous "boot interrupts" cannot be disabled.
+         Some chipsets generate a legacy INTx "boot IRQ" when the IRQ
+         entry in the chipset's IO-APIC is masked (as, e.g. the RT
+         kernel does during interrupt handling). On chipsets where this
+         boot IRQ generation cannot be disabled, this workaround keeps
+         the original IRQ line masked so that only the equivalent "boot
+         IRQ" is delivered to the CPUs. The workaround also tells the
+         kernel to set up the IRQ handler on the boot IRQ line. In this
+         way only one interrupt is delivered to the kernel. Otherwise
+         the spurious second interrupt may cause the kernel to bring
+         down (vital) interrupt lines.
+         Only affects "broken" chipsets. Interrupt sharing may be
+         increased on these systems.
  config X86_MCE
        bool "Machine Check Exception"
        depends on !X86_VOYAGER
@@@ -956,24 -977,37 +977,37 @@@ config X86_PAE
  config ARCH_PHYS_ADDR_T_64BIT
         def_bool X86_64 || X86_PAE
  
+ config DIRECT_GBPAGES
+       bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+       default y
+       depends on X86_64
+       help
+         Allow the kernel linear mapping to use 1GB pages on CPUs that
+         support it. This can improve the kernel's performance a tiny bit by
+         reducing TLB pressure. If in doubt, say "Y".
  # Common NUMA Features
  config NUMA
-       bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
+       bool "Numa Memory Allocation and Scheduler Support"
        depends on SMP
        depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
        default n if X86_PC
        default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
        help
          Enable NUMA (Non Uniform Memory Access) support.
          The kernel will try to allocate memory used by a CPU on the
          local memory controller of the CPU and add some more
          NUMA awareness to the kernel.
  
-         For 32-bit this is currently highly experimental and should be only
-         used for kernel development. It might also cause boot failures.
-         For 64-bit this is recommended on all multiprocessor Opteron systems.
-         If the system is EM64T, you should say N unless your system is
-         EM64T NUMA.
+         For 64-bit this is recommended if the system is Intel Core i7
+         (or later), AMD Opteron, or EM64T NUMA.
+         For 32-bit this is only needed on (rare) 32-bit-only platforms
+         that support NUMA topologies, such as NUMAQ / Summit, or if you
+         boot a 32-bit kernel on a 64-bit NUMA platform.
+         Otherwise, you should say N.
  
  comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
        depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
@@@ -1493,6 -1527,10 +1527,10 @@@ config ARCH_ENABLE_MEMORY_HOTPLUG
        def_bool y
        depends on X86_64 || (X86_32 && HIGHMEM)
  
+ config ARCH_ENABLE_MEMORY_HOTREMOVE
+       def_bool y
+       depends on MEMORY_HOTPLUG
  config HAVE_ARCH_EARLY_PFN_TO_NID
        def_bool X86_64
        depends on NUMA
@@@ -1632,13 -1670,6 +1670,6 @@@ config APM_ALLOW_INTS
          many of the newer IBM Thinkpads.  If you experience hangs when you
          suspend, try setting this to Y.  Otherwise, say N.
  
- config APM_REAL_MODE_POWER_OFF
-       bool "Use real mode APM BIOS call to power off"
-       help
-         Use real mode APM BIOS calls to switch off the computer. This is
-         a work-around for a number of buggy BIOSes. Switch this option on if
-         your computer crashes instead of powering off properly.
  endif # APM
  
  source "arch/x86/kernel/cpu/cpufreq/Kconfig"
diff --combined arch/x86/kernel/hpet.c
index a1f6ed5e1a05b0a5305dc6c15a92b65682aebf25,3f0a3edf0a573a2f5f2d1051b32285ebffcdbb36..845ea097383ee4051a24b54bca8da4c29bd9f6d1
@@@ -33,7 -33,9 +33,9 @@@
   * HPET address is set in acpi/boot.c, when an ACPI entry exists
   */
  unsigned long                         hpet_address;
- unsigned long                         hpet_num_timers;
+ #ifdef CONFIG_PCI_MSI
+ static unsigned long                  hpet_num_timers;
+ #endif
  static void __iomem                   *hpet_virt_address;
  
  struct hpet_dev {
@@@ -811,7 -813,7 +813,7 @@@ int __init hpet_enable(void)
  
  out_nohpet:
        hpet_clear_mapping();
 -      boot_hpet_disable = 1;
 +      hpet_address = 0;
        return 0;
  }
  
@@@ -834,11 -836,10 +836,11 @@@ static __init int hpet_late_init(void)
  
                hpet_address = force_hpet_address;
                hpet_enable();
 -              if (!hpet_virt_address)
 -                      return -ENODEV;
        }
  
 +      if (!hpet_virt_address)
 +              return -ENODEV;
 +
        hpet_reserve_platform_timers(hpet_readl(HPET_ID));
  
        for_each_online_cpu(cpu) {
diff --combined fs/exec.c
index b4e5b8a9216a29b4531a701b3d76247b6ee5471f,1f59ea079cbb80f1910c98a828f0cb32f1d6292c..02d2e120542d76ca26ee233e638749e53bc227f0
+++ b/fs/exec.c
@@@ -55,6 -55,7 +55,7 @@@
  #include <asm/uaccess.h>
  #include <asm/mmu_context.h>
  #include <asm/tlb.h>
+ #include "internal.h"
  
  #ifdef __alpha__
  /* for /sbin/loader handling in search_binary_handler() */
@@@ -772,6 -773,7 +773,6 @@@ static int de_thread(struct task_struct *tsk)
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;
 -      struct task_struct *leader = NULL;
        int count;
  
        if (thread_group_empty(tsk))
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
 -              leader = tsk->group_leader;
 +              struct task_struct *leader = tsk->group_leader;
  
                sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
  
                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;
 -
                write_unlock_irq(&tasklist_lock);
 +
 +              release_task(leader);
        }
  
        sig->group_exit_task = NULL;
  no_thread_group:
        exit_itimers(sig);
        flush_itimer_signals();
 -      if (leader)
 -              release_task(leader);
  
        if (atomic_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
@@@ -978,7 -981,7 +979,7 @@@ int flush_old_exec(struct linux_binprm 
        /* This is the point of no return */
        current->sas_ss_sp = current->sas_ss_size = 0;
  
-       if (current->euid == current->uid && current->egid == current->gid)
+       if (current_euid() == current_uid() && current_egid() == current_gid())
                set_dumpable(current->mm, 1);
        else
                set_dumpable(current->mm, suid_dumpable);
         */
        current->mm->task_size = TASK_SIZE;
  
-       if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
-               suid_keys(current);
-               set_dumpable(current->mm, suid_dumpable);
+       /* install the new credentials */
+       if (bprm->cred->uid != current_euid() ||
+           bprm->cred->gid != current_egid()) {
                current->pdeath_signal = 0;
        } else if (file_permission(bprm->file, MAY_READ) ||
-                       (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
-               suid_keys(current);
+                  bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
                set_dumpable(current->mm, suid_dumpable);
        }
  
+       current->personality &= ~bprm->per_clear;
        /* An exec changes our domain. We are no longer part of the thread
           group */
  
  
  EXPORT_SYMBOL(flush_old_exec);
  
+ /*
+  * install the new credentials for this executable
+  */
+ void install_exec_creds(struct linux_binprm *bprm)
+ {
+       security_bprm_committing_creds(bprm);
+       commit_creds(bprm->cred);
+       bprm->cred = NULL;
+       /* cred_exec_mutex must be held at least to this point to prevent
+        * ptrace_attach() from altering our determination of the task's
+        * credentials; any time after this it may be unlocked */
+       security_bprm_committed_creds(bprm);
+ }
+ EXPORT_SYMBOL(install_exec_creds);
+ /*
+  * determine how safe it is to execute the proposed program
+  * - the caller must hold current->cred_exec_mutex to protect against
+  *   PTRACE_ATTACH
+  */
+ void check_unsafe_exec(struct linux_binprm *bprm)
+ {
+       struct task_struct *p = current;
+       bprm->unsafe = tracehook_unsafe_exec(p);
+       if (atomic_read(&p->fs->count) > 1 ||
+           atomic_read(&p->files->count) > 1 ||
+           atomic_read(&p->sighand->count) > 1)
+               bprm->unsafe |= LSM_UNSAFE_SHARE;
+ }
  /* 
   * Fill the binprm structure from the inode. 
   * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+  *
+  * This may be called multiple times for binary chains (scripts for example).
   */
  int prepare_binprm(struct linux_binprm *bprm)
  {
-       int mode;
+       umode_t mode;
        struct inode * inode = bprm->file->f_path.dentry->d_inode;
        int retval;
  
        if (bprm->file->f_op == NULL)
                return -EACCES;
  
-       bprm->e_uid = current->euid;
-       bprm->e_gid = current->egid;
+       /* clear any previous set[ug]id data from a previous binary */
+       bprm->cred->euid = current_euid();
+       bprm->cred->egid = current_egid();
  
-       if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
+       if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
                /* Set-uid? */
                if (mode & S_ISUID) {
-                       current->personality &= ~PER_CLEAR_ON_SETID;
-                       bprm->e_uid = inode->i_uid;
+                       bprm->per_clear |= PER_CLEAR_ON_SETID;
+                       bprm->cred->euid = inode->i_uid;
                }
  
                /* Set-gid? */
                 * executable.
                 */
                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-                       current->personality &= ~PER_CLEAR_ON_SETID;
-                       bprm->e_gid = inode->i_gid;
+                       bprm->per_clear |= PER_CLEAR_ON_SETID;
+                       bprm->cred->egid = inode->i_gid;
                }
        }
  
        /* fill in binprm security blob */
-       retval = security_bprm_set(bprm);
+       retval = security_bprm_set_creds(bprm);
        if (retval)
                return retval;
+       bprm->cred_prepared = 1;
  
-       memset(bprm->buf,0,BINPRM_BUF_SIZE);
-       return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
+       memset(bprm->buf, 0, BINPRM_BUF_SIZE);
+       return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
  }
  
  EXPORT_SYMBOL(prepare_binprm);
  
- static int unsafe_exec(struct task_struct *p)
- {
-       int unsafe = tracehook_unsafe_exec(p);
-       if (atomic_read(&p->fs->count) > 1 ||
-           atomic_read(&p->files->count) > 1 ||
-           atomic_read(&p->sighand->count) > 1)
-               unsafe |= LSM_UNSAFE_SHARE;
-       return unsafe;
- }
- void compute_creds(struct linux_binprm *bprm)
- {
-       int unsafe;
-       if (bprm->e_uid != current->uid) {
-               suid_keys(current);
-               current->pdeath_signal = 0;
-       }
-       exec_keys(current);
-       task_lock(current);
-       unsafe = unsafe_exec(current);
-       security_bprm_apply_creds(bprm, unsafe);
-       task_unlock(current);
-       security_bprm_post_apply_creds(bprm);
- }
- EXPORT_SYMBOL(compute_creds);
  /*
   * Arguments are '\0' separated strings found at the location bprm->p
   * points to; chop off the first by relocating brpm->p to right after
@@@ -1268,6 -1281,8 +1279,8 @@@ EXPORT_SYMBOL(search_binary_handler)
  void free_bprm(struct linux_binprm *bprm)
  {
        free_arg_pages(bprm);
+       if (bprm->cred)
+               abort_creds(bprm->cred);
        kfree(bprm);
  }
  
@@@ -1293,10 -1308,20 +1306,20 @@@ int do_execve(char * filename
        if (!bprm)
                goto out_files;
  
+       retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+       if (retval < 0)
+               goto out_free;
+       retval = -ENOMEM;
+       bprm->cred = prepare_exec_creds();
+       if (!bprm->cred)
+               goto out_unlock;
+       check_unsafe_exec(bprm);
        file = open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
-               goto out_kfree;
+               goto out_unlock;
  
        sched_exec();
  
  
        bprm->argc = count(argv, MAX_ARG_STRINGS);
        if ((retval = bprm->argc) < 0)
-               goto out_mm;
+               goto out;
  
        bprm->envc = count(envp, MAX_ARG_STRINGS);
        if ((retval = bprm->envc) < 0)
-               goto out_mm;
-       retval = security_bprm_alloc(bprm);
-       if (retval)
                goto out;
  
        retval = prepare_binprm(bprm);
  
        current->flags &= ~PF_KTHREAD;
        retval = search_binary_handler(bprm,regs);
-       if (retval >= 0) {
-               /* execve success */
-               security_bprm_free(bprm);
-               acct_update_integrals(current);
-               free_bprm(bprm);
-               if (displaced)
-                       put_files_struct(displaced);
-               return retval;
-       }
+       if (retval < 0)
+               goto out;
  
- out:
-       if (bprm->security)
-               security_bprm_free(bprm);
+       /* execve succeeded */
+       mutex_unlock(&current->cred_exec_mutex);
+       acct_update_integrals(current);
+       free_bprm(bprm);
+       if (displaced)
+               put_files_struct(displaced);
+       return retval;
  
- out_mm:
+ out:
        if (bprm->mm)
                mmput (bprm->mm);
  
@@@ -1362,7 -1380,11 +1378,11 @@@ out_file:
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
- out_kfree:
+ out_unlock:
+       mutex_unlock(&current->cred_exec_mutex);
+ out_free:
        free_bprm(bprm);
  
  out_files:
@@@ -1394,6 -1416,7 +1414,7 @@@ EXPORT_SYMBOL(set_binfmt)
   */
  static int format_corename(char *corename, long signr)
  {
+       const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
        char *out_ptr = corename;
                        /* uid */
                        case 'u':
                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", current->uid);
+                                             "%d", cred->uid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                        /* gid */
                        case 'g':
                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", current->gid);
+                                             "%d", cred->gid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
@@@ -1714,8 -1737,9 +1735,9 @@@ int do_coredump(long signr, int exit_co
        struct linux_binfmt * binfmt;
        struct inode * inode;
        struct file * file;
+       const struct cred *old_cred;
+       struct cred *cred;
        int retval = 0;
-       int fsuid = current->fsuid;
        int flag = 0;
        int ispipe = 0;
        unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
        binfmt = current->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
+       cred = prepare_creds();
+       if (!cred) {
+               retval = -ENOMEM;
+               goto fail;
+       }
        down_write(&mm->mmap_sem);
        /*
         * If another thread got here first, or we are not dumpable, bail out.
         */
        if (mm->core_state || !get_dumpable(mm)) {
                up_write(&mm->mmap_sem);
+               put_cred(cred);
                goto fail;
        }
  
         */
        if (get_dumpable(mm) == 2) {    /* Setuid core dump mode */
                flag = O_EXCL;          /* Stop rewrite attacks */
-               current->fsuid = 0;     /* Dump root private */
+               cred->fsuid = 0;        /* Dump root private */
        }
  
        retval = coredump_wait(exit_code, &core_state);
-       if (retval < 0)
+       if (retval < 0) {
+               put_cred(cred);
                goto fail;
+       }
+       old_cred = override_creds(cred);
  
        /*
         * Clear any false indication of pending signals that might
         * Dont allow local users get cute and trick others to coredump
         * into their pre-created files:
         */
-       if (inode->i_uid != current->fsuid)
+       if (inode->i_uid != current_fsuid())
                goto close_fail;
        if (!file->f_op)
                goto close_fail;
@@@ -1840,7 -1876,8 +1874,8 @@@ fail_unlock:
        if (helper_argv)
                argv_free(helper_argv);
  
-       current->fsuid = fsuid;
+       revert_creds(old_cred);
+       put_cred(cred);
        coredump_finish(mm);
  fail:
        return retval;
diff --combined kernel/sched.c
index 22c532a6f82cb9223d682a1b9b21389584b66cc5,748ff924a29056e57f5c30058ce39ea7e470b23e..355eda28720b2bde889b16981c9de6e936146ec9
   */
  #define RUNTIME_INF   ((u64)~0ULL)
  
+ DEFINE_TRACE(sched_wait_task);
+ DEFINE_TRACE(sched_wakeup);
+ DEFINE_TRACE(sched_wakeup_new);
+ DEFINE_TRACE(sched_switch);
+ DEFINE_TRACE(sched_migrate_task);
  #ifdef CONFIG_SMP
  /*
   * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
@@@ -203,6 -209,7 +209,6 @@@ void init_rt_bandwidth(struct rt_bandwi
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
 -      rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
  }
  
  static inline int rt_bandwidth_enabled(void)
@@@ -260,6 -267,10 +266,10 @@@ struct task_group 
        struct cgroup_subsys_state css;
  #endif
  
+ #ifdef CONFIG_USER_SCHED
+       uid_t uid;
+ #endif
  #ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each cpu */
        struct sched_entity **se;
  
  #ifdef CONFIG_USER_SCHED
  
+ /* Helper function to pass uid information to create_sched_user() */
+ void set_tg_uid(struct user_struct *user)
+ {
+       user->tg->uid = user->uid;
+ }
  /*
   * Root task group.
   *    Every UID task group (including init_task_group aka UID-0) will
@@@ -344,7 -361,9 +360,9 @@@ static inline struct task_group *task_g
        struct task_group *tg;
  
  #ifdef CONFIG_USER_SCHED
-       tg = p->user->tg;
+       rcu_read_lock();
+       tg = __task_cred(p)->user->tg;
+       rcu_read_unlock();
  #elif defined(CONFIG_CGROUP_SCHED)
        tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
                                struct task_group, css);
@@@ -585,6 -604,8 +603,8 @@@ struct rq 
  #ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
+       unsigned long long rq_cpu_time;
+       /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
  
        /* sys_sched_yield() stats */
        unsigned int yld_exp_empty;
@@@ -702,45 -723,18 +722,18 @@@ static __read_mostly char *sched_feat_n
  
  #undef SCHED_FEAT
  
- static int sched_feat_open(struct inode *inode, struct file *filp)
- {
-       filp->private_data = inode->i_private;
-       return 0;
- }
- static ssize_t
- sched_feat_read(struct file *filp, char __user *ubuf,
-               size_t cnt, loff_t *ppos)
+ static int sched_feat_show(struct seq_file *m, void *v)
  {
-       char *buf;
-       int r = 0;
-       int len = 0;
        int i;
  
        for (i = 0; sched_feat_names[i]; i++) {
-               len += strlen(sched_feat_names[i]);
-               len += 4;
-       }
-       buf = kmalloc(len + 2, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       for (i = 0; sched_feat_names[i]; i++) {
-               if (sysctl_sched_features & (1UL << i))
-                       r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-               else
-                       r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
+               if (!(sysctl_sched_features & (1UL << i)))
+                       seq_puts(m, "NO_");
+               seq_printf(m, "%s ", sched_feat_names[i]);
        }
+       seq_puts(m, "\n");
  
-       r += sprintf(buf + r, "\n");
-       WARN_ON(r >= len + 2);
-       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-       kfree(buf);
-       return r;
+       return 0;
  }
  
  static ssize_t
@@@ -785,10 -779,17 +778,17 @@@ sched_feat_write(struct file *filp, con
        return cnt;
  }
  
+ static int sched_feat_open(struct inode *inode, struct file *filp)
+ {
+       return single_open(filp, sched_feat_show, NULL);
+ }
  static struct file_operations sched_feat_fops = {
-       .open   = sched_feat_open,
-       .read   = sched_feat_read,
-       .write  = sched_feat_write,
+       .open           = sched_feat_open,
+       .write          = sched_feat_write,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
  };
  
  static __init int sched_init_debug(void)
@@@ -1138,6 -1139,7 +1138,6 @@@ static void init_rq_hrtick(struct rq *r
  
        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
 -      rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
  }
  #else /* CONFIG_SCHED_HRTICK */
  static inline void hrtick_clear(struct rq *rq)
@@@ -1472,27 -1474,13 +1472,13 @@@ static void
  update_group_shares_cpu(struct task_group *tg, int cpu,
                        unsigned long sd_shares, unsigned long sd_rq_weight)
  {
-       int boost = 0;
        unsigned long shares;
        unsigned long rq_weight;
  
        if (!tg->se[cpu])
                return;
  
-       rq_weight = tg->cfs_rq[cpu]->load.weight;
-       /*
-        * If there are currently no tasks on the cpu pretend there is one of
-        * average load so that when a new task gets to run here it will not
-        * get delayed by group starvation.
-        */
-       if (!rq_weight) {
-               boost = 1;
-               rq_weight = NICE_0_LOAD;
-       }
-       if (unlikely(rq_weight > sd_rq_weight))
-               rq_weight = sd_rq_weight;
+       rq_weight = tg->cfs_rq[cpu]->rq_weight;
  
        /*
         *           \Sum shares * rq_weight
         *               \Sum rq_weight
         *
         */
-       shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+       shares = (sd_shares * rq_weight) / sd_rq_weight;
        shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
  
        if (abs(shares - tg->se[cpu]->load.weight) >
                unsigned long flags;
  
                spin_lock_irqsave(&rq->lock, flags);
-               /*
-                * record the actual number of shares, not the boosted amount.
-                */
-               tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-               tg->cfs_rq[cpu]->rq_weight = rq_weight;
+               tg->cfs_rq[cpu]->shares = shares;
  
                __set_se_shares(tg->se[cpu], shares);
                spin_unlock_irqrestore(&rq->lock, flags);
   */
  static int tg_shares_up(struct task_group *tg, void *data)
  {
-       unsigned long rq_weight = 0;
+       unsigned long weight, rq_weight = 0;
        unsigned long shares = 0;
        struct sched_domain *sd = data;
        int i;
  
        for_each_cpu_mask(i, sd->span) {
-               rq_weight += tg->cfs_rq[i]->load.weight;
+               /*
+                * If there are currently no tasks on the cpu pretend there
+                * is one of average load so that when a new task gets to
+                * run here it will not get delayed by group starvation.
+                */
+               weight = tg->cfs_rq[i]->load.weight;
+               if (!weight)
+                       weight = NICE_0_LOAD;
+               tg->cfs_rq[i]->rq_weight = weight;
+               rq_weight += weight;
                shares += tg->cfs_rq[i]->shares;
        }
  
        if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
                shares = tg->shares;
  
-       if (!rq_weight)
-               rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
        for_each_cpu_mask(i, sd->span)
                update_group_shares_cpu(tg, i, shares, rq_weight);
  
@@@ -1610,6 -1601,39 +1599,39 @@@ static inline void update_shares_locked
  
  #endif
  
+ /*
+  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+  */
+ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(this_rq->lock)
+       __acquires(busiest->lock)
+       __acquires(this_rq->lock)
+ {
+       int ret = 0;
+       if (unlikely(!irqs_disabled())) {
+               /* printk() doesn't work good under rq->lock */
+               spin_unlock(&this_rq->lock);
+               BUG_ON(1);
+       }
+       if (unlikely(!spin_trylock(&busiest->lock))) {
+               if (busiest < this_rq) {
+                       spin_unlock(&this_rq->lock);
+                       spin_lock(&busiest->lock);
+                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+                       ret = 1;
+               } else
+                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+       }
+       return ret;
+ }
+ static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(busiest->lock)
+ {
+       spin_unlock(&busiest->lock);
+       lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+ }
  #endif
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
@@@ -1843,6 -1867,8 +1865,8 @@@ void set_task_cpu(struct task_struct *p
  
        clock_offset = old_rq->clock - new_rq->clock;
  
+       trace_sched_migrate_task(p, task_cpu(p), new_cpu);
  #ifdef CONFIG_SCHEDSTATS
        if (p->se.wait_start)
                p->se.wait_start -= clock_offset;
@@@ -2252,6 -2278,7 +2276,7 @@@ static int try_to_wake_up(struct task_s
  
        smp_wmb();
        rq = task_rq_lock(p, &flags);
+       update_rq_clock(rq);
        old_state = p->state;
        if (!(old_state & state))
                goto out;
@@@ -2309,12 -2336,11 +2334,11 @@@ out_activate:
                schedstat_inc(p, se.nr_wakeups_local);
        else
                schedstat_inc(p, se.nr_wakeups_remote);
-       update_rq_clock(rq);
        activate_task(rq, p, 1);
        success = 1;
  
  out_running:
-       trace_sched_wakeup(rq, p);
+       trace_sched_wakeup(rq, p, success);
        check_preempt_curr(rq, p, sync);
  
        p->state = TASK_RUNNING;
@@@ -2447,7 -2473,7 +2471,7 @@@ void wake_up_new_task(struct task_struc
                p->sched_class->task_new(rq, p);
                inc_nr_running(rq);
        }
-       trace_sched_wakeup_new(rq, p);
+       trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, 0);
  #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
@@@ -2809,40 -2835,6 +2833,6 @@@ static void double_rq_unlock(struct rq 
                __release(rq2->lock);
  }
  
- /*
-  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
-  */
- static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-       __releases(this_rq->lock)
-       __acquires(busiest->lock)
-       __acquires(this_rq->lock)
- {
-       int ret = 0;
-       if (unlikely(!irqs_disabled())) {
-               /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
-               BUG_ON(1);
-       }
-       if (unlikely(!spin_trylock(&busiest->lock))) {
-               if (busiest < this_rq) {
-                       spin_unlock(&this_rq->lock);
-                       spin_lock(&busiest->lock);
-                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-                       ret = 1;
-               } else
-                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-       }
-       return ret;
- }
- static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-       __releases(busiest->lock)
- {
-       spin_unlock(&busiest->lock);
-       lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
- }
  /*
   * If dest_cpu is allowed for this process, migrate the task to it.
   * This is accomplished by forcing the cpu_allowed mask to only
@@@ -2860,7 -2852,6 +2850,6 @@@ static void sched_migrate_task(struct t
            || unlikely(!cpu_active(dest_cpu)))
                goto out;
  
-       trace_sched_migrate_task(rq, p, dest_cpu);
        /* force the process onto the specified CPU */
        if (migrate_task(p, dest_cpu, &req)) {
                /* Need to wait for migration thread (might exit: take ref). */
@@@ -3705,7 -3696,7 +3694,7 @@@ out_balanced:
  static void idle_balance(int this_cpu, struct rq *this_rq)
  {
        struct sched_domain *sd;
-       int pulled_task = -1;
+       int pulled_task = 0;
        unsigned long next_balance = jiffies + HZ;
        cpumask_t tmpmask;
  
@@@ -5132,6 -5123,22 +5121,22 @@@ __setscheduler(struct rq *rq, struct ta
        set_load_weight(p);
  }
  
+ /*
+  * check the target process has a UID that matches the current process's
+  */
+ static bool check_same_owner(struct task_struct *p)
+ {
+       const struct cred *cred = current_cred(), *pcred;
+       bool match;
+       rcu_read_lock();
+       pcred = __task_cred(p);
+       match = (cred->euid == pcred->euid ||
+                cred->euid == pcred->uid);
+       rcu_read_unlock();
+       return match;
+ }
  static int __sched_setscheduler(struct task_struct *p, int policy,
                                struct sched_param *param, bool user)
  {
@@@ -5191,8 -5198,7 +5196,7 @@@ recheck:
                        return -EPERM;
  
                /* can't change other user's priorities */
-               if ((current->euid != p->euid) &&
-                   (current->euid != p->uid))
+               if (!check_same_owner(p))
                        return -EPERM;
        }
  
@@@ -5424,8 -5430,7 +5428,7 @@@ long sched_setaffinity(pid_t pid, cons
        read_unlock(&tasklist_lock);
  
        retval = -EPERM;
-       if ((current->euid != p->euid) && (current->euid != p->uid) &&
-                       !capable(CAP_SYS_NICE))
+       if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
                goto out_unlock;
  
        retval = security_task_setscheduler(p, 0, NULL);
@@@ -5894,6 -5899,7 +5897,7 @@@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
+       ftrace_graph_init_task(idle);
  }
  
  /*
@@@ -6124,7 -6130,6 +6128,6 @@@ static int __migrate_task_irq(struct ta
  
  /*
   * Figure out where task on dead CPU should go, use force if necessary.
-  * NOTE: interrupts should be disabled by the caller
   */
  static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  {
@@@ -6636,28 -6641,6 +6639,6 @@@ early_initcall(migration_init)
  
  #ifdef CONFIG_SCHED_DEBUG
  
- static inline const char *sd_level_to_string(enum sched_domain_level lvl)
- {
-       switch (lvl) {
-       case SD_LV_NONE:
-                       return "NONE";
-       case SD_LV_SIBLING:
-                       return "SIBLING";
-       case SD_LV_MC:
-                       return "MC";
-       case SD_LV_CPU:
-                       return "CPU";
-       case SD_LV_NODE:
-                       return "NODE";
-       case SD_LV_ALLNODES:
-                       return "ALLNODES";
-       case SD_LV_MAX:
-                       return "MAX";
-       }
-       return "MAX";
- }
  static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  cpumask_t *groupmask)
  {
                return -1;
        }
  
-       printk(KERN_CONT "span %s level %s\n",
-               str, sd_level_to_string(sd->level));
+       printk(KERN_CONT "span %s level %s\n", str, sd->name);
  
        if (!cpu_isset(cpu, sd->span)) {
                printk(KERN_ERR "ERROR: domain->span does not contain "
@@@ -6814,6 -6796,8 +6794,8 @@@ sd_parent_degenerate(struct sched_domai
                                SD_BALANCE_EXEC |
                                SD_SHARE_CPUPOWER |
                                SD_SHARE_PKG_RESOURCES);
+               if (nr_node_ids == 1)
+                       pflags &= ~SD_SERIALIZE;
        }
        if (~cflags & pflags)
                return 0;
@@@ -7334,13 -7318,21 +7316,21 @@@ struct allmasks 
  };
  
  #if   NR_CPUS > 128
- #define       SCHED_CPUMASK_ALLOC             1
- #define       SCHED_CPUMASK_FREE(v)           kfree(v)
- #define       SCHED_CPUMASK_DECLARE(v)        struct allmasks *v
+ #define SCHED_CPUMASK_DECLARE(v)      struct allmasks *v
+ static inline void sched_cpumask_alloc(struct allmasks **masks)
+ {
+       *masks = kmalloc(sizeof(**masks), GFP_KERNEL);
+ }
+ static inline void sched_cpumask_free(struct allmasks *masks)
+ {
+       kfree(masks);
+ }
  #else
- #define       SCHED_CPUMASK_ALLOC             0
- #define       SCHED_CPUMASK_FREE(v)
- #define       SCHED_CPUMASK_DECLARE(v)        struct allmasks _v, *v = &_v
+ #define SCHED_CPUMASK_DECLARE(v)      struct allmasks _v, *v = &_v
+ static inline void sched_cpumask_alloc(struct allmasks **masks)
+ { }
+ static inline void sched_cpumask_free(struct allmasks *masks)
+ { }
  #endif
  
  #define       SCHED_CPUMASK_VAR(v, a)         cpumask_t *v = (cpumask_t *) \
@@@ -7416,9 -7408,8 +7406,8 @@@ static int __build_sched_domains(const 
                return -ENOMEM;
        }
  
- #if SCHED_CPUMASK_ALLOC
        /* get space for all scratch cpumask variables */
-       allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+       sched_cpumask_alloc(&allmasks);
        if (!allmasks) {
                printk(KERN_WARNING "Cannot alloc cpumask array\n");
                kfree(rd);
  #endif
                return -ENOMEM;
        }
- #endif
        tmpmask = (cpumask_t *)allmasks;
  
  
                cpu_attach_domain(sd, rd, i);
        }
  
-       SCHED_CPUMASK_FREE((void *)allmasks);
+       sched_cpumask_free(allmasks);
        return 0;
  
  #ifdef CONFIG_NUMA
  error:
        free_sched_groups(cpu_map, tmpmask);
-       SCHED_CPUMASK_FREE((void *)allmasks);
+       sched_cpumask_free(allmasks);
        kfree(rd);
        return -ENOMEM;
  #endif
@@@ -7710,8 -7701,14 +7699,14 @@@ static struct sched_domain_attr *dattr_
   */
  static cpumask_t fallback_doms;
  
- void __attribute__((weak)) arch_update_cpu_topology(void)
+ /*
+  * arch_update_cpu_topology lets virtualized architectures update the
+  * cpu core maps. It is supposed to return 1 if the topology changed
+  * or 0 if it stayed the same.
+  */
+ int __attribute__((weak)) arch_update_cpu_topology(void)
  {
+       return 0;
  }
  
  /*
@@@ -7751,8 -7748,6 +7746,6 @@@ static void detach_destroy_domains(cons
        cpumask_t tmpmask;
        int i;
  
-       unregister_sched_domain_sysctl();
        for_each_cpu_mask_nr(i, *cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
        synchronize_sched();
@@@ -7805,17 -7800,21 +7798,21 @@@ void partition_sched_domains(int ndoms_
                             struct sched_domain_attr *dattr_new)
  {
        int i, j, n;
+       int new_topology;
  
        mutex_lock(&sched_domains_mutex);
  
        /* always unregister in case we don't destroy any domains */
        unregister_sched_domain_sysctl();
  
+       /* Let architecture update cpu core mappings. */
+       new_topology = arch_update_cpu_topology();
        n = doms_new ? ndoms_new : 0;
  
        /* Destroy deleted domains */
        for (i = 0; i < ndoms_cur; i++) {
-               for (j = 0; j < n; j++) {
+               for (j = 0; j < n && !new_topology; j++) {
                        if (cpus_equal(doms_cur[i], doms_new[j])
                            && dattrs_equal(dattr_cur, i, dattr_new, j))
                                goto match1;
@@@ -7830,12 -7829,12 +7827,12 @@@ match1:
                ndoms_cur = 0;
                doms_new = &fallback_doms;
                cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-               dattr_new = NULL;
+               WARN_ON_ONCE(dattr_new);
        }
  
        /* Build new domains */
        for (i = 0; i < ndoms_new; i++) {
-               for (j = 0; j < ndoms_cur; j++) {
+               for (j = 0; j < ndoms_cur && !new_topology; j++) {
                        if (cpus_equal(doms_new[i], doms_cur[j])
                            && dattrs_equal(dattr_new, i, dattr_cur, j))
                                goto match2;
@@@ -8490,7 -8489,7 +8487,7 @@@ static
  int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
  {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se, *parent_se;
+       struct sched_entity *se;
        struct rq *rq;
        int i;
  
        for_each_possible_cpu(i) {
                rq = cpu_rq(i);
  
-               cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-                               GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+               cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+                                     GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
                        goto err;
  
-               se = kmalloc_node(sizeof(struct sched_entity),
-                               GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+               se = kzalloc_node(sizeof(struct sched_entity),
+                                 GFP_KERNEL, cpu_to_node(i));
                if (!se)
                        goto err;
  
-               parent_se = parent ? parent->se[i] : NULL;
-               init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+               init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
        }
  
        return 1;
@@@ -8578,7 -8576,7 +8574,7 @@@ static
  int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
  {
        struct rt_rq *rt_rq;
-       struct sched_rt_entity *rt_se, *parent_se;
+       struct sched_rt_entity *rt_se;
        struct rq *rq;
        int i;
  
        for_each_possible_cpu(i) {
                rq = cpu_rq(i);
  
-               rt_rq = kmalloc_node(sizeof(struct rt_rq),
-                               GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+               rt_rq = kzalloc_node(sizeof(struct rt_rq),
+                                    GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;
  
-               rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-                               GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+               rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+                                    GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err;
  
-               parent_se = parent ? parent->rt_se[i] : NULL;
-               init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+               init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
        }
  
        return 1;
@@@ -9249,11 -9246,12 +9244,12 @@@ struct cgroup_subsys cpu_cgroup_subsys 
   * (balbir@in.ibm.com).
   */
  
- /* track cpu usage of a group of tasks */
+ /* track cpu usage of a group of tasks and its child groups */
  struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
        u64 *cpuusage;
+       struct cpuacct *parent;
  };
  
  struct cgroup_subsys cpuacct_subsys;
@@@ -9287,6 -9285,9 +9283,9 @@@ static struct cgroup_subsys_state *cpua
                return ERR_PTR(-ENOMEM);
        }
  
+       if (cgrp->parent)
+               ca->parent = cgroup_ca(cgrp->parent);
        return &ca->css;
  }
  
@@@ -9300,6 -9301,41 +9299,41 @@@ cpuacct_destroy(struct cgroup_subsys *s
        kfree(ca);
  }
  
+ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
+ {
+       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 data;
+ #ifndef CONFIG_64BIT
+       /*
+        * Take rq->lock to make 64-bit read safe on 32-bit platforms.
+        */
+       spin_lock_irq(&cpu_rq(cpu)->lock);
+       data = *cpuusage;
+       spin_unlock_irq(&cpu_rq(cpu)->lock);
+ #else
+       data = *cpuusage;
+ #endif
+       return data;
+ }
+ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
+ {
+       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+ #ifndef CONFIG_64BIT
+       /*
+        * Take rq->lock to make 64-bit write safe on 32-bit platforms.
+        */
+       spin_lock_irq(&cpu_rq(cpu)->lock);
+       *cpuusage = val;
+       spin_unlock_irq(&cpu_rq(cpu)->lock);
+ #else
+       *cpuusage = val;
+ #endif
+ }
  /* return total cpu usage (in nanoseconds) of a group */
  static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
  {
        u64 totalcpuusage = 0;
        int i;
  
-       for_each_possible_cpu(i) {
-               u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
-               /*
-                * Take rq->lock to make 64-bit addition safe on 32-bit
-                * platforms.
-                */
-               spin_lock_irq(&cpu_rq(i)->lock);
-               totalcpuusage += *cpuusage;
-               spin_unlock_irq(&cpu_rq(i)->lock);
-       }
+       for_each_present_cpu(i)
+               totalcpuusage += cpuacct_cpuusage_read(ca, i);
  
        return totalcpuusage;
  }
@@@ -9334,23 -9361,39 +9359,39 @@@ static int cpuusage_write(struct cgrou
                goto out;
        }
  
-       for_each_possible_cpu(i) {
-               u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+       for_each_present_cpu(i)
+               cpuacct_cpuusage_write(ca, i, 0);
  
-               spin_lock_irq(&cpu_rq(i)->lock);
-               *cpuusage = 0;
-               spin_unlock_irq(&cpu_rq(i)->lock);
-       }
  out:
        return err;
  }
  
+ static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
+                                  struct seq_file *m)
+ {
+       struct cpuacct *ca = cgroup_ca(cgroup);
+       u64 percpu;
+       int i;
+       for_each_present_cpu(i) {
+               percpu = cpuacct_cpuusage_read(ca, i);
+               seq_printf(m, "%llu ", (unsigned long long) percpu);
+       }
+       seq_printf(m, "\n");
+       return 0;
+ }
  static struct cftype files[] = {
        {
                .name = "usage",
                .read_u64 = cpuusage_read,
                .write_u64 = cpuusage_write,
        },
+       {
+               .name = "usage_percpu",
+               .read_seq_string = cpuacct_percpu_seq_read,
+       },
  };
  
  static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
  static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
  {
        struct cpuacct *ca;
+       int cpu;
  
        if (!cpuacct_subsys.active)
                return;
  
+       cpu = task_cpu(tsk);
        ca = task_ca(tsk);
-       if (ca) {
-               u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
  
+       for (; ca; ca = ca->parent) {
+               u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;
        }
  }
index ae542e2e38d5fda318a2b62698aeaadd5ee1fe4e,01becf1f19ff78fad129aa6474ec550c428519ae..a5779bd975db0a386cd67d67629a2bf53e72764e
@@@ -202,6 -202,7 +202,6 @@@ static void start_stack_timer(int cpu)
  
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = stack_trace_timer_fn;
 -      hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
  
        hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
  }
@@@ -233,20 -234,10 +233,10 @@@ static void stop_stack_timers(void)
                stop_stack_timer(cpu);
  }
  
- static void stack_reset(struct trace_array *tr)
- {
-       int cpu;
-       tr->time_start = ftrace_now(tr->cpu);
-       for_each_online_cpu(cpu)
-               tracing_reset(tr, cpu);
- }
  static void start_stack_trace(struct trace_array *tr)
  {
        mutex_lock(&sample_timer_lock);
-       stack_reset(tr);
+       tracing_reset_online_cpus(tr);
        start_stack_timers();
        tracer_enabled = 1;
        mutex_unlock(&sample_timer_lock);
@@@ -260,27 -251,17 +250,17 @@@ static void stop_stack_trace(struct tra
        mutex_unlock(&sample_timer_lock);
  }
  
- static void stack_trace_init(struct trace_array *tr)
+ static int stack_trace_init(struct trace_array *tr)
  {
        sysprof_trace = tr;
  
-       if (tr->ctrl)
-               start_stack_trace(tr);
+       start_stack_trace(tr);
+       return 0;
  }
  
  static void stack_trace_reset(struct trace_array *tr)
  {
-       if (tr->ctrl)
-               stop_stack_trace(tr);
- }
- static void stack_trace_ctrl_update(struct trace_array *tr)
- {
-       /* When starting a new trace, reset the buffers */
-       if (tr->ctrl)
-               start_stack_trace(tr);
-       else
-               stop_stack_trace(tr);
+       stop_stack_trace(tr);
  }
  
  static struct tracer stack_trace __read_mostly =
        .name           = "sysprof",
        .init           = stack_trace_init,
        .reset          = stack_trace_reset,
-       .ctrl_update    = stack_trace_ctrl_update,
  #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sysprof,
  #endif
diff --combined sound/core/hrtimer.c
index 0000000000000000000000000000000000000000,c1d285921f807cc6ce1b626a87d2cf6684ecf89d..34c7d48f5061b9fd2025a23e78ac63c4bcd3fd55
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,155 +1,154 @@@
 -      stime->hrt.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
+ /*
+  * ALSA timer back-end using hrtimer
+  * Copyright (C) 2008 Takashi Iwai
+  *
+  *   This program is free software; you can redistribute it and/or modify
+  *   it under the terms of the GNU General Public License as published by
+  *   the Free Software Foundation; either version 2 of the License, or
+  *   (at your option) any later version.
+  *
+  *   This program is distributed in the hope that it will be useful,
+  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  *   GNU General Public License for more details.
+  *
+  *   You should have received a copy of the GNU General Public License
+  *   along with this program; if not, write to the Free Software
+  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+  *
+  */
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/hrtimer.h>
+ #include <sound/core.h>
+ #include <sound/timer.h>
+ MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
+ MODULE_DESCRIPTION("ALSA hrtimer backend");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER));
+ #define NANO_SEC      1000000000UL    /* 10^9 in sec */
+ static unsigned int resolution;
+ struct snd_hrtimer {
+       struct snd_timer *timer;
+       struct hrtimer hrt;
+ };
+ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
+ {
+       struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
+       struct snd_timer *t = stime->timer;
+       hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+       snd_timer_interrupt(stime->timer, t->sticks);
+       return HRTIMER_RESTART;
+ }
+ static int snd_hrtimer_open(struct snd_timer *t)
+ {
+       struct snd_hrtimer *stime;
+       stime = kmalloc(sizeof(*stime), GFP_KERNEL);
+       if (!stime)
+               return -ENOMEM;
+       hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       stime->timer = t;
+       stime->hrt.function = snd_hrtimer_callback;
+       t->private_data = stime;
+       return 0;
+ }
+ static int snd_hrtimer_close(struct snd_timer *t)
+ {
+       struct snd_hrtimer *stime = t->private_data;
+       if (stime) {
+               hrtimer_cancel(&stime->hrt);
+               kfree(stime);
+               t->private_data = NULL;
+       }
+       return 0;
+ }
+ static int snd_hrtimer_start(struct snd_timer *t)
+ {
+       struct snd_hrtimer *stime = t->private_data;
+       hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
+                     HRTIMER_MODE_REL);
+       return 0;
+ }
+ static int snd_hrtimer_stop(struct snd_timer *t)
+ {
+       struct snd_hrtimer *stime = t->private_data;
+       hrtimer_cancel(&stime->hrt);
+       return 0;
+ }
+ static struct snd_timer_hardware hrtimer_hw = {
+       .flags =        SNDRV_TIMER_HW_AUTO,
+       .open =         snd_hrtimer_open,
+       .close =        snd_hrtimer_close,
+       .start =        snd_hrtimer_start,
+       .stop =         snd_hrtimer_stop,
+ };
+ /*
+  * entry functions
+  */
+ static struct snd_timer *mytimer;
+ static int __init snd_hrtimer_init(void)
+ {
+       struct snd_timer *timer;
+       struct timespec tp;
+       int err;
+       hrtimer_get_res(CLOCK_MONOTONIC, &tp);
+       if (tp.tv_sec > 0 || !tp.tv_nsec) {
+               snd_printk(KERN_ERR
+                          "snd-hrtimer: Invalid resolution %u.%09u",
+                          (unsigned)tp.tv_sec, (unsigned)tp.tv_nsec);
+               return -EINVAL;
+       }
+       resolution = tp.tv_nsec;
+       /* Create a new timer and set up the fields */
+       err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER,
+                                  &timer);
+       if (err < 0)
+               return err;
+       timer->module = THIS_MODULE;
+       strcpy(timer->name, "HR timer");
+       timer->hw = hrtimer_hw;
+       timer->hw.resolution = resolution;
+       timer->hw.ticks = NANO_SEC / resolution;
+       err = snd_timer_global_register(timer);
+       if (err < 0) {
+               snd_timer_global_free(timer);
+               return err;
+       }
+       mytimer = timer; /* remember this */
+       return 0;
+ }
+ static void __exit snd_hrtimer_exit(void)
+ {
+       if (mytimer) {
+               snd_timer_global_free(mytimer);
+               mytimer = NULL;
+       }
+ }
+ module_init(snd_hrtimer_init);
+ module_exit(snd_hrtimer_exit);
index 8e52b2a8a13a593abc1145224ca4aca73eccfc1c,2a02f704f366b31c58947e3d409cf5fd5212d06c..a4049eb94d35a28bafa41072e6e44af726d6f251
@@@ -96,6 -96,7 +96,6 @@@ static int __devinit snd_card_pcsp_prob
                return -EINVAL;
  
        hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 -      pcsp_chip.timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
        pcsp_chip.timer.function = pcsp_do_timer;
  
        card = snd_card_new(index, id, THIS_MODULE, 0);
@@@ -187,10 -188,8 +187,8 @@@ static int __devexit pcsp_remove(struc
  
  static void pcsp_stop_beep(struct snd_pcsp *chip)
  {
-       spin_lock_irq(&chip->substream_lock);
-       if (!chip->playback_substream)
-               pcspkr_stop_sound();
-       spin_unlock_irq(&chip->substream_lock);
+       pcsp_sync_stop(chip);
+       pcspkr_stop_sound();
  }
  
  #ifdef CONFIG_PM