 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 
 #include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
 #include <acpi/processor.h>
 
-#include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
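The <linux/ftrace.h> to <trace/power.h> switch follows the power-tracing declarations moving into their own header. As a rough, from-memory sketch of the kind of call site this driver keeps (the helper name below is illustrative; power_trace, trace_power_mark and POWER_PSTATE are assumed from that tracing series):

    #include <trace/power.h>

    /* With the tracepoint conversion each user is expected to define the
     * tracepoint it fires; assumed from the same series. */
    DEFINE_TRACE(power_mark);

    /* Illustrative helper: report a P-state transition to the power tracer. */
    static void mark_pstate_transition(unsigned int next_perf_state)
    {
            struct power_trace it;

            trace_power_mark(&it, POWER_PSTATE, next_perf_state);
    }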
 
 #include <linux/compiler.h>   /* for inline */
 #include <linux/types.h>      /* for size_t */
 #include <linux/stddef.h>     /* for NULL */
+#include <stdarg.h>
 
 extern char *strndup_user(const char __user *, long);
+extern void *memdup_user(const void __user *, size_t);
 
 /*
  * Include machine specific inline routines
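The added memdup_user() is the user-space counterpart of kmemdup(): it duplicates a user buffer into kernel memory in one call. A minimal sketch of the intended semantics, assuming the usual kmalloc()/copy_from_user() pairing and ERR_PTR() error returns (not necessarily the exact mm/util.c body):

    #include <linux/slab.h>      /* kmalloc, kfree */
    #include <linux/uaccess.h>   /* copy_from_user */
    #include <linux/err.h>       /* ERR_PTR */

    void *memdup_user(const void __user *src, size_t len)
    {
            void *p = kmalloc(len, GFP_KERNEL);   /* kernel-side copy */

            if (!p)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(p, src, len)) {    /* returns bytes NOT copied */
                    kfree(p);
                    return ERR_PTR(-EFAULT);
            }

            return p;
    }

Callers can then collapse an open-coded kmalloc() + copy_from_user() pair into a single memdup_user() call followed by an IS_ERR() check.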
 
 {
         unsigned int *m;
         int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+        void *ret;
 
-        lockdep_trace_alloc(flags);
+        lockdep_trace_alloc(gfp);
 
         if (size < PAGE_SIZE - align) {
                 if (!size)
                         return ZERO_SIZE_PTR;
 
                 m = slob_alloc(size + align, gfp, align, node);
                 if (!m)
                         return NULL;
                 *m = size;
-                return (void *)m + align;
+                ret = (void *)m + align;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, size + align, gfp, node);
         } else {
-                void *ret;
+                unsigned int order = get_order(size);
 
-                ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+                ret = slob_new_pages(gfp | __GFP_COMP, order, node);
                 if (ret) {
                         struct page *page;
                         page = virt_to_page(ret);

 {
         void *b;
 
-        if (c->size < PAGE_SIZE)
+        if (c->size < PAGE_SIZE) {
                 b = slob_alloc(c->size, flags, c->align, node);
-        else
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          SLOB_UNITS(c->size) * SLOB_UNIT,
+                                          flags, node);
+        } else {
-                b = slob_new_page(flags, get_order(c->size), node);
+                b = slob_new_pages(flags, get_order(c->size), node);
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          PAGE_SIZE << get_order(c->size),
+                                          flags, node);
+        }
 
         if (c->ctor)
                 c->ctor(b);
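The kmemtrace_mark_alloc_node() hooks above record both the requested size (c->size) and the size actually handed out (SLOB_UNITS(c->size) * SLOB_UNIT on the small-object path, PAGE_SIZE << order for whole pages), so the tracer can measure internal fragmentation. The free side is symmetric; a sketch of the matching calls, assuming the kmemtrace API used in this series:

    /* In kfree(): report release of a kmalloc'ed object. */
    kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);

    /* In kmem_cache_free(): report release of a cache-managed object. */
    kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);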