 extern void force_flush_all_skas(void);
 extern long execute_syscall_skas(void *r);
 extern void before_mem_skas(unsigned long unused);
-extern unsigned long set_task_sizes_skas(unsigned long *host_size_out,
-                                        unsigned long *task_size_out);
+extern unsigned long set_task_sizes_skas(unsigned long *task_size_out);
 extern int start_uml_skas(void);
 extern int external_pid_skas(struct task_struct *task);
 extern int thread_pid_skas(struct task_struct *task);
 
 extern void force_flush_all_tt(void);
 extern long execute_syscall_tt(void *r);
 extern void before_mem_tt(unsigned long brk_start);
-extern unsigned long set_task_sizes_tt(unsigned long *host_size_out,
-                                      unsigned long *task_size_out);
+extern unsigned long set_task_sizes_tt(unsigned long *task_size_out);
 extern int start_uml_tt(void);
 extern int external_pid_tt(struct task_struct *task);
 extern int thread_pid_tt(struct task_struct *task);
 
 #include "init.h"
 #include "kern_constants.h"
 
-extern char __binary_start;
-
 /* Changed during early boot */
 unsigned long *empty_zero_page = NULL;
 unsigned long *empty_bad_page = NULL;
 
 void mem_init(void)
 {
-       unsigned long start;
-
        max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;
 
         /* clear the zero-page */
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;
 
-       /* Fill in any hole at the start of the binary */
-       start = (unsigned long) &__binary_start & PAGE_MASK;
-       if(uml_physmem != start){
-               map_memory(uml_physmem, __pa(uml_physmem), start - uml_physmem,
-                          1, 1, 0);
-       }
-
        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        totalhigh_pages = highmem >> PAGE_SHIFT;
 
        }
 }
 
-extern int __syscall_stub_start, __binary_start;
+extern int __syscall_stub_start;
 
 void setup_physmem(unsigned long start, unsigned long reserve_end,
                   unsigned long len, unsigned long long highmem)
 
 #include "mem_user.h"
 #include "skas.h"
 
-unsigned long set_task_sizes_skas(unsigned long *host_size_out,
-                                 unsigned long *task_size_out)
+unsigned long set_task_sizes_skas(unsigned long *task_size_out)
 {
        /* Round up to the nearest 4M */
-       unsigned long top = ROUND_4M((unsigned long) &host_size_out);
+       unsigned long host_task_size = ROUND_4M((unsigned long)
+                                               &host_task_size);
 
 #ifdef CONFIG_HOST_TASK_SIZE
-       *host_size_out = ROUND_4M(CONFIG_HOST_TASK_SIZE);
+       host_task_size = ROUND_4M(CONFIG_HOST_TASK_SIZE);
        *task_size_out = CONFIG_HOST_TASK_SIZE;
 #else
-       *host_size_out = top;
        if (!skas_needs_stub)
-               *task_size_out = top;
+               *task_size_out = host_task_size;
        else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
 #endif
-       return ((unsigned long) set_task_sizes_skas) & ~0xffffff;
+       return host_task_size;
 }
 
 #define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
 #define START (CONFIG_TOP_ADDR - SIZE)
 
-unsigned long set_task_sizes_tt(unsigned long *host_size_out,
-                               unsigned long *task_size_out)
+unsigned long set_task_sizes_tt(unsigned long *task_size_out)
 {
+       unsigned long host_task_size;
+
        /* Round up to the nearest 4M */
-       *host_size_out = ROUND_4M((unsigned long) &host_size_out);
+       host_task_size = ROUND_4M((unsigned long) &host_task_size);
        *task_size_out = START;
-       return START;
+
+       return host_task_size;
 }
 
 
 #define MIN_VMALLOC (32 * 1024 * 1024)
 
+extern char __binary_start;
+
 int linux_main(int argc, char **argv)
 {
        unsigned long avail, diff;
 
        printf("UML running in %s mode\n", mode);
 
-       uml_start = CHOOSE_MODE_PROC(set_task_sizes_tt, set_task_sizes_skas,
-                                    &host_task_size, &task_size);
+       uml_start = (unsigned long) &__binary_start;
+       host_task_size = CHOOSE_MODE_PROC(set_task_sizes_tt,
+                                         set_task_sizes_skas, &task_size);
 
        /*
         * Setting up handlers to 'sig_info' struct
                physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
        }
 
-       uml_physmem = uml_start;
+       uml_physmem = uml_start & PAGE_MASK;
 
        /* Reserve up to 4M after the current brk */
        uml_reserved = ROUND_4M(brk_start) + (1 << 22);
 
 
 SECTIONS
 {
-  /*This must contain the right address - not quite the default ELF one.*/
+  /* This must contain the right address - not quite the default ELF one.*/
   PROVIDE (__executable_start = START);
-  . = START + SIZEOF_HEADERS;
+  /* Static binaries stick stuff here, like the sigreturn trampoline,
+   * invisibly to objdump.  So, just make __binary_start equal to the very
+   * beginning of the executable, and if there are unmapped pages after this,
+   * they are forever unusable.
+   */
+  __binary_start = START;
 
-  /* Used in arch/um/kernel/mem.c. Any memory between START and __binary_start
-   * is remapped.*/
-  __binary_start = .;
+  . = START + SIZEOF_HEADERS;
 
 #ifdef MODE_TT
   .remap_data : { UNMAP_PATH (.data .bss) }