        .long sys_io_submit
        .long sys_io_cancel
        .long sys_fadvise64     /* 250 */
-       .long sys_ni_syscall
+       .long sys_set_zone_reclaim
        .long sys_exit_group
        .long sys_lookup_dcookie
        .long sys_epoll_create
 
        data8 sys_keyctl
        data8 sys_ni_syscall
        data8 sys_ni_syscall                    // 1275
-       data8 sys_ni_syscall
+       data8 sys_set_zone_reclaim
        data8 sys_ni_syscall
        data8 sys_ni_syscall
        data8 sys_ni_syscall
 
 #define __NR_io_submit         248
 #define __NR_io_cancel         249
 #define __NR_fadvise64         250
-
+#define __NR_set_zone_reclaim  251
 #define __NR_exit_group                252
 #define __NR_lookup_dcookie    253
 #define __NR_epoll_create      254
 
 #define __NR_add_key                   1271
 #define __NR_request_key               1272
 #define __NR_keyctl                    1273
+#define __NR_set_zone_reclaim          1276
 
 #ifdef __KERNEL__
 
 
        unsigned long           pages_scanned;     /* since last reclaim */
        int                     all_unreclaimable; /* All pages pinned */
 
+       /*
+        * Should the allocator try to reclaim pages from this zone as
+        * soon as it fails a watermark_ok() check in __alloc_pages()?
+        * Set and cleared via the set_zone_reclaim() syscall.
+        */
+       int                     reclaim_pages;
+
        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
 
 
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, unsigned int, unsigned int);
+extern int zone_reclaim(struct zone *, unsigned int, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
 
 cond_syscall(sys_keyctl);
 cond_syscall(compat_sys_keyctl);
 cond_syscall(compat_sys_socketcall);
+cond_syscall(sys_set_zone_reclaim);
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);
 
        return 1;
 }
 
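+/*
+ * Has this zone been opted in to early reclaim via set_zone_reclaim()?
+ * gfp_mask is currently unused, but lets callers pass the allocation
+ * context for future checks.
+ */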
+static inline int
+should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+{
+       if (!z->reclaim_pages)
+               return 0;
+       return 1;
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
 
        classzone_idx = zone_idx(zones[0]);
 
- restart:
+restart:
        /* Go through the zonelist once, looking for a zone with enough free */
        for (i = 0; (z = zones[i]) != NULL; i++) {
-
-               if (!zone_watermark_ok(z, order, z->pages_low,
-                                      classzone_idx, 0, 0))
-                       continue;
+               int do_reclaim = should_reclaim_zone(z, gfp_mask);
 
                if (!cpuset_zone_allowed(z))
                        continue;
 
+               /*
+                * If the zone is to attempt early page reclaim then this loop
+                * will try to reclaim pages and check the watermark a second
+                * time before giving up and falling back to the next zone.
+                */
+zone_reclaim_retry:
+               if (!zone_watermark_ok(z, order, z->pages_low,
+                                      classzone_idx, 0, 0)) {
+                       if (!do_reclaim)
+                               continue;
+
+                       zone_reclaim(z, gfp_mask, order);
+                       /* Only try reclaim once */
+                       do_reclaim = 0;
+                       goto zone_reclaim_retry;
+               }
+
                page = buffered_rmqueue(z, order, gfp_mask);
                if (page)
                        goto got_pg;
 
 }
 
 module_init(kswapd_init)
+
+
+/*
+ * Try to free up some pages from this zone through reclaim.
+ */
+int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+{
+       struct scan_control sc;
+       int nr_pages = 1 << order;
+       int total_reclaimed = 0;
+
+       /* The reclaim may sleep, so don't do it if sleep isn't allowed */
+       if (!(gfp_mask & __GFP_WAIT))
+               return 0;
+       /* Don't recurse: we may already be inside page reclaim */
+       if (current->flags & PF_MEMALLOC)
+               return 0;
+       if (zone->all_unreclaimable)
+               return 0;
+
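+       /*
+        * Light-weight scan: don't write back dirty pages and don't
+        * touch swap, so the allocation path stays cheap.
+        */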
+       sc.gfp_mask = gfp_mask;
+       sc.may_writepage = 0;
+       sc.may_swap = 0;
+       sc.nr_mapped = read_page_state(nr_mapped);
+       sc.nr_scanned = 0;
+       sc.nr_reclaimed = 0;
+       /* scan at the highest priority */
+       sc.priority = 0;
+
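+       /* Reclaim in batches of at least SWAP_CLUSTER_MAX pages */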
+       if (nr_pages > SWAP_CLUSTER_MAX)
+               sc.swap_cluster_max = nr_pages;
+       else
+               sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+
+       shrink_zone(zone, &sc);
+       total_reclaimed = sc.nr_reclaimed;
+
+       return total_reclaimed;
+}
+
+asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
+                                    unsigned int state)
+{
+       struct zone *z;
+       int i;
+
+       /* Toggling per-zone reclaim affects the whole system */
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (node >= MAX_NUMNODES || !node_online(node))
+               return -EINVAL;
+
+       /* This will break if we ever add more zones */
+       if (!(zone & ((1<<ZONE_DMA) | (1<<ZONE_NORMAL) | (1<<ZONE_HIGHMEM))))
+               return -EINVAL;
+
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               if (!(zone & (1<<i)))
+                       continue;
+
+               z = &NODE_DATA(node)->node_zones[i];
+
+               z->reclaim_pages = !!state;
+       }
+
+       return 0;
+}
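
For illustration, userspace reaches the new call through syscall(2); the
sketch below is hypothetical (enable_zone_reclaim is an invented helper,
and the number assumes the i386 __NR_set_zone_reclaim wired up above).
The second argument is a bitmask of zone indices and the third is the
on/off state, mirroring sys_set_zone_reclaim():

	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_set_zone_reclaim
	#define __NR_set_zone_reclaim	251	/* i386 number from this patch */
	#endif

	/* Enable early reclaim for ZONE_NORMAL (zone index 1) on node 0 */
	static int enable_zone_reclaim(void)
	{
		return (int)syscall(__NR_set_zone_reclaim, 0, 1 << 1, 1);
	}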