        for_each_sched_entity(se) {
                struct load_weight *se_lw = &se->load;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+               struct cfs_rq *cfs_rq = se->my_q;
+               struct task_group *tg = NULL;
+
+               if (cfs_rq)
+                       tg = cfs_rq->tg;
+
+               if (tg && tg->shares < NICE_0_LOAD) {
+                       /*
+                        * Scale the entity weight to what it would have
+                        * been had the group been given NICE_0_LOAD worth
+                        * of shares:
+                        *
+                        *   lw.weight = NICE_0_LOAD * se->load.weight
+                        *                           / tg->shares
+                        */
+                       lw.weight *= se->load.weight;
+                       lw.weight /= tg->shares;
+
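+                       /* zero inv_weight; calc_delta_mine() recomputes it */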
+                       lw.inv_weight = 0;
+
+                       se_lw = &lw;
+               } else
+#endif
                if (se->load.weight < NICE_0_LOAD)
                        se_lw = &lw;
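
As a sanity check of the group-scaling arithmetic above, here is a
standalone sketch (not part of the patch; NICE_0_LOAD is 1024 and the
sample shares/weight values are invented):

#include <stdio.h>

#define NICE_0_LOAD	1024ULL

/*
 * Mirrors the added block: starting from lw.weight = NICE_0_LOAD,
 * multiply by the entity weight and divide by the group shares,
 * i.e. scale the per-cpu weight to what it would have been had the
 * group been given NICE_0_LOAD worth of shares.
 */
static unsigned long long scale_weight(unsigned long long se_weight,
				       unsigned long long shares)
{
	unsigned long long weight = NICE_0_LOAD;

	weight *= se_weight;
	weight /= shares;

	return weight;
}

int main(void)
{
	/* a group with 512 shares, of which this cpu holds 256 */
	printf("%llu\n", scale_weight(256, 512));	/* prints 512 */
	return 0;
}

Zeroing lw.inv_weight afterwards matters because calc_delta_mine()
only recomputes the inverse when it finds it zero; a stale inv_weight
would silently undo the scaling.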
 
         * More easily preempt minus-nice tasks, while not making it
         * harder for plus-nice tasks.
         */
-       gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+       if (sched_feat(ASYM_GRAN))
+               gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+       else
+               gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
 
        return gran;
 }
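
For reference, a sketch of what the calc_delta_fair() branch does to
the granularity, assuming it behaves like
d * NICE_0_LOAD / se->load.weight (the symmetric scaling); the weights
3121 and 335 are the prio_to_weight[] entries for nice -5 and nice +5:

#include <stdio.h>

#define NICE_0_LOAD	1024ULL

/* assumed calc_delta_fair() behaviour: gran * NICE_0_LOAD / weight */
static unsigned long long fair_gran(unsigned long long gran,
				    unsigned long long weight)
{
	return gran * NICE_0_LOAD / weight;
}

int main(void)
{
	unsigned long long gran = 10000000ULL;	/* 10 ms in ns */

	printf("nice  0: %llu ns\n", fair_gran(gran, 1024));	/* unchanged */
	printf("nice -5: %llu ns\n", fair_gran(gran, 3121));	/* smaller */
	printf("nice +5: %llu ns\n", fair_gran(gran, 335));	/* larger */
	return 0;
}

The asymmetric variant applies the scaling in one direction only (note
the weight < NICE_0_LOAD special-casing in the first hunk), which is
what the "more easily preempt minus-nice tasks, while not making it
harder for plus-nice tasks" comment is after; ASYM_GRAN makes that
choice switchable.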
 
 SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
+SCHED_FEAT(ASYM_GRAN, 1)
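
For context, the SCHED_FEAT() entries are expanded twice: once into an
enum of bit positions and once into the default feature mask that
sched_feat() tests. Below is a minimal re-creation of that pattern
(simplified; the real kernel builds it from the feature list in
sched.c, and with CONFIG_SCHED_DEBUG the bits can be flipped at
runtime through /sys/kernel/debug/sched_features, e.g. by writing
NO_ASYM_GRAN):

#include <stdio.h>

/* first expansion: an enum assigning each feature a bit position */
#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
enum {
	SCHED_FEAT(SYNC_WAKEUPS, 1)
	SCHED_FEAT(HRTICK, 1)
	SCHED_FEAT(DOUBLE_TICK, 0)
	SCHED_FEAT(ASYM_GRAN, 1)
};
#undef SCHED_FEAT

/* second expansion: OR the enabled features into the default mask */
#define SCHED_FEAT(name, enabled)	((enabled) << __SCHED_FEAT_##name) |
static const unsigned int sysctl_sched_features =
	SCHED_FEAT(SYNC_WAKEUPS, 1)
	SCHED_FEAT(HRTICK, 1)
	SCHED_FEAT(DOUBLE_TICK, 0)
	SCHED_FEAT(ASYM_GRAN, 1)
	0;
#undef SCHED_FEAT

#define sched_feat(x)	(sysctl_sched_features & (1U << __SCHED_FEAT_##x))

int main(void)
{
	printf("ASYM_GRAN:   %s\n", sched_feat(ASYM_GRAN) ? "on" : "off");
	printf("DOUBLE_TICK: %s\n", sched_feat(DOUBLE_TICK) ? "on" : "off");
	return 0;
}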