struct list_head queue[MAX_RT_PRIO];
};
-struct load_stat {
- struct load_weight load;
-};
-
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
unsigned long nr_running;
- s64 fair_clock;
u64 exec_clock;
u64 min_vruntime;
- s64 wait_runtime;
- u64 sleeper_bonus;
- unsigned long wait_runtime_overruns, wait_runtime_underruns;
struct rb_root tasks_timeline;
struct rb_node *rb_leftmost;
#ifdef CONFIG_NO_HZ
unsigned char in_nohz_recently;
#endif
- struct load_stat ls; /* capture load from *all* tasks on this cpu */
+ struct load_weight load; /* capture load from *all* tasks on this cpu */
unsigned long nr_load_updates;
u64 nr_switches;
* Debugging: various feature bits
*/
enum {
- SCHED_FEAT_FAIR_SLEEPERS = 1,
- SCHED_FEAT_NEW_FAIR_SLEEPERS = 2,
- SCHED_FEAT_SLEEPER_AVG = 4,
- SCHED_FEAT_SLEEPER_LOAD_AVG = 8,
- SCHED_FEAT_START_DEBIT = 16,
- SCHED_FEAT_SKIP_INITIAL = 32,
+ SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
+ SCHED_FEAT_START_DEBIT = 2,
+ SCHED_FEAT_USE_TREE_AVG = 4,
+ SCHED_FEAT_APPROX_AVG = 8,
};
const_debug unsigned int sysctl_sched_features =
- SCHED_FEAT_FAIR_SLEEPERS *0 |
SCHED_FEAT_NEW_FAIR_SLEEPERS *1 |
- SCHED_FEAT_SLEEPER_AVG *0 |
- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
SCHED_FEAT_START_DEBIT *1 |
- SCHED_FEAT_SKIP_INITIAL *0;
+ SCHED_FEAT_USE_TREE_AVG *0 |
+ SCHED_FEAT_APPROX_AVG *0;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
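/*
 * Usage sketch (illustration only, not part of this patch): sched_feat()
 * simply masks the feature bitmap initialized above, so a caller gates a
 * code path on one bit like this. The helper name is hypothetical.
 */
static inline int fair_sleepers_enabled(void)
{
	/* non-zero iff NEW_FAIR_SLEEPERS (enabled via "*1" above) is set */
	return sched_feat(NEW_FAIR_SLEEPERS) != 0;
}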
}
#endif
-static u64 div64_likely32(u64 divident, unsigned long divisor)
-{
-#if BITS_PER_LONG == 32
- if (likely(divident <= 0xffffffffULL))
- return (u32)divident / divisor;
- do_div(divident, divisor);
-
- return divident;
-#else
- return divident / divisor;
-#endif
-}
-
#if BITS_PER_LONG == 32
# define WMULT_CONST (~0UL)
#else
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
- lw->inv_weight = WMULT_CONST / lw->weight;
}
static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
lw->weight -= dec;
- if (likely(lw->weight))
- lw->inv_weight = WMULT_CONST / lw->weight;
}
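/*
 * Background sketch (an illustration, not code from this hunk): inv_weight
 * is a fixed-point reciprocal, roughly WMULT_CONST / weight, so a division
 * by the weight can be done as a multiply plus shift on hot paths. With
 * the eager updates above removed, that division only needs to happen
 * where the reciprocal is actually consumed. The hypothetical helper
 * below shows the identity and ignores the overflow handling a real
 * divider needs:
 */
static inline unsigned long approx_div_by_weight(unsigned long x,
						 const struct load_weight *lw)
{
	/* x / lw->weight ~= (x * lw->inv_weight) >> WMULT_SHIFT */
	return (unsigned long)(((u64)x * lw->inv_weight) >> WMULT_SHIFT);
}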
/*
* Update delta_exec, delta_fair fields for rq.
*
* delta_fair clock advances at a rate inversely proportional to
- * total load (rq->ls.load.weight) on the runqueue, while
+ * total load (rq->load.weight) on the runqueue, while
* delta_exec advances at the same rate as wall-clock (provided
* cpu is not idle).
*
 * delta_exec / delta_fair is a measure of the (smoothened) load on this
 * runqueue over any given interval. This (smoothened) load is used
* during load balance.
*
- * This function is called /before/ updating rq->ls.load
+ * This function is called /before/ updating rq->load
* and when switching tasks.
*/
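/*
 * Worked example of the rate described above (assuming the usual
 * NICE_0_LOAD normalization, i.e. delta_fair ~= delta_exec *
 * NICE_0_LOAD / rq->load.weight): with one nice-0 task runnable the
 * fair clock tracks wall-clock; with two nice-0 tasks runnable
 * rq->load.weight doubles, so a 10ms delta_exec advances the fair
 * clock by only ~5ms.
 */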
static inline void inc_load(struct rq *rq, const struct task_struct *p)
{
- update_load_add(&rq->ls.load, p->se.load.weight);
+ update_load_add(&rq->load, p->se.load.weight);
}
static inline void dec_load(struct rq *rq, const struct task_struct *p)
{
- update_load_sub(&rq->ls.load, p->se.load.weight);
+ update_load_sub(&rq->load, p->se.load.weight);
}
static void inc_nr_running(struct task_struct *p, struct rq *rq)
static void set_load_weight(struct task_struct *p)
{
- p->se.wait_runtime = 0;
-
if (task_has_rt_policy(p)) {
p->se.load.weight = prio_to_weight[0] * 2;
p->se.load.inv_weight = prio_to_wmult[0] >> 1;
/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
{
- return cpu_rq(cpu)->ls.load.weight;
+ return cpu_rq(cpu)->load.weight;
}
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
int old_cpu = task_cpu(p);
struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
- u64 clock_offset, fair_clock_offset;
+ u64 clock_offset;
clock_offset = old_rq->clock - new_rq->clock;
- fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
-
- if (p->se.wait_start_fair)
- p->se.wait_start_fair -= fair_clock_offset;
- if (p->se.sleep_start_fair)
- p->se.sleep_start_fair -= fair_clock_offset;
#ifdef CONFIG_SCHEDSTATS
if (p->se.wait_start)
if (p->se.block_start)
p->se.block_start -= clock_offset;
#endif
+ if (likely(new_rq->cfs.min_vruntime))
+ p->se.vruntime -= old_rq->cfs.min_vruntime -
+ new_rq->cfs.min_vruntime;
__set_task_cpu(p, new_cpu);
}
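/*
 * Worked example of the renormalization above (illustrative numbers):
 * a task with vruntime 105ms on a runqueue whose cfs.min_vruntime is
 * 100ms carries 5ms of lag; after moving to a runqueue whose
 * cfs.min_vruntime is 300ms, its vruntime becomes 105 - (100 - 300) =
 * 305ms, preserving the same 5ms offset relative to the new queue's
 * min_vruntime.
 */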
*/
static void __sched_fork(struct task_struct *p)
{
- p->se.wait_start_fair = 0;
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
- p->se.wait_runtime = 0;
- p->se.sleep_start_fair = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
- p->se.sum_wait_runtime = 0;
p->se.sum_sleep_runtime = 0;
p->se.sleep_start = 0;
p->se.block_start = 0;
p->se.exec_max = 0;
p->se.slice_max = 0;
p->se.wait_max = 0;
- p->se.wait_runtime_overruns = 0;
- p->se.wait_runtime_underruns = 0;
#endif
INIT_LIST_HEAD(&p->run_list);
*/
static void update_cpu_load(struct rq *this_rq)
{
- unsigned long this_load = this_rq->ls.load.weight;
+ unsigned long this_load = this_rq->load.weight;
int i, scale;
this_rq->nr_load_updates++;
static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
cfs_rq->tasks_timeline = RB_ROOT;
- cfs_rq->fair_clock = 1;
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq;
#endif
read_lock_irq(&tasklist_lock);
do_each_thread(g, p) {
p->se.fair_key = 0;
- p->se.wait_runtime = 0;
p->se.exec_start = 0;
- p->se.wait_start_fair = 0;
- p->se.sleep_start_fair = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
p->se.sleep_start = 0;
p->se.block_start = 0;
#endif
- task_rq(p)->cfs.fair_clock = 0;
task_rq(p)->clock = 0;
if (!rt_task(p)) {