extern int runqueue_is_locked(void);
extern void task_rq_unlock_wait(struct task_struct *p);
-- extern cpumask_t nohz_cpu_mask;
++ extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
extern void cpu_init (void);
extern void trap_init(void);
-- extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);
#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
-- #define BALANCE_FOR_MC_POWER \
-- (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
++ enum powersavings_balance_level {
++ POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
++ POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package
++ * first for long running threads
++ */
++ POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle
++ * cpu package for power savings
++ */
++ MAX_POWERSAVINGS_BALANCE_LEVELS
++ };
+
- #define BALANCE_FOR_PKG_POWER \
- ((sched_mc_power_savings || sched_smt_power_savings) ? \
- SD_POWERSAVINGS_BALANCE : 0)
++ extern int sched_mc_power_savings, sched_smt_power_savings;
+
- #define test_sd_parent(sd, flag) ((sd->parent && \
- (sd->parent->flags & flag)) ? 1 : 0)
++ static inline int sd_balance_for_mc_power(void)
++ {
++ if (sched_smt_power_savings)
++ return SD_POWERSAVINGS_BALANCE;
+
++ return 0;
++ }
++
++ static inline int sd_balance_for_package_power(void)
++ {
++ if (sched_mc_power_savings | sched_smt_power_savings)
++ return SD_POWERSAVINGS_BALANCE;
+
++ return 0;
++ }
++
++ /*
++ * Optimise SD flags for power savings:
++ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
++ * Keep default SD flags if sched_{smt,mc}_power_saving=0
++ */
+
++ static inline int sd_power_saving_flags(void)
++ {
++ if (sched_mc_power_savings | sched_smt_power_savings)
++ return SD_BALANCE_NEWIDLE;
+
++ return 0;
++ }
struct sched_group {
struct sched_group *next; /* Must be a circular list */
-- cpumask_t cpumask;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* (see include/linux/reciprocal_div.h)
*/
u32 reciprocal_cpu_power;
++
++ unsigned long cpumask[];
};
++ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
++ {
++ return to_cpumask(sg->cpumask);
++ }
++
enum sched_domain_level {
SD_LV_NONE = 0,
SD_LV_SIBLING,
struct sched_domain *parent; /* top domain must be null terminated */
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
-- cpumask_t span; /* span of all CPUs in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
++
++ /* span of all CPUs in this domain */
++ unsigned long span[];
};
-- extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
++ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
++ {
++ return to_cpumask(sd->span);
++ }
++
++ extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new);
-- -extern int arch_reinit_sched_domains(void);
++
++ /* Test a flag in parent sched domain */
++ static inline int test_sd_parent(struct sched_domain *sd, int flag)
++ {
++ if (sd->parent && (sd->parent->flags & flag))
++ return 1;
++
++ return 0;
++ }
#else /* CONFIG_SMP */
struct sched_domain_attr;
static inline void
-- partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
++ partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new)
{
}
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
-- const cpumask_t *newmask);
++ const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
-- const cpumask_t *new_mask);
++ const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
-- const cpumask_t *new_mask)
++ const struct cpumask *new_mask)
{
-- if (!cpu_isset(0, *new_mask))
++ if (!cpumask_test_cpu(0, new_mask))
return -EINVAL;
return 0;
}
static inline void wake_up_idle_cpu(int cpu) { }
#endif
-- -#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
++ +extern unsigned int sysctl_sched_shares_ratelimit;
++ +extern unsigned int sysctl_sched_shares_thresh;
++ +#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
-- -extern unsigned int sysctl_sched_shares_ratelimit;
-- -extern unsigned int sysctl_sched_shares_thresh;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
}
#endif
-- extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-- extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
--
-- extern int sched_mc_power_savings, sched_smt_power_savings;
++ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
++ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern void normalize_rt_tasks(void);