With the evolution of mempolicies, it is necessary to support mempolicy mode
flags that specify how the policy shall behave in certain circumstances. The
most immediate need for mode flag support is to suppress remapping the
nodemask of a policy at the time of rebind.
Both the mempolicy mode and flags are passed by the user in the 'int policy'
formal of either the set_mempolicy() or mbind() syscall. A new constant,
MPOL_MODE_FLAGS, represents the union of legal optional flags that may be
passed as part of this int. Mempolicies that include illegal flags as part of
their policy are rejected as invalid.
An additional member to struct mempolicy is added to support the mode flags:
struct mempolicy {
...
unsigned short policy;
unsigned short flags;
}
The splitting of the 'int' actual passed by the user is done in
sys_set_mempolicy() and sys_mbind() for their respective syscalls. This is
done by intersecting the actual with MPOL_MODE_FLAGS, rejecting the syscall if
there are additional flags, and storing it in the new 'flags' member of struct
mempolicy. The intersection of the actual with ~MPOL_MODE_FLAGS is stored in
the 'policy' member of the struct and all current users of pol->policy remain
unchanged.
The union of the policy mode and optional mode flags is passed back to the
user in get_mempolicy().
This combination of mode and flags within the same actual does not break
userspace code that relies on get_mempolicy(&policy, ...) and either
switch (policy) {
case MPOL_BIND:
...
case MPOL_INTERLEAVE:
...
};
statements or
if (policy == MPOL_INTERLEAVE) {
...
}
statements. Such previously working statements only stop working if the
application itself begins passing optional mode flags to set_mempolicy() or
mbind(). If an application does start using optional mode flags, it
will need to mask the optional flags off the policy in switch and conditional
statements that only test mode.
An additional member is also added to struct shmem_sb_info to store the
optional mode flags.
[hugh@veritas.com: shmem mpol: fix build warning]
Cc: Paul Jackson <pj@sgi.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
- mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
+ mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, 0, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
* Copyright 2003,2004 Andi Kleen SuSE Labs
*/
* Copyright 2003,2004 Andi Kleen SuSE Labs
*/
+/*
+ * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
+ * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
+ * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
+ */
+
/* Policies */
enum {
MPOL_DEFAULT,
/* Policies */
enum {
MPOL_DEFAULT,
MPOL_MAX, /* always last member of enum */
};
MPOL_MAX, /* always last member of enum */
};
-/* Flags for get_mem_policy */
+/* Flags for set_mempolicy */
+/*
+ * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
+ * either set_mempolicy() or mbind().
+ */
+#define MPOL_MODE_FLAGS (0)
+
+/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
struct mempolicy {
atomic_t refcnt;
unsigned short policy; /* See MPOL_* above */
struct mempolicy {
atomic_t refcnt;
unsigned short policy; /* See MPOL_* above */
+ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
union {
short preferred_node; /* preferred */
nodemask_t nodes; /* interleave/bind */
union {
short preferred_node; /* preferred */
nodemask_t nodes; /* interleave/bind */
};
void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
};
void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
+ unsigned short flags, nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
struct mempolicy *new);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
struct mempolicy *new);
}
static inline void mpol_shared_policy_init(struct shared_policy *info,
}
static inline void mpol_shared_policy_init(struct shared_policy *info,
- unsigned short policy, nodemask_t *nodes)
+ unsigned short policy, unsigned short flags, nodemask_t *nodes)
gid_t gid; /* Mount gid for root directory */
mode_t mode; /* Mount mode for root directory */
unsigned short policy; /* Default NUMA memory alloc policy */
gid_t gid; /* Mount gid for root directory */
mode_t mode; /* Mount mode for root directory */
unsigned short policy; /* Default NUMA memory alloc policy */
+ unsigned short flags; /* Optional mempolicy flags */
nodemask_t policy_nodes; /* nodemask for preferred and bind */
};
nodemask_t policy_nodes; /* nodemask for preferred and bind */
};
}
/* Create a new policy */
}
/* Create a new policy */
-static struct mempolicy *mpol_new(unsigned short mode, nodemask_t *nodes)
+static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
+ nodemask_t *nodes)
{
struct mempolicy *policy;
{
struct mempolicy *policy;
- pr_debug("setting mode %d nodes[0] %lx\n",
- mode, nodes ? nodes_addr(*nodes)[0] : -1);
+ pr_debug("setting mode %d flags %d nodes[0] %lx\n",
+ mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
if (mode == MPOL_DEFAULT)
return NULL;
if (mode == MPOL_DEFAULT)
return NULL;
BUG();
}
policy->policy = mode;
BUG();
}
policy->policy = mode;
policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
return policy;
}
policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
return policy;
}
}
/* Set the process memory policy */
}
/* Set the process memory policy */
-static long do_set_mempolicy(unsigned short mode, nodemask_t *nodes)
+static long do_set_mempolicy(unsigned short mode, unsigned short flags,
+ nodemask_t *nodes)
{
struct mempolicy *new;
if (mpol_check_policy(mode, nodes))
return -EINVAL;
{
struct mempolicy *new;
if (mpol_check_policy(mode, nodes))
return -EINVAL;
- new = mpol_new(mode, nodes);
+ new = mpol_new(mode, flags, nodes);
if (IS_ERR(new))
return PTR_ERR(new);
mpol_free(current->mempolicy);
if (IS_ERR(new))
return PTR_ERR(new);
mpol_free(current->mempolicy);
+ *policy = pol->policy | pol->flags;
if (vma) {
up_read(&current->mm->mmap_sem);
if (vma) {
up_read(&current->mm->mmap_sem);
#endif
static long do_mbind(unsigned long start, unsigned long len,
#endif
static long do_mbind(unsigned long start, unsigned long len,
- unsigned short mode, nodemask_t *nmask,
- unsigned long flags)
+ unsigned short mode, unsigned short mode_flags,
+ nodemask_t *nmask, unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
if (mpol_check_policy(mode, nmask))
return -EINVAL;
if (mpol_check_policy(mode, nmask))
return -EINVAL;
- new = mpol_new(mode, nmask);
+ new = mpol_new(mode, mode_flags, nmask);
if (IS_ERR(new))
return PTR_ERR(new);
if (IS_ERR(new))
return PTR_ERR(new);
if (!new)
flags |= MPOL_MF_DISCONTIG_OK;
if (!new)
flags |= MPOL_MF_DISCONTIG_OK;
- pr_debug("mbind %lx-%lx mode:%d nodes:%lx\n", start, start + len,
- mode, nmask ? nodes_addr(*nmask)[0] : -1);
+ pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
+ start, start + len, mode, mode_flags,
+ nmask ? nodes_addr(*nmask)[0] : -1);
down_write(&mm->mmap_sem);
vma = check_range(mm, start, end, nmask,
down_write(&mm->mmap_sem);
vma = check_range(mm, start, end, nmask,
{
nodemask_t nodes;
int err;
{
nodemask_t nodes;
int err;
+ unsigned short mode_flags;
+ mode_flags = mode & MPOL_MODE_FLAGS;
+ mode &= ~MPOL_MODE_FLAGS;
if (mode >= MPOL_MAX)
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
if (mode >= MPOL_MAX)
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
- return do_mbind(start, len, mode, &nodes, flags);
+ return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
/* Set the process memory policy */
}
/* Set the process memory policy */
{
int err;
nodemask_t nodes;
{
int err;
nodemask_t nodes;
- if (mode < 0 || mode >= MPOL_MAX)
+ flags = mode & MPOL_MODE_FLAGS;
+ mode &= ~MPOL_MODE_FLAGS;
+ if ((unsigned int)mode >= MPOL_MAX)
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
- return do_set_mempolicy(mode, &nodes);
+ return do_set_mempolicy(mode, flags, &nodes);
}
asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
}
asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
}
void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
}
void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
- nodemask_t *policy_nodes)
+ unsigned short flags, nodemask_t *policy_nodes)
{
info->root = RB_ROOT;
spin_lock_init(&info->lock);
{
info->root = RB_ROOT;
spin_lock_init(&info->lock);
struct mempolicy *newpol;
/* Falls back to MPOL_DEFAULT on any error */
struct mempolicy *newpol;
/* Falls back to MPOL_DEFAULT on any error */
- newpol = mpol_new(policy, policy_nodes);
+ newpol = mpol_new(policy, flags, policy_nodes);
if (!IS_ERR(newpol)) {
/* Create pseudo-vma that contains just the policy */
struct vm_area_struct pvma;
if (!IS_ERR(newpol)) {
/* Create pseudo-vma that contains just the policy */
struct vm_area_struct pvma;
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
- pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
+ pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
- sz, npol? npol->policy : -1,
+ sz, npol ? npol->policy : -1,
+ npol ? npol->flags : -1,
npol ? nodes_addr(npol->v.nodes)[0] : -1);
if (npol) {
npol ? nodes_addr(npol->v.nodes)[0] : -1);
if (npol) {
if (unlikely(nodes_empty(interleave_nodes)))
node_set(prefer, interleave_nodes);
if (unlikely(nodes_empty(interleave_nodes)))
node_set(prefer, interleave_nodes);
- if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
+ if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
printk("numa_policy_init: interleaving failed\n");
}
/* Reset policy of current process to default */
void numa_default_policy(void)
{
printk("numa_policy_init: interleaving failed\n");
}
/* Reset policy of current process to default */
void numa_default_policy(void)
{
- do_set_mempolicy(MPOL_DEFAULT, NULL);
+ do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
/* Migrate a policy to a different set of nodes */
}
/* Migrate a policy to a different set of nodes */
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static int shmem_parse_mpol(char *value, unsigned short *policy,
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static int shmem_parse_mpol(char *value, unsigned short *policy,
- nodemask_t *policy_nodes)
+ unsigned short *mode_flags, nodemask_t *policy_nodes)
{
char *nodelist = strchr(value, ':');
{
char *nodelist = strchr(value, ':');
+ char *flags = strchr(value, '=');
int err = 1;
if (nodelist) {
int err = 1;
if (nodelist) {
if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
goto out;
}
if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
goto out;
}
+ if (flags)
+ *flags++ = '\0';
if (!strcmp(value, "default")) {
*policy = MPOL_DEFAULT;
/* Don't allow a nodelist */
if (!strcmp(value, "default")) {
*policy = MPOL_DEFAULT;
/* Don't allow a nodelist */
*policy_nodes = node_states[N_HIGH_MEMORY];
err = 0;
}
*policy_nodes = node_states[N_HIGH_MEMORY];
err = 0;
}
out:
/* Restore string for error message */
if (nodelist)
out:
/* Restore string for error message */
if (nodelist)
}
static void shmem_show_mpol(struct seq_file *seq, unsigned short policy,
}
static void shmem_show_mpol(struct seq_file *seq, unsigned short policy,
- const nodemask_t policy_nodes)
+ unsigned short flags, const nodemask_t policy_nodes)
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline int shmem_parse_mpol(char *value, unsigned short *policy,
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline int shmem_parse_mpol(char *value, unsigned short *policy,
- nodemask_t *policy_nodes)
+ unsigned short *mode_flags, nodemask_t *policy_nodes)
{
return 1;
}
static inline void shmem_show_mpol(struct seq_file *seq, unsigned short policy,
{
return 1;
}
static inline void shmem_show_mpol(struct seq_file *seq, unsigned short policy,
- const nodemask_t policy_nodes)
+ unsigned short flags, const nodemask_t policy_nodes)
{
}
#endif /* CONFIG_TMPFS */
{
}
#endif /* CONFIG_TMPFS */
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
mpol_shared_policy_init(&info->policy, sbinfo->policy,
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
mpol_shared_policy_init(&info->policy, sbinfo->policy,
- &sbinfo->policy_nodes);
+ sbinfo->flags, &sbinfo->policy_nodes);
break;
case S_IFDIR:
inc_nlink(inode);
break;
case S_IFDIR:
inc_nlink(inode);
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
- mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
+ mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, 0,
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
if (shmem_parse_mpol(value, &sbinfo->policy,
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
if (shmem_parse_mpol(value, &sbinfo->policy,
- &sbinfo->policy_nodes))
+ &sbinfo->flags, &sbinfo->policy_nodes))
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
sbinfo->max_inodes = config.max_inodes;
sbinfo->free_inodes = config.max_inodes - inodes;
sbinfo->policy = config.policy;
sbinfo->max_inodes = config.max_inodes;
sbinfo->free_inodes = config.max_inodes - inodes;
sbinfo->policy = config.policy;
+ sbinfo->flags = config.flags;
sbinfo->policy_nodes = config.policy_nodes;
out:
spin_unlock(&sbinfo->stat_lock);
sbinfo->policy_nodes = config.policy_nodes;
out:
spin_unlock(&sbinfo->stat_lock);
seq_printf(seq, ",uid=%u", sbinfo->uid);
if (sbinfo->gid != 0)
seq_printf(seq, ",gid=%u", sbinfo->gid);
seq_printf(seq, ",uid=%u", sbinfo->uid);
if (sbinfo->gid != 0)
seq_printf(seq, ",gid=%u", sbinfo->gid);
- shmem_show_mpol(seq, sbinfo->policy, sbinfo->policy_nodes);
+ shmem_show_mpol(seq, sbinfo->policy, sbinfo->flags,
+ sbinfo->policy_nodes);
return 0;
}
#endif /* CONFIG_TMPFS */
return 0;
}
#endif /* CONFIG_TMPFS */
sbinfo->uid = current->fsuid;
sbinfo->gid = current->fsgid;
sbinfo->policy = MPOL_DEFAULT;
sbinfo->uid = current->fsuid;
sbinfo->gid = current->fsgid;
sbinfo->policy = MPOL_DEFAULT;
sbinfo->policy_nodes = node_states[N_HIGH_MEMORY];
sb->s_fs_info = sbinfo;
sbinfo->policy_nodes = node_states[N_HIGH_MEMORY];
sb->s_fs_info = sbinfo;