 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 /* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
 
 static DEFINE_MUTEX(kprobe_mutex);     /* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
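
The rename inverts the flag's polarity: the default state (false) now means "probes are armed", so each old "if (kprobe_enabled)" check becomes "if (!kprobes_all_disarmed)" in the hunks below. A standalone sketch of that equivalence (ordinary user-space C for illustration only, not part of the patch):

#include <assert.h>
#include <stdbool.h>

/* Illustration only: the old and new flags describe the same state with
 * opposite polarity, so every "enabled" test maps to "!all_disarmed". */
int main(void)
{
	bool kprobe_enabled = true;		/* old flag, default value */
	bool kprobes_all_disarmed = false;	/* new flag, default value */

	assert(kprobe_enabled == !kprobes_all_disarmed);
	return 0;
}
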
                 * If the old_p has gone, its breakpoint has been disarmed.
                 * We have to arm it again after preparing real kprobes.
                 */
-               if (kprobe_enabled)
+               if (!kprobes_all_disarmed)
                        arch_arm_kprobe(ap);
        }
 
        hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-       if (kprobe_enabled)
+       if (!kprobes_all_disarmed)
                arch_arm_kprobe(p);
 
 out_unlock_text:
                 * enabled and not gone - otherwise, the breakpoint would
                 * already have been removed. We save on flushing icache.
                 */
-               if (kprobe_enabled && !kprobe_gone(old_p)) {
+               if (!kprobes_all_disarmed && !kprobe_gone(old_p)) {
                        mutex_lock(&text_mutex);
                        arch_disarm_kprobe(p);
                        mutex_unlock(&text_mutex);
                }
        }
 
-       /* By default, kprobes are enabled */
-       kprobe_enabled = true;
+       /* By default, kprobes are armed */
+       kprobes_all_disarmed = false;
 
        err = arch_init_kprobes();
        if (!err)
        .release        = seq_release,
 };
 
-static void __kprobes enable_all_kprobes(void)
+static void __kprobes arm_all_kprobes(void)
 {
        struct hlist_head *head;
        struct hlist_node *node;
 
        mutex_lock(&kprobe_mutex);
 
-       /* If kprobes are already enabled, just return */
-       if (kprobe_enabled)
+       /* If kprobes are armed, just return */
+       if (!kprobes_all_disarmed)
                goto already_enabled;
 
        mutex_lock(&text_mutex);
        }
        mutex_unlock(&text_mutex);
 
-       kprobe_enabled = true;
+       kprobes_all_disarmed = false;
        printk(KERN_INFO "Kprobes globally enabled\n");
 
 already_enabled:
        return;
 }
 
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
 {
        struct hlist_head *head;
        struct hlist_node *node;
 
        mutex_lock(&kprobe_mutex);
 
-       /* If kprobes are already disabled, just return */
-       if (!kprobe_enabled)
+       /* If kprobes are already disarmed, just return */
+       if (kprobes_all_disarmed)
                goto already_disabled;
 
-       kprobe_enabled = false;
+       kprobes_all_disarmed = true;
        printk(KERN_INFO "Kprobes globally disabled\n");
        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 {
        char buf[3];
 
-       if (kprobe_enabled)
+       if (!kprobes_all_disarmed)
                buf[0] = '1';
        else
                buf[0] = '0';
        case 'y':
        case 'Y':
        case '1':
-               enable_all_kprobes();
+               arm_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
-               disable_all_kprobes();
+               disarm_all_kprobes();
                break;
        }
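
The switch above is the write handler behind the kprobes debugfs control file, so writing '0', 'n', or 'N' disarms every registered probe and '1', 'y', or 'Y' re-arms them. A minimal user-space sketch of driving it (assuming debugfs is mounted at /sys/kernel/debug and root privileges; the helper program itself is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch only: write "0" (disarm all) or "1" (arm all) to the kprobes
 * debugfs control file; equivalent to echoing the value as root. */
int main(int argc, char **argv)
{
	const char *val = (argc > 1) ? argv[1] : "0";
	int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
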