www.pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - kernel/irq/proc.c
genirq: Expose default irq affinity mask (take 3)
[linux-2.6-omap-h63xx.git] / kernel / irq / proc.c
index ddde0ef9ccdcbb4dbd8834accc5f0264deee8403..6c6d35d68ee9c9628d5e5b67f2e32fdd91553f52 100644 (file)
@@ -19,7 +19,15 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_read_proc(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
 {
-       int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);
+       struct irq_desc *desc = irq_desc + (long)data;
+       cpumask_t *mask = &desc->affinity;
+       int len;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (desc->status & IRQ_MOVE_PENDING)
+               mask = &desc->pending_mask;
+#endif
+       len = cpumask_scnprintf(page, count, *mask);
 
        if (count - len < 2)
                return -EINVAL;
@@ -27,12 +35,16 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
        return len;
 }
 
+#ifndef is_affinity_mask_valid
+#define is_affinity_mask_valid(val) 1
+#endif
+
 int no_irq_affinity;
 static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
                                   unsigned long count, void *data)
 {
        unsigned int irq = (int)(long)data, full_count = count, err;
-       cpumask_t new_value, tmp;
+       cpumask_t new_value;
 
        if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
            irq_balancing_disabled(irq))
@@ -42,24 +54,73 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
        if (err)
                return err;
 
+       if (!is_affinity_mask_valid(new_value))
+               return -EINVAL;
+
        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
-       cpus_and(tmp, new_value, cpu_online_map);
-       if (cpus_empty(tmp))
+       if (!cpus_intersects(new_value, cpu_online_map))
                /* Special case for empty set - allow the architecture
                   code to set default SMP affinity. */
-               return select_smp_affinity(irq) ? -EINVAL : full_count;
+               return irq_select_affinity(irq) ? -EINVAL : full_count;
 
        irq_set_affinity(irq, new_value);
 
        return full_count;
 }
 
/*
 * /proc/irq/default_smp_affinity read handler: format the global
 * irq_default_affinity cpumask into @page, followed by a newline.
 */
static int default_affinity_read(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_default_affinity);
	/* Need room for the trailing "\n" plus NUL; fail if the page is full. */
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
+
/*
 * /proc/irq/default_smp_affinity write handler: parse a user-supplied
 * cpumask and install it as the default affinity used for new IRQs.
 *
 * Returns the full write count on success, or a negative errno
 * (-EINVAL for an invalid or all-offline mask, or the parse error).
 */
static int default_affinity_write(struct file *file, const char __user *buffer,
                                   unsigned long count, void *data)
{
	unsigned int full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		return err;

	/* is_affinity_mask_valid() may be overridden by the architecture. */
	if (!is_affinity_mask_valid(new_value))
		return -EINVAL;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpus_intersects(new_value, cpu_online_map))
		return -EINVAL;

	irq_default_affinity = new_value;

	return full_count;
}
 #endif
 
/*
 * /proc/irq/<irq>/spurious read handler: report the interrupt's
 * delivery count, unhandled count, and the time (in ms) of the last
 * unhandled occurrence.  The IRQ number is encoded in @data.
 */
static int irq_spurious_read(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
{
	struct irq_desc *d = &irq_desc[(long) data];
	return sprintf(page, "count %u\n"
	                     "unhandled %u\n"
	                     "last_unhandled %u ms\n",
	                d->irq_count,
	                d->irqs_unhandled,
	                jiffies_to_msecs(d->last_unhandled));
}
+
 #define MAX_NAMELEN 128
 
 static int name_unique(unsigned int irq, struct irqaction *new_action)
@@ -103,6 +164,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 void register_irq_proc(unsigned int irq)
 {
        char name [MAX_NAMELEN];
+       struct proc_dir_entry *entry;
 
        if (!root_irq_dir ||
                (irq_desc[irq].chip == &no_irq_chip) ||
@@ -117,8 +179,6 @@ void register_irq_proc(unsigned int irq)
 
 #ifdef CONFIG_SMP
        {
-               struct proc_dir_entry *entry;
-
                /* create /proc/irq/<irq>/smp_affinity */
                entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
 
@@ -129,6 +189,12 @@ void register_irq_proc(unsigned int irq)
                }
        }
 #endif
+
+       entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
+       if (entry) {
+               entry->data = (void *)(long)irq;
+               entry->read_proc = irq_spurious_read;
+       }
 }
 
 #undef MAX_NAMELEN
@@ -139,6 +205,21 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
                remove_proc_entry(action->dir->name, irq_desc[irq].dir);
 }
 
/*
 * Create /proc/irq/default_smp_affinity with read/write handlers for
 * the default IRQ affinity mask.  Compiles to a no-op on !CONFIG_SMP.
 */
void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	struct proc_dir_entry *entry;

	/* create /proc/irq/default_smp_affinity */
	entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
	if (entry) {
		/* No per-entry payload; handlers use the global mask. */
		entry->data = NULL;
		entry->read_proc  = default_affinity_read;
		entry->write_proc = default_affinity_write;
	}
#endif
}
+
 void init_irq_proc(void)
 {
        int i;
@@ -148,6 +229,8 @@ void init_irq_proc(void)
        if (!root_irq_dir)
                return;
 
+       register_default_affinity_proc();
+
        /*
         * Create entries for all existing IRQs.
         */