/* -*- linux-c -*-
 * linux/arch/blackfin/kernel/ipipe.c
 *
 * Copyright (C) 2005-2007 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-pipe support for the Blackfin.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <asm/unistd.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/io.h>

static int create_irq_threads;

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);

static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);

asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);

static void __ipipe_no_irqtail(void);

unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
EXPORT_SYMBOL(__ipipe_irq_tail_hook);

unsigned long __ipipe_core_clock;
EXPORT_SYMBOL(__ipipe_core_clock);

unsigned long __ipipe_freq_scale;
EXPORT_SYMBOL(__ipipe_freq_scale);

atomic_t __ipipe_irq_lvdepth[IVG15 + 1];

unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
EXPORT_SYMBOL(__ipipe_irq_lvmask);

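/* Acknowledge an interrupt at the PIC level through the descriptor hook. */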
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
        desc->ipipe_ack(irq, desc);
}

/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
        unsigned irq;

        __ipipe_core_clock = get_cclk(); /* Fetch this once. */
        __ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;

        for (irq = 0; irq < NR_IRQS; ++irq)
                ipipe_virtualize_irq(ipipe_root_domain,
                                     irq,
                                     (ipipe_irq_handler_t)&asm_do_IRQ,
                                     NULL,
                                     &__ipipe_ack_irq,
                                     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}

/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct ipipe_domain *this_domain, *next_domain;
        struct list_head *head, *pos;
        int m_ack, s = -1;

        /*
         * Software-triggered IRQs do not need any ack.  The contents
         * of the register frame should only be used when processing
         * the timer interrupt, never when handling any other
         * interrupt.
         */
        m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);

        this_domain = ipipe_current_domain;

        if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
                head = &this_domain->p_link;
        else {
                head = __ipipe_pipeline.next;
                next_domain = list_entry(head, struct ipipe_domain, p_link);
                if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
                                next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
                        if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
                                s = __test_and_set_bit(IPIPE_STALL_FLAG,
                                                       &ipipe_root_cpudom_var(status));
                        __ipipe_dispatch_wired(next_domain, irq);
                        goto finalize;
                }
        }

        /* Ack the interrupt. */

        pos = head;

        while (pos != &__ipipe_pipeline) {
                next_domain = list_entry(pos, struct ipipe_domain, p_link);
                /*
                 * For each domain handling the incoming IRQ, mark it
                 * as pending in its log.
                 */
                if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
                        /*
                         * Domains that handle this IRQ are polled for
                         * acknowledging it in decreasing priority
                         * order. The interrupt must be made pending
                         * _first_ in the domain's status flags before
                         * the PIC is unlocked.
                         */
                        __ipipe_set_irq_pending(next_domain, irq);

                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
                                next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
                                m_ack = 1;
                        }
                }

                /*
                 * If the domain does not want the IRQ to be passed
                 * down the interrupt pipe, exit the loop now.
                 */
                if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
                        break;

                pos = next_domain->p_link.next;
        }

        /*
         * Now walk the pipeline, yielding control to the highest
         * priority domain that has pending interrupt(s) or
         * immediately to the current domain if the interrupt has been
         * marked as 'sticky'. This search does not go beyond the
         * current domain in the pipeline. We also enforce the
         * additional root stage lock (blackfin-specific).
         */

        if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
                s = __test_and_set_bit(IPIPE_STALL_FLAG,
                                       &ipipe_root_cpudom_var(status));
finalize:

        __ipipe_walk_pipeline(head);

        if (!s)
                __clear_bit(IPIPE_STALL_FLAG,
                            &ipipe_root_cpudom_var(status));
}

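/* Tell whether the root (Linux) domain is currently active on this CPU. */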
int __ipipe_check_root(void)
{
        return ipipe_root_domain_p;
}

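/*
 * Keep track of the core priority levels used by non-root domains, so
 * that __ipipe_stall_root_raw() can leave them enabled.
 */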
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int prio = desc->ic_prio;

        desc->depth = 0;
        if (ipd != &ipipe_root &&
            atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
                __set_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_enable_irqdesc);

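/*
 * Converse of __ipipe_enable_irqdesc(): drop the reference on the
 * priority level and mask it again when its last non-root user is gone.
 */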
void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int prio = desc->ic_prio;

        if (ipd != &ipipe_root &&
            atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
                __clear_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);

void __ipipe_stall_root_raw(void)
{
        /*
         * This code is called by the ins{bwl} routines (see
         * arch/blackfin/lib/ins.S), which are heavily used by the
         * network stack. It masks all interrupts but those handled by
         * non-root domains, so that we keep decent network transfer
         * rates for Linux without inducing pathological jitter for
         * the real-time domain.
         */
        __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));

        __set_bit(IPIPE_STALL_FLAG,
                  &ipipe_root_cpudom_var(status));
}

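/*
 * Undo __ipipe_stall_root_raw(): clear the root stall bit and restore
 * the hw interrupt mask Linux normally runs with.
 */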
void __ipipe_unstall_root_raw(void)
{
        __clear_bit(IPIPE_STALL_FLAG,
                    &ipipe_root_cpudom_var(status));

        __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
}

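/*
 * Syscall entry hook: gives domains monitoring IPIPE_EVENT_SYSCALL a
 * chance to intercept the syscall before it is handed to Linux. The
 * return value convention is documented inside.
 */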
int __ipipe_syscall_root(struct pt_regs *regs)
{
        unsigned long flags;

        /* We need to run the IRQ tail hook whenever we don't
         * propagate a syscall to higher domains, because we know that
         * important operations might be pending there (e.g. Xenomai
         * deferred rescheduling). */

        if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
                void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
                hook();
                return 0;
        }

        /*
         * This routine returns:
         * 0 -- if the syscall is to be passed to Linux;
         * 1 -- if the syscall should not be passed to Linux, and no
         * tail work should be performed;
         * -1 -- if the syscall should not be passed to Linux but the
         * tail work has to be performed (for handling signals etc).
         */

        if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
            __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
                if (ipipe_root_domain_p && !in_atomic()) {
                        /*
                         * Sync pending VIRQs before _TIF_NEED_RESCHED
                         * is tested.
                         */
                        local_irq_save_hw(flags);
                        if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
                                __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
                        local_irq_restore_hw(flags);
                        return -1;
                }
                return 1;
        }

        return 0;
}

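/*
 * Critical sections simply mask hw interrupts on this port; the
 * syncfn argument is not used.
 */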
unsigned long ipipe_critical_enter(void (*syncfn) (void))
{
        unsigned long flags;

        local_irq_save_hw(flags);

        return flags;
}

void ipipe_critical_exit(unsigned long flags)
{
        local_irq_restore_hw(flags);
}

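/* Default IRQ tail hook: do nothing. */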
static void __ipipe_no_irqtail(void)
{
}

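/* Report basic platform information: CPU count, frequencies, timer IRQ. */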
int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
        info->ncpus = num_online_cpus();
        info->cpufreq = ipipe_cpu_freq();
        info->archdep.tmirq = IPIPE_TIMER_IRQ;
        info->archdep.tmfreq = info->cpufreq;

        return 0;
}

/*
 * ipipe_trigger_irq() -- Push the interrupt at the front of the
 * pipeline, just as if it had actually been received from a hw
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned irq)
{
        unsigned long flags;

        if (irq >= IPIPE_NR_IRQS ||
            (ipipe_virtual_irq_p(irq)
             && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
                return -EINVAL;

        local_irq_save_hw(flags);

        __ipipe_handle_irq(irq, NULL);

        local_irq_restore_hw(flags);

        return 1;
}

/* Move Linux IRQs to threads. */

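/*
 * Interrupt server thread. Each threaded IRQ gets one of these; it
 * sleeps until kick_irqd() marks the descriptor as scheduled, then
 * runs the original Linux handler in task context.
 */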
static int do_irqd(void *__desc)
{
        struct irq_desc *desc = __desc;
        unsigned irq = desc - irq_desc;
        int thrprio = desc->thr_prio;
        int thrmask = 1 << thrprio;
        int cpu = smp_processor_id();
        cpumask_t cpumask;

        sigfillset(&current->blocked);
        current->flags |= PF_NOFREEZE;
        cpumask = cpumask_of_cpu(cpu);
        set_cpus_allowed(current, cpumask);
        ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);

        while (!kthread_should_stop()) {
                local_irq_disable();
                if (!(desc->status & IRQ_SCHEDULED)) {
                        set_current_state(TASK_INTERRUPTIBLE);
resched:
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                }
                __set_current_state(TASK_RUNNING);
                /*
                 * If higher priority interrupt servers are ready to
                 * run, reschedule immediately. We need this for the
                 * GPIO demux IRQ handler to unmask the interrupt line
                 * _last_, after all GPIO IRQs have run.
                 */
                if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
                        goto resched;
                if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
                        per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
                desc->status &= ~IRQ_SCHEDULED;
                desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
                local_irq_enable();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

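/*
 * Replacement root-domain handler for threaded IRQs: mark the
 * descriptor as scheduled, account for it in the per-CPU pending
 * masks, and wake the associated server thread.
 */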
static void kick_irqd(unsigned irq, void *cookie)
{
        struct irq_desc *desc = irq_desc + irq;
        int thrprio = desc->thr_prio;
        int thrmask = 1 << thrprio;
        int cpu = smp_processor_id();

        if (!(desc->status & IRQ_SCHEDULED)) {
                desc->status |= IRQ_SCHEDULED;
                per_cpu(pending_irqthread_mask, cpu) |= thrmask;
                ++per_cpu(pending_irq_count[thrprio], cpu);
                wake_up_process(desc->thread);
        }
}

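/*
 * Create the server thread for an IRQ and divert the root domain's
 * handler to kick_irqd(), so the original handler runs in the thread.
 */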
int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
{
        if (desc->thread || !create_irq_threads)
                return 0;

        desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
        if (IS_ERR(desc->thread)) {
                printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
                desc->thread = NULL;
                return -ENOMEM;
        }

        wake_up_process(desc->thread);

        desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
        ipipe_root_domain->irqs[irq].handler = &kick_irqd;

        return 0;
}

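/*
 * Called once at boot to turn IRQ threading on and create server
 * threads for the interrupt descriptors already populated by then.
 */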
void __init ipipe_init_irq_threads(void)
{
        unsigned irq;
        struct irq_desc *desc;

        create_irq_threads = 1;

        for (irq = 0; irq < NR_IRQS; irq++) {
                desc = irq_desc + irq;
                if (desc->action != NULL ||
                    (desc->status & IRQ_NOREQUEST) != 0)
                        ipipe_start_irq_thread(irq, desc);
        }
}

EXPORT_SYMBOL(show_stack);

#ifdef CONFIG_IPIPE_TRACE_MCOUNT
void notrace _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif /* CONFIG_IPIPE_TRACE_MCOUNT */