/*
 *  linux/arch/arm/plat-omap/clock.c
 *
 *  Copyright (C) 2004 - 2008 Nokia corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <mach/clock.h>

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static struct clk_functions *arch_clock;
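
/*
 * Synchronization: clocks_mutex protects the global "clocks" list
 * (clk_get(), clk_register(), clk_unregister()), while the
 * clockfw_lock spinlock serializes calls into the SoC-specific
 * arch_clock operations installed by clk_init().
 */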
/**
 * omap_clk_for_each_child - call callback on each child clock of clk
 * @clk: struct clk * to use as the "parent"
 * @parent_rate: rate of the parent of @clk to pass along
 * @rate_storage: flag indicating whether current or temporary rates are used
 * @cb: pointer to a callback function
 *
 * For each child clock of @clk, call the callback function @cb, passing
 * along the contents of @parent_rate and @rate_storage.  If the callback
 * function returns non-zero, terminate the function and pass along the
 * return value.
 */
static int omap_clk_for_each_child(struct clk *clk, unsigned long parent_rate,
                                   u8 rate_storage,
                                   int (*cb)(struct clk *clk,
                                             unsigned long parent_rate,
                                             u8 rate_storage))
{
        struct clk_child *child;
        int ret = 0;    /* return 0 when @clk has no children */

        list_for_each_entry(child, &clk->children, node) {
                ret = (*cb)(child->clk, parent_rate, rate_storage);
                if (ret)
                        break;
        }

        return ret;
}

/**
 * omap_clk_has_children - does clk @clk have any child clocks?
 * @clk: struct clk * to test for child clocks
 *
 * If clock @clk has any child clocks, return 1; otherwise, return 0.
 */
static int omap_clk_has_children(struct clk *clk)
{
        return (list_empty(&clk->children)) ? 0 : 1;
}

/**
 * _do_propagate_rate - callback function for rate propagation
 * @clk: struct clk * to recalc and propagate from
 * @parent_rate: rate of the parent of @clk, to use in recalculation
 * @rate_storage: flag indicating whether current or temporary rates are used
 *
 * If @clk has a recalc function, call it.  If @clk has any children,
 * propagate @clk's rate.  Returns 0.
 */
static int _do_propagate_rate(struct clk *clk, unsigned long parent_rate,
                              u8 rate_storage)
{
        if (clk->recalc)
                clk->recalc(clk, parent_rate, rate_storage);
        if (omap_clk_has_children(clk))
                propagate_rate(clk, rate_storage);
        return 0;
}

/**
 * omap_clk_add_child - add a child clock @clk2 to @clk
 * @clk: parent struct clk *
 * @clk2: new child struct clk *
 *
 * Add a child clock @clk2 to the list of children of parent clock
 * @clk.  Will potentially allocate memory from bootmem or, if
 * available, from slab.  Must only be called with the clock framework
 * spinlock held.  No return value.
 */
void omap_clk_add_child(struct clk *clk, struct clk *clk2)
{
        struct clk_child *child;
        int reuse = 0;

        if (!clk->children.next)
                INIT_LIST_HEAD(&clk->children);

        list_for_each_entry(child, &clk->children, node) {
                if (child->flags & CLK_CHILD_DELETED) {
                        reuse = 1;
                        child->flags &= ~CLK_CHILD_DELETED;
                        break;
                }
        }

        if (!reuse) {
                if (slab_is_available())
                        child = kmalloc(sizeof(struct clk_child), GFP_ATOMIC);
                else
                        child = alloc_bootmem(sizeof(struct clk_child));

                if (!child) {
                        WARN_ON(1);
                        return;
                }

                memset(child, 0, sizeof(struct clk_child));

                if (slab_is_available())
                        child->flags |= CLK_CHILD_SLAB_ALLOC;
        }

        child->clk = clk2;

        list_add_tail(&child->node, &clk->children);
}

/**
 * omap_clk_del_child - remove a child clock @clk2 from @clk
 * @clk: parent struct clk *
 * @clk2: former child struct clk *
 *
 * Remove a child clock @clk2 from the list of children of parent
 * clock @clk.  Must only be called with the clock framework spinlock
 * held.  No return value.
 */
void omap_clk_del_child(struct clk *clk, struct clk *clk2)
{
        struct clk_child *child, *tmp;

        /* Loop over all existing clk_childs, when found, deallocate */
        list_for_each_entry_safe(child, tmp, &clk->children, node) {
                if (child->clk == clk2) {
                        list_del(&child->node);
                        if (child->flags & CLK_CHILD_SLAB_ALLOC) {
                                kfree(child);
                        } else {
                                child->clk = NULL;
                                child->flags |= CLK_CHILD_DELETED;
                        }
                        break;
                }
        }
}

/*-------------------------------------------------------------------------
 * Standard clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
        struct clk *p, *clk = ERR_PTR(-ENOENT);
        int idno;

        if (dev == NULL || dev->bus != &platform_bus_type)
                idno = -1;
        else
                idno = to_platform_device(dev)->id;

        mutex_lock(&clocks_mutex);

        list_for_each_entry(p, &clocks, node) {
                if (p->id == idno && strcmp(id, p->name) == 0) {
                        clk = p;
                        goto found;
                }
        }

        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0) {
                        clk = p;
                        break;
                }
        }

found:
        mutex_unlock(&clocks_mutex);

        return clk;
}
EXPORT_SYMBOL(clk_get);

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_enable) {
                ret = arch_clock->clk_enable(clk);
                if (ret == 0 && clk->flags & RECALC_ON_ENABLE)
                        _do_propagate_rate(clk, clk->parent->rate,
                                           CURRENT_RATE);
        }

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->usecount == 0) {
                printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                goto out;
        }

        if (arch_clock->clk_disable) {
                arch_clock->clk_disable(clk);
                if (clk->flags & RECALC_ON_ENABLE)
                        _do_propagate_rate(clk, clk->parent->rate,
                                           CURRENT_RATE);
        }

out:
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long flags;
        unsigned long ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = clk->rate;
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_rate);

void clk_put(struct clk *clk)
{
}
EXPORT_SYMBOL(clk_put);
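
/*
 * Typical consumer usage of the clk API implemented above, sketched
 * here for reference; the device pointer and the clock name "mmc_ck"
 * are illustrative only:
 *
 *      struct clk *ck;
 *
 *      ck = clk_get(&pdev->dev, "mmc_ck");
 *      if (IS_ERR(ck))
 *              return PTR_ERR(ck);
 *      clk_enable(ck);
 *      pr_debug("mmc_ck runs at %lu Hz\n", clk_get_rate(ck));
 *      ...
 *      clk_disable(ck);
 *      clk_put(ck);
 */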

/*-------------------------------------------------------------------------
 * Optional clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        long ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_round_rate)
                ret = arch_clock->clk_round_rate(clk, rate);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);

        if (arch_clock->clk_set_rate) {
                ret = arch_clock->clk_set_rate(clk, rate);
                if (ret == 0)
                        _do_propagate_rate(clk, clk->parent->rate,
                                           CURRENT_RATE);
        }

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);
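
/*
 * A rate change is normally a two-step sequence: ask what the hardware
 * can actually provide with clk_round_rate(), then commit the result
 * with clk_set_rate(); the 48 MHz target below is illustrative only:
 *
 *      long rounded = clk_round_rate(ck, 48000000);
 *
 *      if (rounded > 0)
 *              clk_set_rate(ck, rounded);
 *
 * On success, clk_set_rate() recalculates the clock and propagates the
 * new rate down to all child clocks via _do_propagate_rate().
 */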

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        struct clk *prev_parent;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);

        if (arch_clock->clk_set_parent) {
                prev_parent = clk->parent;
                ret = arch_clock->clk_set_parent(clk, parent);
                if (ret == 0) {
                        omap_clk_del_child(prev_parent, clk);
                        omap_clk_add_child(parent, clk);
                        _do_propagate_rate(clk, clk->parent->rate,
                                           CURRENT_RATE);
                }
        }

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        unsigned long flags;
        struct clk *ret = NULL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_get_parent)
                ret = arch_clock->clk_get_parent(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_parent);

/*-------------------------------------------------------------------------
 * OMAP specific clock functions shared between omap1 and omap2
 *-------------------------------------------------------------------------*/

unsigned int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
        get_option(&str, &mpurate);

        if (!mpurate)
                return 1;

        if (mpurate < 1000)
                mpurate *= 1000000;

        return 1;
}
__setup("mpurate=", omap_clk_setup);
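
/*
 * For example, booting with "mpurate=600" on the kernel command line
 * requests 600 MHz (values below 1000 are scaled by 1000000), while
 * "mpurate=600000000" passes the rate through unchanged, in Hz.
 */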

/* Used for clocks that always have the same rate as the parent clock */
void followparent_recalc(struct clk *clk, unsigned long new_parent_rate,
                         u8 rate_storage)
{
        if (rate_storage == CURRENT_RATE)
                clk->rate = new_parent_rate;
        else if (rate_storage == TEMP_RATE)
                clk->temp_rate = new_parent_rate;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk, u8 rate_storage)
{
        unsigned long parent_rate = 0;

        if (tclk == NULL || IS_ERR(tclk))
                return;

        if (rate_storage == CURRENT_RATE)
                parent_rate = tclk->rate;
        else if (rate_storage == TEMP_RATE)
                parent_rate = tclk->temp_rate;

        omap_clk_for_each_child(tclk, parent_rate, rate_storage,
                                _do_propagate_rate);
}
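
/*
 * Rate propagation writes to one of two per-clock fields, selected by
 * the rate_storage argument: CURRENT_RATE updates clk->rate (the live,
 * committed rate), while TEMP_RATE updates clk->temp_rate, which lets
 * a prospective rate change be evaluated down the tree without
 * disturbing the current rates.
 */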

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which should
 * also propagate their rates if each clock's .recalc is set correctly.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clocks, node)
                if (unlikely(!clkp->parent))
                        _do_propagate_rate(clkp, 0, CURRENT_RATE);
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
        if (!clk->children.next)
                INIT_LIST_HEAD(&clk->children);
        if (clk->parent)
                omap_clk_add_child(clk->parent, clk);
        if (clk->init)
                clk->init(clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
        struct clk_child *child, *tmp;

        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->node);
        if (clk->parent)
                omap_clk_del_child(clk->parent, clk);
        list_for_each_entry_safe(child, tmp, &clk->children, node)
                if (child->flags & CLK_CHILD_SLAB_ALLOC)
                        kfree(child);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_deny_idle(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_deny_idle)
                arch_clock->clk_deny_idle(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_deny_idle);

void clk_allow_idle(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_allow_idle)
                arch_clock->clk_allow_idle(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_allow_idle);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clocks, node) {
                if (clkp->flags & ENABLE_ON_INIT)
                        clk_enable(clkp);
        }
}
EXPORT_SYMBOL(clk_enable_init_clocks);

#ifdef CONFIG_CPU_FREQ
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
        unsigned long flags;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_init_cpufreq_table)
                arch_clock->clk_init_cpufreq_table(table);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_init_cpufreq_table);
#endif

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;
        unsigned long flags;

        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0 ||
                    (ck->flags & (ALWAYS_ENABLED | PARENT_CONTROLS_CLOCK)))
                        continue;

                if (cpu_class_is_omap1() && ck->enable_reg == 0)
                        continue;

                spin_lock_irqsave(&clockfw_lock, flags);
                if (arch_clock->clk_disable_unused)
                        arch_clock->clk_disable_unused(ck);
                spin_unlock_irqrestore(&clockfw_lock, flags);
        }

        return 0;
}
late_initcall(clk_disable_unused);
#endif

int __init clk_init(struct clk_functions *custom_clocks)
{
        if (!custom_clocks) {
                printk(KERN_ERR "No custom clock functions registered\n");
                BUG();
        }

        arch_clock = custom_clocks;

        return 0;
}
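
/*
 * Each OMAP generation installs its low-level operations through
 * clk_init() early during boot.  A rough sketch of what such a
 * registration looks like (the structure contents and function names
 * below are illustrative, not a complete or authoritative list):
 *
 *      static struct clk_functions omap1_clk_functions = {
 *              .clk_enable     = omap1_clk_enable,
 *              .clk_disable    = omap1_clk_disable,
 *              .clk_round_rate = omap1_clk_round_rate,
 *              .clk_set_rate   = omap1_clk_set_rate,
 *      };
 *
 *      clk_init(&omap1_clk_functions);
 */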

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 *      debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
        int err;
        struct dentry *d, *child;
        struct clk *pa = c->parent;
        char s[255];
        char *p = s;

        p += sprintf(p, "%s", c->name);
        if (c->id != 0)
                sprintf(p, ":%d", c->id);
        d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
        if (!d)
                return -ENOMEM;
        c->dent = d;

        d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        return 0;

err_out:
        d = c->dent;
        list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
                debugfs_remove(child);
        debugfs_remove(c->dent);
        return err;
}

static int clk_debugfs_register(struct clk *c)
{
        int err;
        struct clk *pa = c->parent;

        if (pa && !pa->dent) {
                err = clk_debugfs_register(pa);
                if (err)
                        return err;
        }

        if (!c->dent) {
                err = clk_debugfs_register_one(c);
                if (err)
                        return err;
        }
        return 0;
}

static int __init clk_debugfs_init(void)
{
        struct clk *c;
        struct dentry *d;
        int err;

        d = debugfs_create_dir("clock", NULL);
        if (!d)
                return -ENOMEM;
        clk_debugfs_root = d;

        list_for_each_entry(c, &clocks, node) {
                err = clk_debugfs_register(c);
                if (err)
                        goto err_out;
        }
        return 0;
err_out:
        debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
        return err;
}
late_initcall(clk_debugfs_init);
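
/*
 * With CONFIG_PM_DEBUG and CONFIG_DEBUG_FS enabled, the clock tree is
 * exposed under debugfs (commonly mounted at /sys/kernel/debug) with
 * one directory per registered clock, named "<name>" or "<name>:<id>"
 * and nested to mirror the parent/child hierarchy, e.g. (clock names
 * are examples only):
 *
 *      clock/ck_ref/ck_dpll1/arm_ck/usecount
 *      clock/ck_ref/ck_dpll1/arm_ck/rate
 *      clock/ck_ref/ck_dpll1/arm_ck/flags
 */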

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */