/*
 * Source: linux-2.6-omap-h63xx.git, arch/arm/plat-omap/clock.c
 * (gitweb page header removed; related patch subject:
 *  "OMAP clock: support 'dry run' rate and parent changes")
 */
1 /*
2  *  linux/arch/arm/plat-omap/clock.c
3  *
4  *  Copyright (C) 2004 - 2008 Nokia corporation
5  *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
6  *
7  *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/list.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/string.h>
20 #include <linux/clk.h>
21 #include <linux/mutex.h>
22 #include <linux/platform_device.h>
23 #include <linux/cpufreq.h>
24 #include <linux/debugfs.h>
25 #include <linux/io.h>
26
27 #include <mach/clock.h>
28
/* Global list of every registered clock; guarded by clocks_mutex. */
static LIST_HEAD(clocks);
/* Serializes list modification and lookup (clk_get/register); may sleep. */
static DEFINE_MUTEX(clocks_mutex);
/* Protects clock hardware state and cached rates; taken IRQs-off. */
static DEFINE_SPINLOCK(clockfw_lock);

/* Arch-specific (omap1/omap2) clock operations; set once by clk_init(). */
static struct clk_functions *arch_clock;
34
35 /*-------------------------------------------------------------------------
36  * Standard clock functions defined in include/linux/clk.h
37  *-------------------------------------------------------------------------*/
38
39 /*
40  * Returns a clock. Note that we first try to use device id on the bus
41  * and clock name. If this fails, we try to use clock name only.
42  */
43 struct clk * clk_get(struct device *dev, const char *id)
44 {
45         struct clk *p, *clk = ERR_PTR(-ENOENT);
46         int idno;
47
48         if (dev == NULL || dev->bus != &platform_bus_type)
49                 idno = -1;
50         else
51                 idno = to_platform_device(dev)->id;
52
53         mutex_lock(&clocks_mutex);
54
55         list_for_each_entry(p, &clocks, node) {
56                 if (p->id == idno &&
57                     strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
58                         clk = p;
59                         goto found;
60                 }
61         }
62
63         list_for_each_entry(p, &clocks, node) {
64                 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
65                         clk = p;
66                         break;
67                 }
68         }
69
70 found:
71         mutex_unlock(&clocks_mutex);
72
73         return clk;
74 }
75 EXPORT_SYMBOL(clk_get);
76
77 int clk_enable(struct clk *clk)
78 {
79         unsigned long flags;
80         int ret = 0;
81
82         if (clk == NULL || IS_ERR(clk))
83                 return -EINVAL;
84
85         spin_lock_irqsave(&clockfw_lock, flags);
86         if (arch_clock->clk_enable) {
87                 ret = arch_clock->clk_enable(clk);
88                 if (ret == 0 && clk->flags & RECALC_ON_ENABLE) {
89                         if (clk->recalc)
90                                 (*clk->recalc)(clk, clk->parent->rate,
91                                                CURRENT_RATE);
92                         if (clk->flags & RATE_PROPAGATES)
93                                 propagate_rate(clk, CURRENT_RATE);
94                 }
95         }
96
97         spin_unlock_irqrestore(&clockfw_lock, flags);
98
99         return ret;
100 }
101 EXPORT_SYMBOL(clk_enable);
102
103 void clk_disable(struct clk *clk)
104 {
105         unsigned long flags;
106
107         if (clk == NULL || IS_ERR(clk))
108                 return;
109
110         spin_lock_irqsave(&clockfw_lock, flags);
111         if (clk->usecount == 0) {
112                 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
113                        clk->name);
114                 WARN_ON(1);
115                 goto out;
116         }
117
118         if (arch_clock->clk_disable) {
119                 arch_clock->clk_disable(clk);
120                 if (clk->flags & RECALC_ON_ENABLE) {
121                         if (clk->recalc)
122                                 (*clk->recalc)(clk, clk->parent->rate,
123                                                CURRENT_RATE);
124                         if (clk->flags & RATE_PROPAGATES)
125                                 propagate_rate(clk, CURRENT_RATE);
126                 }
127         }
128
129 out:
130         spin_unlock_irqrestore(&clockfw_lock, flags);
131 }
132 EXPORT_SYMBOL(clk_disable);
133
134 int clk_get_usecount(struct clk *clk)
135 {
136         unsigned long flags;
137         int ret = 0;
138
139         if (clk == NULL || IS_ERR(clk))
140                 return 0;
141
142         spin_lock_irqsave(&clockfw_lock, flags);
143         ret = clk->usecount;
144         spin_unlock_irqrestore(&clockfw_lock, flags);
145
146         return ret;
147 }
148 EXPORT_SYMBOL(clk_get_usecount);
149
150 unsigned long clk_get_rate(struct clk *clk)
151 {
152         unsigned long flags;
153         unsigned long ret = 0;
154
155         if (clk == NULL || IS_ERR(clk))
156                 return 0;
157
158         spin_lock_irqsave(&clockfw_lock, flags);
159         ret = clk->rate;
160         spin_unlock_irqrestore(&clockfw_lock, flags);
161
162         return ret;
163 }
164 EXPORT_SYMBOL(clk_get_rate);
165
166 void clk_put(struct clk *clk)
167 {
168         if (clk && !IS_ERR(clk))
169                 module_put(clk->owner);
170 }
171 EXPORT_SYMBOL(clk_put);
172
173 /*-------------------------------------------------------------------------
174  * Optional clock functions defined in include/linux/clk.h
175  *-------------------------------------------------------------------------*/
176
177 long clk_round_rate(struct clk *clk, unsigned long rate)
178 {
179         unsigned long flags;
180         long ret = 0;
181
182         if (clk == NULL || IS_ERR(clk))
183                 return ret;
184
185         spin_lock_irqsave(&clockfw_lock, flags);
186         if (arch_clock->clk_round_rate)
187                 ret = arch_clock->clk_round_rate(clk, rate);
188         spin_unlock_irqrestore(&clockfw_lock, flags);
189
190         return ret;
191 }
192 EXPORT_SYMBOL(clk_round_rate);
193
194 int clk_set_rate(struct clk *clk, unsigned long rate)
195 {
196         unsigned long flags;
197         int ret = -EINVAL;
198
199         if (clk == NULL || IS_ERR(clk))
200                 return ret;
201
202         spin_lock_irqsave(&clockfw_lock, flags);
203
204         if (arch_clock->clk_set_rate) {
205                 ret = arch_clock->clk_set_rate(clk, rate);
206                 if (ret == 0) {
207                         if (clk->recalc)
208                                 (*clk->recalc)(clk, clk->parent->rate,
209                                                CURRENT_RATE);
210                         if (clk->flags & RATE_PROPAGATES)
211                                 propagate_rate(clk, CURRENT_RATE);
212                 }
213         }
214
215         spin_unlock_irqrestore(&clockfw_lock, flags);
216
217         return ret;
218 }
219 EXPORT_SYMBOL(clk_set_rate);
220
221 int clk_set_parent(struct clk *clk, struct clk *parent)
222 {
223         unsigned long flags;
224         int ret = -EINVAL;
225
226         if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
227                 return ret;
228
229         spin_lock_irqsave(&clockfw_lock, flags);
230
231         if (arch_clock->clk_set_parent) {
232                 ret = arch_clock->clk_set_parent(clk, parent);
233                 if (ret == 0) {
234                         if (clk->recalc)
235                                 (*clk->recalc)(clk, clk->parent->rate,
236                                                CURRENT_RATE);
237                         if (clk->flags & RATE_PROPAGATES)
238                                 propagate_rate(clk, CURRENT_RATE);
239                 }
240         }
241
242         spin_unlock_irqrestore(&clockfw_lock, flags);
243
244         return ret;
245 }
246 EXPORT_SYMBOL(clk_set_parent);
247
248 struct clk *clk_get_parent(struct clk *clk)
249 {
250         unsigned long flags;
251         struct clk * ret = NULL;
252
253         if (clk == NULL || IS_ERR(clk))
254                 return ret;
255
256         spin_lock_irqsave(&clockfw_lock, flags);
257         if (arch_clock->clk_get_parent)
258                 ret = arch_clock->clk_get_parent(clk);
259         spin_unlock_irqrestore(&clockfw_lock, flags);
260
261         return ret;
262 }
263 EXPORT_SYMBOL(clk_get_parent);
264
265 /*-------------------------------------------------------------------------
266  * OMAP specific clock functions shared between omap1 and omap2
267  *-------------------------------------------------------------------------*/
268
269 unsigned int __initdata mpurate;
270
271 /*
272  * By default we use the rate set by the bootloader.
273  * You can override this with mpurate= cmdline option.
274  */
275 static int __init omap_clk_setup(char *str)
276 {
277         get_option(&str, &mpurate);
278
279         if (!mpurate)
280                 return 1;
281
282         if (mpurate < 1000)
283                 mpurate *= 1000000;
284
285         return 1;
286 }
287 __setup("mpurate=", omap_clk_setup);
288
289 /* Used for clocks that always have same value as the parent clock */
290 void followparent_recalc(struct clk *clk, unsigned long new_parent_rate,
291                          u8 rate_storage)
292 {
293         if (rate_storage == CURRENT_RATE)
294                 clk->rate = new_parent_rate;
295         else if (rate_storage == TEMP_RATE)
296                 clk->temp_rate = new_parent_rate;
297 }
298
299 /* Propagate rate to children */
300 void propagate_rate(struct clk *tclk, u8 rate_storage)
301 {
302         struct clk *clkp;
303         unsigned long parent_rate = 0;
304
305         if (tclk == NULL || IS_ERR(tclk))
306                 return;
307
308         list_for_each_entry(clkp, &clocks, node) {
309                 if (likely(clkp->parent != tclk))
310                         continue;
311
312                 if (rate_storage == CURRENT_RATE)
313                         parent_rate = tclk->rate;
314                 else if (rate_storage == TEMP_RATE)
315                         parent_rate = tclk->temp_rate;
316
317                 if (clkp->recalc)
318                         clkp->recalc(clkp, parent_rate, rate_storage);
319                 if (clkp->flags & RATE_PROPAGATES)
320                         propagate_rate(clkp, rate_storage);
321         }
322 }
323
324 /**
325  * recalculate_root_clocks - recalculate and propagate all root clocks
326  *
327  * Recalculates all root clocks (clocks with no parent), which if the
328  * clock's .recalc is set correctly, should also propagate their rates.
329  * Called at init.
330  */
331 void recalculate_root_clocks(void)
332 {
333         struct clk *clkp;
334
335         list_for_each_entry(clkp, &clocks, node) {
336                 if (unlikely(!clkp->parent)) {
337                         if (clkp->recalc)
338                                 clkp->recalc(clkp, 0, CURRENT_RATE);
339                         if (clkp->flags & RATE_PROPAGATES)
340                                 propagate_rate(clkp, CURRENT_RATE);
341                 }
342         }
343 }
344
345 int clk_register(struct clk *clk)
346 {
347         if (clk == NULL || IS_ERR(clk))
348                 return -EINVAL;
349
350         mutex_lock(&clocks_mutex);
351         list_add(&clk->node, &clocks);
352         if (clk->init)
353                 clk->init(clk);
354         mutex_unlock(&clocks_mutex);
355
356         return 0;
357 }
358 EXPORT_SYMBOL(clk_register);
359
360 void clk_unregister(struct clk *clk)
361 {
362         if (clk == NULL || IS_ERR(clk))
363                 return;
364
365         mutex_lock(&clocks_mutex);
366         list_del(&clk->node);
367         mutex_unlock(&clocks_mutex);
368 }
369 EXPORT_SYMBOL(clk_unregister);
370
371 void clk_deny_idle(struct clk *clk)
372 {
373         unsigned long flags;
374
375         if (clk == NULL || IS_ERR(clk))
376                 return;
377
378         spin_lock_irqsave(&clockfw_lock, flags);
379         if (arch_clock->clk_deny_idle)
380                 arch_clock->clk_deny_idle(clk);
381         spin_unlock_irqrestore(&clockfw_lock, flags);
382 }
383 EXPORT_SYMBOL(clk_deny_idle);
384
385 void clk_allow_idle(struct clk *clk)
386 {
387         unsigned long flags;
388
389         if (clk == NULL || IS_ERR(clk))
390                 return;
391
392         spin_lock_irqsave(&clockfw_lock, flags);
393         if (arch_clock->clk_allow_idle)
394                 arch_clock->clk_allow_idle(clk);
395         spin_unlock_irqrestore(&clockfw_lock, flags);
396 }
397 EXPORT_SYMBOL(clk_allow_idle);
398
399 void clk_enable_init_clocks(void)
400 {
401         struct clk *clkp;
402
403         list_for_each_entry(clkp, &clocks, node) {
404                 if (clkp->flags & ENABLE_ON_INIT)
405                         clk_enable(clkp);
406         }
407 }
408 EXPORT_SYMBOL(clk_enable_init_clocks);
409
#ifdef CONFIG_CPU_FREQ
/* Fill @table with the platform's cpufreq table via the arch hook. */
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_init_cpufreq_table)
		arch_clock->clk_init_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_init_cpufreq_table);
#endif
422
423 /*-------------------------------------------------------------------------*/
424
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	list_for_each_entry(ck, &clocks, node) {
		/* Skip clocks that are in use or not software-gated. */
		if (ck->usecount > 0)
			continue;
		if (ck->flags & (ALWAYS_ENABLED | PARENT_CONTROLS_CLOCK))
			continue;

		/* On OMAP1, a clock without an enable register cannot
		 * be gated at all. */
		if (cpu_class_is_omap1() && ck->enable_reg == 0)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		if (arch_clock->clk_disable_unused)
			arch_clock->clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);
#endif
452
453 int __init clk_init(struct clk_functions * custom_clocks)
454 {
455         if (!custom_clocks) {
456                 printk(KERN_ERR "No custom clock functions registered\n");
457                 BUG();
458         }
459
460         arch_clock = custom_clocks;
461
462         return 0;
463 }
464
465 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
466 /*
467  *      debugfs support to trace clock tree hierarchy and attributes
468  */
469 static struct dentry *clk_debugfs_root;
470
471 static int clk_debugfs_register_one(struct clk *c)
472 {
473         int err;
474         struct dentry *d, *child;
475         struct clk *pa = c->parent;
476         char s[255];
477         char *p = s;
478
479         p += sprintf(p, "%s", c->name);
480         if (c->id != 0)
481                 sprintf(p, ":%d", c->id);
482         d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
483         if (!d)
484                 return -ENOMEM;
485         c->dent = d;
486
487         d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
488         if (!d) {
489                 err = -ENOMEM;
490                 goto err_out;
491         }
492         d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
493         if (!d) {
494                 err = -ENOMEM;
495                 goto err_out;
496         }
497         d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
498         if (!d) {
499                 err = -ENOMEM;
500                 goto err_out;
501         }
502         return 0;
503
504 err_out:
505         d = c->dent;
506         list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
507                 debugfs_remove(child);
508         debugfs_remove(c->dent);
509         return err;
510 }
511
512 static int clk_debugfs_register(struct clk *c)
513 {
514         int err;
515         struct clk *pa = c->parent;
516
517         if (pa && !pa->dent) {
518                 err = clk_debugfs_register(pa);
519                 if (err)
520                         return err;
521         }
522
523         if (!c->dent) {
524                 err = clk_debugfs_register_one(c);
525                 if (err)
526                         return err;
527         }
528         return 0;
529 }
530
531 static int __init clk_debugfs_init(void)
532 {
533         struct clk *c;
534         struct dentry *d;
535         int err;
536
537         d = debugfs_create_dir("clock", NULL);
538         if (!d)
539                 return -ENOMEM;
540         clk_debugfs_root = d;
541
542         list_for_each_entry(c, &clocks, node) {
543                 err = clk_debugfs_register(c);
544                 if (err)
545                         goto err_out;
546         }
547         return 0;
548 err_out:
549         debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
550         return err;
551 }
552 late_initcall(clk_debugfs_init);
553
554 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */