/*
 *  linux/arch/arm/plat-omap/clock.c
 *
 *  Copyright (C) 2004 - 2008 Nokia corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/debugfs.h>
#include <linux/io.h>

#include <mach/clock.h>

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static struct clk_functions *arch_clock;

/*-------------------------------------------------------------------------
 * Standard clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/

/*
 * Returns a clock. Note that we first try to match both the platform
 * device id on the bus and the clock name. If this fails, we try to
 * match the clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(p, &clocks, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clocks, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clocks_mutex);

	return clk;
}
EXPORT_SYMBOL(clk_get);

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_enable)
		ret = arch_clock->clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	if (arch_clock->clk_disable)
		arch_clock->clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

int clk_get_usecount(struct clk *clk)
{
	unsigned long flags;
	int ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->usecount;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_usecount);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL(clk_put);
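
/*
 * Illustrative sketch (not part of this file): a typical driver pairs the
 * calls above as shown below.  The "mmc_ck" name and the platform device
 * pointer are assumptions for the example; clk_get() first tries to match
 * both the platform device id and the clock name, then falls back to the
 * name alone.
 *
 *	struct clk *ck = clk_get(&pdev->dev, "mmc_ck");
 *
 *	if (IS_ERR(ck))
 *		return PTR_ERR(ck);
 *	clk_enable(ck);
 *	pr_info("mmc_ck running at %lu Hz\n", clk_get_rate(ck));
 *	...
 *	clk_disable(ck);
 *	clk_put(ck);
 */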

/*-------------------------------------------------------------------------
 * Optional clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_round_rate)
		ret = arch_clock->clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);

	if (arch_clock->clk_set_rate) {
		ret = arch_clock->clk_set_rate(clk, rate);
		if (ret == 0) {
			(*clk->recalc)(clk);
			if (clk->flags & RATE_PROPAGATES)
				propagate_rate(clk);
		}
	}

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);
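
/*
 * Illustrative sketch (the clock and rate are assumptions for the example):
 * callers normally pick a supported rate with clk_round_rate() before
 * calling clk_set_rate(), since clk_set_rate() returns -EINVAL when the
 * rate cannot be programmed.
 *
 *	long rounded = clk_round_rate(dsp_ck, 96000000);
 *
 *	if (rounded > 0 && clk_set_rate(dsp_ck, rounded) == 0)
 *		pr_info("dsp_ck now runs at %lu Hz\n", clk_get_rate(dsp_ck));
 */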

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);

	if (arch_clock->clk_set_parent) {
		ret = arch_clock->clk_set_parent(clk, parent);
		if (ret == 0) {
			(*clk->recalc)(clk);
			if (clk->flags & RATE_PROPAGATES)
				propagate_rate(clk);
		}
	}

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	unsigned long flags;
	struct clk *ret = NULL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_get_parent)
		ret = arch_clock->clk_get_parent(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_parent);

/*-------------------------------------------------------------------------
 * OMAP specific clock functions shared between omap1 and omap2
 *-------------------------------------------------------------------------*/

unsigned int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);
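
/*
 * Worked example of the option handling above (values are illustrative):
 * "mpurate=600" is below 1000, so it is taken as MHz and scaled to
 * 600000000 Hz, while "mpurate=600000000" is used unchanged as a rate
 * in Hz.
 */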

/* Used for clocks that always have the same rate as the parent clock */
void followparent_recalc(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	clk->rate = clk->parent->rate;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	if (tclk == NULL || IS_ERR(tclk))
		return;

	list_for_each_entry(clkp, &clocks, node) {
		if (likely(clkp->parent != tclk))
			continue;
		if (likely(clkp->recalc != NULL)) {
			clkp->recalc(clkp);
			if (clkp->flags & RATE_PROPAGATES)
				propagate_rate(clkp);
		}
	}
}
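
/*
 * Illustrative sketch (the clock names and rates are assumptions): a child
 * that simply follows its parent uses followparent_recalc(), and marking
 * the parent RATE_PROPAGATES makes the rate change cascade through
 * propagate_rate() whenever the parent is reprogrammed.
 *
 *	static struct clk ck_ref = {
 *		.name	= "ck_ref",
 *		.rate	= 12000000,
 *		.flags	= RATE_PROPAGATES,
 *	};
 *
 *	static struct clk uart_fck = {
 *		.name	= "uart_fck",
 *		.parent	= &ck_ref,
 *		.recalc	= &followparent_recalc,
 *	};
 */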

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node) {
		if (unlikely(!clkp->parent) && likely(clkp->recalc != NULL)) {
			clkp->recalc(clkp);
			if (clkp->flags & RATE_PROPAGATES)
				propagate_rate(clkp);
		}
	}
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);
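
/*
 * Illustrative sketch (the omap_clks[] array is an assumption): the
 * machine-specific clock code registers its static clocks at init time
 * and then lets this framework fill in the rates and enable the
 * always-needed clocks.
 *
 *	struct clk **c;
 *
 *	for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
 *		clk_register(*c);
 *
 *	recalculate_root_clocks();
 *	clk_enable_init_clocks();
 */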

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_deny_idle(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_deny_idle)
		arch_clock->clk_deny_idle(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_deny_idle);

void clk_allow_idle(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_allow_idle)
		arch_clock->clk_allow_idle(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_allow_idle);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node) {
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
	}
}
EXPORT_SYMBOL(clk_enable_init_clocks);

#ifdef CONFIG_CPU_FREQ
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_init_cpufreq_table)
		arch_clock->clk_init_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_init_cpufreq_table);
#endif

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0 ||
		    (ck->flags & (ALWAYS_ENABLED | PARENT_CONTROLS_CLOCK)))
			continue;

		if (cpu_class_is_omap1() && ck->enable_reg == 0)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		if (arch_clock->clk_disable_unused)
			arch_clock->clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);
#endif

int __init clk_init(struct clk_functions *custom_clocks)
{
	if (!custom_clocks) {
		printk(KERN_ERR "No custom clock functions registered\n");
		BUG();
	}

	arch_clock = custom_clocks;

	return 0;
}
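
/*
 * Illustrative sketch (the omap1_clk_* names are assumptions): the
 * mach-omap1/mach-omap2 clock code fills a struct clk_functions with its
 * low-level hooks and hands it to clk_init() early during boot, before
 * any of the clk_* calls above are used.
 *
 *	static struct clk_functions omap1_clk_functions = {
 *		.clk_enable	= omap1_clk_enable,
 *		.clk_disable	= omap1_clk_disable,
 *		.clk_round_rate	= omap1_clk_round_rate,
 *		.clk_set_rate	= omap1_clk_set_rate,
 *	};
 *
 *	clk_init(&omap1_clk_functions);
 */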

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
	if (c->id != 0)
		sprintf(p, ":%d", c->id);
	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dent;
	list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
	return err;
}
late_initcall(clk_debugfs_init);
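
/*
 * With the above enabled, each registered clock shows up under the debugfs
 * "clock" directory, nested by parent.  Assuming debugfs is mounted at
 * /sys/kernel/debug and using illustrative clock names, the layout looks
 * like:
 *
 *	/sys/kernel/debug/clock/ck_ref/arm_ck/rate
 *	/sys/kernel/debug/clock/ck_ref/arm_ck/usecount
 *	/sys/kernel/debug/clock/ck_ref/arm_ck/flags
 */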

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */