]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/w1/masters/omap_hdq.c
34129e7066e20cc28ca18bbd0d182df31be9f5a5
[linux-2.6-omap-h63xx.git] / drivers / w1 / masters / omap_hdq.c
1 /*
2  * drivers/w1/masters/omap_hdq.c
3  *
4  * Copyright (C) 2007 Texas Instruments, Inc.
5  *
6  * This file is licensed under the terms of the GNU General Public License
7  * version 2. This program is licensed "as is" without any warranty of any
8  * kind, whether express or implied.
9  *
10  */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/err.h>
16 #include <linux/clk.h>
17 #include <linux/io.h>
18 #include <asm/irq.h>
19 #include <mach/hardware.h>
20
21 #include "../w1.h"
22 #include "../w1_int.h"
23
#define MOD_NAME        "OMAP_HDQ:"

/* HDQ register offsets and their bit definitions */
#define OMAP_HDQ_REVISION                       0x00
#define OMAP_HDQ_TX_DATA                        0x04
#define OMAP_HDQ_RX_DATA                        0x08
#define OMAP_HDQ_CTRL_STATUS                    0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK      (1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE        (1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO                 (1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION     (1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR                (1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE               (1<<0)
#define OMAP_HDQ_INT_STATUS                     0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE          (1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE          (1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT             (1<<0)
#define OMAP_HDQ_SYSCONFIG                      0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET            (1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE             (1<<0)
#define OMAP_HDQ_SYSSTATUS                      0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE            (1<<0)

/* flag_set arguments for hdq_wait_for_flag() */
#define OMAP_HDQ_FLAG_CLEAR                     0
#define OMAP_HDQ_FLAG_SET                       1
/* timeout used for both register polling and interrupt waits (1/5 s) */
#define OMAP_HDQ_TIMEOUT                        (HZ/5)

/* maximum number of concurrent users of the HDQ block */
#define OMAP_HDQ_MAX_USER                       4
51
/* woken by hdq_isr() once a new interrupt status has been latched */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/* module parameter: id reported to the w1 core during bus search */
static int w1_id;

struct hdq_data {
        struct device           *dev;
        void __iomem            *hdq_base;      /* ioremapped register base */
        struct  mutex           hdq_mutex;      /* serializes usecount and transfers */
        int                     hdq_usecount;   /* active users, capped at OMAP_HDQ_MAX_USER */
        struct  clk             *hdq_ick;       /* interface clock */
        struct  clk             *hdq_fck;       /* functional clock */
        u8                      hdq_irqstatus;  /* last INT_STATUS latched by the ISR */
        spinlock_t              hdq_spinlock;   /* protects hdq_irqstatus */
        /*
         * Used to control the call to omap_hdq_get and omap_hdq_put.
         * HDQ Protocol: Write the CMD|REG_address first, followed by
         * the data write or read.
         */
        int                     init_trans;
};
71
/* use-count / clock management and break-pulse helpers */
static int omap_hdq_get(struct hdq_data *hdq_data);
static int omap_hdq_put(struct hdq_data *hdq_data);
static int omap_hdq_break(struct hdq_data *hdq_data);

static int __init omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
        .probe = omap_hdq_probe,
        .remove = omap_hdq_remove,
        .suspend = NULL,
        .resume = NULL,
        .driver = {
                .name = "omap_hdq",
        },
};

/* w1 bus-master callbacks implemented below */
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, u8 search_type,
        w1_slave_found_callback slave_found);


/* bus-master operations registered with the w1 core in probe() */
static struct w1_bus_master omap_w1_master = {
        .read_byte      = omap_w1_read_byte,
        .write_byte     = omap_w1_write_byte,
        .reset_bus      = omap_w1_reset_bus,
        .search         = omap_w1_search_bus,
};
102
103 /*
104  * HDQ register I/O routines
105  */
106 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
107 {
108         return __raw_readb(hdq_data->hdq_base + offset);
109 }
110
111 static inline u8 hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
112 {
113         __raw_writeb(val, hdq_data->hdq_base + offset);
114
115         return val;
116 }
117
118 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
119                         u8 val, u8 mask)
120 {
121         u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
122                         | (val & mask);
123         __raw_writeb(new_val, hdq_data->hdq_base + offset);
124
125         return new_val;
126 }
127
128 /*
129  * Wait for one or more bits in flag change.
130  * HDQ_FLAG_SET: wait until any bit in the flag is set.
131  * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
132  * return 0 on success and -ETIMEDOUT in the case of timeout.
133  */
134 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
135                 u8 flag, u8 flag_set, u8 *status)
136 {
137         int ret = 0;
138         unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
139
140         if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
141                 /* wait for the flag clear */
142                 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
143                         && time_before(jiffies, timeout)) {
144                         set_current_state(TASK_UNINTERRUPTIBLE);
145                         schedule_timeout(1);
146                 }
147                 if (*status & flag)
148                         ret = -ETIMEDOUT;
149         } else if (flag_set == OMAP_HDQ_FLAG_SET) {
150                 /* wait for the flag set */
151                 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
152                         && time_before(jiffies, timeout)) {
153                         set_current_state(TASK_UNINTERRUPTIBLE);
154                         schedule_timeout(1);
155                 }
156                 if (!(*status & flag))
157                         ret = -ETIMEDOUT;
158         } else
159                 return -EINVAL;
160
161         return ret;
162 }
163
/*
 * Write out a byte over HDQ and fill *status with the resulting
 * HDQ_INT_STATUS value.
 *
 * Returns 0 on success, -EINTR if the wait for TXCOMPLETE was
 * interrupted by a signal, -ETIMEDOUT if TXCOMPLETE never arrived or
 * the GO bit did not return to zero.
 *
 * NOTE(review): hdq_irqstatus is read without the spinlock in the
 * wait_event condition; presumably safe for a single byte — confirm.
 */
static int
hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
        int ret;
        u8 tmp_status;
        unsigned long irqflags;

        *status = 0;

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        /* clear interrupt flags via a dummy read */
        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        /* ISR loads it with new INT_STATUS */
        hdq_data->hdq_irqstatus = 0;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

        hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

        /* set the GO bit (direction bit cleared, i.e. write) */
        hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
        /* wait for the TXCOMPLETE bit */
        /* a ret of 0 (wait timed out) falls through to the status check */
        ret = wait_event_interruptible_timeout(hdq_wait_queue,
                hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "wait interrupted");
                return -EINTR;
        }

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        *status = hdq_data->hdq_irqstatus;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
        /* check irqstatus */
        if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
                dev_dbg(hdq_data->dev, "timeout waiting for"
                        "TXCOMPLETE/RXCOMPLETE, %x", *status);
                return -ETIMEDOUT;
        }

        /* wait for the GO bit return to zero */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_GO,
                        OMAP_HDQ_FLAG_CLEAR, &tmp_status);
        if (ret) {
                dev_dbg(hdq_data->dev, "timeout waiting GO bit"
                        "return to zero, %x", tmp_status);
                return ret;
        }

        return ret;
}
218
/*
 * HDQ Interrupt service routine.
 *
 * Latches INT_STATUS into hdq_data->hdq_irqstatus under the spinlock
 * (per the comments elsewhere in this file, reading INT_STATUS also
 * clears the flags in hardware) and wakes any waiter when a
 * completion or timeout bit is set.
 */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
        struct hdq_data *hdq_data = _hdq;
        unsigned long irqflags;

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
        dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

        if (hdq_data->hdq_irqstatus &
                (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
                | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
                /* wake up sleeping process */
                wake_up_interruptible(&hdq_wait_queue);
        }

        return IRQ_HANDLED;
}
241
242 /*
243  * HDQ Mode: always return success.
244  */
245 static u8 omap_w1_reset_bus(void *_hdq)
246 {
247         return 0;
248 }
249
250 /*
251  * W1 search callback function.
252  */
253 static void omap_w1_search_bus(void *_hdq, u8 search_type,
254         w1_slave_found_callback slave_found)
255 {
256         u64 module_id, rn_le, cs, id;
257
258         if (w1_id)
259                 module_id = w1_id;
260         else
261                 module_id = 0x1;
262
263         rn_le = cpu_to_le64(module_id);
264         /*
265          * HDQ might not obey truly the 1-wire spec.
266          * So calculate CRC based on module parameter.
267          */
268         cs = w1_calc_crc8((u8 *)&rn_le, 7);
269         id = (cs << 56) | module_id;
270
271         slave_found(_hdq, id);
272 }
273
/*
 * Soft-reset the HDQ block, wait for RESETDONE, then select HDQ mode
 * with clocks and autoidle enabled.
 * Returns 0 on success, -ETIMEDOUT if RESETDONE never asserted.
 */
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
        int ret;
        u8 tmp_status;

        hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
        /*
         * Select HDQ mode & enable clocks.
         * It is observed that INT flags can't be cleared via a read and GO/INIT
         * won't return to zero if interrupt is disabled. So we always enable
         * interrupt.
         */
        hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

        /* wait for reset to complete */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
                OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
        if (ret)
                dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
                                tmp_status);
        else {
                /*
                 * NOTE(review): CTRL_STATUS is programmed again after the
                 * reset completes — presumably the soft reset clears it;
                 * confirm against the TRM.
                 */
                hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                        OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
                hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                        OMAP_HDQ_SYSCONFIG_AUTOIDLE);
        }

        return ret;
}
306
/*
 * Issue break pulse to the device.
 *
 * Returns 0 on success, -EINTR if a wait was interrupted, -EINVAL when
 * called with no active user (clocks off), -ETIMEDOUT when the
 * controller's TIMEOUT interrupt never arrived or INIT/GO did not
 * return to zero.
 */
static int
omap_hdq_break(struct hdq_data *hdq_data)
{
        int ret;
        u8 tmp_status;
        unsigned long irqflags;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0)
                return -EINTR;

        /* a break only makes sense while the block has an active user */
        if (!hdq_data->hdq_usecount) {
                mutex_unlock(&hdq_data->hdq_mutex);
                return -EINVAL;
        }

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        /* clear interrupt flags via a dummy read */
        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        /* ISR loads it with new INT_STATUS */
        hdq_data->hdq_irqstatus = 0;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

        /* set the INIT and GO bit */
        hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
                OMAP_HDQ_CTRL_STATUS_GO);

        /* wait for the TIMEOUT bit */
        ret = wait_event_interruptible_timeout(hdq_wait_queue,
                hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "wait interrupted");
                mutex_unlock(&hdq_data->hdq_mutex);
                return -EINTR;
        }

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        tmp_status = hdq_data->hdq_irqstatus;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
        /* check irqstatus */
        if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
                dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
                                tmp_status);
                mutex_unlock(&hdq_data->hdq_mutex);
                return -ETIMEDOUT;
        }
        /*
         * wait for both INIT and GO bits return to zero.
         * zero wait time expected for interrupt mode.
         */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
                        OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
                        &tmp_status);
        if (ret)
                dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
                        "return to zero, %x", tmp_status);

        mutex_unlock(&hdq_data->hdq_mutex);
        return ret;
}
373
/*
 * Read one byte from the HDQ RX register into *val.
 *
 * If RXCOMPLETE is not already latched, trigger a read cycle (DIR+GO)
 * and busy-poll hdq_irqstatus for RXCOMPLETE — the RX interrupt can
 * fire before we could go to sleep, hence polling rather than
 * wait_event (see inline comment).
 *
 * Returns 0 on success, -EINTR if the mutex wait was interrupted,
 * -EINVAL when called with no active user, -ETIMEDOUT when RXCOMPLETE
 * never arrived within OMAP_HDQ_TIMEOUT.
 */
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
        int ret;
        u8 status;
        unsigned long irqflags;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0)
                return -EINTR;

        if (!hdq_data->hdq_usecount) {
                mutex_unlock(&hdq_data->hdq_mutex);
                return -EINVAL;
        }

        if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
                /* start a read cycle: direction = read, GO */
                hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
                        OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
                /*
                 * The RX comes immediately after TX. It
                 * triggers another interrupt before we
                 * sleep. So we have to wait for RXCOMPLETE bit.
                 */
                {
                        unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
                        while (!(hdq_data->hdq_irqstatus
                                & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
                                && time_before(jiffies, timeout)) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                schedule_timeout(1);
                        }
                }
                /* flip the direction bit back to write for the next cycle */
                hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
                        OMAP_HDQ_CTRL_STATUS_DIR);
                spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
                status = hdq_data->hdq_irqstatus;
                spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
                /* check irqstatus */
                if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
                        dev_dbg(hdq_data->dev, "timeout waiting for"
                                "RXCOMPLETE, %x", status);
                        mutex_unlock(&hdq_data->hdq_mutex);
                        return -ETIMEDOUT;
                }
        }
        /* the data is ready. Read it in! */
        *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
        mutex_unlock(&hdq_data->hdq_mutex);

        return 0;

}
427
428 /*
429  * Enable clocks and set the controller to HDQ mode.
430  */
431 static int
432 omap_hdq_get(struct hdq_data *hdq_data)
433 {
434         int ret = 0;
435
436         ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
437         if (ret < 0)
438                 return -EINTR;
439
440         if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
441                 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
442                 mutex_unlock(&hdq_data->hdq_mutex);
443                 ret = -EINVAL;
444         } else {
445                 hdq_data->hdq_usecount++;
446                 try_module_get(THIS_MODULE);
447                 if (1 == hdq_data->hdq_usecount) {
448                         if (clk_enable(hdq_data->hdq_ick)) {
449                                 dev_dbg(hdq_data->dev, "Can not enable ick\n");
450                                 clk_put(hdq_data->hdq_ick);
451                                 clk_put(hdq_data->hdq_fck);
452                                 mutex_unlock(&hdq_data->hdq_mutex);
453                                 return -ENODEV;
454                         }
455                         if (clk_enable(hdq_data->hdq_fck)) {
456                                 dev_dbg(hdq_data->dev, "Can not enable fck\n");
457                                 clk_put(hdq_data->hdq_ick);
458                                 clk_put(hdq_data->hdq_fck);
459                                 mutex_unlock(&hdq_data->hdq_mutex);
460                                 return -ENODEV;
461                         }
462
463                         /* make sure HDQ is out of reset */
464                         if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
465                                 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
466                                 ret = _omap_hdq_reset(hdq_data);
467                                 if (ret)
468                                         /* back up the count */
469                                         hdq_data->hdq_usecount--;
470                         } else {
471                                 /* select HDQ mode & enable clocks */
472                                 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
473                                         OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
474                                         OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
475                                 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
476                                         OMAP_HDQ_SYSCONFIG_AUTOIDLE);
477                                 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
478                         }
479                 }
480         }
481         mutex_unlock(&hdq_data->hdq_mutex);
482         return ret;
483 }
484
485 /*
486  * Disable clocks to the module.
487  */
488 static int
489 omap_hdq_put(struct hdq_data *hdq_data)
490 {
491         int ret = 0;
492
493         ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
494         if (ret < 0)
495                 return -EINTR;
496
497         if (0 == hdq_data->hdq_usecount) {
498                 dev_dbg(hdq_data->dev, "attempt to decrement use count"
499                         "when it is zero");
500                 ret = -EINVAL;
501         } else {
502                 hdq_data->hdq_usecount--;
503                 module_put(THIS_MODULE);
504                 if (0 == hdq_data->hdq_usecount) {
505                         clk_disable(hdq_data->hdq_ick);
506                         clk_disable(hdq_data->hdq_fck);
507                 }
508         }
509         mutex_unlock(&hdq_data->hdq_mutex);
510         return ret;
511 }
512
513 /*
514  * Read a byte of data from the device.
515  */
516 static u8 omap_w1_read_byte(void *_hdq)
517 {
518         struct hdq_data *hdq_data = _hdq;
519         u8 val;
520         int ret;
521
522         ret = hdq_read_byte(hdq_data, &val);
523         if (ret) {
524                 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
525                 if (ret < 0) {
526                         dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
527                         return -EINTR;
528                 }
529                 hdq_data->init_trans = 0;
530                 mutex_unlock(&hdq_data->hdq_mutex);
531                 omap_hdq_put(hdq_data);
532                 return -1;
533         }
534
535         /* Write followed by a read, release the module */
536         if (hdq_data->init_trans) {
537                 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
538                 if (ret < 0) {
539                         dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
540                         return -EINTR;
541                 }
542                 hdq_data->init_trans = 0;
543                 mutex_unlock(&hdq_data->hdq_mutex);
544                 omap_hdq_put(hdq_data);
545         }
546
547         return val;
548 }
549
/*
 * Write a byte of data to the device.
 *
 * The first write of a transaction takes a reference via omap_hdq_get()
 * and bumps init_trans; the second write completes the CMD|REG_address
 * plus data sequence and releases the reference again (see the comment
 * on struct hdq_data).
 *
 * NOTE(review): the return values of omap_hdq_get() and
 * hdq_write_byte() are ignored, so a failed transfer is silent apart
 * from the debug print — confirm this is intentional.
 */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
        struct hdq_data *hdq_data = _hdq;
        int ret;
        u8 status;

        /* First write to initialize the transfer */
        if (hdq_data->init_trans == 0)
                omap_hdq_get(hdq_data);

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                return;
        }
        hdq_data->init_trans++;
        mutex_unlock(&hdq_data->hdq_mutex);

        hdq_write_byte(hdq_data, byte, &status);
        dev_dbg(hdq_data->dev, "Ctrl status %x\n", status);

        /* Second write, data transfered. Release the module */
        if (hdq_data->init_trans > 1) {
                omap_hdq_put(hdq_data);
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
        }

        return;
}
588
589 static int __init omap_hdq_probe(struct platform_device *pdev)
590 {
591         struct hdq_data *hdq_data;
592         struct resource *res;
593         int ret, irq;
594         u8 rev;
595
596         if (!pdev)
597                 return -ENODEV;
598
599         hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
600         if (!hdq_data) {
601                 dev_dbg(&pdev->dev, "unable to allocate memory\n");
602                 ret = -ENODEV;
603                 goto err_kmalloc;
604         }
605
606         hdq_data->dev = &pdev->dev;
607         platform_set_drvdata(pdev, hdq_data);
608
609         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
610         if (!res) {
611                 dev_dbg(&pdev->dev, "unable to get resource\n");
612                 ret = ENXIO;
613                 goto err_resource;
614         }
615
616         hdq_data->hdq_base = ioremap(res->start, SZ_4K);
617         if (!hdq_data->hdq_base) {
618                 dev_dbg(&pdev->dev, "ioremap failed\n");
619                 ret = -EINVAL;
620                 goto err_ioremap;
621         }
622
623         /* get interface & functional clock objects */
624         hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
625         hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");
626
627         if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
628                 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
629                 if (IS_ERR(hdq_data->hdq_ick)) {
630                         ret = PTR_ERR(hdq_data->hdq_ick);
631                         goto err_clk;
632                 }
633                 if (IS_ERR(hdq_data->hdq_fck)) {
634                         ret = PTR_ERR(hdq_data->hdq_fck);
635                         clk_put(hdq_data->hdq_ick);
636                         goto err_clk;
637                 }
638         }
639
640         hdq_data->hdq_usecount = 0;
641         mutex_init(&hdq_data->hdq_mutex);
642
643         if (clk_enable(hdq_data->hdq_ick)) {
644                 dev_dbg(&pdev->dev, "Can not enable ick\n");
645                 ret = -ENODEV;
646                 goto err_ick;
647         }
648
649         if (clk_enable(hdq_data->hdq_fck)) {
650                 dev_dbg(&pdev->dev, "Can not enable fck\n");
651                 ret = -ENODEV;
652                 goto err_fck;
653         }
654
655         rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
656         dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
657                 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
658
659         spin_lock_init(&hdq_data->hdq_spinlock);
660         omap_hdq_break(hdq_data);
661
662         irq = platform_get_irq(pdev, 0);
663         if (irq < 0) {
664                 ret = -ENXIO;
665                 goto err_irq;
666         }
667
668         ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
669         if (ret < 0) {
670                 dev_dbg(&pdev->dev, "could not request irq\n");
671                 goto err_irq;
672         }
673
674         /* don't clock the HDQ until it is needed */
675         clk_disable(hdq_data->hdq_ick);
676         clk_disable(hdq_data->hdq_fck);
677
678         omap_w1_master.data = hdq_data;
679
680         ret = w1_add_master_device(&omap_w1_master);
681         if (ret) {
682                 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
683                 goto err_w1;
684         }
685
686         return 0;
687
688 err_w1:
689 err_irq:
690         clk_disable(hdq_data->hdq_fck);
691
692 err_fck:
693         clk_disable(hdq_data->hdq_ick);
694
695 err_ick:
696         clk_put(hdq_data->hdq_ick);
697         clk_put(hdq_data->hdq_fck);
698
699 err_clk:
700         iounmap(hdq_data->hdq_base);
701
702 err_ioremap:
703 err_resource:
704         platform_set_drvdata(pdev, NULL);
705         kfree(hdq_data);
706
707 err_kmalloc:
708         return ret;
709
710 }
711
712 static int omap_hdq_remove(struct platform_device *pdev)
713 {
714         struct hdq_data *hdq_data = platform_get_drvdata(pdev);
715
716         mutex_lock(&hdq_data->hdq_mutex);
717
718         if (0 != hdq_data->hdq_usecount) {
719                 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
720                 return -EBUSY;
721         }
722
723         mutex_unlock(&hdq_data->hdq_mutex);
724
725         /* remove module dependency */
726         clk_put(hdq_data->hdq_ick);
727         clk_put(hdq_data->hdq_fck);
728         free_irq(INT_24XX_HDQ_IRQ, hdq_data);
729         platform_set_drvdata(pdev, NULL);
730         iounmap(hdq_data->hdq_base);
731         kfree(hdq_data);
732
733         return 0;
734 }
735
736 static int __init
737 omap_hdq_init(void)
738 {
739         return platform_driver_register(&omap_hdq_driver);
740 }
741
742 static void __exit
743 omap_hdq_exit(void)
744 {
745         platform_driver_unregister(&omap_hdq_driver);
746 }
747
748 module_init(omap_hdq_init);
749 module_exit(omap_hdq_exit);
750
751 module_param(w1_id, int, S_IRUSR);
752
753 MODULE_AUTHOR("Texas Instruments");
754 MODULE_DESCRIPTION("HDQ driver Library");
755 MODULE_LICENSE("GPL");