2 * drivers/w1/masters/omap_hdq.c
4 * Copyright (C) 2007 Texas Instruments, Inc.
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/err.h>
16 #include <linux/clk.h>
19 #include <mach/hardware.h>
22 #include "../w1_int.h"
24 #define MOD_NAME "OMAP_HDQ:"
26 #define OMAP_HDQ_REVISION 0x00
27 #define OMAP_HDQ_TX_DATA 0x04
28 #define OMAP_HDQ_RX_DATA 0x08
29 #define OMAP_HDQ_CTRL_STATUS 0x0c
30 #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
31 #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
32 #define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
33 #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
34 #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
35 #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
36 #define OMAP_HDQ_INT_STATUS 0x10
37 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
38 #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
39 #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
40 #define OMAP_HDQ_SYSCONFIG 0x14
41 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
42 #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
43 #define OMAP_HDQ_SYSSTATUS 0x18
44 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)
46 #define OMAP_HDQ_FLAG_CLEAR 0
47 #define OMAP_HDQ_FLAG_SET 1
48 #define OMAP_HDQ_TIMEOUT (HZ/5)
50 #define OMAP_HDQ_MAX_USER 4
52 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
57 void __iomem *hdq_base;
58 struct mutex hdq_mutex;
63 spinlock_t hdq_spinlock;
65 * Used to control the call to omap_hdq_get and omap_hdq_put.
66 * HDQ Protocol: Write the CMD|REG_address first, followed by
67 * the data write or read.
72 static int omap_hdq_get(struct hdq_data *hdq_data);
73 static int omap_hdq_put(struct hdq_data *hdq_data);
74 static int omap_hdq_break(struct hdq_data *hdq_data);
76 static int __init omap_hdq_probe(struct platform_device *pdev);
77 static int omap_hdq_remove(struct platform_device *pdev);
/*
 * Platform driver glue: binds omap_hdq_probe()/omap_hdq_remove() to the
 * HDQ platform device.
 * NOTE(review): the initializer's remaining fields (e.g. .driver.name)
 * and the closing "};" are not visible in this extract.
 */
79 static struct platform_driver omap_hdq_driver = {
80 .probe = omap_hdq_probe,
81 .remove = omap_hdq_remove,
89 static u8 omap_w1_read_byte(void *_hdq);
90 static void omap_w1_write_byte(void *_hdq, u8 byte);
91 static u8 omap_w1_reset_bus(void *_hdq);
92 static void omap_w1_search_bus(void *_hdq, u8 search_type,
93 w1_slave_found_callback slave_found);
/*
 * w1 bus-master operations exported to the w1 core: byte read/write,
 * bus reset and device search are all backed by the HDQ controller.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this extract.
 */
96 static struct w1_bus_master omap_w1_master = {
97 .read_byte = omap_w1_read_byte,
98 .write_byte = omap_w1_write_byte,
99 .reset_bus = omap_w1_reset_bus,
100 .search = omap_w1_search_bus,
103 /* HDQ register I/O routines */
/* Read one byte from the HDQ register at @offset (MMIO, raw access). */
104 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
106 return __raw_readb(hdq_data->hdq_base + offset);
/*
 * Write @val to the HDQ register at @offset.
 * NOTE(review): declared to return u8 — presumably echoes @val back;
 * the return statement is not visible in this extract.
 */
109 static inline u8 hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
111 __raw_writeb(val, hdq_data->hdq_base + offset);
/*
 * Read-modify-write: clear the bits in @mask, OR in @val, write back.
 * Not atomic by itself — callers serialize access externally.
 * NOTE(review): the parameter list continuation (val, mask) and the
 * return of new_val are not visible in this extract.
 */
116 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
119 u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
121 __raw_writeb(new_val, hdq_data->hdq_base + offset);
127 * Wait for one or more bits in flag change.
128 * HDQ_FLAG_SET: wait until any bit in the flag is set.
129 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
130 * return 0 on success and -ETIMEDOUT in the case of timeout.
/*
 * Busy-wait (with scheduling, TASK_UNINTERRUPTIBLE) for bits in the
 * register at @offset to become set (OMAP_HDQ_FLAG_SET) or cleared
 * (OMAP_HDQ_FLAG_CLEAR), bounded by OMAP_HDQ_TIMEOUT jiffies.
 * The last register value read is returned through *status.
 * Returns 0 on success, -ETIMEDOUT when the flag condition was not met
 * before the deadline (per the comment block preceding this function).
 */
132 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
133 u8 flag, u8 flag_set, u8 *status)
136 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
138 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
139 /* wait for the flag clear */
140 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
141 && time_before(jiffies, timeout)) {
142 set_current_state(TASK_UNINTERRUPTIBLE);
/* loop bodies presumably schedule_timeout(); not visible in this extract */
147 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
148 /* wait for the flag set */
149 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
150 && time_before(jiffies, timeout)) {
151 set_current_state(TASK_UNINTERRUPTIBLE);
/* final timeout check: flag still not in requested state after the loop */
154 if (!(*status & flag))
162 /* write out a byte and fill *status with HDQ_INT_STATUS */
/*
 * Transmit one byte (@val) on the HDQ bus and report the resulting
 * INT_STATUS through *status.
 *
 * Sequence: clear stale interrupt state under the spinlock, load
 * TX_DATA, set GO (with DIR cleared = write direction), sleep until the
 * ISR posts hdq_irqstatus, then verify TXCOMPLETE and wait for the GO
 * bit to self-clear.
 */
163 static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
167 unsigned long irqflags;
/* hdq_irqstatus is shared with the ISR: reset it under the spinlock */
171 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
172 /* clear interrupt flags via a dummy read */
173 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
174 /* ISR loads it with new INT_STATUS */
175 hdq_data->hdq_irqstatus = 0;
176 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
178 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
/* mask covers DIR|GO: DIR ends up 0 (write), GO is set to start TX */
181 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
182 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
183 /* wait for the TXCOMPLETE bit */
184 ret = wait_event_interruptible_timeout(hdq_wait_queue,
185 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
/* ret < 0 here means the sleep was interrupted by a signal */
187 dev_dbg(hdq_data->dev, "wait interrupted");
/* snapshot the ISR-written status under the spinlock */
191 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
192 *status = hdq_data->hdq_irqstatus;
193 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
194 /* check irqstatus */
195 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
196 dev_dbg(hdq_data->dev, "timeout waiting for"
197 "TXCOMPLETE/RXCOMPLETE, %x", *status);
201 /* wait for the GO bit return to zero */
202 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
203 OMAP_HDQ_CTRL_STATUS_GO,
204 OMAP_HDQ_FLAG_CLEAR, &tmp_status);
206 dev_dbg(hdq_data->dev, "timeout waiting GO bit"
207 "return to zero, %x", tmp_status);
214 /* HDQ Interrupt service routine */
214 /* HDQ Interrupt service routine */
215 static irqreturn_t hdq_isr(int irq, void *_hdq)
217 struct hdq_data *hdq_data = _hdq;
218 unsigned long irqflags;
/*
 * Latch INT_STATUS into hdq_irqstatus under the spinlock (reading the
 * register also clears it in hardware — see the "dummy read" comments
 * at the call sites).
 */
220 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
221 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
222 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
223 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
/* any completion/timeout event wakes the sleeper in read/write/break */
225 if (hdq_data->hdq_irqstatus &
226 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
227 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
228 /* wake up sleeping process */
229 wake_up_interruptible(&hdq_wait_queue);
235 /* HDQ Mode: always return success */
/*
 * w1 reset_bus callback. In HDQ mode the real bus reset (break pulse)
 * is issued elsewhere, so this reports success unconditionally.
 * NOTE(review): the body (return 0) is not visible in this extract.
 */
236 static u8 omap_w1_reset_bus(void *_hdq)
241 /* W1 search callback function */
/*
 * w1 search callback. HDQ has no real ROM-search: a single 64-bit ID is
 * synthesized from the module-parameter module_id, with a CRC8 over the
 * low 7 bytes placed in the top byte, and reported via @slave_found.
 */
242 static void omap_w1_search_bus(void *_hdq, u8 search_type,
243 w1_slave_found_callback slave_found)
245 u64 module_id, rn_le, cs, id;
/* CRC is computed over the little-endian byte layout of the ID */
252 rn_le = cpu_to_le64(module_id);
254 * HDQ might not obey truly the 1-wire spec.
255 * So calculate CRC based on module parameter.
257 cs = w1_calc_crc8((u8 *)&rn_le, 7);
258 id = (cs << 56) | module_id;
260 slave_found(_hdq, id);
/*
 * Soft-reset the HDQ controller and restore its operating mode.
 * Caller must hold whatever serialization the callers use (invoked from
 * omap_hdq_get() with hdq_mutex held).
 */
263 static int _omap_hdq_reset(struct hdq_data *hdq_data)
268 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET)

;
270 * Select HDQ mode & enable clocks.
271 * It is observed that INT flags can't be cleared via a read and GO/INIT
272 * won't return to zero if interrupt is disabled. So we always enable
275 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
276 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
277 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
279 /* wait for reset to complete */
280 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
281 OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
283 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
/* reset done: re-select HDQ mode with clocks/interrupts, allow autoidle */
286 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
287 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
288 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
289 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
290 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
296 /* Issue break pulse to the device */
296 /* Issue break pulse to the device */
/*
 * Drive the HDQ initialization (break) pulse: set INIT|GO, sleep until
 * the ISR reports a TIMEOUT event (which is the expected completion
 * indication for a break), then wait for INIT and GO to self-clear.
 * Requires the module to be in use (hdq_usecount != 0).
 */
297 static int omap_hdq_break(struct hdq_data *hdq_data)
301 unsigned long irqflags;
303 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
/* break is only legal while a user holds the module via omap_hdq_get() */
307 if (!hdq_data->hdq_usecount) {
308 mutex_unlock(&hdq_data->hdq_mutex);
312 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
313 /* clear interrupt flags via a dummy read */
314 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
315 /* ISR loads it with new INT_STATUS */
316 hdq_data->hdq_irqstatus = 0;
317 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
319 /* set the INIT and GO bit */
320 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
321 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
322 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
323 OMAP_HDQ_CTRL_STATUS_GO);
325 /* wait for the TIMEOUT bit */
326 ret = wait_event_interruptible_timeout(hdq_wait_queue,
327 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
/* interrupted by a signal: bail out with the mutex released */
329 dev_dbg(hdq_data->dev, "wait interrupted");
330 mutex_unlock(&hdq_data->hdq_mutex);
334 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
335 tmp_status = hdq_data->hdq_irqstatus;
336 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
337 /* check irqstatus */
338 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
339 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
341 mutex_unlock(&hdq_data->hdq_mutex);
345 * wait for both INIT and GO bits return to zero.
346 * zero wait time expected for interrupt mode.
348 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
349 OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
350 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
353 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
354 "return to zero, %x", tmp_status);
356 mutex_unlock(&hdq_data->hdq_mutex);
/*
 * Receive one byte from the HDQ bus into *val.
 *
 * If the ISR has not already flagged RXCOMPLETE (RX can complete while
 * we are still processing the preceding TX), start a read by setting
 * DIR|GO and poll hdq_irqstatus with a jiffies deadline rather than
 * sleeping — the RX interrupt may fire before we could sleep.
 * Requires the module to be in use (hdq_usecount != 0).
 */
361 static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
365 unsigned long irqflags;
366 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
368 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
372 if (!hdq_data->hdq_usecount) {
373 mutex_unlock(&hdq_data->hdq_mutex);
377 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
/* DIR=1 selects read direction, GO starts the transfer */
378 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
379 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
380 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
382 * The RX comes immediately after TX. It
383 * triggers another interrupt before we
384 * sleep. So we have to wait for RXCOMPLETE bit.
386 while (!(hdq_data->hdq_irqstatus
387 & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
388 && time_before(jiffies, timeout)) {
389 set_current_state(TASK_UNINTERRUPTIBLE);
/* restore write direction (clear DIR) once the poll loop is done */
392 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
393 OMAP_HDQ_CTRL_STATUS_DIR);
394 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
395 status = hdq_data->hdq_irqstatus;
396 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
397 /* check irqstatus */
398 if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
399 dev_dbg(hdq_data->dev, "timeout waiting for"
400 "RXCOMPLETE, %x", status);
401 mutex_unlock(&hdq_data->hdq_mutex);
405 /* the data is ready. Read it in! */
406 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
407 mutex_unlock(&hdq_data->hdq_mutex);
413 /* Enable clocks and set the controller to HDQ mode */
413 /* Enable clocks and set the controller to HDQ mode */
/*
 * Acquire a reference on the HDQ module (max OMAP_HDQ_MAX_USER users).
 * The first user enables the interface and functional clocks, resets
 * the controller if needed, and programs HDQ mode. Paired with
 * omap_hdq_put().
 */
414 static int omap_hdq_get(struct hdq_data *hdq_data)
418 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
422 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
423 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
424 mutex_unlock(&hdq_data->hdq_mutex);
427 hdq_data->hdq_usecount++;
428 try_module_get(THIS_MODULE);
/* first user: bring up clocks and (re)initialize the controller */
429 if (1 == hdq_data->hdq_usecount) {
430 if (clk_enable(hdq_data->hdq_ick)) {
431 dev_dbg(hdq_data->dev, "Can not enable ick\n");
432 clk_put(hdq_data->hdq_ick);
433 clk_put(hdq_data->hdq_fck);
434 mutex_unlock(&hdq_data->hdq_mutex);
437 if (clk_enable(hdq_data->hdq_fck)) {
438 dev_dbg(hdq_data->dev, "Can not enable fck\n");
439 clk_put(hdq_data->hdq_ick);
440 clk_put(hdq_data->hdq_fck);
441 mutex_unlock(&hdq_data->hdq_mutex);
445 /* make sure HDQ is out of reset */
446 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
447 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
448 ret = _omap_hdq_reset(hdq_data);
/* on reset failure the use count gained above is rolled back */
450 /* back up the count */
451 hdq_data->hdq_usecount--;
453 /* select HDQ mode & enable clocks */
454 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
455 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
456 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
457 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
458 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
/* dummy read clears any pending interrupt status */
459 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
463 mutex_unlock(&hdq_data->hdq_mutex);
468 /* Disable clocks to the module */
468 /* Disable clocks to the module */
/*
 * Drop a reference taken by omap_hdq_get(); the last user disables both
 * the interface and functional clocks.
 */
469 static int omap_hdq_put(struct hdq_data *hdq_data)
473 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
477 if (0 == hdq_data->hdq_usecount) {
478 dev_dbg(hdq_data->dev, "attempt to decrement use count"
482 hdq_data->hdq_usecount--;
483 module_put(THIS_MODULE);
484 if (0 == hdq_data->hdq_usecount) {
485 clk_disable(hdq_data->hdq_ick);
486 clk_disable(hdq_data->hdq_fck);
489 mutex_unlock(&hdq_data->hdq_mutex);
494 /* Read a byte of data from the device */
494 /* Read a byte of data from the device */
/*
 * w1 read_byte callback: reads one byte via hdq_read_byte() and, when a
 * write/read transaction is in flight (init_trans set), ends the
 * transaction by clearing init_trans and releasing the module reference.
 */
495 static u8 omap_w1_read_byte(void *_hdq)
497 struct hdq_data *hdq_data = _hdq;
501 ret = hdq_read_byte(hdq_data, &val);
/* read failed: still tear down the in-flight transaction state */
503 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
505 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
508 hdq_data->init_trans = 0;
509 mutex_unlock(&hdq_data->hdq_mutex);
510 omap_hdq_put(hdq_data);
514 /* Write followed by a read, release the module */
515 if (hdq_data->init_trans) {
516 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
518 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
521 hdq_data->init_trans = 0;
522 mutex_unlock(&hdq_data->hdq_mutex);
523 omap_hdq_put(hdq_data);
529 /* Write a byte of data to the device */
529 /* Write a byte of data to the device */
/*
 * w1 write_byte callback. Per the HDQ protocol (CMD|REG address first,
 * then the data byte), the first write of a transaction acquires the
 * module (omap_hdq_get) and the second write completes it and releases
 * the module; init_trans tracks the position within the transaction.
 */
530 static void omap_w1_write_byte(void *_hdq, u8 byte)
532 struct hdq_data *hdq_data = _hdq;
536 /* First write to initialize the transfer */
537 if (hdq_data->init_trans == 0)
538 omap_hdq_get(hdq_data);
540 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
542 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
545 hdq_data->init_trans++;
546 mutex_unlock(&hdq_data->hdq_mutex);
/* NOTE(review): hdq_write_byte()'s return value is not checked here */
548 hdq_write_byte(hdq_data, byte, &status);
549 dev_dbg(hdq_data->dev, "Ctrl status %x\n", status);
551 /* Second write, data transferred. Release the module */
552 if (hdq_data->init_trans > 1) {
553 omap_hdq_put(hdq_data);
554 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
556 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
559 hdq_data->init_trans = 0;
560 mutex_unlock(&hdq_data->hdq_mutex);
/*
 * Probe: map the HDQ register window, get ick/fck clocks, read and log
 * the hardware revision, issue an initial break pulse, hook the IRQ,
 * and register this controller as a w1 bus master. Clocks are left
 * disabled until a user calls omap_hdq_get().
 * NOTE(review): several error-path labels/returns between the visible
 * cleanup calls are not shown in this extract.
 */
566 static int __init omap_hdq_probe(struct platform_device *pdev)
568 struct hdq_data *hdq_data;
569 struct resource *res;
576 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
578 dev_dbg(&pdev->dev, "unable to allocate memory\n");
583 hdq_data->dev = &pdev->dev;
584 platform_set_drvdata(pdev, hdq_data);
586 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
588 dev_dbg(&pdev->dev, "unable to get resource\n");
593 hdq_data->hdq_base = ioremap(res->start, SZ_4K);
594 if (!hdq_data->hdq_base) {
595 dev_dbg(&pdev->dev, "ioremap failed\n");
600 /* get interface & functional clock objects */
601 hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
602 hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");
604 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
605 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
606 if (IS_ERR(hdq_data->hdq_ick)) {
607 ret = PTR_ERR(hdq_data->hdq_ick);
/* ick was obtained but fck failed: release the ick reference */
610 if (IS_ERR(hdq_data->hdq_fck)) {
611 ret = PTR_ERR(hdq_data->hdq_fck);
612 clk_put(hdq_data->hdq_ick);
617 hdq_data->hdq_usecount = 0;
618 mutex_init(&hdq_data->hdq_mutex);
/* clocks needed temporarily to read the revision and issue a break */
620 if (clk_enable(hdq_data->hdq_ick)) {
621 dev_dbg(&pdev->dev, "Can not enable ick\n");
626 if (clk_enable(hdq_data->hdq_fck)) {
627 dev_dbg(&pdev->dev, "Can not enable fck\n");
632 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
633 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
634 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
636 spin_lock_init(&hdq_data->hdq_spinlock);
637 omap_hdq_break(hdq_data);
639 irq = platform_get_irq(pdev, 0);
645 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
647 dev_dbg(&pdev->dev, "could not request irq\n");
651 /* don't clock the HDQ until it is needed */
652 clk_disable(hdq_data->hdq_ick);
653 clk_disable(hdq_data->hdq_fck);
655 omap_w1_master.data = hdq_data;
657 ret = w1_add_master_device(&omap_w1_master);
659 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
/* unwind error paths: disable/put clocks, unmap, clear drvdata */
667 clk_disable(hdq_data->hdq_fck);
670 clk_disable(hdq_data->hdq_ick);
673 clk_put(hdq_data->hdq_ick);
674 clk_put(hdq_data->hdq_fck);
677 iounmap(hdq_data->hdq_base);
681 platform_set_drvdata(pdev, NULL);
/*
 * Remove: refuses (with a debug message) while the use count is
 * non-zero, then releases clocks, the IRQ, drvdata and the mapping.
 * NOTE(review): free_irq() uses the hard-coded INT_24XX_HDQ_IRQ rather
 * than the irq obtained from platform_get_irq() in probe — verify they
 * match on all supported platforms.
 */
689 static int omap_hdq_remove(struct platform_device *pdev)
691 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
693 mutex_lock(&hdq_data->hdq_mutex);
695 if (0 != hdq_data->hdq_usecount) {
696 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
700 mutex_unlock(&hdq_data->hdq_mutex);
702 /* remove module dependency */
703 clk_put(hdq_data->hdq_ick);
704 clk_put(hdq_data->hdq_fck);
705 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
706 platform_set_drvdata(pdev, NULL);
707 iounmap(hdq_data->hdq_base);
716 return platform_driver_register(&omap_hdq_driver);
722 platform_driver_unregister(&omap_hdq_driver);
725 module_init(omap_hdq_init);
726 module_exit(omap_hdq_exit);
728 module_param(w1_id, int, S_IRUSR);
730 MODULE_AUTHOR("Texas Instruments");
731 MODULE_DESCRIPTION("HDQ driver Library");
732 MODULE_LICENSE("GPL");