2 * drivers/w1/masters/omap_hdq.c
4 * Copyright (C) 2007 Texas Instruments, Inc.
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/err.h>
16 #include <linux/clk.h>
19 #include <mach/hardware.h>
22 #include "../w1_int.h"
24 #define MOD_NAME "OMAP_HDQ:"
26 #define OMAP_HDQ_REVISION 0x00
27 #define OMAP_HDQ_TX_DATA 0x04
28 #define OMAP_HDQ_RX_DATA 0x08
29 #define OMAP_HDQ_CTRL_STATUS 0x0c
30 #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
31 #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
32 #define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
33 #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
34 #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
35 #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
36 #define OMAP_HDQ_INT_STATUS 0x10
37 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
38 #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
39 #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
40 #define OMAP_HDQ_SYSCONFIG 0x14
41 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
42 #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
43 #define OMAP_HDQ_SYSSTATUS 0x18
44 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)
46 #define OMAP_HDQ_FLAG_CLEAR 0
47 #define OMAP_HDQ_FLAG_SET 1
48 #define OMAP_HDQ_TIMEOUT (HZ/5)
50 #define OMAP_HDQ_MAX_USER 4
52 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
57 void __iomem *hdq_base;
58 struct mutex hdq_mutex;
63 spinlock_t hdq_spinlock;
65 * Used to control the call to omap_hdq_get and omap_hdq_put.
66 * HDQ Protocol: Write the CMD|REG_address first, followed by
 * the data write or read.
72 static int omap_hdq_get(struct hdq_data *hdq_data);
73 static int omap_hdq_put(struct hdq_data *hdq_data);
74 static int omap_hdq_break(struct hdq_data *hdq_data);
76 static int __init omap_hdq_probe(struct platform_device *pdev);
77 static int omap_hdq_remove(struct platform_device *pdev);
79 static struct platform_driver omap_hdq_driver = {
80 .probe = omap_hdq_probe,
81 .remove = omap_hdq_remove,
89 static u8 omap_w1_read_byte(void *_hdq);
90 static void omap_w1_write_byte(void *_hdq, u8 byte);
91 static u8 omap_w1_reset_bus(void *_hdq);
92 static void omap_w1_search_bus(void *_hdq, u8 search_type,
93 w1_slave_found_callback slave_found);
96 static struct w1_bus_master omap_w1_master = {
97 .read_byte = omap_w1_read_byte,
98 .write_byte = omap_w1_write_byte,
99 .reset_bus = omap_w1_reset_bus,
100 .search = omap_w1_search_bus,
104 * HDQ register I/O routines
106 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
108 return __raw_readb(hdq_data->hdq_base + offset);
111 static inline u8 hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
113 __raw_writeb(val, hdq_data->hdq_base + offset);
118 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
121 u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
123 __raw_writeb(new_val, hdq_data->hdq_base + offset);
129 * Wait for one or more bits in flag change.
130 * HDQ_FLAG_SET: wait until any bit in the flag is set.
131 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
132 * return 0 on success and -ETIMEDOUT in the case of timeout.
134 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
135 u8 flag, u8 flag_set, u8 *status)
138 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
140 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
141 /* wait for the flag clear */
142 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
143 && time_before(jiffies, timeout)) {
144 set_current_state(TASK_UNINTERRUPTIBLE);
149 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
150 /* wait for the flag set */
151 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
152 && time_before(jiffies, timeout)) {
153 set_current_state(TASK_UNINTERRUPTIBLE);
156 if (!(*status & flag))
165 * write out a byte and fill *status with HDQ_INT_STATUS
168 hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
172 unsigned long irqflags;
176 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
177 /* clear interrupt flags via a dummy read */
178 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
179 /* ISR loads it with new INT_STATUS */
180 hdq_data->hdq_irqstatus = 0;
181 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
183 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
186 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
187 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
188 /* wait for the TXCOMPLETE bit */
189 ret = wait_event_interruptible_timeout(hdq_wait_queue,
190 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
192 dev_dbg(hdq_data->dev, "wait interrupted");
196 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
197 *status = hdq_data->hdq_irqstatus;
198 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
199 /* check irqstatus */
200 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
201 dev_dbg(hdq_data->dev, "timeout waiting for"
202 "TXCOMPLETE/RXCOMPLETE, %x", *status);
206 /* wait for the GO bit return to zero */
207 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
208 OMAP_HDQ_CTRL_STATUS_GO,
209 OMAP_HDQ_FLAG_CLEAR, &tmp_status);
211 dev_dbg(hdq_data->dev, "timeout waiting GO bit"
212 "return to zero, %x", tmp_status);
220 * HDQ Interrupt service routine.
222 static irqreturn_t hdq_isr(int irq, void *_hdq)
224 struct hdq_data *hdq_data = _hdq;
225 unsigned long irqflags;
227 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
228 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
229 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
230 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
232 if (hdq_data->hdq_irqstatus &
233 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
234 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
235 /* wake up sleeping process */
236 wake_up_interruptible(&hdq_wait_queue);
243 * HDQ Mode: always return success.
245 static u8 omap_w1_reset_bus(void *_hdq)
251 * W1 search callback function.
253 static void omap_w1_search_bus(void *_hdq, u8 search_type,
254 w1_slave_found_callback slave_found)
256 u64 module_id, rn_le, cs, id;
263 rn_le = cpu_to_le64(module_id);
265 * HDQ might not obey truly the 1-wire spec.
266 * So calculate CRC based on module parameter.
268 cs = w1_calc_crc8((u8 *)&rn_le, 7);
269 id = (cs << 56) | module_id;
271 slave_found(_hdq, id);
274 static int _omap_hdq_reset(struct hdq_data *hdq_data)
279 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
281 * Select HDQ mode & enable clocks.
282 * It is observed that INT flags can't be cleared via a read and GO/INIT
283 * won't return to zero if interrupt is disabled. So we always enable
286 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
287 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
288 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
290 /* wait for reset to complete */
291 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
292 OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
294 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
297 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
298 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
299 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
300 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
301 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
308 * Issue break pulse to the device.
311 omap_hdq_break(struct hdq_data *hdq_data)
315 unsigned long irqflags;
317 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
321 if (!hdq_data->hdq_usecount) {
322 mutex_unlock(&hdq_data->hdq_mutex);
326 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
327 /* clear interrupt flags via a dummy read */
328 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
329 /* ISR loads it with new INT_STATUS */
330 hdq_data->hdq_irqstatus = 0;
331 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
333 /* set the INIT and GO bit */
334 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
335 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
336 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
337 OMAP_HDQ_CTRL_STATUS_GO);
339 /* wait for the TIMEOUT bit */
340 ret = wait_event_interruptible_timeout(hdq_wait_queue,
341 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
343 dev_dbg(hdq_data->dev, "wait interrupted");
344 mutex_unlock(&hdq_data->hdq_mutex);
348 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
349 tmp_status = hdq_data->hdq_irqstatus;
350 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
351 /* check irqstatus */
352 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
353 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
355 mutex_unlock(&hdq_data->hdq_mutex);
359 * wait for both INIT and GO bits rerurn to zero.
360 * zero wait time expected for interrupt mode.
362 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
363 OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
364 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
367 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
368 "return to zero, %x", tmp_status);
370 mutex_unlock(&hdq_data->hdq_mutex);
374 static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
378 unsigned long irqflags;
380 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
384 if (!hdq_data->hdq_usecount) {
385 mutex_unlock(&hdq_data->hdq_mutex);
389 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
390 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
391 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
392 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
394 * The RX comes immediately after TX. It
395 * triggers another interrupt before we
396 * sleep. So we have to wait for RXCOMPLETE bit.
399 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
400 while (!(hdq_data->hdq_irqstatus
401 & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
402 && time_before(jiffies, timeout)) {
403 set_current_state(TASK_UNINTERRUPTIBLE);
407 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
408 OMAP_HDQ_CTRL_STATUS_DIR);
409 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
410 status = hdq_data->hdq_irqstatus;
411 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
412 /* check irqstatus */
413 if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
414 dev_dbg(hdq_data->dev, "timeout waiting for"
415 "RXCOMPLETE, %x", status);
416 mutex_unlock(&hdq_data->hdq_mutex);
420 /* the data is ready. Read it in! */
421 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
422 mutex_unlock(&hdq_data->hdq_mutex);
429 * Enable clocks and set the controller to HDQ mode.
432 omap_hdq_get(struct hdq_data *hdq_data)
436 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
440 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
441 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
442 mutex_unlock(&hdq_data->hdq_mutex);
445 hdq_data->hdq_usecount++;
446 try_module_get(THIS_MODULE);
447 if (1 == hdq_data->hdq_usecount) {
448 if (clk_enable(hdq_data->hdq_ick)) {
449 dev_dbg(hdq_data->dev, "Can not enable ick\n");
450 clk_put(hdq_data->hdq_ick);
451 clk_put(hdq_data->hdq_fck);
452 mutex_unlock(&hdq_data->hdq_mutex);
455 if (clk_enable(hdq_data->hdq_fck)) {
456 dev_dbg(hdq_data->dev, "Can not enable fck\n");
457 clk_put(hdq_data->hdq_ick);
458 clk_put(hdq_data->hdq_fck);
459 mutex_unlock(&hdq_data->hdq_mutex);
463 /* make sure HDQ is out of reset */
464 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
465 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
466 ret = _omap_hdq_reset(hdq_data);
468 /* back up the count */
469 hdq_data->hdq_usecount--;
471 /* select HDQ mode & enable clocks */
472 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
473 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
474 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
475 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
476 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
477 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
481 mutex_unlock(&hdq_data->hdq_mutex);
486 * Disable clocks to the module.
489 omap_hdq_put(struct hdq_data *hdq_data)
493 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
497 if (0 == hdq_data->hdq_usecount) {
498 dev_dbg(hdq_data->dev, "attempt to decrement use count"
502 hdq_data->hdq_usecount--;
503 module_put(THIS_MODULE);
504 if (0 == hdq_data->hdq_usecount) {
505 clk_disable(hdq_data->hdq_ick);
506 clk_disable(hdq_data->hdq_fck);
509 mutex_unlock(&hdq_data->hdq_mutex);
514 * Read a byte of data from the device.
516 static u8 omap_w1_read_byte(void *_hdq)
518 struct hdq_data *hdq_data = _hdq;
522 ret = hdq_read_byte(hdq_data, &val);
524 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
526 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
529 hdq_data->init_trans = 0;
530 mutex_unlock(&hdq_data->hdq_mutex);
531 omap_hdq_put(hdq_data);
535 /* Write followed by a read, release the module */
536 if (hdq_data->init_trans) {
537 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
539 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
542 hdq_data->init_trans = 0;
543 mutex_unlock(&hdq_data->hdq_mutex);
544 omap_hdq_put(hdq_data);
551 * Write a byte of data to the device.
553 static void omap_w1_write_byte(void *_hdq, u8 byte)
555 struct hdq_data *hdq_data = _hdq;
559 /* First write to initialize the transfer */
560 if (hdq_data->init_trans == 0)
561 omap_hdq_get(hdq_data);
563 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
565 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
568 hdq_data->init_trans++;
569 mutex_unlock(&hdq_data->hdq_mutex);
571 hdq_write_byte(hdq_data, byte, &status);
572 dev_dbg(hdq_data->dev, "Ctrl status %x\n", status);
574 /* Second write, data transfered. Release the module */
575 if (hdq_data->init_trans > 1) {
576 omap_hdq_put(hdq_data);
577 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
579 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
582 hdq_data->init_trans = 0;
583 mutex_unlock(&hdq_data->hdq_mutex);
589 static int __init omap_hdq_probe(struct platform_device *pdev)
591 struct hdq_data *hdq_data;
592 struct resource *res;
599 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
601 dev_dbg(&pdev->dev, "unable to allocate memory\n");
606 hdq_data->dev = &pdev->dev;
607 platform_set_drvdata(pdev, hdq_data);
609 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
611 dev_dbg(&pdev->dev, "unable to get resource\n");
616 hdq_data->hdq_base = ioremap(res->start, SZ_4K);
617 if (!hdq_data->hdq_base) {
618 dev_dbg(&pdev->dev, "ioremap failed\n");
623 /* get interface & functional clock objects */
624 hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
625 hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");
627 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
628 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
629 if (IS_ERR(hdq_data->hdq_ick)) {
630 ret = PTR_ERR(hdq_data->hdq_ick);
633 if (IS_ERR(hdq_data->hdq_fck)) {
634 ret = PTR_ERR(hdq_data->hdq_fck);
635 clk_put(hdq_data->hdq_ick);
640 hdq_data->hdq_usecount = 0;
641 mutex_init(&hdq_data->hdq_mutex);
643 if (clk_enable(hdq_data->hdq_ick)) {
644 dev_dbg(&pdev->dev, "Can not enable ick\n");
649 if (clk_enable(hdq_data->hdq_fck)) {
650 dev_dbg(&pdev->dev, "Can not enable fck\n");
655 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
656 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
657 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
659 spin_lock_init(&hdq_data->hdq_spinlock);
660 omap_hdq_break(hdq_data);
662 irq = platform_get_irq(pdev, 0);
668 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
670 dev_dbg(&pdev->dev, "could not request irq\n");
674 /* don't clock the HDQ until it is needed */
675 clk_disable(hdq_data->hdq_ick);
676 clk_disable(hdq_data->hdq_fck);
678 omap_w1_master.data = hdq_data;
680 ret = w1_add_master_device(&omap_w1_master);
682 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
690 clk_disable(hdq_data->hdq_fck);
693 clk_disable(hdq_data->hdq_ick);
696 clk_put(hdq_data->hdq_ick);
697 clk_put(hdq_data->hdq_fck);
700 iounmap(hdq_data->hdq_base);
704 platform_set_drvdata(pdev, NULL);
712 static int omap_hdq_remove(struct platform_device *pdev)
714 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
716 mutex_lock(&hdq_data->hdq_mutex);
718 if (0 != hdq_data->hdq_usecount) {
719 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
723 mutex_unlock(&hdq_data->hdq_mutex);
725 /* remove module dependency */
726 clk_put(hdq_data->hdq_ick);
727 clk_put(hdq_data->hdq_fck);
728 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
729 platform_set_drvdata(pdev, NULL);
730 iounmap(hdq_data->hdq_base);
739 return platform_driver_register(&omap_hdq_driver);
745 platform_driver_unregister(&omap_hdq_driver);
748 module_init(omap_hdq_init);
749 module_exit(omap_hdq_exit);
751 module_param(w1_id, int, S_IRUSR);
753 MODULE_AUTHOR("Texas Instruments");
754 MODULE_DESCRIPTION("HDQ driver Library");
755 MODULE_LICENSE("GPL");