/* drivers/sh/maple/maple.c */
1 /*
2  * Core maple bus functionality
3  *
4  *  Copyright (C) 2007 Adrian McMenamin
5  *
6  * Based on 2.4 code by:
7  *
8  *  Copyright (C) 2000-2001 YAEGASHI Takeshi
9  *  Copyright (C) 2001 M. R. Brown
10  *  Copyright (C) 2001 Paul Mundt
11  *
12  * and others.
13  *
14  * This file is subject to the terms and conditions of the GNU General Public
15  * License.  See the file "COPYING" in the main directory of this archive
16  * for more details.
17  */
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/device.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/list.h>
24 #include <linux/io.h>
25 #include <linux/slab.h>
26 #include <linux/maple.h>
27 #include <linux/dma-mapping.h>
28 #include <asm/cacheflush.h>
29 #include <asm/dma.h>
30 #include <asm/io.h>
31 #include <asm/mach/dma.h>
32 #include <asm/mach/sysasic.h>
33 #include <asm/mach/maple.h>
34
35 MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin");
36 MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
37 MODULE_LICENSE("GPL v2");
38 MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
39
/* bottom halves for DMA-complete and VBLANK interrupts */
static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

/* packets waiting to be sent, and packets currently out on the bus */
static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* serialises additions to maple_waitq */
static DEFINE_MUTEX(maple_list_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
/* per-port bitmap of currently-known subunits */
static int subdevice_map[MAPLE_PORTS];
/* DMA command buffer: base, current write position, last frame header */
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
/* next time a plug'n'play (DEVINFO) scan is due, in jiffies */
static unsigned long maple_pnp_time;
static int started, scanning, liststatus;
static struct kmem_cache *maple_queue_cache;

/* identifies a device by its port/unit position on the bus */
struct maple_device_specify {
        int port;
        int unit;
};
63
64 /**
65  *  maple_driver_register - register a device driver
66  *  automatically makes the driver bus a maple bus
67  *  @drv: the driver to be registered
68  */
69 int maple_driver_register(struct device_driver *drv)
70 {
71         if (!drv)
72                 return -EINVAL;
73         drv->bus = &maple_bus_type;
74         return driver_register(drv);
75 }
76
77 EXPORT_SYMBOL_GPL(maple_driver_register);
78
/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
        /* reset the controller before reprogramming it */
        ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
        /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
        ctrl_outl(1, MAPLE_TRIGTYPE);
        ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
        /* hardware needs the physical address of the command buffer */
        ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
        ctrl_outl(1, MAPLE_ENABLE);
}
89
90 /**
91  * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
92  * @dev: device responding
93  * @callback: handler callback
94  * @interval: interval in jiffies between callbacks
95  * @function: the function code for the device
96  */
97 void maple_getcond_callback(struct maple_device *dev,
98                             void (*callback) (struct mapleq * mq),
99                             unsigned long interval, unsigned long function)
100 {
101         dev->callback = callback;
102         dev->interval = interval;
103         dev->function = cpu_to_be32(function);
104         dev->when = jiffies;
105 }
106
107 EXPORT_SYMBOL_GPL(maple_getcond_callback);
108
109 static int maple_dma_done(void)
110 {
111         return (ctrl_inl(MAPLE_STATE) & 1) == 0;
112 }
113
114 static void maple_release_device(struct device *dev)
115 {
116         if (dev->type) {
117                 kfree(dev->type->name);
118                 kfree(dev->type);
119         }
120 }
121
/**
 * maple_add_packet - add a single instruction to the queue
 * @mq: instruction to add to waiting queue
 */
void maple_add_packet(struct mapleq *mq)
{
        /* the mutex guards maple_waitq against concurrent producers */
        mutex_lock(&maple_list_lock);
        list_add(&mq->list, &maple_waitq);
        mutex_unlock(&maple_list_lock);
}

EXPORT_SYMBOL_GPL(maple_add_packet);
134
135 static struct mapleq *maple_allocq(struct maple_device *dev)
136 {
137         struct mapleq *mq;
138
139         mq = kmalloc(sizeof(*mq), GFP_KERNEL);
140         if (!mq)
141                 return NULL;
142
143         mq->dev = dev;
144         mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
145         mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
146         if (!mq->recvbuf) {
147                 kfree(mq);
148                 return NULL;
149         }
150
151         return mq;
152 }
153
154 static struct maple_device *maple_alloc_dev(int port, int unit)
155 {
156         struct maple_device *dev;
157
158         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
159         if (!dev)
160                 return NULL;
161
162         dev->port = port;
163         dev->unit = unit;
164         dev->mq = maple_allocq(dev);
165
166         if (!dev->mq) {
167                 kfree(dev);
168                 return NULL;
169         }
170
171         return dev;
172 }
173
174 static void maple_free_dev(struct maple_device *mdev)
175 {
176         if (!mdev)
177                 return;
178         if (mdev->mq) {
179                 kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp);
180                 kfree(mdev->mq);
181         }
182         kfree(mdev);
183 }
184
/* process the command queue into a maple command block
 * terminating command has bit 32 of first long set to 0
 */
static void maple_build_block(struct mapleq *mq)
{
        int port, unit, from, to, len;
        unsigned long *lsendbuf = mq->sendbuf;

        port = mq->dev->port & 3;
        unit = mq->dev->unit;
        len = mq->length;
        from = port << 6;
        /* unit 0 addresses the port itself (0x20); subunits use bits 0-4 */
        to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

        /* clear the "last entry" flag on the previously written header */
        *maple_lastptr &= 0x7fffffff;
        maple_lastptr = maple_sendptr;

        /* transfer descriptor: length, port, and "last entry" marker */
        *maple_sendptr++ = (port << 16) | len | 0x80000000;
        /* physical address the hardware writes the response into */
        *maple_sendptr++ = PHYSADDR(mq->recvbuf);
        /* maple frame header: command, recipient, sender, payload length */
        *maple_sendptr++ =
            mq->command | (to << 8) | (from << 16) | (len << 24);

        /* copy the payload (len longwords) */
        while (len-- > 0)
                *maple_sendptr++ = *lsendbuf++;
}
210
211 /* build up command queue */
212 static void maple_send(void)
213 {
214         int i;
215         int maple_packets;
216         struct mapleq *mq, *nmq;
217
218         if (!list_empty(&maple_sentq))
219                 return;
220         if (list_empty(&maple_waitq) || !maple_dma_done())
221                 return;
222         maple_packets = 0;
223         maple_sendptr = maple_lastptr = maple_sendbuf;
224         list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
225                 maple_build_block(mq);
226                 list_move(&mq->list, &maple_sentq);
227                 if (maple_packets++ > MAPLE_MAXPACKETS)
228                         break;
229         }
230         if (maple_packets > 0) {
231                 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
232                         dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
233                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
234         }
235 }
236
237 static int attach_matching_maple_driver(struct device_driver *driver,
238                                         void *devptr)
239 {
240         struct maple_driver *maple_drv;
241         struct maple_device *mdev;
242
243         mdev = devptr;
244         maple_drv = to_maple_driver(driver);
245         if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
246                 if (maple_drv->connect(mdev) == 0) {
247                         mdev->driver = maple_drv;
248                         return 1;
249                 }
250         }
251         return 0;
252 }
253
254 static void maple_detach_driver(struct maple_device *mdev)
255 {
256         if (!mdev)
257                 return;
258         if (mdev->driver) {
259                 if (mdev->driver->disconnect)
260                         mdev->driver->disconnect(mdev);
261         }
262         mdev->driver = NULL;
263         if (mdev->registered) {
264                 maple_release_device(&mdev->dev);
265                 device_unregister(&mdev->dev);
266         }
267         mdev->registered = 0;
268         maple_free_dev(mdev);
269 }
270
271 /* process initial MAPLE_COMMAND_DEVINFO for each device or port */
272 static void maple_attach_driver(struct maple_device *dev)
273 {
274         char *p;
275
276         char *recvbuf;
277         unsigned long function;
278         int matched, retval;
279
280         recvbuf = dev->mq->recvbuf;
281         memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo));
282         memcpy(dev->product_name, dev->devinfo.product_name, 30);
283         memcpy(dev->product_licence, dev->devinfo.product_licence, 60);
284         dev->product_name[30] = '\0';
285         dev->product_licence[60] = '\0';
286
287         for (p = dev->product_name + 29; dev->product_name <= p; p--)
288                 if (*p == ' ')
289                         *p = '\0';
290                 else
291                         break;
292
293         for (p = dev->product_licence + 59; dev->product_licence <= p; p--)
294                 if (*p == ' ')
295                         *p = '\0';
296                 else
297                         break;
298
299         function = be32_to_cpu(dev->devinfo.function);
300
301         if (function > 0x200) {
302                 /* Do this silently - as not a real device */
303                 function = 0;
304                 dev->driver = &maple_dummy_driver;
305                 sprintf(dev->dev.bus_id, "%d:0.port", dev->port);
306         } else {
307                 printk(KERN_INFO
308                        "Maple bus at (%d, %d): Connected function 0x%lX\n",
309                        dev->port, dev->unit, function);
310
311                 matched =
312                     bus_for_each_drv(&maple_bus_type, NULL, dev,
313                                      attach_matching_maple_driver);
314
315                 if (matched == 0) {
316                         /* Driver does not exist yet */
317                         printk(KERN_INFO
318                                "No maple driver found for this device\n");
319                         dev->driver = &maple_dummy_driver;
320                 }
321
322                 sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port,
323                         dev->unit, function);
324         }
325         dev->function = function;
326         dev->dev.bus = &maple_bus_type;
327         dev->dev.parent = &maple_bus;
328         dev->dev.release = &maple_release_device;
329         retval = device_register(&dev->dev);
330         if (retval) {
331                 printk(KERN_INFO
332                        "Maple bus: Attempt to register device (%x, %x) failed.\n",
333                        dev->port, dev->unit);
334                 maple_free_dev(dev);
335         }
336         dev->registered = 1;
337 }
338
339 /*
340  * if device has been registered for the given
341  * port and unit then return 1 - allows identification
342  * of which devices need to be attached or detached
343  */
344 static int detach_maple_device(struct device *device, void *portptr)
345 {
346         struct maple_device_specify *ds;
347         struct maple_device *mdev;
348
349         ds = portptr;
350         mdev = to_maple_dev(device);
351         if (mdev->port == ds->port && mdev->unit == ds->unit)
352                 return 1;
353         return 0;
354 }
355
356 static int setup_maple_commands(struct device *device, void *ignored)
357 {
358         struct maple_device *maple_dev = to_maple_dev(device);
359
360         if ((maple_dev->interval > 0)
361             && time_after(jiffies, maple_dev->when)) {
362                 maple_dev->when = jiffies + maple_dev->interval;
363                 maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
364                 maple_dev->mq->sendbuf = &maple_dev->function;
365                 maple_dev->mq->length = 1;
366                 maple_add_packet(maple_dev->mq);
367                 liststatus++;
368         } else {
369                 if (time_after(jiffies, maple_pnp_time)) {
370                         maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
371                         maple_dev->mq->length = 0;
372                         maple_add_packet(maple_dev->mq);
373                         liststatus++;
374                 }
375         }
376
377         return 0;
378 }
379
380 /* VBLANK bottom half - implemented via workqueue */
381 static void maple_vblank_handler(struct work_struct *work)
382 {
383         if (!maple_dma_done())
384                 return;
385         if (!list_empty(&maple_sentq))
386                 return;
387         ctrl_outl(0, MAPLE_ENABLE);
388         liststatus = 0;
389         bus_for_each_dev(&maple_bus_type, NULL, NULL,
390                          setup_maple_commands);
391         if (time_after(jiffies, maple_pnp_time))
392                 maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
393         if (liststatus && list_empty(&maple_sentq)) {
394                 INIT_LIST_HEAD(&maple_sentq);
395                 maple_send();
396         }
397         maplebus_dma_reset();
398 }
399
400 /* handle devices added via hotplugs - placing them on queue for DEVINFO*/
401 static void maple_map_subunits(struct maple_device *mdev, int submask)
402 {
403         int retval, k, devcheck;
404         struct maple_device *mdev_add;
405         struct maple_device_specify ds;
406
407         for (k = 0; k < 5; k++) {
408                 ds.port = mdev->port;
409                 ds.unit = k + 1;
410                 retval =
411                     bus_for_each_dev(&maple_bus_type, NULL, &ds,
412                                      detach_maple_device);
413                 if (retval) {
414                         submask = submask >> 1;
415                         continue;
416                 }
417                 devcheck = submask & 0x01;
418                 if (devcheck) {
419                         mdev_add = maple_alloc_dev(mdev->port, k + 1);
420                         if (!mdev_add)
421                                 return;
422                         mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
423                         mdev_add->mq->length = 0;
424                         maple_add_packet(mdev_add->mq);
425                         scanning = 1;
426                 }
427                 submask = submask >> 1;
428         }
429 }
430
431 /* mark a device as removed */
432 static void maple_clean_submap(struct maple_device *mdev)
433 {
434         int killbit;
435
436         killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
437         killbit = ~killbit;
438         killbit &= 0xFF;
439         subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
440 }
441
442 /* handle empty port or hotplug removal */
443 static void maple_response_none(struct maple_device *mdev,
444                                 struct mapleq *mq)
445 {
446         if (mdev->unit != 0) {
447                 list_del(&mq->list);
448                 maple_clean_submap(mdev);
449                 printk(KERN_INFO
450                        "Maple bus device detaching at (%d, %d)\n",
451                        mdev->port, mdev->unit);
452                 maple_detach_driver(mdev);
453                 return;
454         }
455         if (!started) {
456                 printk(KERN_INFO "No maple devices attached to port %d\n",
457                        mdev->port);
458                 return;
459         }
460         maple_clean_submap(mdev);
461 }
462
463 /* preprocess hotplugs or scans */
464 static void maple_response_devinfo(struct maple_device *mdev,
465                                    char *recvbuf)
466 {
467         char submask;
468         if ((!started) || (scanning == 2)) {
469                 maple_attach_driver(mdev);
470                 return;
471         }
472         if (mdev->unit == 0) {
473                 submask = recvbuf[2] & 0x1F;
474                 if (submask ^ subdevice_map[mdev->port]) {
475                         maple_map_subunits(mdev, submask);
476                         subdevice_map[mdev->port] = submask;
477                 }
478         }
479 }
480
/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
        struct mapleq *mq, *nmq;
        struct maple_device *dev;
        char *recvbuf;
        enum maple_code code;

        if (!maple_dma_done())
                return;
        /* stop the controller while the responses are processed */
        ctrl_outl(0, MAPLE_ENABLE);
        if (!list_empty(&maple_sentq)) {
                list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                        /* first byte of the receive buffer is the response code */
                        recvbuf = mq->recvbuf;
                        code = recvbuf[0];
                        dev = mq->dev;
                        switch (code) {
                        case MAPLE_RESPONSE_NONE:
                                /* empty port or hotplug removal */
                                maple_response_none(dev, mq);
                                break;

                        case MAPLE_RESPONSE_DEVINFO:
                                /* identity data - attach or rescan */
                                maple_response_devinfo(dev, recvbuf);
                                break;

                        case MAPLE_RESPONSE_DATATRF:
                                /* condition data - hand to the driver's callback */
                                if (dev->callback)
                                        dev->callback(mq);
                                break;

                        case MAPLE_RESPONSE_FILEERR:
                        case MAPLE_RESPONSE_AGAIN:
                        case MAPLE_RESPONSE_BADCMD:
                        case MAPLE_RESPONSE_BADFUNC:
                                printk(KERN_DEBUG
                                       "Maple non-fatal error 0x%X\n",
                                       code);
                                break;

                        case MAPLE_RESPONSE_ALLINFO:
                                printk(KERN_DEBUG
                                       "Maple - extended device information not supported\n");
                                break;

                        case MAPLE_RESPONSE_OK:
                                break;

                        default:
                                break;
                        }
                }
                /* everything processed - reset the sent queue */
                INIT_LIST_HEAD(&maple_sentq);
                /* a hotplug scan takes two rounds: send DEVINFO, then attach */
                if (scanning == 1) {
                        maple_send();
                        scanning = 2;
                } else
                        scanning = 0;

                if (started == 0)
                        started = 1;
        }
        maplebus_dma_reset();
}
544
/* DMA-complete top half: defer all work to the workqueue */
static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
        /* Load everything into the bottom half */
        schedule_work(&maple_dma_process);
        return IRQ_HANDLED;
}
551
/* VBLANK top half: defer all work to the workqueue */
static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
        schedule_work(&maple_vblank_process);
        return IRQ_HANDLED;
}
557
/* irqaction descriptors for the two bus interrupts */
static struct irqaction maple_dma_irq = {
        .name = "maple bus DMA handler",
        .handler = maplebus_dma_interrupt,
        .flags = IRQF_SHARED,
};

static struct irqaction maple_vblank_irq = {
        .name = "maple bus VBLANK handler",
        .handler = maplebus_vblank_interrupt,
        .flags = IRQF_SHARED,
};
569
/* install the DMA-complete interrupt; returns setup_irq()'s status */
static int maple_set_dma_interrupt_handler(void)
{
        return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq);
}
574
/* install the VBLANK interrupt; returns setup_irq()'s status */
static int maple_set_vblank_interrupt_handler(void)
{
        return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq);
}
579
580 static int maple_get_dma_buffer(void)
581 {
582         maple_sendbuf =
583             (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
584                                       MAPLE_DMA_PAGES);
585         if (!maple_sendbuf)
586                 return -ENOMEM;
587         return 0;
588 }
589
590 static int match_maple_bus_driver(struct device *devptr,
591                                   struct device_driver *drvptr)
592 {
593         struct maple_driver *maple_drv;
594         struct maple_device *maple_dev;
595
596         maple_drv = container_of(drvptr, struct maple_driver, drv);
597         maple_dev = container_of(devptr, struct maple_device, dev);
598         /* Trap empty port case */
599         if (maple_dev->devinfo.function == 0xFFFFFFFF)
600                 return 0;
601         else if (maple_dev->devinfo.function &
602                  be32_to_cpu(maple_drv->function))
603                 return 1;
604         return 0;
605 }
606
/* uevent hook - no environment variables are added for maple devices */
static int maple_bus_uevent(struct device *dev,
                            struct kobj_uevent_env *env)
{
        return 0;
}
612
/* release hook for the statically allocated bus device - nothing to free */
static void maple_bus_release(struct device *dev)
{
}
616
/* fallback driver bound to devices no real driver claims */
static struct maple_driver maple_dummy_driver = {
        .drv = {
                .name = "maple_dummy_driver",
                .bus = &maple_bus_type,
                },
};

struct bus_type maple_bus_type = {
        .name = "maple",
        .match = match_maple_bus_driver,
        .uevent = maple_bus_uevent,
};

EXPORT_SYMBOL_GPL(maple_bus_type);

/* root device all maple devices are parented to */
static struct device maple_bus = {
        .bus_id = "maple",
        .release = maple_bus_release,
};
636
637 static int __init maple_bus_init(void)
638 {
639         int retval, i;
640         struct maple_device *mdev[MAPLE_PORTS];
641         ctrl_outl(0, MAPLE_STATE);
642
643         retval = device_register(&maple_bus);
644         if (retval)
645                 goto cleanup;
646
647         retval = bus_register(&maple_bus_type);
648         if (retval)
649                 goto cleanup_device;
650
651         retval = driver_register(&maple_dummy_driver.drv);
652
653         if (retval)
654                 goto cleanup_bus;
655
656         /* allocate memory for maple bus dma */
657         retval = maple_get_dma_buffer();
658         if (retval) {
659                 printk(KERN_INFO
660                        "Maple bus: Failed to allocate Maple DMA buffers\n");
661                 goto cleanup_basic;
662         }
663
664         /* set up DMA interrupt handler */
665         retval = maple_set_dma_interrupt_handler();
666         if (retval) {
667                 printk(KERN_INFO
668                        "Maple bus: Failed to grab maple DMA IRQ\n");
669                 goto cleanup_dma;
670         }
671
672         /* set up VBLANK interrupt handler */
673         retval = maple_set_vblank_interrupt_handler();
674         if (retval) {
675                 printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
676                 goto cleanup_irq;
677         }
678
679         maple_queue_cache =
680             kmem_cache_create("maple_queue_cache", 0x400, 0,
681                               SLAB_HWCACHE_ALIGN, NULL);
682
683         if (!maple_queue_cache)
684                 goto cleanup_bothirqs;
685
686         /* setup maple ports */
687         for (i = 0; i < MAPLE_PORTS; i++) {
688                 mdev[i] = maple_alloc_dev(i, 0);
689                 if (!mdev[i]) {
690                         while (i-- > 0)
691                                 maple_free_dev(mdev[i]);
692                         goto cleanup_cache;
693                 }
694                 mdev[i]->registered = 0;
695                 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
696                 mdev[i]->mq->length = 0;
697                 maple_attach_driver(mdev[i]);
698                 maple_add_packet(mdev[i]->mq);
699                 subdevice_map[i] = 0;
700         }
701
702         /* setup maplebus hardware */
703         maplebus_dma_reset();
704
705         /* initial detection */
706         maple_send();
707
708         maple_pnp_time = jiffies;
709
710         printk(KERN_INFO "Maple bus core now registered.\n");
711
712         return 0;
713
714       cleanup_cache:
715         kmem_cache_destroy(maple_queue_cache);
716
717       cleanup_bothirqs:
718         free_irq(HW_EVENT_VSYNC, 0);
719
720       cleanup_irq:
721         free_irq(HW_EVENT_MAPLE_DMA, 0);
722
723       cleanup_dma:
724         free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
725
726       cleanup_basic:
727         driver_unregister(&maple_dummy_driver.drv);
728
729       cleanup_bus:
730         bus_unregister(&maple_bus_type);
731
732       cleanup_device:
733         device_unregister(&maple_bus);
734
735       cleanup:
736         printk(KERN_INFO "Maple bus registration failed\n");
737         return retval;
738 }
739
740 subsys_initcall(maple_bus_init);