/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007 - 2009 Adrian McMenamin
 * Copyright (C) 2001 - 2008 Paul Mundt
 * Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed-in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

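/*
 * Example usage (an illustrative sketch, not code from this file): a
 * client driver registers itself by function code, typically from its
 * module init path. The example_* names are hypothetical; the shape
 * follows existing maple client drivers:
 *
 *	static struct maple_driver example_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.drv = {
 *			.name	= "example_maple_driver",
 *			.probe	= example_probe,
 *			.remove	= example_remove,
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return maple_driver_register(&example_driver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		maple_driver_unregister(&example_driver);
 *	}
 */
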
/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	/*
	 * Maple system register
	 * bits 31 - 16	timeout in units of 20nsec
	 * bit 12	hard trigger - set 0 to keep responding to VBLANK
	 * bits 9 - 8	set 00 for 2 Mbps, 01 for 1 Mbps
	 * bits 3 - 0	delay (in 1.3ms) between VBLANK and start of DMA
	 * max delay is 11
	 */
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

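/*
 * Worked example of the MAPLE_SPEED value above, assuming MAPLE_TIMEOUT(x)
 * places x in bits 31 - 16 as described: MAPLE_TIMEOUT(0xFFFF) requests a
 * timeout of 0xFFFF * 20 ns, roughly 1.3 ms; MAPLE_2MBPS leaves bits 9 - 8
 * as 00 (2 Mbps); and the VBLANK-to-DMA delay in bits 3 - 0 stays 0.
 */
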
/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);

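/*
 * Example usage (an illustrative sketch): a polling input driver installs
 * its handler at probe time so that a MAPLE_COMMAND_GETCOND frame is
 * queued for the device once per interval and answered through the
 * callback. example_callback and the HZ/20 interval are assumptions:
 *
 *	static void example_callback(struct mapleq *mq)
 *	{
 *		unsigned short *res = mq->recvbuf->buf;
 *		(decode the condition data in res here)
 *	}
 *
 *	maple_getcond_callback(mdev, example_callback, HZ/20,
 *		MAPLE_FUNC_CONTROLLER);
 */
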
static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	kmem_cache_free(maple_queue_cache, mq->recvbuf);
	kfree(mq);
	kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	if (length) {
		sendbuf = kzalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add_tail(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

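/*
 * Example usage (a sketch modelled on the VMU flash driver): queue a
 * single block read, then sleep until the DMA bottom half clears the
 * busy flag. block is a hypothetical caller-supplied block number:
 *
 *	__be32 param = cpu_to_be32(block);
 *	int error;
 *
 *	atomic_set(&mdev->busy, 1);
 *	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
 *		MAPLE_COMMAND_BREAD, 2, &param);
 *	if (!error)
 *		wait_event_interruptible_timeout(mdev->maple_wait,
 *			atomic_read(&mdev->busy) == 0, HZ);
 */
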
static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	INIT_LIST_HEAD(&mq->list);
	mq->dev = mdev;
	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbuf)
		goto failed_p2;
	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
		mdev->port, mdev->unit);
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	/* zero this out to avoid the kobj subsystem
	 * thinking it has already been registered */
	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;

	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	init_waitqueue_head(&mdev->maple_wait);
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
	kfree(mdev->mq);
	kfree(mdev);
}

/* process the command queue into a maple command block;
 * only the last frame in the block keeps the top bit (bit 31)
 * of its first long set
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

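/*
 * For reference, each frame written above occupies three control words
 * plus data: word 0 carries the port, the transfer length and, in bit 31,
 * the end-of-block flag; word 1 is the physical address of the receive
 * buffer; word 2 is the maple frame header proper that goes out on the
 * wire (command in the low byte, then recipient, sender and length).
 */
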
/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!maple_dma_done())
		return;

	/* disable DMA */
	ctrl_outl(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq)) {
		mutex_unlock(&maple_wlist_lock);
		goto finish;
	}

	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;

	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_del_init(&mq->list);
		list_add_tail(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

finish:
	maple_dma_reset();
}

/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, error;

	recvbuf = mdev->mq->recvbuf->buf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	function = be32_to_cpu(mdev->devinfo.function);

	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
		mdev->product_name, function, mdev->port, mdev->unit);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_unsupported_device;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		matched =
		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
			maple_check_matching_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			dev_info(&mdev->dev, "no driver found\n");
			mdev->driver = &maple_unsupported_device;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}

	mdev->function = function;
	mdev->dev.release = &maple_release_device;

	atomic_set(&mdev->busy, 0);
	error = device_register(&mdev->dev);
	if (error) {
		dev_warn(&mdev->dev, "could not register device at"
			" (%d, %d), with error 0x%X\n", mdev->port,
			mdev->unit, error);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if a device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int check_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *mdev = to_maple_dev(device);

	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
	    time_after(jiffies, mdev->when)) {
		/* bounce if we cannot add */
		add = maple_add_packet(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			mdev->when = jiffies + mdev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* Ensure we don't have block reads and devinfo
			 * calls interfering with one another - so flag the
			 * device as busy */
			if (atomic_read(&mdev->busy) == 0) {
				atomic_set(&mdev->busy, 1);
				maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
			}
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	int x, locking;
	struct maple_device *mdev;

	if (!maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	/*
	 * Set up essential commands - to fetch data and
	 * check devices are still present
	 */
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
		setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time)) {
		/*
		 * Scan the empty ports - the bus is flaky and may have
		 * mis-reported emptiness
		 */
		for (x = 0; x < MAPLE_PORTS; x++) {
			if (checked[x] && empty[x]) {
				mdev = baseunits[x];
				if (!mdev)
					break;
				atomic_set(&mdev->busy, 1);
				locking = maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
				if (!locking)
					break;
			}
		}

		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	}

finish:
	maple_send();
}

/* handle devices added via hotplug - placing them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
			check_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			atomic_set(&mdev_add->busy, 1);
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

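/*
 * Worked example of the mask arithmetic above: removing unit 3 gives
 * killbit = (1 << 2) & 0x1f = 0x04; after inversion and masking with 0xFF
 * killbit is 0xFB, so only bit 2 is cleared from the port's subdevice_map.
 * For unit 0 the mask clears bit 5 (0x20) instead.
 */
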
/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
	maple_clean_submap(mdev);

	if (likely(mdev->unit != 0)) {
		/*
		 * Block devices play up and give the impression they
		 * have been removed even when still in place, or trip
		 * the mtd layer when they have really gone - this code
		 * traps that eventuality and ensures we aren't
		 * overloaded with useless error messages
		 */
		if (mdev->can_unload) {
			if (!mdev->can_unload(mdev)) {
				atomic_set(&mdev->busy, 2);
				wake_up(&mdev->maple_wait);
				return;
			}
		}

		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
			mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			empty[mdev->port] = true;
			dev_info(&mdev->dev, "no devices"
				" attached to port %d\n", mdev->port);
		}
		return;
	}

	/* Some hardware devices generate false detach messages on unit 0 */
	atomic_set(&mdev->busy, 0);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
			if (mdev->unit == 0) {
				empty[mdev->port] = false;
				maple_attach_driver(mdev);
			}
		}
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
	if (mdev->fileerr_handler) {
		mdev->fileerr_handler(mdev, recvbuf);
		return;
	} else
		dev_warn(&mdev->dev, "device at (%d, %d) reports"
			" file error 0x%X\n", mdev->port, mdev->unit,
			((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *mdev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			mdev = mq->dev;
			recvbuf = mq->recvbuf->buf;
			dma_cache_sync(&mdev->dev, recvbuf, 0x400,
				DMA_FROM_DEVICE);
			code = recvbuf[0];
			kfree(mq->sendbuf);
			list_del_init(&mq->list);
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(mdev);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (mdev->callback)
					mdev->callback(mq);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_FILEERR:
				maple_response_fileerr(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				dev_warn(&mdev->dev, "non-fatal error"
					" 0x%X at (%d, %d)\n", code,
					mdev->port, mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				dev_notice(&mdev->dev, "extended"
					" device information request for (%d, %d)"
					" but call is not supported\n", mdev->port,
					mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_OK:
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		started = 1;
	}
	maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int maple_match_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
	.drv = {
		.name = "maple_unsupported_device",
		.bus = &maple_bus_type,
	},
};

/**
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
	.name = "maple",
	.match = maple_match_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	ctrl_outl(0, MAPLE_ENABLE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_unsupported_device.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab maple "
			"DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);

	if (!maple_queue_cache)
		goto cleanup_bothirqs;

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		empty[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		baseunits[i] = mdev[i];
		atomic_set(&mdev[i]->busy, 1);
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	maple_pnp_time = jiffies + HZ;
	/* prepare initial queue */
	maple_send();
	dev_info(&maple_bus, "bus core now registered\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, &maple_unsupported_device);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, &maple_unsupported_device);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_ERR "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);