www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Jan 2009 23:30:54 +0000 (15:30 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Jan 2009 23:31:07 +0000 (15:31 -0800)
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6:
  Revert "driver core: create a private portion of struct device"
  Revert "driver core: move klist_children into private structure"
  Revert "driver core: move knode_driver into private structure"
  Revert "driver core: move knode_bus into private structure"

141 files changed:
Documentation/filesystems/squashfs.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Documentation/nommu-mmap.txt
Documentation/sysctl/vm.txt
MAINTAINERS
arch/arm/include/asm/mmu.h
arch/arm/mm/dma-mapping.c
arch/blackfin/include/asm/mmu.h
arch/blackfin/kernel/ptrace.c
arch/blackfin/kernel/traps.c
arch/frv/kernel/ptrace.c
arch/h8300/include/asm/mmu.h
arch/m68knommu/include/asm/mmu.h
arch/s390/include/asm/chpid.h
arch/s390/include/asm/chsc.h
arch/s390/include/asm/cmb.h
arch/s390/include/asm/dasd.h
arch/s390/include/asm/kvm.h
arch/s390/include/asm/posix_types.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/qeth.h
arch/s390/include/asm/schid.h
arch/s390/include/asm/swab.h
arch/s390/include/asm/types.h
arch/s390/kernel/entry.h
arch/s390/kernel/smp.c
arch/s390/kernel/sys_s390.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kvm/diag.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/priv.c
arch/sh/include/asm/mmu.h
drivers/char/hvc_iucv.c
drivers/firewire/fw-card.c
drivers/firewire/fw-device.c
drivers/isdn/hardware/mISDN/Kconfig
drivers/isdn/hardware/mISDN/Makefile
drivers/isdn/hardware/mISDN/hfc_multi.h
drivers/isdn/hardware/mISDN/hfc_pci.h
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/hfcsusb.c [new file with mode: 0644]
drivers/isdn/hardware/mISDN/hfcsusb.h [new file with mode: 0644]
drivers/isdn/mISDN/Makefile
drivers/isdn/mISDN/clock.c [new file with mode: 0644]
drivers/isdn/mISDN/core.c
drivers/isdn/mISDN/core.h
drivers/isdn/mISDN/dsp.h
drivers/isdn/mISDN/dsp_cmx.c
drivers/isdn/mISDN/dsp_core.c
drivers/isdn/mISDN/dsp_pipeline.c
drivers/isdn/mISDN/hwchannel.c
drivers/isdn/mISDN/l1oip_core.c
drivers/isdn/mISDN/layer1.c
drivers/isdn/mISDN/socket.c
drivers/isdn/mISDN/stack.c
drivers/isdn/mISDN/tei.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/leds-alix2.c [new file with mode: 0644]
drivers/leds/leds-ams-delta.c
drivers/leds/leds-clevo-mail.c
drivers/leds/leds-fsg.c
drivers/leds/leds-gpio.c
drivers/leds/leds-hp-disk.c
drivers/leds/leds-hp6xx.c
drivers/leds/leds-net48xx.c
drivers/leds/leds-pca9532.c
drivers/leds/leds-s3c24xx.c
drivers/leds/leds-wm8350.c [new file with mode: 0644]
drivers/leds/leds-wrap.c
drivers/leds/ledtrig-timer.c
drivers/mfd/wm8350-core.c
drivers/regulator/wm8350-regulator.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/char/Kconfig
drivers/s390/cio/qdio_debug.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/backlight.c
drivers/video/backlight/corgi_bl.c [deleted file]
drivers/video/backlight/cr_bllcd.c
drivers/video/backlight/generic_bl.c [new file with mode: 0644]
drivers/video/backlight/hp680_bl.c
drivers/video/backlight/mbp_nvidia_bl.c
drivers/video/backlight/progear_bl.c
drivers/video/backlight/tdo24m.c
drivers/video/backlight/tosa_lcd.c
drivers/video/backlight/vgg2432a4.c
fs/Kconfig
fs/Makefile
fs/binfmt_elf_fdpic.c
fs/binfmt_flat.c
fs/jffs2/nodelist.h
fs/proc/internal.h
fs/proc/meminfo.c
fs/proc/nommu.c
fs/proc/task_nommu.c
fs/ramfs/file-nommu.c
fs/squashfs/Makefile [new file with mode: 0644]
fs/squashfs/block.c [new file with mode: 0644]
fs/squashfs/cache.c [new file with mode: 0644]
fs/squashfs/dir.c [new file with mode: 0644]
fs/squashfs/export.c [new file with mode: 0644]
fs/squashfs/file.c [new file with mode: 0644]
fs/squashfs/fragment.c [new file with mode: 0644]
fs/squashfs/id.c [new file with mode: 0644]
fs/squashfs/inode.c [new file with mode: 0644]
fs/squashfs/namei.c [new file with mode: 0644]
fs/squashfs/squashfs.h [new file with mode: 0644]
fs/squashfs/squashfs_fs.h [new file with mode: 0644]
fs/squashfs/squashfs_fs_i.h [new file with mode: 0644]
fs/squashfs/squashfs_fs_sb.h [new file with mode: 0644]
fs/squashfs/super.c [new file with mode: 0644]
fs/squashfs/symlink.c [new file with mode: 0644]
include/asm-frv/mmu.h
include/asm-m32r/mmu.h
include/linux/backlight.h
include/linux/leds-pca9532.h
include/linux/leds.h
include/linux/mISDNhw.h
include/linux/mISDNif.h
include/linux/mfd/wm8350/pmic.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/spi/tdo24m.h [new file with mode: 0644]
init/do_mounts_rd.c
init/initramfs.c
ipc/shm.c
kernel/cred.c
kernel/fork.c
kernel/sysctl.c
lib/Kconfig.debug
mm/mmap.c
mm/nommu.c

diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
new file mode 100644 (file)
index 0000000..3e79e4a
--- /dev/null
@@ -0,0 +1,225 @@
+SQUASHFS 4.0 FILESYSTEM
+=======================
+
+Squashfs is a compressed read-only filesystem for Linux.
+It uses zlib compression to compress files, inodes and directories.
+Inodes in the system are very small and all blocks are packed to minimise
+data overhead. Block sizes greater than 4K are supported, up to a maximum
+of 1 Mbyte (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+Mailing list: squashfs-devel@lists.sourceforge.net
+Web site: www.squashfs.org
+
+1. FILESYSTEM FEATURES
+----------------------
+
+Squashfs filesystem features versus Cramfs:
+
+                               Squashfs                Cramfs
+
+Max filesystem size:           2^64                    16 MiB
+Max file size:                 ~ 2 TiB                 16 MiB
+Max files:                     unlimited               unlimited
+Max directories:               unlimited               unlimited
+Max entries per directory:     unlimited               unlimited
+Max block size:                1 MiB                   4 KiB
+Metadata compression:          yes                     no
+Directory indexes:             yes                     no
+Sparse file support:           yes                     no
+Tail-end packing (fragments):  yes                     no
+Exportable (NFS etc.):         yes                     no
+Hard link support:             yes                     no
+"." and ".." in readdir:       yes                     no
+Real inode numbers:            yes                     no
+32-bit uids/gids:              yes                     no
+File creation time:            yes                     no
+Xattr and ACL support:         no                      no
+
+Squashfs compresses data, inodes and directories.  In addition, inode and
+directory data are highly compacted, and packed on byte boundaries.  Each
+compressed inode is on average 8 bytes in length (the exact length varies with
+file type; i.e. regular file, directory, symbolic link, and block/char device
+inodes have different sizes).
+
+2. USING SQUASHFS
+-----------------
+
+As squashfs is a read-only filesystem, the mksquashfs program must be used to
+create populated squashfs filesystems.  This and other squashfs utilities
+can be obtained from http://www.squashfs.org.  Usage instructions can be
+obtained from this site also.
+
+
+3. SQUASHFS FILESYSTEM DESIGN
+-----------------------------
+
+A squashfs filesystem consists of seven parts, packed together on a byte
+alignment:
+
+        ---------------
+       |  superblock   |
+       |---------------|
+       |  datablocks   |
+       |  & fragments  |
+       |---------------|
+       |  inode table  |
+       |---------------|
+       |   directory   |
+       |     table     |
+       |---------------|
+       |   fragment    |
+       |    table      |
+       |---------------|
+       |    export     |
+       |    table      |
+       |---------------|
+       |    uid/gid    |
+       |  lookup table |
+        ---------------
+
+Compressed data blocks are written to the filesystem as files are read from
+the source directory, and checked for duplicates.  Once all file data has been
+written the completed inode, directory, fragment, export and uid/gid lookup
+tables are written.
+
+3.1 Inodes
+----------
+
+Metadata (inodes and directories) are compressed in 8Kbyte blocks.  Each
+compressed block is prefixed by a two-byte length; the top bit is set if the
+block is uncompressed.  A block will be uncompressed if the -noI option is set,
+or if the compressed block was larger than the uncompressed block.
+
+Inodes are packed into the metadata blocks, and are not aligned to block
+boundaries; therefore inodes overlap compressed blocks.  Inodes are identified
+by a 48-bit number which encodes the location of the compressed metadata block
+containing the inode, and the byte offset into that block where the inode is
+placed (<block, offset>).
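
As a rough illustration, a packed reference of that form could be decoded as
below.  The 16-bit offset split and the helper names are assumptions made for
this sketch only; they are not taken from the on-disk format described here.

    #include <stdint.h>

    /* Hypothetical helpers: split a 48-bit <block, offset> inode reference,
     * assuming the byte offset occupies the low 16 bits (illustration only). */
    static inline uint64_t inode_ref_block(uint64_t ref)
    {
            return ref >> 16;               /* start of compressed metadata block */
    }

    static inline unsigned int inode_ref_offset(uint64_t ref)
    {
            return (unsigned int)(ref & 0xffff);    /* byte offset into that block */
    }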
+
+To maximise compression there are different inodes for each file type
+(regular file, directory, device, etc.), the inode contents and length
+varying with the type.
+
+To further maximise compression, two types of regular file inode and
+directory inode are defined: inodes optimised for frequently occurring
+regular files and directories, and extended types where extra
+information has to be stored.
+
+3.2 Directories
+---------------
+
+Like inodes, directories are packed into compressed metadata blocks, stored
+in a directory table.  Directories are accessed using the start address of
+the metablock containing the directory and the offset into the
+decompressed block (<block, offset>).
+
+Directories are organised in a slightly complex way, and are not simply
+a list of file names.  The organisation takes advantage of the
+fact that (in most cases) the inodes of the files will be in the same
+compressed metadata block and can therefore share the start block.
+Directories are therefore organised as a two-level list: a directory
+header containing the shared start block value, followed by a sequence of
+directory entries, each of which shares that start block.  A new directory
+header is written whenever the inode start block changes.  The directory
+header/directory entry list is repeated as many times as necessary.
+
+Directories are sorted, and can contain a directory index to speed up
+file lookup.  Directory indexes store one entry per metablock, each entry
+storing the index/filename mapping to the first directory header
+in each metadata block.  Directories are sorted in alphabetical order,
+and at lookup the index is scanned linearly looking for the first filename
+alphabetically larger than the filename being looked up.  At this point the
+location of the metadata block the filename is in has been found.
+The general idea of the index is to ensure that only one metadata block needs
+to be decompressed to do a lookup, irrespective of the length of the directory.
+This scheme has the advantage that it doesn't require extra memory overhead
+and doesn't require much extra storage on disk.
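
A simplified sketch of that scan follows; the index entry type, its fields and
the function name are hypothetical, invented for the example rather than taken
from the Squashfs sources.

    #include <string.h>

    /* Hypothetical in-memory form of one directory index entry. */
    struct dir_index_entry {
            const char *name;               /* first name covered by this metablock */
            unsigned int start_block;       /* metadata block holding its dir header */
    };

    /* Scan the index linearly and stop at the first name alphabetically larger
     * than the one being looked up; the block remembered so far is the single
     * metadata block that then needs to be decompressed. */
    static unsigned int index_lookup(const struct dir_index_entry *idx, int count,
                                     unsigned int first_block, const char *name)
    {
            unsigned int block = first_block;
            int i;

            for (i = 0; i < count; i++) {
                    if (strcmp(idx[i].name, name) > 0)
                            break;
                    block = idx[i].start_block;
            }
            return block;
    }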
+
+3.3 File data
+-------------
+
+Regular files consist of a sequence of contiguous compressed blocks, and/or a
+compressed fragment block (tail-end packed block).   The compressed size
+of each datablock is stored in a block list contained within the
+file inode.
+
+To speed up access to datablocks when reading 'large' files (256 Mbytes or
+larger), the code implements an index cache that caches the mapping from
+block index to datablock location on disk.
+
+The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
+retaining a simple and space-efficient block list on disk.  The cache
+is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
+Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
+The index cache is designed to be memory efficient, and by default uses
+16 KiB.
+
+3.4 Fragment lookup table
+-------------------------
+
+Regular files can contain a fragment index which is mapped to a fragment
+location on disk and compressed size using a fragment lookup table.  This
+fragment lookup table is itself stored compressed into metadata blocks.
+A second index table is used to locate these.  For speed of access (and
+because it is small), this second index table is read at mount time and
+cached in memory.
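
The two-level lookup can be pictured as follows; the entries-per-block
constant and the function shown are assumptions for the sketch, not the real
on-disk values.

    #include <stdint.h>

    #define FRAG_ENTRIES_PER_BLOCK 512      /* assumed value, for illustration */

    /* Given a fragment index, locate the metadata block of the fragment lookup
     * table that holds its entry (via the second-level index table read at
     * mount time) and the slot within that block. */
    static void locate_fragment_entry(const uint64_t *index_table,
                                      unsigned int fragment,
                                      uint64_t *meta_block, unsigned int *slot)
    {
            *meta_block = index_table[fragment / FRAG_ENTRIES_PER_BLOCK];
            *slot = fragment % FRAG_ENTRIES_PER_BLOCK;
    }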
+
+3.5 Uid/gid lookup table
+------------------------
+
+For space efficiency regular files store uid and gid indexes, which are
+converted to 32-bit uids/gids using an id lookup table.  This table is
+stored compressed into metadata blocks.  A second index table is used to
+locate these.  For speed of access (and because it is small), this second
+index table is read at mount time and cached in memory.
+
+3.6 Export table
+----------------
+
+To enable Squashfs filesystems to be exportable (via NFS etc.), filesystems
+can optionally (disabled with the -no-exports Mksquashfs option) contain
+an inode number to inode disk location lookup table.  This is required to
+enable Squashfs to map inode numbers passed in filehandles to the inode
+location on disk, which is necessary when the export code reinstantiates
+expired/flushed inodes.
+
+This table is stored compressed into metadata blocks.  A second index table is
+used to locate these.  For speed of access (and because it is small), this
+second index table is read at mount time and cached in memory.
+
+
+4. TODOS AND OUTSTANDING ISSUES
+-------------------------------
+
+4.1 Todo list
+-------------
+
+Implement Xattr and ACL support.  The Squashfs 4.0 filesystem layout has hooks
+for these but the code has not been written.  Once the code has been written
+the existing layout should not require modification.
+
+4.2 Squashfs internal cache
+---------------------------
+
+Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
+recently accessed data, Squashfs uses two small metadata and fragment caches.
+
+The cache is not used for file datablocks; these are decompressed and cached in
+the page cache in the normal way.  The cache is used to temporarily cache
+fragment and metadata blocks which have been read as a result of a metadata
+(i.e. inode or directory) or fragment access.  Because metadata and fragments
+are packed together into blocks (to gain greater compression), the read of a
+particular piece of metadata or fragment will retrieve other metadata/fragments
+which have been packed with it; because of locality of reference these may be
+read in the near future.  Temporarily caching them ensures they are available
+for near-future access without requiring an additional read and decompress.
+
+In the future this internal cache may be replaced with an implementation which
+uses the kernel page cache.  Because the page cache operates on page-sized
+units, this may introduce additional complexity in terms of locking and
+associated race conditions.
index fcc48bf722a8f635768912ee421156b37aab44fe..8511d3532c27743ef1f4f46bc9d0fe94e59bdeec 100644 (file)
@@ -834,8 +834,8 @@ and is between 256 and 4096 characters. It is defined in the file
 
        hlt             [BUGS=ARM,SH]
 
-       hvc_iucv=       [S390] Number of z/VM IUCV Hypervisor console (HVC)
-                              back-ends. Valid parameters: 0..8
+       hvc_iucv=       [S390] Number of z/VM IUCV hypervisor console (HVC)
+                              terminal devices. Valid values: 0..8
 
        i8042.debug     [HW] Toggle i8042 debug mode
        i8042.direct    [HW] Put keyboard port into non-translated mode
index 7714f57caad5b4312e7289761c0561c68b822049..b565e8279d133f969fbd6cc2642592c3d44058ed 100644 (file)
@@ -109,12 +109,18 @@ and it's also much more restricted in the latter case:
 FURTHER NOTES ON NO-MMU MMAP
 ============================
 
- (*) A request for a private mapping of less than a page in size may not return
-     a page-aligned buffer. This is because the kernel calls kmalloc() to
-     allocate the buffer, not get_free_page().
+ (*) A request for a private mapping of a file may return a buffer that is not
+     page-aligned.  This is because XIP may take place, and the data may not be
+     page-aligned in the backing store.
 
- (*) A list of all the mappings on the system is visible through /proc/maps in
-     no-MMU mode.
+ (*) A request for an anonymous mapping will always be page aligned.  If
+     possible, the size of the request should be a power of two; otherwise
+     some of the space may be wasted, as the kernel must allocate a
+     power-of-2 granule but will only discard the excess if appropriately
+     configured (this has an effect on fragmentation).
+
+ (*) A list of all the private copy and anonymous mappings on the system is
+     visible through /proc/maps in no-MMU mode.
 
  (*) A list of all the mappings in use by a process is visible through
      /proc/<pid>/maps in no-MMU mode.
@@ -242,3 +248,18 @@ PROVIDING SHAREABLE BLOCK DEVICE SUPPORT
 Provision of shared mappings on block device files is exactly the same as for
 character devices. If there isn't a real device underneath, then the driver
 should allocate sufficient contiguous memory to honour any supported mapping.
+
+
+=================================
+ADJUSTING PAGE TRIMMING BEHAVIOUR
+=================================
+
+NOMMU mmap automatically rounds up to the nearest power-of-2 number of pages
+when performing an allocation.  This can have adverse effects on memory
+fragmentation, and as such, is left configurable.  The default behaviour is to
+aggressively trim allocations and discard any excess pages back into the page
+allocator.  In order to retain finer-grained control over fragmentation, this
+behaviour can either be disabled completely, or bumped up to a higher page
+watermark where trimming begins.
+
+Page trimming behaviour is configurable via the sysctl `vm.nr_trim_pages'.
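
As a usage sketch (not part of this patch), the value can be changed at run
time through procfs; writing 0, as below, disables trimming entirely, while
values >= 1 set the watermark at which trimming begins.

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/vm/nr_trim_pages", "w");

            if (!f) {
                    perror("nr_trim_pages");        /* e.g. not a NOMMU kernel */
                    return 1;
            }
            fprintf(f, "0\n");      /* 0 disables trimming of excess pages */
            fclose(f);
            return 0;
    }
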
index cd05994a49e69795db29f75675180371e1fe3ae4..a3415070bcac1fe6e16c8eafebccea30e3c55cb7 100644 (file)
@@ -38,6 +38,7 @@ Currently, these files are in /proc/sys/vm:
 - numa_zonelist_order
 - nr_hugepages
 - nr_overcommit_hugepages
+- nr_trim_pages                (only if CONFIG_MMU=n)
 
 ==============================================================
 
@@ -348,3 +349,20 @@ Change the maximum size of the hugepage pool. The maximum is
 nr_hugepages + nr_overcommit_hugepages.
 
 See Documentation/vm/hugetlbpage.txt
+
+==============================================================
+
+nr_trim_pages
+
+This is available only on NOMMU kernels.
+
+This value adjusts the excess page trimming behaviour of power-of-2 aligned
+NOMMU mmap allocations.
+
+A value of 0 disables trimming of allocations entirely, while a value of 1
+trims excess pages aggressively. Any value >= 1 acts as the watermark where
+trimming of allocations is initiated.
+
+The default value is 1.
+
+See Documentation/nommu-mmap.txt for more information.
index 57e0309243cc5c8c3ef1accd4c52987ef129ccc1..6f65a269cb17f5d767ee1e62d67b4eb54b1dafad 100644 (file)
@@ -4081,6 +4081,13 @@ L:       cbe-oss-dev@ozlabs.org
 W:     http://www.ibm.com/developerworks/power/cell/
 S:     Supported
 
+SQUASHFS FILE SYSTEM
+P:     Phillip Lougher
+M:     phillip@lougher.demon.co.uk
+L:     squashfs-devel@lists.sourceforge.net (subscribers-only)
+W:     http://squashfs.org.uk
+S:     Maintained
+
 SRM (Alpha) environment access
 P:     Jan-Benedict Glaw
 M:     jbglaw@lug-owl.de
index 53099d4ee4211d072741841a6513aebf74161a54..b561584d04a18ef6c631d1e05d69f6ff4b22d194 100644 (file)
@@ -24,7 +24,6 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-       struct vm_list_struct   *vmlist;
        unsigned long           end_brk;
 } mm_context_t;
 
index 67960017dc8f1f79c40bd083205f3409cdf0fbd1..310e479309efbfa038b8bbc411ef4a991f942310 100644 (file)
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(consistent_lock);
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct arm_vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
@@ -79,20 +79,20 @@ struct vm_region {
        int                     vm_active;
 };
 
-static struct vm_region consistent_head = {
+static struct arm_vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct arm_vm_region *
+arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
 {
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
-       struct vm_region *c, *new;
+       struct arm_vm_region *c, *new;
 
-       new = kmalloc(sizeof(struct vm_region), gfp);
+       new = kmalloc(sizeof(struct arm_vm_region), gfp);
        if (!new)
                goto out;
 
@@ -127,9 +127,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
        return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
 {
-       struct vm_region *c;
+       struct arm_vm_region *c;
        
        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_active && c->vm_start == addr)
@@ -149,7 +149,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
 {
        struct page *page;
-       struct vm_region *c;
+       struct arm_vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;
 
@@ -214,7 +214,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        /*
         * Allocate a virtual address in the consistent mapping region.
         */
-       c = vm_region_alloc(&consistent_head, size,
+       c = arm_vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte;
@@ -311,13 +311,13 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
        unsigned long flags, user_size, kern_size;
-       struct vm_region *c;
+       struct arm_vm_region *c;
        int ret = -ENXIO;
 
        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
        spin_lock_irqsave(&consistent_lock, flags);
-       c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+       c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        spin_unlock_irqrestore(&consistent_lock, flags);
 
        if (c) {
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-       struct vm_region *c;
+       struct arm_vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;
        int idx;
@@ -378,7 +378,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
        size = PAGE_ALIGN(size);
 
        spin_lock_irqsave(&consistent_lock, flags);
-       c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+       c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;
 
index 757e43906ed4b45b5b3bf410ecb24db3f946ef0b..dbfd686360e6bf65dd0aa0f5a7dc2f178b84f377 100644 (file)
@@ -10,7 +10,6 @@ struct sram_list_struct {
 };
 
 typedef struct {
-       struct vm_list_struct *vmlist;
        unsigned long end_brk;
        unsigned long stack_start;
 
index d2d3885366304ea38826c0c2897c0a4008184c20..594e325b40e4fa078ce1c7aee07041a20a0ac4d0 100644 (file)
@@ -160,15 +160,15 @@ put_reg(struct task_struct *task, int regno, unsigned long data)
 static inline int is_user_addr_valid(struct task_struct *child,
                                     unsigned long start, unsigned long len)
 {
-       struct vm_list_struct *vml;
+       struct vm_area_struct *vma;
        struct sram_list_struct *sraml;
 
        /* overflow */
        if (start + len < start)
                return -EIO;
 
-       for (vml = child->mm->context.vmlist; vml; vml = vml->next)
-               if (start >= vml->vma->vm_start && start + len < vml->vma->vm_end)
+       vma = find_vma(child->mm, start);
+       if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
                        return 0;
 
        for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
index 17d8e4172896c6dabfe3e48eb122b6e517d0435f..5b0667da8d05ef3256c3b403111075028ef31bb7 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 #include <asm/traps.h>
 #include <asm/cacheflush.h>
 #include <asm/cplb.h>
@@ -83,6 +84,7 @@ static void decode_address(char *buf, unsigned long address)
        struct mm_struct *mm;
        unsigned long flags, offset;
        unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+       struct rb_node *n;
 
 #ifdef CONFIG_KALLSYMS
        unsigned long symsize;
@@ -128,9 +130,10 @@ static void decode_address(char *buf, unsigned long address)
                if (!mm)
                        continue;
 
-               vml = mm->context.vmlist;
-               while (vml) {
-                       struct vm_area_struct *vma = vml->vma;
+               for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+                       struct vm_area_struct *vma;
+
+                       vma = rb_entry(n, struct vm_area_struct, vm_rb);
 
                        if (address >= vma->vm_start && address < vma->vm_end) {
                                char _tmpbuf[256];
@@ -176,8 +179,6 @@ static void decode_address(char *buf, unsigned long address)
 
                                goto done;
                        }
-
-                       vml = vml->next;
                }
                if (!in_atomic)
                        mmput(mm);
index 709e9bdc6126de06417624a0edd8297dca38dc22..5e7d401d21e7d6455883f118213a040e97754fbf 100644 (file)
@@ -69,7 +69,8 @@ static inline int put_reg(struct task_struct *task, int regno,
 }
 
 /*
- * check that an address falls within the bounds of the target process's memory mappings
+ * check that an address falls within the bounds of the target process's memory
+ * mappings
  */
 static inline int is_user_addr_valid(struct task_struct *child,
                                     unsigned long start, unsigned long len)
@@ -79,11 +80,11 @@ static inline int is_user_addr_valid(struct task_struct *child,
                return -EIO;
        return 0;
 #else
-       struct vm_list_struct *vml;
+       struct vm_area_struct *vma;
 
-       for (vml = child->mm->context.vmlist; vml; vml = vml->next)
-               if (start >= vml->vma->vm_start && start + len <= vml->vma->vm_end)
-                       return 0;
+       vma = find_vma(child->mm, start);
+       if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
+               return 0;
 
        return -EIO;
 #endif
index 2ce06ea46104c36cb8f59e9c65abc6d4f437c9bd..31309969df705ce5c2a80b8638cb6ceac8e0ef9a 100644 (file)
@@ -4,7 +4,6 @@
 /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
 
 typedef struct {
-       struct vm_list_struct   *vmlist;
        unsigned long           end_brk;
 } mm_context_t;
 
index 5fa6b68353bab9403e1f9bbd543880427516c5d7..e2da1e6f09fe4c16a1576a407ca168d296edd141 100644 (file)
@@ -4,7 +4,6 @@
 /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
 
 typedef struct {
-       struct vm_list_struct   *vmlist;
        unsigned long           end_brk;
 } mm_context_t;
 
index dfe3c7f3439a71f1cedb51ea0ca5ae71575ca329..fc71d8a6709b53f34306fc6a286ad7d69770b372 100644 (file)
@@ -9,7 +9,7 @@
 #define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
 
 #include <linux/string.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define __MAX_CHPID 255
 
index d38d0cf62d4bdd1406f0b23f2fe5811b6586f352..807997f7414b15f8b5c186da14882a0378250b5b 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _ASM_CHSC_H
 #define _ASM_CHSC_H
 
+#include <linux/types.h>
 #include <asm/chpid.h>
 #include <asm/schid.h>
 
index 50196857d27a8e55dc4939333bb44fbc5d23fa0b..39ae03294794f23b852a233e724d8a1b98342c6e 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef S390_CMB_H
 #define S390_CMB_H
+
+#include <linux/types.h>
+
 /**
  * struct cmbdata - channel measurement block data for user space
  * @size: size of the stored data
index 55b2b80cdf6e0926da9663a52292fafb0ca27762..e2db6f16d9c8bd6273ba0b26e56d9f2efe8c62e4 100644 (file)
@@ -14,6 +14,7 @@
 
 #ifndef DASD_H
 #define DASD_H
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 #define DASD_IOCTL_LETTER 'D'
@@ -78,6 +79,7 @@ typedef struct dasd_information2_t {
 #define DASD_FEATURE_USEDIAG        0x02
 #define DASD_FEATURE_INITIAL_ONLINE  0x04
 #define DASD_FEATURE_ERPLOG         0x08
+#define DASD_FEATURE_FAILFAST       0x10
 
 #define DASD_PARTN_BITS 2
 
index d74002f9579482f1bb13fc0a223b88802e6c1826..e1f54654e3ae5f9abdbc6b01668a49a7d4b6af26 100644 (file)
@@ -13,7 +13,7 @@
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
-#include <asm/types.h>
+#include <linux/types.h>
 
 /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
 struct kvm_pic_state {
index 397d93fba3a77c762a8002805ce8ef5b175b5e8d..8cc113f9252352d3070ee4f03c0bb5a9aa833f7a 100644 (file)
@@ -68,11 +68,7 @@ typedef unsigned short __kernel_old_dev_t;
 #endif /* __s390x__ */
 
 typedef struct {
-#if defined(__KERNEL__) || defined(__USE_ALL)
         int     val[2];
-#else                        /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
-        int     __val[2];
-#endif                       /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
 } __kernel_fsid_t;
 
 
index 5396f9f122634bda4d0ce33bd78bd1747856e7d2..8920025c3c02ed3bb95919861bf4b16b823a731f 100644 (file)
@@ -272,12 +272,15 @@ typedef struct
 #define PSW_ASC_SECONDARY      0x0000800000000000UL
 #define PSW_ASC_HOME           0x0000C00000000000UL
 
-extern long psw_user32_bits;
-
 #endif /* __s390x__ */
 
+#ifdef __KERNEL__
 extern long psw_kernel_bits;
 extern long psw_user_bits;
+#ifdef CONFIG_64BIT
+extern long psw_user32_bits;
+#endif
+#endif
 
 /* This macro merges a NEW PSW mask specified by the user into
    the currently active PSW mask CURRENT, modifying only those
index 930d378ef75a80b79b54a0a2a25cf5e7af1cb1b5..06cbd1e8c9433e551323ef932e5d55a8965fc37b 100644 (file)
@@ -10,6 +10,7 @@
  */
 #ifndef __ASM_S390_QETH_IOCTL_H__
 #define __ASM_S390_QETH_IOCTL_H__
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 #define SIOC_QETH_ARP_SET_NO_ENTRIES    (SIOCDEVPRIVATE)
index 825503cf3dc2fa6f97a92b927debf3ed71add376..3e4d401b4e45978a9cad899495d11a05efa07437 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef ASM_SCHID_H
 #define ASM_SCHID_H
 
+#include <linux/types.h>
+
 struct subchannel_id {
        __u32 cssid : 8;
        __u32 : 4;
index bd9321aa55a956d9b0fce55d6579b28c4a4c5eb7..eb18dc1f327bf906187358713a92e1c7066417e3 100644 (file)
@@ -9,7 +9,7 @@
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifndef __s390x__
 # define __SWAB_64_THRU_32__
index 41c547656130e75bb206312f3175d23bd2e868a7..3dc3fc228812a3ce22070d8b52705c211b23ea47 100644 (file)
@@ -9,11 +9,7 @@
 #ifndef _S390_TYPES_H
 #define _S390_TYPES_H
 
-#ifndef __s390x__
-# include <asm-generic/int-ll64.h>
-#else
-# include <asm-generic/int-l64.h>
-#endif
+#include <asm-generic/int-ll64.h>
 
 #ifndef __ASSEMBLY__
 
index 6b1896345edad2f0b2f49f26b17d4f82ad44d766..a65afc91e8aa86dab1f859797033da334ac226d0 100644 (file)
@@ -54,7 +54,5 @@ long sys_sigreturn(void);
 long sys_rt_sigreturn(void);
 long sys32_sigreturn(void);
 long sys32_rt_sigreturn(void);
-long old_select(struct sel_arg_struct __user *arg);
-long sys_ptrace(long request, long pid, long addr, long data);
 
 #endif /* _ENTRY_H */
index 9c0ccb532a4564fa5457a97e64bffbaf41ea4a79..2d337cbb9329c33e07f9dfccb2f51daa94269e30 100644 (file)
@@ -685,7 +685,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        if (MACHINE_HAS_IEEE)
                lowcore->extended_save_area_addr = (u32) save_area;
 #else
-       BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
+       if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
+               BUG();
 #endif
        set_prefix((u32)(unsigned long) lowcore);
        local_mcck_enable();
index 4fe952e557ac3bf683bb9535ff89b553035aad25..c34be4568b80fcc75250e3fe018994fa5683843e 100644 (file)
@@ -103,25 +103,6 @@ out:
        return error;
 }
 
-#ifndef CONFIG_64BIT
-struct sel_arg_struct {
-       unsigned long n;
-       fd_set __user *inp, *outp, *exp;
-       struct timeval __user *tvp;
-};
-
-asmlinkage long old_select(struct sel_arg_struct __user *arg)
-{
-       struct sel_arg_struct a;
-
-       if (copy_from_user(&a, arg, sizeof(a)))
-               return -EFAULT;
-       /* sys_select() does the appropriate kernel locking */
-       return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-
-}
-#endif /* CONFIG_64BIT */
-
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
  *
index 25a6a82f1c02309e8258553435112e44d9ec4b64..690e17819686534c9061b03e5408354bdb00f5d8 100644 (file)
@@ -322,7 +322,8 @@ static int __init vdso_init(void)
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
 #ifndef CONFIG_SMP
-       BUG_ON(vdso_alloc_per_cpu(0, S390_lowcore));
+       if (vdso_alloc_per_cpu(0, &S390_lowcore))
+               BUG();
 #endif
        vdso_init_cr5();
 #endif /* CONFIG_64BIT */
index c32f29c3d70c7a62de324fe94b35d376bafae6f2..ad8acfc949fbdb5d0a7f59375bf567770780686e 100644 (file)
@@ -9,10 +9,6 @@
  * it under the terms of the GNU General Public License (version 2 only)
  * as published by the Free Software Foundation.
  */
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
index a0775e1f08df8a73d9d06127f0b675b976785a76..8300309698fa3abce3b9b84a5884aba432c372fe 100644 (file)
@@ -47,7 +47,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
        vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
-       VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+       VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
          vcpu->run->s390_reset_flags);
        return -EREMOTE;
 }
index 2960702b48246488087546669b1459524ead2958..f4fe28a2521a10aa7e5d8c99d58c5e83603b631e 100644 (file)
@@ -160,7 +160,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                break;
 
        case KVM_S390_INT_VIRTIO:
-               VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+               VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
@@ -360,7 +360,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
        vcpu->arch.ckc_timer.expires = jiffies + sltime;
 
        add_timer(&vcpu->arch.ckc_timer);
-       VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+       VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
 no_timer:
        spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
@@ -491,7 +491,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
-               VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+               VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
index cce40ff2913bbf851059c8ea0f8d604d4eb94304..3605df45dd419fb2082b193c8e7438bf5b815dfe 100644 (file)
@@ -118,7 +118,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
                goto out;
        }
 
-       VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+       VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
 out:
        return 0;
 }
index fdcb93bc6d11d3126093409580f9243e7d1b1e8a..6c43625bb1a5ecbee71f8249546e297178ab8980 100644 (file)
@@ -9,7 +9,6 @@ typedef struct {
        mm_context_id_t         id;
        void                    *vdso;
 #else
-       struct vm_list_struct   *vmlist;
        unsigned long           end_brk;
 #endif
 #ifdef CONFIG_BINFMT_ELF_FDPIC
index 5ea7d7713fca42d0c5b091393669e20b46a9ca0f..a53496828b76949a3e0b50c82fa4372d8c6f21dd 100644 (file)
@@ -1,26 +1,30 @@
 /*
- * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC)
+ * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
  *
- * This back-end for HVC provides terminal access via
+ * This HVC device driver provides terminal access using
  * z/VM IUCV communication paths.
  *
- * Copyright IBM Corp. 2008.
+ * Copyright IBM Corp. 2008
  *
  * Author(s):  Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
 #define KMSG_COMPONENT         "hvc_iucv"
+#define pr_fmt(fmt)            KMSG_COMPONENT ": " fmt
 
 #include <linux/types.h>
 #include <asm/ebcdic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/tty.h>
+#include <linux/wait.h>
 #include <net/iucv/iucv.h>
 
 #include "hvc_console.h"
 
 
-/* HVC backend for z/VM IUCV */
+/* General device driver settings */
 #define HVC_IUCV_MAGIC         0xc9e4c3e5
 #define MAX_HVC_IUCV_LINES     HVC_ALLOC_TTY_ADAPTERS
 #define MEMPOOL_MIN_NR         (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
 #define MSG_TYPE_WINSIZE       0x08    /* Terminal window size update */
 #define MSG_TYPE_DATA          0x10    /* Terminal data */
 
-#define MSG_SIZE(s)            ((s) + offsetof(struct iucv_tty_msg, data))
 struct iucv_tty_msg {
        u8      version;                /* Message version */
        u8      type;                   /* Message type */
-#define MSG_MAX_DATALEN                (~(u16)0)
+#define MSG_MAX_DATALEN                ((u16)(~0))
        u16     datalen;                /* Payload length */
        u8      data[];                 /* Payload buffer */
 } __attribute__((packed));
+#define MSG_SIZE(s)            ((s) + offsetof(struct iucv_tty_msg, data))
 
 enum iucv_state_t {
        IUCV_DISCONN    = 0,
@@ -54,19 +58,26 @@ enum tty_state_t {
 };
 
 struct hvc_iucv_private {
-       struct hvc_struct       *hvc; /* HVC console struct reference */
+       struct hvc_struct       *hvc;           /* HVC struct reference */
        u8                      srv_name[8];    /* IUCV service name (ebcdic) */
+       unsigned char           is_console;     /* Linux console usage flag */
        enum iucv_state_t       iucv_state;     /* IUCV connection status */
        enum tty_state_t        tty_state;      /* TTY status */
        struct iucv_path        *path;          /* IUCV path pointer */
        spinlock_t              lock;           /* hvc_iucv_private lock */
+#define SNDBUF_SIZE            (PAGE_SIZE)     /* must be < MSG_MAX_DATALEN */
+       void                    *sndbuf;        /* send buffer            */
+       size_t                  sndbuf_len;     /* length of send buffer  */
+#define QUEUE_SNDBUF_DELAY     (HZ / 25)
+       struct delayed_work     sndbuf_work;    /* work: send iucv msg(s) */
+       wait_queue_head_t       sndbuf_waitq;   /* wait for send completion */
        struct list_head        tty_outqueue;   /* outgoing IUCV messages */
        struct list_head        tty_inqueue;    /* incoming IUCV messages */
 };
 
 struct iucv_tty_buffer {
        struct list_head        list;   /* list pointer */
-       struct iucv_message     msg;    /* store an incoming IUCV message */
+       struct iucv_message     msg;    /* store an IUCV message */
        size_t                  offset; /* data buffer offset */
        struct iucv_tty_msg     *mbuf;  /* buffer to store input/output data */
 };
@@ -78,11 +89,12 @@ static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
 static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
 
 
-/* Kernel module parameters */
-static unsigned long hvc_iucv_devices;
+/* Kernel module parameter: use one terminal device as default */
+static unsigned long hvc_iucv_devices = 1;
 
 /* Array of allocated hvc iucv tty lines... */
 static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
+#define IUCV_HVC_CON_IDX       (0)
 
 /* Kmem cache and mempool for iucv_tty_buffer elements */
 static struct kmem_cache *hvc_iucv_buffer_cache;
@@ -112,7 +124,7 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
 }
 
 /**
- * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element.
+ * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
  * @size:      Size of the internal buffer used to store data.
  * @flags:     Memory allocation flags passed to mempool.
  *
@@ -120,7 +132,6 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
  * allocates an internal data buffer with the specified size @size.
  * Note: The total message size arises from the internal buffer size and the
  *      members of the iucv_tty_msg structure.
- *
  * The function returns NULL if memory allocation has failed.
  */
 static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
@@ -130,7 +141,7 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
        bufp = mempool_alloc(hvc_iucv_mempool, flags);
        if (!bufp)
                return NULL;
-       memset(bufp, 0, sizeof(struct iucv_tty_buffer));
+       memset(bufp, 0, sizeof(*bufp));
 
        if (size > 0) {
                bufp->msg.length = MSG_SIZE(size);
@@ -149,9 +160,6 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
 /**
  * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
  * @bufp:      Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
- *
- * The destroy_tty_buffer() function frees the internal data buffer and returns
- * the struct iucv_tty_buffer element back to the mempool for freeing.
  */
 static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 {
@@ -161,11 +169,7 @@ static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 
 /**
  * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
- * @list:      List head pointer to a list containing struct iucv_tty_buffer
- *             elements.
- *
- * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
- * list @list.
+ * @list:      List containing struct iucv_tty_buffer elements.
  */
 static void destroy_tty_buffer_list(struct list_head *list)
 {
@@ -178,24 +182,24 @@ static void destroy_tty_buffer_list(struct list_head *list)
 }
 
 /**
- * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer.
- * @priv:              Pointer to hvc_iucv_private structure.
- * @buf:               HVC console buffer for writing received terminal data.
- * @count:             HVC console buffer size.
+ * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
+ * @priv:              Pointer to struct hvc_iucv_private
+ * @buf:               HVC buffer for writing received terminal data.
+ * @count:             HVC buffer size.
  * @has_more_data:     Pointer to an int variable.
  *
  * The function picks up pending messages from the input queue and receives
  * the message data that is then written to the specified buffer @buf.
- * If the buffer size @count is less than the data message size, then the
+ * If the buffer size @count is less than the data message size, the
  * message is kept on the input queue and @has_more_data is set to 1.
- * If the message data has been entirely written, the message is removed from
+ * If all message data has been written, the message is removed from
  * the input queue.
  *
  * The function returns the number of bytes written to the terminal, zero if
  * there are no pending data messages available or if there is no established
  * IUCV path.
  * If the IUCV path has been severed, then -EPIPE is returned to cause a
- * hang up (that is issued by the HVC console layer).
+ * hang up (that is issued by the HVC layer).
  */
 static int hvc_iucv_write(struct hvc_iucv_private *priv,
                          char *buf, int count, int *has_more_data)
@@ -204,12 +208,12 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
        int written;
        int rc;
 
-       /* Immediately return if there is no IUCV connection */
+       /* immediately return if there is no IUCV connection */
        if (priv->iucv_state == IUCV_DISCONN)
                return 0;
 
-       /* If the IUCV path has been severed, return -EPIPE to inform the
-        * hvc console layer to hang up the tty device. */
+       /* if the IUCV path has been severed, return -EPIPE to inform the
+        * HVC layer to hang up the tty device. */
        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;
 
@@ -217,7 +221,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
        if (list_empty(&priv->tty_inqueue))
                return 0;
 
-       /* receive a iucv message and flip data to the tty (ldisc) */
+       /* receive an iucv message and flip data to the tty (ldisc) */
        rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
 
        written = 0;
@@ -260,7 +264,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
        case MSG_TYPE_WINSIZE:
                if (rb->mbuf->datalen != sizeof(struct winsize))
                        break;
-               hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data));
+               hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
                break;
 
        case MSG_TYPE_ERROR:    /* ignored ... */
@@ -284,10 +288,9 @@ out_written:
  * @buf:       Pointer to a buffer to store data
  * @count:     Size of buffer available for writing
  *
- * The hvc_console thread calls this method to read characters from
- * the terminal backend. If an IUCV communication path has been established,
- * pending IUCV messages are received and data is copied into buffer @buf
- * up to @count bytes.
+ * The HVC thread calls this method to read characters from the back-end.
+ * If an IUCV communication path has been established, pending IUCV messages
+ * are received and data is copied into buffer @buf up to @count bytes.
  *
  * Locking:    The routine gets called under an irqsave() spinlock; and
  *             the routine locks the struct hvc_iucv_private->lock to call
@@ -318,66 +321,122 @@ static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
 }
 
 /**
- * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * hvc_iucv_queue() - Buffer terminal data for sending.
  * @priv:      Pointer to struct hvc_iucv_private instance.
  * @buf:       Buffer containing data to send.
- * @size:      Size of buffer and amount of data to send.
+ * @count:     Size of buffer and amount of data to send.
+ *
+ * The function queues data for sending. To actually send the buffered data,
+ * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
+ * The function returns the number of data bytes that has been buffered.
  *
- * If an IUCV communication path is established, the function copies the buffer
- * data to a newly allocated struct iucv_tty_buffer element, sends the data and
- * puts the element to the outqueue.
+ * If the device is not connected, data is ignored and the function returns
+ * @count.
+ * If the buffer is full, the function returns 0.
+ * If an existing IUCV communication path has been severed, -EPIPE is returned
+ * (that can be passed to HVC layer to cause a tty hangup).
+ */
+static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
+                         int count)
+{
+       size_t len;
+
+       if (priv->iucv_state == IUCV_DISCONN)
+               return count;                   /* ignore data */
+
+       if (priv->iucv_state == IUCV_SEVERED)
+               return -EPIPE;
+
+       len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
+       if (!len)
+               return 0;
+
+       memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
+       priv->sndbuf_len += len;
+
+       if (priv->iucv_state == IUCV_CONNECTED)
+               schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
+
+       return len;
+}
+
+/**
+ * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * @priv:      Pointer to struct hvc_iucv_private instance.
  *
- * If there is no IUCV communication path established, the function returns 0.
- * If an existing IUCV communicaton path has been severed, the function returns
- * -EPIPE (can be passed to HVC layer to cause a tty hangup).
+ * If an IUCV communication path has been established, the buffered output data
+ * is sent via an IUCV message and the number of bytes sent is returned.
+ * Returns 0 if there is no established IUCV communication path or
+ * -EPIPE if an existing IUCV communication path has been severed.
  */
-static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
-                        int count)
+static int hvc_iucv_send(struct hvc_iucv_private *priv)
 {
        struct iucv_tty_buffer *sb;
-       int rc;
-       u16 len;
+       int rc, len;
 
        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;
 
        if (priv->iucv_state == IUCV_DISCONN)
-               return 0;
+               return -EIO;
 
-       len = min_t(u16, MSG_MAX_DATALEN, count);
+       if (!priv->sndbuf_len)
+               return 0;
 
        /* allocate internal buffer to store msg data and also compute total
         * message length */
-       sb = alloc_tty_buffer(len, GFP_ATOMIC);
+       sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
        if (!sb)
                return -ENOMEM;
 
-       sb->mbuf->datalen = len;
-       memcpy(sb->mbuf->data, buf, len);
+       memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
+       sb->mbuf->datalen = (u16) priv->sndbuf_len;
+       sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
 
        list_add_tail(&sb->list, &priv->tty_outqueue);
 
        rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
                                 (void *) sb->mbuf, sb->msg.length);
        if (rc) {
+               /* drop the message here; however we might want to handle
+                * 0x03 (msg limit reached) by trying again... */
                list_del(&sb->list);
                destroy_tty_buffer(sb);
-               len = 0;
        }
+       len = priv->sndbuf_len;
+       priv->sndbuf_len = 0;
 
        return len;
 }
 
+/**
+ * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
+ * @work:      Work structure.
+ *
+ * This work queue function sends buffered output data over IUCV and,
+ * if not all buffered data could be sent, reschedules itself.
+ */
+static void hvc_iucv_sndbuf_work(struct work_struct *work)
+{
+       struct hvc_iucv_private *priv;
+
+       priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
+       if (!priv)
+               return;
+
+       spin_lock_bh(&priv->lock);
+       hvc_iucv_send(priv);
+       spin_unlock_bh(&priv->lock);
+}
+
 /**
  * hvc_iucv_put_chars() - HVC put_chars operation.
  * @vtermno:   HVC virtual terminal number.
  * @buf:       Pointer to an buffer to read data from
  * @count:     Size of buffer available for reading
  *
- * The hvc_console thread calls this method to write characters from
- * to the terminal backend.
- * The function calls hvc_iucv_send() under the lock of the
- * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
+ * The HVC thread calls this method to write characters to the back-end.
+ * The function calls hvc_iucv_queue() to queue terminal data for sending.
  *
  * Locking:    The method gets called under an irqsave() spinlock; and
  *             locks struct hvc_iucv_private->lock.
@@ -385,7 +444,7 @@ static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
 static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
 {
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
-       int sent;
+       int queued;
 
        if (count <= 0)
                return 0;
@@ -394,10 +453,10 @@ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
                return -ENODEV;
 
        spin_lock(&priv->lock);
-       sent = hvc_iucv_send(priv, buf, count);
+       queued = hvc_iucv_queue(priv, buf, count);
        spin_unlock(&priv->lock);
 
-       return sent;
+       return queued;
 }
 
 /**
@@ -406,7 +465,7 @@ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
  * @id:        Additional data (originally passed to hvc_alloc): the index of an struct
  *     hvc_iucv_private instance.
  *
- * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private
+ * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
  * instance that is derived from @id. Always returns 0.
  *
  * Locking:    struct hvc_iucv_private->lock, spin_lock_bh
@@ -427,12 +486,8 @@ static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
 }
 
 /**
- * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed.
+ * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
  * @priv:      Pointer to the struct hvc_iucv_private instance.
- *
- * The functions severs the established IUCV communication path (if any), and
- * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally,
- * the functions resets the states to TTY_CLOSED and IUCV_DISCONN.
  */
 static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 {
@@ -441,25 +496,62 @@ static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 
        priv->tty_state = TTY_CLOSED;
        priv->iucv_state = IUCV_DISCONN;
+
+       priv->sndbuf_len = 0;
 }
 
 /**
- * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
- * @hp: Pointer to the HVC device (struct hvc_struct)
- * @id: Additional data (originally passed to hvc_alloc): the index of an struct
- *     hvc_iucv_private instance.
+ * tty_outqueue_empty() - Test if the tty outq is empty
+ * @priv:      Pointer to struct hvc_iucv_private instance.
+ */
+static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
+{
+       int rc;
+
+       spin_lock_bh(&priv->lock);
+       rc = list_empty(&priv->tty_outqueue);
+       spin_unlock_bh(&priv->lock);
+
+       return rc;
+}
+
+/**
+ * flush_sndbuf_sync() - Flush send buffer and wait for completion
+ * @priv:      Pointer to struct hvc_iucv_private instance.
  *
- * This routine notifies the HVC backend that a tty hangup (carrier loss,
- * virtual or otherwise) has occured.
+ * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
+ * to flush any buffered terminal output data and waits for completion.
+ */
+static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
+{
+       int sync_wait;
+
+       cancel_delayed_work_sync(&priv->sndbuf_work);
+
+       spin_lock_bh(&priv->lock);
+       hvc_iucv_send(priv);            /* force sending buffered data */
+       sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
+       spin_unlock_bh(&priv->lock);
+
+       if (sync_wait)
+               wait_event_timeout(priv->sndbuf_waitq,
+                                  tty_outqueue_empty(priv), HZ);
+}
+
+/**
+ * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
+ * @hp:                Pointer to the HVC device (struct hvc_struct)
+ * @id:                Additional data (originally passed to hvc_alloc):
+ *             the index of a struct hvc_iucv_private instance.
  *
- * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
- * an existing IUCV communication path established.
+ * This routine notifies the HVC back-end that a tty hangup (carrier loss,
+ * virtual or otherwise) has occurred.
+ * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
+ * to keep an existing IUCV communication path established.
  * (Background: vhangup() is called from user space (by getty or login) to
  *             disable writing to the tty by other applications).
- *
- * If the tty has been opened (e.g. getty) and an established IUCV path has been
- * severed (we caused the tty hangup in that case), then the functions invokes
- * hvc_iucv_cleanup() to clean up.
+ * If the tty has been opened and an established IUCV path has been severed
+ * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
  *
  * Locking:    struct hvc_iucv_private->lock
  */
@@ -471,12 +563,12 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
        if (!priv)
                return;
 
+       flush_sndbuf_sync(priv);
+
        spin_lock_bh(&priv->lock);
        /* NOTE: If the hangup was scheduled by ourselves (from the iucv
-        *       path_servered callback [IUCV_SEVERED]), then we have to
-        *       finally clean up the tty backend structure and set state to
-        *       TTY_CLOSED.
-        *
+        *       path_severed callback [IUCV_SEVERED]), we have to clean up
+        *       our structure and set the state to TTY_CLOSED.
         *       If the tty was hung up otherwise (e.g. vhangup()), then we
         *       ignore this hangup and keep an established IUCV path open...
         *       (...the reason is that we are not able to connect back to the
@@ -494,10 +586,9 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
  * @id:                Additional data (originally passed to hvc_alloc):
  *             the index of a struct hvc_iucv_private instance.
  *
- * This routine notifies the HVC backend that the last tty device file
- * descriptor has been closed.
- * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private
- * instance.
+ * This routine notifies the HVC back-end that the last tty device fd has been
+ * closed.  The function calls hvc_iucv_cleanup() to clean up the struct
+ * hvc_iucv_private instance.
  *
  * Locking:    struct hvc_iucv_private->lock
  */
@@ -510,6 +601,8 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
        if (!priv)
                return;
 
+       flush_sndbuf_sync(priv);
+
        spin_lock_bh(&priv->lock);
        path = priv->path;              /* save reference to IUCV path */
        priv->path = NULL;
@@ -527,20 +620,18 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
 /**
  * hvc_iucv_path_pending() - IUCV handler to process a connection request.
  * @path:      Pending path (struct iucv_path)
- * @ipvmid:    Originator z/VM system identifier
+ * @ipvmid:    z/VM system identifier of originator
  * @ipuser:    User specified data for this path
  *             (AF_IUCV: port/service name and originator port)
  *
- * The function uses the @ipuser data to check to determine if the pending
- * path belongs to a terminal managed by this HVC backend.
- * If the check is successful, then an additional check is done to ensure
- * that a terminal cannot be accessed multiple times (only one connection
- * to a terminal is allowed). In that particular case, the pending path is
- * severed. If it is the first connection, the pending path is accepted and
- * associated to the struct hvc_iucv_private. The iucv state is updated to
- * reflect that a communication path has been established.
+ * The function uses the @ipuser data to determine if the pending path belongs
+ * to a terminal managed by this device driver.
+ * If the path belongs to this driver, the function ensures that the terminal
+ * is not accessed multiple times (only one connection per terminal is allowed).
+ * If the terminal is not yet connected, the pending path is accepted and
+ * associated with the appropriate struct hvc_iucv_private instance.
  *
- * Returns 0 if the path belongs to a terminal managed by the this HVC backend;
+ * Returns 0 if @path belongs to a terminal managed by this device driver;
  * otherwise returns -ENODEV in order to dispatch this path to other handlers.
  *
  * Locking:    struct hvc_iucv_private->lock
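The single-connection rule described above is enforced in a part of hvc_iucv_path_pending() that this excerpt does not show; purely as a hypothetical, condensed sketch (the guard itself is an assumption; only priv, path, ipuser and the out_path_handled label are taken from the surrounding hunks), such a check could look like:

	/* hypothetical sketch, not the patch's literal code: refuse a second
	 * connection while a path is already established for this terminal */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		goto out_path_handled;
	}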
@@ -559,7 +650,6 @@ static      int hvc_iucv_path_pending(struct iucv_path *path,
                        priv = hvc_iucv_table[i];
                        break;
                }
-
        if (!priv)
                return -ENODEV;
 
@@ -588,6 +678,9 @@ static      int hvc_iucv_path_pending(struct iucv_path *path,
        priv->path = path;
        priv->iucv_state = IUCV_CONNECTED;
 
+       /* flush buffered output data... */
+       schedule_delayed_work(&priv->sndbuf_work, 5);
+
 out_path_handled:
        spin_unlock(&priv->lock);
        return 0;
@@ -603,8 +696,7 @@ out_path_handled:
  * sets the iucv state to IUCV_SEVERED for the associated struct
  * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
  * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
- *
- * If tty portion of the HVC is closed then clean up the outqueue in addition.
+ * If the tty portion of the HVC is closed, clean up the outqueue as well.
  *
  * Locking:    struct hvc_iucv_private->lock
  */
@@ -615,15 +707,25 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
        spin_lock(&priv->lock);
        priv->iucv_state = IUCV_SEVERED;
 
-       /* NOTE: If the tty has not yet been opened by a getty program
-        *       (e.g. to see console messages), then cleanup the
-        *       hvc_iucv_private structure to allow re-connects.
+       /* If the tty has not yet been opened, clean up the hvc_iucv_private
+        * structure to allow re-connects.
+        * This is also done for our console device because console hangups
+        * are handled specially and no notifier is called by HVC.
+        * The tty session is active (TTY_OPENED) and ready for re-connects...
         *
-        *       If the tty has been opened, the get_chars() callback returns
-        *       -EPIPE to signal the hvc console layer to hang up the tty. */
+        * If it has been opened, let get_chars() return -EPIPE to signal the
+        * HVC layer to hang up the tty.
+        * In that case, we need to wake up the HVC thread to call get_chars()...
+        */
        priv->path = NULL;
        if (priv->tty_state == TTY_CLOSED)
                hvc_iucv_cleanup(priv);
+       else
+               if (priv->is_console) {
+                       hvc_iucv_cleanup(priv);
+                       priv->tty_state = TTY_OPENED;
+               } else
+                       hvc_kick();
        spin_unlock(&priv->lock);
 
        /* finally sever path (outside of priv->lock due to lock ordering) */
@@ -636,9 +738,9 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
  * @path:      Pending path (struct iucv_path)
  * @msg:       Pointer to the IUCV message
  *
- * The function stores an incoming message on the input queue for later
+ * The function puts an incoming message on the input queue for later
  * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
- * However, if the tty has not yet been opened, the message is rejected.
+ * If the tty has not yet been opened, the message is rejected.
  *
  * Locking:    struct hvc_iucv_private->lock
  */
@@ -648,6 +750,12 @@ static void hvc_iucv_msg_pending(struct iucv_path *path,
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *rb;
 
+       /* reject messages that exceed max size of iucv_tty_msg->datalen */
+       if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
+               iucv_message_reject(path, msg);
+               return;
+       }
+
        spin_lock(&priv->lock);
 
        /* reject messages if tty has not yet been opened */
@@ -656,7 +764,7 @@ static void hvc_iucv_msg_pending(struct iucv_path *path,
                goto unlock_return;
        }
 
-       /* allocate buffer an empty buffer element */
+       /* allocate tty buffer to save iucv msg only */
        rb = alloc_tty_buffer(0, GFP_ATOMIC);
        if (!rb) {
                iucv_message_reject(path, msg);
@@ -666,7 +774,7 @@ static void hvc_iucv_msg_pending(struct iucv_path *path,
 
        list_add_tail(&rb->list, &priv->tty_inqueue);
 
-       hvc_kick();     /* wakup hvc console thread */
+       hvc_kick();     /* wake up hvc thread */
 
 unlock_return:
        spin_unlock(&priv->lock);
@@ -677,10 +785,10 @@ unlock_return:
  * @path:      Pending path (struct iucv_path)
  * @msg:       Pointer to the IUCV message
  *
- * The function is called upon completion of message delivery and the
- * message is removed from the outqueue. Additional delivery information
- * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and
- * purged messages (0x010000 (IPADPGNR)).
+ * The function is called upon completion of message delivery to remove the
+ * message from the outqueue. Additional delivery information can be found
+ * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
+ *            purged messages   (0x010000 (IPADPGNR)).
  *
  * Locking:    struct hvc_iucv_private->lock
  */
@@ -697,6 +805,7 @@ static void hvc_iucv_msg_complete(struct iucv_path *path,
                        list_move(&ent->list, &list_remove);
                        break;
                }
+       wake_up(&priv->sndbuf_waitq);
        spin_unlock(&priv->lock);
        destroy_tty_buffer_list(&list_remove);
 }
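The audit bits named above are only documented here, not evaluated by this patch; as a purely hypothetical illustration (not driver code), a completion handler could distinguish them like this:

	/* hypothetical illustration of the audit values mentioned above */
	if (msg->audit & 0x040000)		/* IPADRJCT */
		pr_debug("message %u was rejected by the peer\n", msg->id);
	else if (msg->audit & 0x010000)		/* IPADPGNR */
		pr_debug("message %u was purged\n", msg->id);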
@@ -713,13 +822,14 @@ static struct hv_ops hvc_iucv_ops = {
 
 /**
  * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
- * @id:        hvc_iucv_table index
+ * @id:                        hvc_iucv_table index
+ * @is_console:                Flag indicating whether the instance is used as the Linux console
  *
- * This function allocates a new hvc_iucv_private struct and put the
- * instance into hvc_iucv_table at index @id.
+ * This function allocates a new hvc_iucv_private structure and stores
+ * the instance in hvc_iucv_table at index @id.
  * Returns 0 on success; otherwise non-zero.
  */
-static int __init hvc_iucv_alloc(int id)
+static int __init hvc_iucv_alloc(int id, unsigned int is_console)
 {
        struct hvc_iucv_private *priv;
        char name[9];
@@ -732,18 +842,33 @@ static int __init hvc_iucv_alloc(int id)
        spin_lock_init(&priv->lock);
        INIT_LIST_HEAD(&priv->tty_outqueue);
        INIT_LIST_HEAD(&priv->tty_inqueue);
+       INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
+       init_waitqueue_head(&priv->sndbuf_waitq);
+
+       priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
+       if (!priv->sndbuf) {
+               kfree(priv);
+               return -ENOMEM;
+       }
+
+       /* set console flag */
+       priv->is_console = is_console;
 
-       /* Finally allocate hvc */
-       priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
-                             HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE);
+       /* finally allocate hvc */
+       priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*             PAGE_SIZE */
+                             HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
        if (IS_ERR(priv->hvc)) {
                rc = PTR_ERR(priv->hvc);
+               free_page((unsigned long) priv->sndbuf);
                kfree(priv);
                return rc;
        }
 
+       /* notify HVC thread instead of using polling */
+       priv->hvc->irq_requested = 1;
+
        /* setup iucv related information */
-       snprintf(name, 9, "ihvc%-4d", id);
+       snprintf(name, 9, "lnxhvc%-2d", id);
        memcpy(priv->srv_name, name, 8);
        ASCEBC(priv->srv_name, 8);
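A worked example of the new name format (illustrative only; the id value 0 is just an example):

	/* for id = 0, "lnxhvc%-2d" yields the blank-padded 8-byte service
	 * name "lnxhvc0 ", which ASCEBC() then converts to EBCDIC in place;
	 * the old "ihvc%-4d" format would have produced "ihvc0   " */
	snprintf(name, 9, "lnxhvc%-2d", 0);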
 
@@ -752,15 +877,16 @@ static int __init hvc_iucv_alloc(int id)
 }
 
 /**
- * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV
+ * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
  */
 static int __init hvc_iucv_init(void)
 {
-       int rc, i;
+       int rc;
+       unsigned int i;
 
        if (!MACHINE_IS_VM) {
-               pr_warning("The z/VM IUCV Hypervisor console cannot be "
-                          "used without z/VM.\n");
+               pr_info("The z/VM IUCV HVC device driver cannot "
+                          "be used without z/VM\n");
                return -ENODEV;
        }
 
@@ -774,26 +900,33 @@ static int __init hvc_iucv_init(void)
                                           sizeof(struct iucv_tty_buffer),
                                           0, 0, NULL);
        if (!hvc_iucv_buffer_cache) {
-               pr_err("Not enough memory for driver initialization "
-                       "(rs=%d).\n", 1);
+               pr_err("Allocating memory failed with reason code=%d\n", 1);
                return -ENOMEM;
        }
 
        hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
                                                    hvc_iucv_buffer_cache);
        if (!hvc_iucv_mempool) {
-               pr_err("Not enough memory for driver initialization "
-                       "(rs=%d).\n", 2);
+               pr_err("Allocating memory failed with reason code=%d\n", 2);
                kmem_cache_destroy(hvc_iucv_buffer_cache);
                return -ENOMEM;
        }
 
+       /* register the first terminal device as console
+        * (must be done before allocating hvc terminal devices) */
+       rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
+       if (rc) {
+               pr_err("Registering HVC terminal device as "
+                      "Linux console failed\n");
+               goto out_error_memory;
+       }
+
        /* allocate hvc_iucv_private structs */
        for (i = 0; i < hvc_iucv_devices; i++) {
-               rc = hvc_iucv_alloc(i);
+               rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
                if (rc) {
-                       pr_err("Could not create new z/VM IUCV HVC backend "
-                               "rc=%d.\n", rc);
+                       pr_err("Creating a new HVC terminal device "
+                               "failed with error code=%d\n", rc);
                        goto out_error_hvc;
                }
        }
@@ -801,7 +934,8 @@ static int __init hvc_iucv_init(void)
        /* register IUCV callback handler */
        rc = iucv_register(&hvc_iucv_handler, 0);
        if (rc) {
-               pr_err("Could not register iucv handler (rc=%d).\n", rc);
+               pr_err("Registering IUCV handlers failed with error code=%d\n",
+                       rc);
                goto out_error_iucv;
        }
 
@@ -816,21 +950,12 @@ out_error_hvc:
                                hvc_remove(hvc_iucv_table[i]->hvc);
                        kfree(hvc_iucv_table[i]);
                }
+out_error_memory:
        mempool_destroy(hvc_iucv_mempool);
        kmem_cache_destroy(hvc_iucv_buffer_cache);
        return rc;
 }
 
-/**
- * hvc_iucv_console_init() - Early console initialization
- */
-static int __init hvc_iucv_console_init(void)
-{
-       if (!MACHINE_IS_VM || !hvc_iucv_devices)
-               return -ENODEV;
-       return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
-}
-
 /**
  * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
  * @val:       Parameter value (numeric)
@@ -841,10 +966,5 @@ static     int __init hvc_iucv_config(char *val)
 }
 
 
-module_init(hvc_iucv_init);
-console_initcall(hvc_iucv_console_init);
+device_initcall(hvc_iucv_init);
 __setup("hvc_iucv=", hvc_iucv_config);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
-MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
index 799f94424c8a5f0ebedf185059d94a0c64ecb893..6bd91a15d5e6a8397f9aa0988c510cb97f11df4b 100644 (file)
@@ -209,6 +209,8 @@ fw_card_bm_work(struct work_struct *work)
        unsigned long flags;
        int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
        bool do_reset = false;
+       bool root_device_is_running;
+       bool root_device_is_cmc;
        __be32 lock_data[2];
 
        spin_lock_irqsave(&card->lock, flags);
@@ -224,8 +226,9 @@ fw_card_bm_work(struct work_struct *work)
 
        generation = card->generation;
        root_device = root_node->data;
-       if (root_device)
-               fw_device_get(root_device);
+       root_device_is_running = root_device &&
+                       atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+       root_device_is_cmc = root_device && root_device->cmc;
        root_id = root_node->node_id;
        grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
 
@@ -308,14 +311,14 @@ fw_card_bm_work(struct work_struct *work)
                 * config rom.  In either case, pick another root.
                 */
                new_root_id = local_node->node_id;
-       } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) {
+       } else if (!root_device_is_running) {
                /*
                 * If we haven't probed this device yet, bail out now
                 * and let's try again once that's done.
                 */
                spin_unlock_irqrestore(&card->lock, flags);
                goto out;
-       } else if (root_device->cmc) {
+       } else if (root_device_is_cmc) {
                /*
                 * FIXME: I suppose we should set the cmstr bit in the
                 * STATE_CLEAR register of this node, as described in
@@ -362,8 +365,6 @@ fw_card_bm_work(struct work_struct *work)
                fw_core_initiate_bus_reset(card, 1);
        }
  out:
-       if (root_device)
-               fw_device_put(root_device);
        fw_node_put(root_node);
        fw_node_put(local_node);
  out_put_card:
index c173be383725c05fe98d0218ad9c41e6d144eeae..2af5a8d1e012112ceacd08c69b50ae2c3fc202ea 100644 (file)
@@ -159,7 +159,8 @@ static void fw_device_release(struct device *dev)
 
        /*
         * Take the card lock so we don't set this to NULL while a
-        * FW_NODE_UPDATED callback is being handled.
+        * FW_NODE_UPDATED callback is being handled or while the
+        * bus manager work looks at this node.
         */
        spin_lock_irqsave(&card->lock, flags);
        device->node->data = NULL;
@@ -695,12 +696,13 @@ static void fw_device_init(struct work_struct *work)
                return;
        }
 
-       err = -ENOMEM;
+       device_initialize(&device->device);
 
        fw_device_get(device);
        down_write(&fw_device_rwsem);
-       if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
-               err = idr_get_new(&fw_device_idr, device, &minor);
+       err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+             idr_get_new(&fw_device_idr, device, &minor) :
+             -ENOMEM;
        up_write(&fw_device_rwsem);
 
        if (err < 0)
@@ -911,13 +913,14 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 
                /*
                 * Do minimal initialization of the device here, the
-                * rest will happen in fw_device_init().  We need the
-                * card and node so we can read the config rom and we
-                * need to do device_initialize() now so
-                * device_for_each_child() in FW_NODE_UPDATED is
-                * doesn't freak out.
+                * rest will happen in fw_device_init().
+                *
+                * Attention:  A lot of things, even fw_device_get(),
+                * cannot be done before fw_device_init() has finished!
+                * You can basically just check device->state and
+                * schedule work until then, but only while holding
+                * card->lock.
                 */
-               device_initialize(&device->device);
                atomic_set(&device->state, FW_DEVICE_INITIALIZING);
                device->card = fw_card_get(card);
                device->node = fw_node_get(node);
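The reworked bus-manager hunk in fw-card.c above already follows this rule; condensed into one place (a sketch assembled from that hunk, not new code introduced by this patch), the pattern is:

	/* look at the root node's device state only while card->lock is
	 * held, and do not take a device reference from the BM work */
	spin_lock_irqsave(&card->lock, flags);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;
	spin_unlock_irqrestore(&card->lock, flags);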
index 14793480c453e0e915a4188dc697af6e9e5b416e..fd112ae252cfb9d871c985df5aa88a8ddaffabf7 100644 (file)
@@ -23,3 +23,10 @@ config MISDN_HFCMULTI
           * HFC-8S (8 S/T interfaces on one chip)
           * HFC-E1 (E1 interface for 2Mbit ISDN)
 
+config MISDN_HFCUSB
+       tristate "Support for HFC-S USB based TAs"
+       depends on USB
+       help
+         Enable support for USB ISDN TAs with Cologne Chip AG's
+         HFC-S USB ISDN Controller
+
index 1e7ca5332ad7e27ed7c873eaa07347be995e6733..b0403526bbba01d25582c862613c045ab89cb584 100644 (file)
@@ -5,3 +5,4 @@
 
 obj-$(CONFIG_MISDN_HFCPCI) += hfcpci.o
 obj-$(CONFIG_MISDN_HFCMULTI) += hfcmulti.o
+obj-$(CONFIG_MISDN_HFCUSB) += hfcsusb.o
index 7bbf7300593d3c01c9115a4ce9a6b737c21e4d8c..663b77f578bea047cf134aee7bc739622a2cf910 100644 (file)
@@ -2,10 +2,6 @@
  * see notice in hfc_multi.c
  */
 
-extern void ztdummy_extern_interrupt(void);
-extern void ztdummy_register_interrupt(void);
-extern int ztdummy_unregister_interrupt(void);
-
 #define DEBUG_HFCMULTI_FIFO    0x00010000
 #define        DEBUG_HFCMULTI_CRC      0x00020000
 #define        DEBUG_HFCMULTI_INIT     0x00040000
@@ -13,6 +9,7 @@ extern int ztdummy_unregister_interrupt(void);
 #define        DEBUG_HFCMULTI_MODE     0x00100000
 #define        DEBUG_HFCMULTI_MSG      0x00200000
 #define        DEBUG_HFCMULTI_STATE    0x00400000
+#define        DEBUG_HFCMULTI_FILL     0x00800000
 #define        DEBUG_HFCMULTI_SYNC     0x01000000
 #define        DEBUG_HFCMULTI_DTMF     0x02000000
 #define        DEBUG_HFCMULTI_LOCK     0x80000000
@@ -170,6 +167,8 @@ struct hfc_multi {
 
        u_long          chip;   /* chip configuration */
        int             masterclk; /* port that provides master clock -1=off */
+       unsigned char   silence;/* silence byte */
+       unsigned char   silence_data[128];/* silence block */
        int             dtmf;   /* flag that dtmf is currently in process */
        int             Flen;   /* F-buffer size */
        int             Zlen;   /* Z-buffer size (must be int for calculation)*/
@@ -198,6 +197,9 @@ struct hfc_multi {
 
        spinlock_t      lock;   /* the lock */
 
+       struct mISDNclock *iclock; /* isdn clock support */
+       int             iclock_on;
+
        /*
         * the channel index is counted from 0, regardless where the channel
         * is located on the hfc-channel.
index 5783d22a18fe0f4d5201b5bca21a28ae8be43944..3132ddc99fcd73fc785a02bcd457332f41e1fd83 100644 (file)
@@ -26,7 +26,7 @@
  * change mask and threshold simultaneously
  */
 #define HFCPCI_BTRANS_THRESHOLD 128
-#define HFCPCI_BTRANS_MAX      256
+#define HFCPCI_FILLEMPTY       64
 #define HFCPCI_BTRANS_THRESMASK 0x00
 
 /* defines for PCI config */
index c63e2f49da8ad38df99133197bf12ab6df670ea7..97f4708b38795fe030fc6e92d3004697edc44c9b 100644 (file)
  *     Give the value of the clock control register (A_ST_CLK_DLY)
  *     of the S/T interfaces in TE mode.
  *     This register is needed for the TBR3 certification, so don't change it.
+ *
+ * clock:
+ *     NOTE: only one clock value must be given for all cards
+ *     Selects the card that provides the clock source for mISDN and applications.
+ *     Set to card number starting with 1. Set to -1 to disable.
+ *     By default, the first card is used as clock source.
  */
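For orientation, the two places where this option takes effect appear further down in this diff; condensed, the relevant additions are:

	/* hfcmulti_init(): the card whose number matches 'clock' registers
	 * itself as the mISDN clock source */
	if (clock == HFC_cnt + 1)
		hc->iclock = mISDN_register_clock("HFCMulti", 0, clockctl, hc);

	/* hfcmulti_interrupt(): that card then feeds the clock from its
	 * timer interrupt */
	if (r_irq_misc & V_TI_IRQ) {
		if (hc->iclock_on)
			mISDN_clock_update(hc->iclock, poll, NULL);
		handle_timer_irq(hc);
	}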
 
 /*
  * #define HFC_REGISTER_DEBUG
  */
 
-static const char *hfcmulti_revision = "2.02";
+#define HFC_MULTI_VERSION      "2.03"
 
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -165,10 +171,6 @@ static LIST_HEAD(HFClist);
 static spinlock_t HFClock; /* global hfc list lock */
 
 static void ph_state_change(struct dchannel *);
-static void (*hfc_interrupt)(void);
-static void (*register_interrupt)(void);
-static int (*unregister_interrupt)(void);
-static int interrupt_registered;
 
 static struct hfc_multi *syncmaster;
 static int plxsd_master; /* if we have a master card (yet) */
@@ -184,7 +186,6 @@ static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30  };
 #define        CLKDEL_TE       0x0f    /* CLKDEL in TE mode */
 #define        CLKDEL_NT       0x6c    /* CLKDEL in NT mode
                                   (0x60 MUST be included!) */
-static u_char silence =        0xff;   /* silence by LAW */
 
 #define        DIP_4S  0x1             /* DIP Switches for Beronet 1S/2S/4S cards */
 #define        DIP_8S  0x2             /* DIP Switches for Beronet 8S+ cards */
@@ -195,12 +196,13 @@ static u_char silence =   0xff;   /* silence by LAW */
  */
 
 static uint    type[MAX_CARDS];
-static uint    pcm[MAX_CARDS];
-static uint    dslot[MAX_CARDS];
+static int     pcm[MAX_CARDS];
+static int     dslot[MAX_CARDS];
 static uint    iomode[MAX_CARDS];
 static uint    port[MAX_PORTS];
 static uint    debug;
 static uint    poll;
+static int     clock;
 static uint    timer;
 static uint    clockdelay_te = CLKDEL_TE;
 static uint    clockdelay_nt = CLKDEL_NT;
@@ -209,14 +211,16 @@ static int        HFC_cnt, Port_cnt, PCM_cnt = 99;
 
 MODULE_AUTHOR("Andreas Eversberg");
 MODULE_LICENSE("GPL");
+MODULE_VERSION(HFC_MULTI_VERSION);
 module_param(debug, uint, S_IRUGO | S_IWUSR);
 module_param(poll, uint, S_IRUGO | S_IWUSR);
+module_param(clock, int, S_IRUGO | S_IWUSR);
 module_param(timer, uint, S_IRUGO | S_IWUSR);
 module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
 module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
 module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
-module_param_array(pcm, uint, NULL, S_IRUGO | S_IWUSR);
-module_param_array(dslot, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
+module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR);
 module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
 module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
 
@@ -1419,19 +1423,6 @@ controller_fail:
        HFC_outb(hc, R_TI_WD, poll_timer);
        hc->hw.r_irqmsk_misc |= V_TI_IRQMSK;
 
-       /*
-        * set up 125us interrupt, only if function pointer is available
-        * and module parameter timer is set
-        */
-       if (timer && hfc_interrupt && register_interrupt) {
-               /* only one chip should use this interrupt */
-               timer = 0;
-               interrupt_registered = 1;
-               hc->hw.r_irqmsk_misc |= V_PROC_IRQMSK;
-               /* deactivate other interrupts in ztdummy */
-               register_interrupt();
-       }
-
        /* set E1 state machine IRQ */
        if (hc->type == 1)
                hc->hw.r_irqmsk_misc |= V_STA_IRQMSK;
@@ -1991,6 +1982,17 @@ next_frame:
                return; /* no data */
        }
 
+       /* "fill fifo if empty" feature */
+       if (bch && test_bit(FLG_FILLEMPTY, &bch->Flags)
+               && !test_bit(FLG_HDLC, &bch->Flags) && z2 == z1) {
+               if (debug & DEBUG_HFCMULTI_FILL)
+                       printk(KERN_DEBUG "%s: buffer empty, so we have "
+                               "underrun\n", __func__);
+               /* fill buffer, to prevent future underrun */
+               hc->write_fifo(hc, hc->silence_data, poll >> 1);
+               Zspace -= (poll >> 1);
+       }
+
        /* if audio data and connected slot */
        if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending)
                && slot_tx >= 0) {
@@ -2027,7 +2029,6 @@ next_frame:
                        __func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i,
                        temp ? "HDLC":"TRANS");
 
-
        /* Have to prep the audio data */
        hc->write_fifo(hc, d, ii - i);
        *idxp = ii;
@@ -2066,7 +2067,7 @@ next_frame:
         * no more data at all. this prevents sending an undefined value.
         */
        if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
-               HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+               HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
 }
 
 
@@ -2583,7 +2584,6 @@ hfcmulti_interrupt(int intno, void *dev_id)
        static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0,
            iq5 = 0, iq6 = 0, iqcnt = 0;
 #endif
-       static int              count;
        struct hfc_multi        *hc = dev_id;
        struct dchannel         *dch;
        u_char                  r_irq_statech, status, r_irq_misc, r_irq_oview;
@@ -2637,6 +2637,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
                iqcnt = 0;
        }
 #endif
+
        if (!r_irq_statech &&
            !(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA |
            V_MISC_IRQSTA | V_FR_IRQSTA))) {
@@ -2657,6 +2658,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
        if (status & V_MISC_IRQSTA) {
                /* misc IRQ */
                r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC);
+               r_irq_misc &= hc->hw.r_irqmsk_misc; /* ignore disabled irqs */
                if (r_irq_misc & V_STA_IRQ) {
                        if (hc->type == 1) {
                                /* state machine */
@@ -2691,23 +2693,20 @@ hfcmulti_interrupt(int intno, void *dev_id)
                                        plxsd_checksync(hc, 0);
                        }
                }
-               if (r_irq_misc & V_TI_IRQ)
+               if (r_irq_misc & V_TI_IRQ) {
+                       if (hc->iclock_on)
+                               mISDN_clock_update(hc->iclock, poll, NULL);
                        handle_timer_irq(hc);
+               }
 
                if (r_irq_misc & V_DTMF_IRQ) {
-                       /* -> DTMF IRQ */
                        hfcmulti_dtmf(hc);
                }
-               /* TODO: REPLACE !!!! 125 us Interrupts are not acceptable  */
                if (r_irq_misc & V_IRQ_PROC) {
-                       /* IRQ every 125us */
-                       count++;
-                       /* generate 1kHz signal */
-                       if (count == 8) {
-                               if (hfc_interrupt)
-                                       hfc_interrupt();
-                               count = 0;
-                       }
+                       static int irq_proc_cnt;
+                       if (!irq_proc_cnt++)
+                               printk(KERN_WARNING "%s: got V_IRQ_PROC -"
+                                   " this should not happen\n", __func__);
                }
 
        }
@@ -2954,7 +2953,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
                        HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
                        HFC_wait(hc);
                        /* tx silence */
-                       HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+                       HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
                        HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
                            ((ch % 4) * 4)) << 1);
                        HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1));
@@ -2969,7 +2968,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
                        HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
                        HFC_wait(hc);
                        /* tx silence */
-                       HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+                       HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
                        /* enable RX fifo */
                        HFC_outb(hc, R_FIFO, (ch<<1)|1);
                        HFC_wait(hc);
@@ -3461,7 +3460,7 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
        switch (cq->op) {
        case MISDN_CTRL_GETOP:
                cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP
-                       | MISDN_CTRL_RX_OFF;
+                       | MISDN_CTRL_RX_OFF | MISDN_CTRL_FILL_EMPTY;
                break;
        case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
                hc->chan[bch->slot].rx_off = !!cq->p1;
@@ -3476,6 +3475,12 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
                        printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
                            __func__, bch->nr, hc->chan[bch->slot].rx_off);
                break;
+       case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
+               test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+               if (debug & DEBUG_HFCMULTI_MSG)
+                       printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
+                               "off=%d)\n", __func__, bch->nr, !!cq->p1);
+               break;
        case MISDN_CTRL_HW_FEATURES: /* fill features structure */
                if (debug & DEBUG_HFCMULTI_MSG)
                        printk(KERN_DEBUG "%s: HW_FEATURE request\n",
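How a user of the channel requests this feature is not part of this hunk; a hypothetical caller-side sketch (ch stands for an assumed struct mISDNchannel pointer to the B-channel; the rest follows the mISDN control-request convention) might look like:

	/* hypothetical sketch: ask the B-channel to keep its tx fifo topped
	 * up with silence whenever it runs empty */
	struct mISDN_ctrl_req cq;

	memset(&cq, 0, sizeof(cq));
	cq.op = MISDN_CTRL_FILL_EMPTY;
	cq.p1 = 1;
	ch->ctrl(ch, CONTROL_CHANNEL, &cq);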
@@ -3992,6 +3997,7 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
        }
        if (test_and_set_bit(FLG_OPEN, &bch->Flags))
                return -EBUSY; /* b-channel can be only open once */
+       test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
        bch->ch.protocol = rq->protocol;
        hc->chan[ch].rx_off = 0;
        rq->ch = &bch->ch;
@@ -4081,6 +4087,15 @@ hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
        return err;
 }
 
+static int
+clockctl(void *priv, int enable)
+{
+       struct hfc_multi *hc = priv;
+
+       hc->iclock_on = enable;
+       return 0;
+}
+
 /*
  * initialize the card
  */
@@ -4495,10 +4510,14 @@ release_card(struct hfc_multi *hc)
                printk(KERN_WARNING "%s: release card (%d) entered\n",
                    __func__, hc->id);
 
+       /* unregister clock source */
+       if (hc->iclock)
+               mISDN_unregister_clock(hc->iclock);
+
+       /* disable irq */
        spin_lock_irqsave(&hc->lock, flags);
        disable_hwirq(hc);
        spin_unlock_irqrestore(&hc->lock, flags);
-
        udelay(1000);
 
        /* dimm leds */
@@ -4699,7 +4718,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
        } else
                hc->chan[hc->dslot].jitter = 2; /* default */
        snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
-       ret = mISDN_register_device(&dch->dev, name);
+       ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
        if (ret)
                goto free_chan;
        hc->created[0] = 1;
@@ -4807,9 +4826,9 @@ init_multi_port(struct hfc_multi *hc, int pt)
                test_and_set_bit(HFC_CFG_DIS_ECHANNEL,
                    &hc->chan[i + 2].cfg);
        }
-       snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d/%d",
+       snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d-%d",
                hc->type, HFC_cnt + 1, pt + 1);
-       ret = mISDN_register_device(&dch->dev, name);
+       ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
        if (ret)
                goto free_chan;
        hc->created[pt] = 1;
@@ -4828,6 +4847,7 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct hfc_multi        *hc;
        u_long          flags;
        u_char          dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
+       int             i;
 
        if (HFC_cnt >= MAX_CARDS) {
                printk(KERN_ERR "too many cards (max=%d).\n",
@@ -4861,11 +4881,11 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
        hc->id = HFC_cnt;
        hc->pcm = pcm[HFC_cnt];
        hc->io_mode = iomode[HFC_cnt];
-       if (dslot[HFC_cnt] < 0) {
+       if (dslot[HFC_cnt] < 0 && hc->type == 1) {
                hc->dslot = 0;
                printk(KERN_INFO "HFC-E1 card has disabled D-channel, but "
                        "31 B-channels\n");
-       } if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32) {
+       } if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 && hc->type == 1) {
                hc->dslot = dslot[HFC_cnt];
                printk(KERN_INFO "HFC-E1 card has alternating D-channel on "
                        "time slot %d\n", dslot[HFC_cnt]);
@@ -4876,9 +4896,17 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
        hc->masterclk = -1;
        if (type[HFC_cnt] & 0x100) {
                test_and_set_bit(HFC_CHIP_ULAW, &hc->chip);
-               silence = 0xff; /* ulaw silence */
+               hc->silence = 0xff; /* ulaw silence */
        } else
-               silence = 0x2a; /* alaw silence */
+               hc->silence = 0x2a; /* alaw silence */
+       if ((poll >> 1) > sizeof(hc->silence_data)) {
+               printk(KERN_ERR "HFCMULTI error: silence_data too small, "
+                       "please fix\n");
+               return -EINVAL;
+       }
+       for (i = 0; i < (poll >> 1); i++)
+               hc->silence_data[i] = hc->silence;
+
        if (!(type[HFC_cnt] & 0x200))
                test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
 
@@ -4945,9 +4973,7 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
        switch (m->dip_type) {
        case DIP_4S:
                /*
-                * get DIP Setting for beroNet 1S/2S/4S cards
-                *  check if Port Jumper config matches
-                * module param 'protocol'
+                * Get DIP setting for beroNet 1S/2S/4S cards
                 * DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) +
                 * GPI 19/23 (R_GPI_IN2))
                 */
@@ -4966,9 +4992,8 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
                break;
        case DIP_8S:
                /*
-                * get DIP Setting for beroNet 8S0+ cards
-                *
-                * enable PCI auxbridge function
+                * Get DIP Setting for beroNet 8S0+ cards
+                * Enable PCI auxbridge function
                 */
                HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
                /* prepare access to auxport */
@@ -5003,6 +5028,10 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
        list_add_tail(&hc->list, &HFClist);
        spin_unlock_irqrestore(&HFClock, flags);
 
+       /* use as clock source */
+       if (clock == HFC_cnt + 1)
+               hc->iclock = mISDN_register_clock("HFCMulti", 0, clockctl, hc);
+
        /* initialize hardware */
        ret_err = init_card(hc);
        if (ret_err) {
@@ -5137,8 +5166,7 @@ static struct pci_device_id hfmultipci_ids[] __devinitdata = {
        { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
        PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */
        { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
-       PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)},
-           /* IOB8ST Recording */
+       PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)}, /* IOB8ST Recording */
        { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
                PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST  */
        { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
@@ -5188,18 +5216,16 @@ hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct hm_map   *m = (struct hm_map *)ent->driver_data;
        int             ret;
 
-       if (m == NULL) {
-               if (ent->vendor == PCI_VENDOR_ID_CCD)
-                       if (ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
-                           ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
-                           ent->device == PCI_DEVICE_ID_CCD_HFCE1)
-                               printk(KERN_ERR
-                                   "unknown HFC multiport controller "
-                                   "(vendor:%x device:%x subvendor:%x "
-                                   "subdevice:%x) Please contact the "
-                                   "driver maintainer for support.\n",
-                                   ent->vendor, ent->device,
-                                   ent->subvendor, ent->subdevice);
+       if (m == NULL && ent->vendor == PCI_VENDOR_ID_CCD && (
+           ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
+           ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
+           ent->device == PCI_DEVICE_ID_CCD_HFCE1)) {
+               printk(KERN_ERR
+                   "Unknown HFC multiport controller (vendor:%x device:%x "
+                   "subvendor:%x subdevice:%x)\n", ent->vendor, ent->device,
+                   ent->subvendor, ent->subdevice);
+               printk(KERN_ERR
+                   "Please contact the driver maintainer for support.\n");
                return -ENODEV;
        }
        ret = hfcmulti_init(pdev, ent);
@@ -5222,22 +5248,9 @@ HFCmulti_cleanup(void)
 {
        struct hfc_multi *card, *next;
 
-       /* unload interrupt function symbol */
-       if (hfc_interrupt)
-               symbol_put(ztdummy_extern_interrupt);
-       if (register_interrupt)
-               symbol_put(ztdummy_register_interrupt);
-       if (unregister_interrupt) {
-               if (interrupt_registered) {
-                       interrupt_registered = 0;
-                       unregister_interrupt();
-               }
-               symbol_put(ztdummy_unregister_interrupt);
-       }
-
+       /* get rid of all devices of this driver */
        list_for_each_entry_safe(card, next, &HFClist, list)
                release_card(card);
-       /* get rid of all devices of this driver */
        pci_unregister_driver(&hfcmultipci_driver);
 }
 
@@ -5246,8 +5259,10 @@ HFCmulti_init(void)
 {
        int err;
 
+       printk(KERN_INFO "mISDN: HFC-multi driver %s\n", HFC_MULTI_VERSION);
+
 #ifdef IRQ_DEBUG
-       printk(KERN_ERR "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
+       printk(KERN_DEBUG "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
 #endif
 
        spin_lock_init(&HFClock);
@@ -5256,22 +5271,11 @@ HFCmulti_init(void)
        if (debug & DEBUG_HFCMULTI_INIT)
                printk(KERN_DEBUG "%s: init entered\n", __func__);
 
-       hfc_interrupt = symbol_get(ztdummy_extern_interrupt);
-       register_interrupt = symbol_get(ztdummy_register_interrupt);
-       unregister_interrupt = symbol_get(ztdummy_unregister_interrupt);
-       printk(KERN_INFO "mISDN: HFC-multi driver %s\n",
-           hfcmulti_revision);
-
        switch (poll) {
        case 0:
                poll_timer = 6;
                poll = 128;
                break;
-               /*
-                * wenn dieses break nochmal verschwindet,
-                * gibt es heisse ohren :-)
-                * "without the break you will get hot ears ???"
-                */
        case 8:
                poll_timer = 2;
                break;
@@ -5298,20 +5302,12 @@ HFCmulti_init(void)
 
        }
 
+       if (!clock)
+               clock = 1;
+
        err = pci_register_driver(&hfcmultipci_driver);
        if (err < 0) {
                printk(KERN_ERR "error registering pci driver: %x\n", err);
-               if (hfc_interrupt)
-                       symbol_put(ztdummy_extern_interrupt);
-               if (register_interrupt)
-                       symbol_put(ztdummy_register_interrupt);
-               if (unregister_interrupt) {
-                       if (interrupt_registered) {
-                               interrupt_registered = 0;
-                               unregister_interrupt();
-                       }
-                       symbol_put(ztdummy_unregister_interrupt);
-               }
                return err;
        }
        return 0;
index cd8302af40ebace556b7dc5dc89b1465c0ac3dd3..917bf41a293b2324596e4c0ec13853704aaacf79 100644 (file)
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
+ * Module options:
+ *
+ * debug:
+ *     NOTE: only one debug value must be given for all cards
+ *     See hfc_pci.h for debug flags.
+ *
+ * poll:
+ *     NOTE: only one poll value must be given for all cards
+ *     Give the number of samples for each fifo process.
+ *     By default 128 is used. Decrease to reduce delay, increase to
+ *     reduce cpu load. If unsure, don't mess with it!
+ *     A value of 128 will use controller's interrupt. Other values will
+ *     use kernel timer, because the controller will not allow lower values
+ *     than 128.
+ *     Also note that the value depends on the kernel timer frequency.
+ *     If the kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
+ *     If the kernel uses 100 Hz, steps of 80 samples are possible.
+ *     If the kernel uses 300 Hz, steps of about 26 samples are possible.
+ *
  */
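A small worked sketch of the arithmetic behind the note above, assuming the usual 8000 samples per second ISDN rate (the actual computation is not part of this excerpt, so the line below is illustrative rather than the driver's exact code):

	/* one kernel tick lasts 1/HZ seconds, so the smallest poll step is
	 * 8000/HZ samples: 8 at HZ=1000, 80 at HZ=100, about 26 at HZ=300;
	 * the timer period for a given poll value is then */
	tics = (poll * HZ) / 8000;	/* period in jiffies */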
 
 #include <linux/module.h>
 
 static const char *hfcpci_revision = "2.0";
 
-#define MAX_CARDS      8
 static int HFC_cnt;
 static uint debug;
+static uint poll, tics;
+struct timer_list hfc_tl;
+u32    hfc_jiffies;
 
 MODULE_AUTHOR("Karsten Keil");
 MODULE_LICENSE("GPL");
 module_param(debug, uint, 0);
-
-static LIST_HEAD(HFClist);
-static DEFINE_RWLOCK(HFClock);
+module_param(poll, uint, S_IRUGO | S_IWUSR);
 
 enum {
        HFC_CCD_2BD0,
@@ -114,7 +133,6 @@ struct hfcPCI_hw {
 
 
 struct hfc_pci {
-       struct list_head        list;
        u_char                  subtype;
        u_char                  chanlimit;
        u_char                  initdone;
@@ -520,9 +538,9 @@ receive_dmsg(struct hfc_pci *hc)
 }
 
 /*
- * check for transparent receive data and read max one threshold size if avail
+ * check for transparent receive data and read max one 'poll' size if avail
  */
-static int
+static void
 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
 {
         __le16 *z1r, *z2r;
@@ -534,17 +552,19 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
 
        fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
        if (!fcnt)
-               return 0;       /* no data avail */
+               return; /* no data avail */
 
        if (fcnt <= 0)
                fcnt += B_FIFO_SIZE;    /* bytes actually buffered */
-       if (fcnt > HFCPCI_BTRANS_THRESHOLD)
-               fcnt = HFCPCI_BTRANS_THRESHOLD;         /* limit size */
-
        new_z2 = le16_to_cpu(*z2r) + fcnt;      /* new position in fifo */
        if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
                new_z2 -= B_FIFO_SIZE;  /* buffer wrap */
 
+       if (fcnt > MAX_DATA_SIZE) {     /* flush, if oversized */
+               *z2r = cpu_to_le16(new_z2);             /* new position */
+               return;
+       }
+
        bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
        if (bch->rx_skb) {
                ptr = skb_put(bch->rx_skb, fcnt);
@@ -569,7 +589,6 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
                printk(KERN_WARNING "HFCPCI: receive out of memory\n");
 
        *z2r = cpu_to_le16(new_z2);             /* new position */
-       return 1;
 }
 
 /*
@@ -580,12 +599,11 @@ main_rec_hfcpci(struct bchannel *bch)
 {
        struct hfc_pci  *hc = bch->hw;
        int             rcnt, real_fifo;
-       int             receive, count = 5;
+       int             receive = 0, count = 5;
        struct bzfifo   *bz;
        u_char          *bdata;
        struct zt       *zp;
 
-
        if ((bch->nr & 2) && (!hc->hw.bswapped)) {
                bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
                bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
@@ -625,9 +643,10 @@ Begin:
                        receive = 1;
                else
                        receive = 0;
-       } else if (test_bit(FLG_TRANSPARENT, &bch->Flags))
-               receive = hfcpci_empty_fifo_trans(bch, bz, bdata);
-       else
+       } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+               hfcpci_empty_fifo_trans(bch, bz, bdata);
+               return;
+       } else
                receive = 0;
        if (count && receive)
                goto Begin;
@@ -751,11 +770,41 @@ hfcpci_fill_fifo(struct bchannel *bch)
                            /* fcnt contains available bytes in fifo */
                fcnt = B_FIFO_SIZE - fcnt;
                    /* remaining bytes to send (bytes in fifo) */
+
+               /* "fill fifo if empty" feature */
+               if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) {
+                       /* printk(KERN_DEBUG "%s: buffer empty, so we have "
+                               "underrun\n", __func__); */
+                       /* fill buffer, to prevent future underrun */
+                       count = HFCPCI_FILLEMPTY;
+                       new_z1 = le16_to_cpu(*z1t) + count;
+                          /* new buffer Position */
+                       if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
+                               new_z1 -= B_FIFO_SIZE;  /* buffer wrap */
+                       dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
+                       maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
+                           /* end of fifo */
+                       if (bch->debug & DEBUG_HW_BFIFO)
+                               printk(KERN_DEBUG "hfcpci_FFt fillempty "
+                                   "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
+                                   fcnt, maxlen, new_z1, dst);
+                       fcnt += count;
+                       if (maxlen > count)
+                               maxlen = count;         /* limit size */
+                       memset(dst, 0x2a, maxlen);      /* first copy */
+                       count -= maxlen;                /* remaining bytes */
+                       if (count) {
+                               dst = bdata;            /* start of buffer */
+                               memset(dst, 0x2a, count);
+                       }
+                       *z1t = cpu_to_le16(new_z1);     /* now send data */
+               }
+
 next_t_frame:
                count = bch->tx_skb->len - bch->tx_idx;
-               /* maximum fill shall be HFCPCI_BTRANS_MAX */
-               if (count > HFCPCI_BTRANS_MAX - fcnt)
-                       count = HFCPCI_BTRANS_MAX - fcnt;
+               /* maximum fill shall be poll*2 */
+               if (count > (poll << 1) - fcnt)
+                       count = (poll << 1) - fcnt;
                if (count <= 0)
                        return;
                /* data is suitable for fifo */
@@ -1135,37 +1184,37 @@ hfcpci_int(int intno, void *dev_id)
                val &= ~0x80;
                Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
        }
-       if (val & 0x08) {
+       if (val & 0x08) {       /* B1 rx */
                bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
                if (bch)
                        main_rec_hfcpci(bch);
                else if (hc->dch.debug)
                        printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
        }
-       if (val & 0x10) {
+       if (val & 0x10) {       /* B2 rx */
                bch = Sel_BCS(hc, 2);
                if (bch)
                        main_rec_hfcpci(bch);
                else if (hc->dch.debug)
                        printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
        }
-       if (val & 0x01) {
+       if (val & 0x01) {       /* B1 tx */
                bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
                if (bch)
                        tx_birq(bch);
                else if (hc->dch.debug)
                        printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
        }
-       if (val & 0x02) {
+       if (val & 0x02) {       /* B2 tx */
                bch = Sel_BCS(hc, 2);
                if (bch)
                        tx_birq(bch);
                else if (hc->dch.debug)
                        printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
        }
-       if (val & 0x20)
+       if (val & 0x20)         /* D rx */
                receive_dmsg(hc);
-       if (val & 0x04) {       /* dframe transmitted */
+       if (val & 0x04) {       /* D tx */
                if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
                        del_timer(&hc->dch.timer);
                tx_dirq(&hc->dch);
@@ -1283,14 +1332,16 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
                }
                if (fifo2 & 2) {
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
-                       hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
-                           HFCPCI_INTS_B2REC);
+                       if (!tics)
+                               hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+                                   HFCPCI_INTS_B2REC);
                        hc->hw.ctmt |= 2;
                        hc->hw.conn &= ~0x18;
                } else {
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
-                       hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
-                           HFCPCI_INTS_B1REC);
+                       if (!tics)
+                               hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+                                   HFCPCI_INTS_B1REC);
                        hc->hw.ctmt |= 1;
                        hc->hw.conn &= ~0x03;
                }
@@ -1398,7 +1449,8 @@ set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
                if (chan & 2) {
                        hc->hw.sctrl_r |= SCTRL_B2_ENA;
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
-                       hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
+                       if (!tics)
+                               hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
                        hc->hw.ctmt |= 2;
                        hc->hw.conn &= ~0x18;
 #ifdef REVERSE_BITORDER
@@ -1407,7 +1459,8 @@ set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
                } else {
                        hc->hw.sctrl_r |= SCTRL_B1_ENA;
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
-                       hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
+                       if (!tics)
+                               hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
                        hc->hw.ctmt |= 1;
                        hc->hw.conn &= ~0x03;
 #ifdef REVERSE_BITORDER
@@ -1481,11 +1534,17 @@ deactivate_bchannel(struct bchannel *bch)
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-       int                     ret = 0;
+       int     ret = 0;
 
        switch (cq->op) {
        case MISDN_CTRL_GETOP:
-               cq->op = 0;
+               cq->op = MISDN_CTRL_FILL_EMPTY;
+               break;
+       case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
+               test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+               if (debug & DEBUG_HW_OPEN)
+                       printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
+                               "off=%d)\n", __func__, bch->nr, !!cq->p1);
                break;
        default:
                printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
@@ -1859,6 +1918,10 @@ open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
                    hc->dch.dev.id, __builtin_return_address(0));
        if (rq->protocol == ISDN_P_NONE)
                return -EINVAL;
+       if (rq->adr.channel == 1) {
+               /* TODO: E-Channel */
+               return -EINVAL;
+       }
        if (!hc->initdone) {
                if (rq->protocol == ISDN_P_TE_S0) {
                        err = create_l1(&hc->dch, hfc_l1callback);
@@ -1874,6 +1937,11 @@ open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
                if (rq->protocol != ch->protocol) {
                        if (hc->hw.protocol == ISDN_P_TE_S0)
                                l1_event(hc->dch.l1, CLOSE_CHANNEL);
+                       if (rq->protocol == ISDN_P_TE_S0) {
+                               err = create_l1(&hc->dch, hfc_l1callback);
+                               if (err)
+                                       return err;
+                       }
                        hc->hw.protocol = rq->protocol;
                        ch->protocol = rq->protocol;
                        hfcpci_setmode(hc);
@@ -1903,6 +1971,7 @@ open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
        bch = &hc->bch[rq->adr.channel - 1];
        if (test_and_set_bit(FLG_OPEN, &bch->Flags))
                return -EBUSY; /* b-channel can be only open once */
+       test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
        bch->ch.protocol = rq->protocol;
        rq->ch = &bch->ch; /* TODO: E-channel */
        if (!try_module_get(THIS_MODULE))
@@ -1928,7 +1997,8 @@ hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
        switch (cmd) {
        case OPEN_CHANNEL:
                rq = arg;
-               if (rq->adr.channel == 0)
+               if ((rq->protocol == ISDN_P_TE_S0) ||
+                   (rq->protocol == ISDN_P_NT_S0))
                        err = open_dchannel(hc, ch, rq);
                else
                        err = open_bchannel(hc, rq);
@@ -2027,7 +2097,6 @@ release_card(struct hfc_pci *hc) {
        mISDN_freebchannel(&hc->bch[1]);
        mISDN_freebchannel(&hc->bch[0]);
        mISDN_freedchannel(&hc->dch);
-       list_del(&hc->list);
        pci_set_drvdata(hc->pdev, NULL);
        kfree(hc);
 }
@@ -2037,12 +2106,8 @@ setup_card(struct hfc_pci *card)
 {
        int             err = -EINVAL;
        u_int           i;
-       u_long          flags;
        char            name[MISDN_MAX_IDLEN];
 
-       if (HFC_cnt >= MAX_CARDS)
-               return -EINVAL; /* maybe better value */
-
        card->dch.debug = debug;
        spin_lock_init(&card->lock);
        mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
@@ -2068,13 +2133,10 @@ setup_card(struct hfc_pci *card)
        if (err)
                goto error;
        snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
-       err = mISDN_register_device(&card->dch.dev, name);
+       err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
        if (err)
                goto error;
        HFC_cnt++;
-       write_lock_irqsave(&HFClock, flags);
-       list_add_tail(&card->list, &HFClist);
-       write_unlock_irqrestore(&HFClock, flags);
        printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
        return 0;
 error:
@@ -2210,15 +2272,12 @@ static void __devexit
 hfc_remove_pci(struct pci_dev *pdev)
 {
        struct hfc_pci  *card = pci_get_drvdata(pdev);
-       u_long          flags;
 
-       if (card) {
-               write_lock_irqsave(&HFClock, flags);
+       if (card)
                release_card(card);
-               write_unlock_irqrestore(&HFClock, flags);
-       } else
+       else
                if (debug)
-                       printk(KERN_WARNING "%s: drvdata allready removed\n",
+                       printk(KERN_WARNING "%s: drvdata already removed\n",
                            __func__);
 }
 
@@ -2230,25 +2289,97 @@ static struct pci_driver hfc_driver = {
        .id_table = hfc_ids,
 };
 
+static int
+_hfcpci_softirq(struct device *dev, void *arg)
+{
+       struct hfc_pci  *hc = dev_get_drvdata(dev);
+       struct bchannel *bch;
+
+       if (hc == NULL)
+               return 0;
+
+       if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
+               spin_lock(&hc->lock);
+               bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
+               if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
+                       main_rec_hfcpci(bch);
+                       tx_birq(bch);
+               }
+               bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
+               if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
+                       main_rec_hfcpci(bch);
+                       tx_birq(bch);
+               }
+               spin_unlock(&hc->lock);
+       }
+       return 0;
+}
+
+static void
+hfcpci_softirq(void *arg)
+{
+       (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
+                                       _hfcpci_softirq);
+
+       /*
+        * the signed difference is wraparound-safe: if the next event
+        * would already be in the past, schedule it one jiffy ahead
+        */
+       if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
+               hfc_jiffies = jiffies + 1;
+       else
+               hfc_jiffies += tics;
+       hfc_tl.expires = hfc_jiffies;
+       add_timer(&hfc_tl);
+}
+
 static int __init
 HFC_init(void)
 {
        int             err;
 
+       if (!poll)
+               poll = HFCPCI_BTRANS_THRESHOLD;
+
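+       /*
+        * a non-default poll value is given in 8 kHz samples per timer
+        * event; convert it to jiffies (tics) and round the effective
+        * poll value back to what the timer can actually deliver
+        */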
+       if (poll != HFCPCI_BTRANS_THRESHOLD) {
+               tics = (poll * HZ) / 8000;
+               if (tics < 1)
+                       tics = 1;
+               poll = (tics * 8000) / HZ;
+               if (poll > 256 || poll < 8) {
+                       printk(KERN_ERR "%s: Wrong poll value %d not in range "
+                               "of 8..256.\n", __func__, poll);
+                       err = -EINVAL;
+                       return err;
+               }
+       }
+       if (poll != HFCPCI_BTRANS_THRESHOLD) {
+               printk(KERN_INFO "%s: Using alternative poll value of %d\n",
+                       __func__, poll);
+               init_timer(&hfc_tl);
+               hfc_tl.function = (void *)hfcpci_softirq;
+               hfc_tl.data = 0;
+               hfc_tl.expires = jiffies + tics;
+               hfc_jiffies = hfc_tl.expires;
+               add_timer(&hfc_tl);
+       } else
+               tics = 0; /* indicate the use of controller's timer */
+
        err = pci_register_driver(&hfc_driver);
+       if (err) {
+               if (timer_pending(&hfc_tl))
+                       del_timer(&hfc_tl);
+       }
+
        return err;
 }
 
 static void __exit
 HFC_cleanup(void)
 {
-       struct hfc_pci  *card, *next;
+       if (timer_pending(&hfc_tl))
+               del_timer(&hfc_tl);
 
-       list_for_each_entry_safe(card, next, &HFClist, list) {
-               release_card(card);
-       }
        pci_unregister_driver(&hfc_driver);
 }
 
 module_init(HFC_init);
 module_exit(HFC_cleanup);
+
+MODULE_DEVICE_TABLE(pci, hfc_ids);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
new file mode 100644 (file)
index 0000000..ba6925f
--- /dev/null
@@ -0,0 +1,2196 @@
+/* hfcsusb.c
+ * mISDN driver for Cologne Chip HFC-S USB chip
+ *
+ * Copyright 2001 by Peter Sprenger (sprenger@moving-bytes.de)
+ * Copyright 2008 by Martin Bachem (info@bachem-it.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * module params
+ *   debug=<n>, default=0, with n=0xHHHHGGGG
+ *      H - l1 driver flags described in hfcsusb.h
+ *      G - common mISDN debug flags described in mISDNhw.h
+ *
+ *   poll=<n>, default 128
+ *     n : burst size of PH_DATA_IND for transparent rx data
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/mISDNhw.h>
+#include "hfcsusb.h"
+
+const char *hfcsusb_rev = "Revision: 0.3.3 (socket), 2008-11-05";
+
+static unsigned int debug;
+static int poll = DEFAULT_TRANSP_BURST_SZ;
+
+static LIST_HEAD(HFClist);
+static DEFINE_RWLOCK(HFClock);
+
+
+MODULE_AUTHOR("Martin Bachem");
+MODULE_LICENSE("GPL");
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+module_param(poll, int, 0);
+
+static int hfcsusb_cnt;
+
+/* some function prototypes */
+static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command);
+static void release_hw(struct hfcsusb *hw);
+static void reset_hfcsusb(struct hfcsusb *hw);
+static void setPortMode(struct hfcsusb *hw);
+static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
+static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
+static int  hfcsusb_setup_bch(struct bchannel *bch, int protocol);
+static void deactivate_bchannel(struct bchannel *bch);
+static void hfcsusb_ph_info(struct hfcsusb *hw);
+
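+/*
+ * register writes are queued in the small ring buffer ctrl_buff, indexed
+ * by ctrl_in_idx (producer) and ctrl_out_idx (consumer); ctrl_cnt counts
+ * pending requests and the control URB is resubmitted from
+ * ctrl_start_transfer() and its completion handler until the queue is
+ * drained
+ */
+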
+/* start next background transfer for control channel */
+static void
+ctrl_start_transfer(struct hfcsusb *hw)
+{
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       if (hw->ctrl_cnt) {
+               hw->ctrl_urb->pipe = hw->ctrl_out_pipe;
+               hw->ctrl_urb->setup_packet = (u_char *)&hw->ctrl_write;
+               hw->ctrl_urb->transfer_buffer = NULL;
+               hw->ctrl_urb->transfer_buffer_length = 0;
+               hw->ctrl_write.wIndex =
+                   cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].hfcs_reg);
+               hw->ctrl_write.wValue =
+                   cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].reg_val);
+
+               usb_submit_urb(hw->ctrl_urb, GFP_ATOMIC);
+       }
+}
+
+/*
+ * queue a control transfer request to write HFC-S USB
+ * chip register using CTRL request queue
+ */
+static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
+{
+       struct ctrl_buf *buf;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n",
+                       hw->name, __func__, reg, val);
+
+       spin_lock(&hw->ctrl_lock);
+       if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
+               spin_unlock(&hw->ctrl_lock);
+               return 1;
+       }
+       buf = &hw->ctrl_buff[hw->ctrl_in_idx];
+       buf->hfcs_reg = reg;
+       buf->reg_val = val;
+       if (++hw->ctrl_in_idx >= HFC_CTRL_BUFSIZE)
+               hw->ctrl_in_idx = 0;
+       if (++hw->ctrl_cnt == 1)
+               ctrl_start_transfer(hw);
+       spin_unlock(&hw->ctrl_lock);
+
+       return 0;
+}
+
+/* control completion routine handling background control cmds */
+static void
+ctrl_complete(struct urb *urb)
+{
+       struct hfcsusb *hw = (struct hfcsusb *) urb->context;
+       struct ctrl_buf *buf;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       urb->dev = hw->dev;
+       if (hw->ctrl_cnt) {
+               buf = &hw->ctrl_buff[hw->ctrl_out_idx];
+               hw->ctrl_cnt--; /* decrement actual count */
+               if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
+                       hw->ctrl_out_idx = 0;   /* pointer wrap */
+
+               ctrl_start_transfer(hw); /* start next transfer */
+       }
+}
+
+/* handle LED bits   */
+static void
+set_led_bit(struct hfcsusb *hw, signed short led_bits, int set_on)
+{
+       if (set_on) {
+               if (led_bits < 0)
+                       hw->led_state &= ~abs(led_bits);
+               else
+                       hw->led_state |= led_bits;
+       } else {
+               if (led_bits < 0)
+                       hw->led_state |= abs(led_bits);
+               else
+                       hw->led_state &= ~led_bits;
+       }
+}
+
+/* handle LED requests  */
+static void
+handle_led(struct hfcsusb *hw, int event)
+{
+       struct hfcsusb_vdata *driver_info = (struct hfcsusb_vdata *)
+               hfcsusb_idtab[hw->vend_idx].driver_info;
+       __u8 tmpled;
+
+       if (driver_info->led_scheme == LED_OFF)
+               return;
+       tmpled = hw->led_state;
+
+       switch (event) {
+       case LED_POWER_ON:
+               set_led_bit(hw, driver_info->led_bits[0], 1);
+               set_led_bit(hw, driver_info->led_bits[1], 0);
+               set_led_bit(hw, driver_info->led_bits[2], 0);
+               set_led_bit(hw, driver_info->led_bits[3], 0);
+               break;
+       case LED_POWER_OFF:
+               set_led_bit(hw, driver_info->led_bits[0], 0);
+               set_led_bit(hw, driver_info->led_bits[1], 0);
+               set_led_bit(hw, driver_info->led_bits[2], 0);
+               set_led_bit(hw, driver_info->led_bits[3], 0);
+               break;
+       case LED_S0_ON:
+               set_led_bit(hw, driver_info->led_bits[1], 1);
+               break;
+       case LED_S0_OFF:
+               set_led_bit(hw, driver_info->led_bits[1], 0);
+               break;
+       case LED_B1_ON:
+               set_led_bit(hw, driver_info->led_bits[2], 1);
+               break;
+       case LED_B1_OFF:
+               set_led_bit(hw, driver_info->led_bits[2], 0);
+               break;
+       case LED_B2_ON:
+               set_led_bit(hw, driver_info->led_bits[3], 1);
+               break;
+       case LED_B2_OFF:
+               set_led_bit(hw, driver_info->led_bits[3], 0);
+               break;
+       }
+
+       if (hw->led_state != tmpled) {
+               if (debug & DBG_HFC_CALL_TRACE)
+                       printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n",
+                           hw->name, __func__,
+                           HFCUSB_P_DATA, hw->led_state);
+
+               write_reg(hw, HFCUSB_P_DATA, hw->led_state);
+       }
+}
+
+/*
+ * Layer2 -> Layer 1 Bchannel data
+ */
+static int
+hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+       struct bchannel         *bch = container_of(ch, struct bchannel, ch);
+       struct hfcsusb          *hw = bch->hw;
+       int                     ret = -EINVAL;
+       struct mISDNhead        *hh = mISDN_HEAD_P(skb);
+       u_long                  flags;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       switch (hh->prim) {
+       case PH_DATA_REQ:
+               spin_lock_irqsave(&hw->lock, flags);
+               ret = bchannel_senddata(bch, skb);
+               spin_unlock_irqrestore(&hw->lock, flags);
+               if (debug & DBG_HFC_CALL_TRACE)
+                       printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
+                               hw->name, __func__, ret);
+               if (ret > 0) {
+                       /*
+                        * other l1 drivers don't send early confirms on
+                        * transp data, but hfcsusb does because tx_next
+                        * skb is needed in tx_iso_complete()
+                        */
+                       queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
+                       ret = 0;
+               }
+               return ret;
+       case PH_ACTIVATE_REQ:
+               if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
+                       hfcsusb_start_endpoint(hw, bch->nr);
+                       ret = hfcsusb_setup_bch(bch, ch->protocol);
+               } else
+                       ret = 0;
+               if (!ret)
+                       _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
+                               0, NULL, GFP_KERNEL);
+               break;
+       case PH_DEACTIVATE_REQ:
+               deactivate_bchannel(bch);
+               _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY,
+                       0, NULL, GFP_KERNEL);
+               ret = 0;
+               break;
+       }
+       if (!ret)
+               dev_kfree_skb(skb);
+       return ret;
+}
+
+/*
+ * send full D/B channel status information
+ * as MPH_INFORMATION_IND
+ */
+static void
+hfcsusb_ph_info(struct hfcsusb *hw)
+{
+       struct ph_info *phi;
+       struct dchannel *dch = &hw->dch;
+       int i;
+
+       phi = kzalloc(sizeof(struct ph_info) +
+               dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
+       if (!phi)
+               return;
+       phi->dch.ch.protocol = hw->protocol;
+       phi->dch.ch.Flags = dch->Flags;
+       phi->dch.state = dch->state;
+       phi->dch.num_bch = dch->dev.nrbchan;
+       for (i = 0; i < dch->dev.nrbchan; i++) {
+               phi->bch[i].protocol = hw->bch[i].ch.protocol;
+               phi->bch[i].Flags = hw->bch[i].Flags;
+       }
+       _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
+               sizeof(struct ph_info_dch) + dch->dev.nrbchan *
+               sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
+       kfree(phi);
+}
+
+/*
+ * Layer2 -> Layer 1 Dchannel data
+ */
+static int
+hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+       struct mISDNdevice      *dev = container_of(ch, struct mISDNdevice, D);
+       struct dchannel         *dch = container_of(dev, struct dchannel, dev);
+       struct mISDNhead        *hh = mISDN_HEAD_P(skb);
+       struct hfcsusb          *hw = dch->hw;
+       int                     ret = -EINVAL;
+       u_long                  flags;
+
+       switch (hh->prim) {
+       case PH_DATA_REQ:
+               if (debug & DBG_HFC_CALL_TRACE)
+                       printk(KERN_DEBUG "%s: %s: PH_DATA_REQ\n",
+                               hw->name, __func__);
+
+               spin_lock_irqsave(&hw->lock, flags);
+               ret = dchannel_senddata(dch, skb);
+               spin_unlock_irqrestore(&hw->lock, flags);
+               if (ret > 0) {
+                       ret = 0;
+                       queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
+               }
+               break;
+
+       case PH_ACTIVATE_REQ:
+               if (debug & DBG_HFC_CALL_TRACE)
+                       printk(KERN_DEBUG "%s: %s: PH_ACTIVATE_REQ %s\n",
+                               hw->name, __func__,
+                               (hw->protocol == ISDN_P_NT_S0) ? "NT" : "TE");
+
+               if (hw->protocol == ISDN_P_NT_S0) {
+                       ret = 0;
+                       if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+                               _queue_data(&dch->dev.D,
+                                       PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
+                                       NULL, GFP_ATOMIC);
+                       } else {
+                               hfcsusb_ph_command(hw,
+                                       HFC_L1_ACTIVATE_NT);
+                               test_and_set_bit(FLG_L2_ACTIVATED,
+                                       &dch->Flags);
+                       }
+               } else {
+                       hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_TE);
+                       ret = l1_event(dch->l1, hh->prim);
+               }
+               break;
+
+       case PH_DEACTIVATE_REQ:
+               if (debug & DBG_HFC_CALL_TRACE)
+                       printk(KERN_DEBUG "%s: %s: PH_DEACTIVATE_REQ\n",
+                               hw->name, __func__);
+               test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+
+               if (hw->protocol == ISDN_P_NT_S0) {
+                       hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
+                       spin_lock_irqsave(&hw->lock, flags);
+                       skb_queue_purge(&dch->squeue);
+                       if (dch->tx_skb) {
+                               dev_kfree_skb(dch->tx_skb);
+                               dch->tx_skb = NULL;
+                       }
+                       dch->tx_idx = 0;
+                       if (dch->rx_skb) {
+                               dev_kfree_skb(dch->rx_skb);
+                               dch->rx_skb = NULL;
+                       }
+                       test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+                       spin_unlock_irqrestore(&hw->lock, flags);
+#ifdef FIXME
+                       if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+                               dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+#endif
+                       ret = 0;
+               } else
+                       ret = l1_event(dch->l1, hh->prim);
+               break;
+       case MPH_INFORMATION_REQ:
+               hfcsusb_ph_info(hw);
+               ret = 0;
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Layer 1 callback function
+ */
+static int
+hfc_l1callback(struct dchannel *dch, u_int cmd)
+{
+       struct hfcsusb *hw = dch->hw;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s cmd 0x%x\n",
+                       hw->name, __func__, cmd);
+
+       switch (cmd) {
+       case INFO3_P8:
+       case INFO3_P10:
+       case HW_RESET_REQ:
+       case HW_POWERUP_REQ:
+               break;
+
+       case HW_DEACT_REQ:
+               skb_queue_purge(&dch->squeue);
+               if (dch->tx_skb) {
+                       dev_kfree_skb(dch->tx_skb);
+                       dch->tx_skb = NULL;
+               }
+               dch->tx_idx = 0;
+               if (dch->rx_skb) {
+                       dev_kfree_skb(dch->rx_skb);
+                       dch->rx_skb = NULL;
+               }
+               test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+               break;
+       case PH_ACTIVATE_IND:
+               test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+               _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+                       GFP_ATOMIC);
+               break;
+       case PH_DEACTIVATE_IND:
+               test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+               _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+                       GFP_ATOMIC);
+               break;
+       default:
+               if (dch->debug & DEBUG_HW)
+                       printk(KERN_DEBUG "%s: %s: unknown cmd %x\n",
+                               hw->name, __func__, cmd);
+               return -1;
+       }
+       hfcsusb_ph_info(hw);
+       return 0;
+}
+
+static int
+open_dchannel(struct hfcsusb *hw, struct mISDNchannel *ch,
+    struct channel_req *rq)
+{
+       int err = 0;
+
+       if (debug & DEBUG_HW_OPEN)
+               printk(KERN_DEBUG "%s: %s: dev(%d) open addr(%i) from %p\n",
+                   hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
+                   __builtin_return_address(0));
+       if (rq->protocol == ISDN_P_NONE)
+               return -EINVAL;
+
+       test_and_clear_bit(FLG_ACTIVE, &hw->dch.Flags);
+       test_and_clear_bit(FLG_ACTIVE, &hw->ech.Flags);
+       hfcsusb_start_endpoint(hw, HFC_CHAN_D);
+
+       /* E-Channel logging */
+       if (rq->adr.channel == 1) {
+               if (hw->fifos[HFCUSB_PCM_RX].pipe) {
+                       hfcsusb_start_endpoint(hw, HFC_CHAN_E);
+                       set_bit(FLG_ACTIVE, &hw->ech.Flags);
+                       _queue_data(&hw->ech.dev.D, PH_ACTIVATE_IND,
+                                    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+               } else
+                       return -EINVAL;
+       }
+
+       if (!hw->initdone) {
+               hw->protocol = rq->protocol;
+               if (rq->protocol == ISDN_P_TE_S0) {
+                       err = create_l1(&hw->dch, hfc_l1callback);
+                       if (err)
+                               return err;
+               }
+               setPortMode(hw);
+               ch->protocol = rq->protocol;
+               hw->initdone = 1;
+       } else {
+               if (rq->protocol != ch->protocol)
+                       return -EPROTONOSUPPORT;
+       }
+
+       if (((ch->protocol == ISDN_P_NT_S0) && (hw->dch.state == 3)) ||
+           ((ch->protocol == ISDN_P_TE_S0) && (hw->dch.state == 7)))
+               _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
+                   0, NULL, GFP_KERNEL);
+       rq->ch = ch;
+       if (!try_module_get(THIS_MODULE))
+               printk(KERN_WARNING "%s: %s: cannot get module\n",
+                   hw->name, __func__);
+       return 0;
+}
+
+static int
+open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
+{
+       struct bchannel         *bch;
+
+       if (rq->adr.channel > 2)
+               return -EINVAL;
+       if (rq->protocol == ISDN_P_NONE)
+               return -EINVAL;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s B%i\n",
+                       hw->name, __func__, rq->adr.channel);
+
+       bch = &hw->bch[rq->adr.channel - 1];
+       if (test_and_set_bit(FLG_OPEN, &bch->Flags))
+               return -EBUSY; /* b-channel can be only open once */
+       test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
+       bch->ch.protocol = rq->protocol;
+       rq->ch = &bch->ch;
+
+       /* start USB endpoint for bchannel */
+       if (rq->adr.channel  == 1)
+               hfcsusb_start_endpoint(hw, HFC_CHAN_B1);
+       else
+               hfcsusb_start_endpoint(hw, HFC_CHAN_B2);
+
+       if (!try_module_get(THIS_MODULE))
+               printk(KERN_WARNING "%s: %s: cannot get module\n",
+                   hw->name, __func__);
+       return 0;
+}
+
+static int
+channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq)
+{
+       int ret = 0;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s op(0x%x) channel(0x%x)\n",
+                   hw->name, __func__, (cq->op), (cq->channel));
+
+       switch (cq->op) {
+       case MISDN_CTRL_GETOP:
+               cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
+                        MISDN_CTRL_DISCONNECT;
+               break;
+       default:
+               printk(KERN_WARNING "%s: %s: unknown Op %x\n",
+                       hw->name, __func__, cq->op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+/*
+ * device control function
+ */
+static int
+hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+       struct mISDNdevice      *dev = container_of(ch, struct mISDNdevice, D);
+       struct dchannel         *dch = container_of(dev, struct dchannel, dev);
+       struct hfcsusb          *hw = dch->hw;
+       struct channel_req      *rq;
+       int                     err = 0;
+
+       if (dch->debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: %s: cmd:%x %p\n",
+                   hw->name, __func__, cmd, arg);
+       switch (cmd) {
+       case OPEN_CHANNEL:
+               rq = arg;
+               if ((rq->protocol == ISDN_P_TE_S0) ||
+                   (rq->protocol == ISDN_P_NT_S0))
+                       err = open_dchannel(hw, ch, rq);
+               else
+                       err = open_bchannel(hw, rq);
+               if (!err)
+                       hw->open++;
+               break;
+       case CLOSE_CHANNEL:
+               hw->open--;
+               if (debug & DEBUG_HW_OPEN)
+                       printk(KERN_DEBUG
+                               "%s: %s: dev(%d) close from %p (open %d)\n",
+                               hw->name, __func__, hw->dch.dev.id,
+                               __builtin_return_address(0), hw->open);
+               if (!hw->open) {
+                       hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
+                       if (hw->fifos[HFCUSB_PCM_RX].pipe)
+                               hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
+                       handle_led(hw, LED_POWER_ON);
+               }
+               module_put(THIS_MODULE);
+               break;
+       case CONTROL_CHANNEL:
+               err = channel_ctrl(hw, arg);
+               break;
+       default:
+               if (dch->debug & DEBUG_HW)
+                       printk(KERN_DEBUG "%s: %s: unknown command %x\n",
+                               hw->name, __func__, cmd);
+               return -EINVAL;
+       }
+       return err;
+}
+
+/*
+ * S0 TE state change event handler
+ */
+static void
+ph_state_te(struct dchannel *dch)
+{
+       struct hfcsusb *hw = dch->hw;
+
+       if (debug & DEBUG_HW) {
+               if (dch->state <= HFC_MAX_TE_LAYER1_STATE)
+                       printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__,
+                           HFC_TE_LAYER1_STATES[dch->state]);
+               else
+                       printk(KERN_DEBUG "%s: %s: TE F%d\n",
+                           hw->name, __func__, dch->state);
+       }
+
+       switch (dch->state) {
+       case 0:
+               l1_event(dch->l1, HW_RESET_IND);
+               break;
+       case 3:
+               l1_event(dch->l1, HW_DEACT_IND);
+               break;
+       case 5:
+       case 8:
+               l1_event(dch->l1, ANYSIGNAL);
+               break;
+       case 6:
+               l1_event(dch->l1, INFO2);
+               break;
+       case 7:
+               l1_event(dch->l1, INFO4_P8);
+               break;
+       }
+       if (dch->state == 7)
+               handle_led(hw, LED_S0_ON);
+       else
+               handle_led(hw, LED_S0_OFF);
+}
+
+/*
+ * S0 NT state change event handler
+ */
+static void
+ph_state_nt(struct dchannel *dch)
+{
+       struct hfcsusb *hw = dch->hw;
+
+       if (debug & DEBUG_HW) {
+               if (dch->state <= HFC_MAX_NT_LAYER1_STATE)
+                       printk(KERN_DEBUG "%s: %s: %s\n",
+                           hw->name, __func__,
+                           HFC_NT_LAYER1_STATES[dch->state]);
+               else
+                       printk(KERN_DEBUG "%s: %s: NT G%d\n",
+                           hw->name, __func__, dch->state);
+       }
+
+       switch (dch->state) {
+       case (1):
+               test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+               test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+               hw->nt_timer = 0;
+               hw->timers &= ~NT_ACTIVATION_TIMER;
+               handle_led(hw, LED_S0_OFF);
+               break;
+
+       case (2):
+               if (hw->nt_timer < 0) {
+                       hw->nt_timer = 0;
+                       hw->timers &= ~NT_ACTIVATION_TIMER;
+                       hfcsusb_ph_command(dch->hw, HFC_L1_DEACTIVATE_NT);
+               } else {
+                       hw->timers |= NT_ACTIVATION_TIMER;
+                       hw->nt_timer = NT_T1_COUNT;
+                       /* allow G2 -> G3 transition */
+                       write_reg(hw, HFCUSB_STATES, 2 | HFCUSB_NT_G2_G3);
+               }
+               break;
+       case (3):
+               hw->nt_timer = 0;
+               hw->timers &= ~NT_ACTIVATION_TIMER;
+               test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+               _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
+                       MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+               handle_led(hw, LED_S0_ON);
+               break;
+       case (4):
+               hw->nt_timer = 0;
+               hw->timers &= ~NT_ACTIVATION_TIMER;
+               break;
+       default:
+               break;
+       }
+       hfcsusb_ph_info(hw);
+}
+
+static void
+ph_state(struct dchannel *dch)
+{
+       struct hfcsusb *hw = dch->hw;
+
+       if (hw->protocol == ISDN_P_NT_S0)
+               ph_state_nt(dch);
+       else if (hw->protocol == ISDN_P_TE_S0)
+               ph_state_te(dch);
+}
+
+/*
+ * disable/enable BChannel for desired protocol
+ */
+static int
+hfcsusb_setup_bch(struct bchannel *bch, int protocol)
+{
+       struct hfcsusb *hw = bch->hw;
+       __u8 conhdlc, sctrl, sctrl_r;
+
+       if (debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n",
+                   hw->name, __func__, bch->state, protocol,
+                   bch->nr);
+
+       /* setup val for CON_HDLC */
+       conhdlc = 0;
+       if (protocol > ISDN_P_NONE)
+               conhdlc = 8;    /* enable FIFO */
+
+       switch (protocol) {
+       case (-1):      /* used for init */
+               bch->state = -1;
+               /* fall through */
+       case (ISDN_P_NONE):
+               if (bch->state == ISDN_P_NONE)
+                       return 0; /* already in idle state */
+               bch->state = ISDN_P_NONE;
+               clear_bit(FLG_HDLC, &bch->Flags);
+               clear_bit(FLG_TRANSPARENT, &bch->Flags);
+               break;
+       case (ISDN_P_B_RAW):
+               conhdlc |= 2;
+               bch->state = protocol;
+               set_bit(FLG_TRANSPARENT, &bch->Flags);
+               break;
+       case (ISDN_P_B_HDLC):
+               bch->state = protocol;
+               set_bit(FLG_HDLC, &bch->Flags);
+               break;
+       default:
+               if (debug & DEBUG_HW)
+                       printk(KERN_DEBUG "%s: %s: prot not known %x\n",
+                               hw->name, __func__, protocol);
+               return -ENOPROTOOPT;
+       }
+
+       if (protocol >= ISDN_P_NONE) {
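+               /* configure and reset the tx and rx FIFO of this B-channel */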
+               write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2);
+               write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
+               write_reg(hw, HFCUSB_INC_RES_F, 2);
+               write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3);
+               write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
+               write_reg(hw, HFCUSB_INC_RES_F, 2);
+
+               sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04);
+               sctrl_r = 0x0;
+               if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) {
+                       sctrl |= 1;
+                       sctrl_r |= 1;
+               }
+               if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) {
+                       sctrl |= 2;
+                       sctrl_r |= 2;
+               }
+               write_reg(hw, HFCUSB_SCTRL, sctrl);
+               write_reg(hw, HFCUSB_SCTRL_R, sctrl_r);
+
+               if (protocol > ISDN_P_NONE)
+                       handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON);
+               else
+                       handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
+                               LED_B2_OFF);
+       }
+       hfcsusb_ph_info(hw);
+       return 0;
+}
+
+static void
+hfcsusb_ph_command(struct hfcsusb *hw, u_char command)
+{
+       if (debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: %s: %x\n",
+                  hw->name, __func__, command);
+
+       switch (command) {
+       case HFC_L1_ACTIVATE_TE:
+               /* force sending INFO1 */
+               write_reg(hw, HFCUSB_STATES, 0x14);
+               /* start l1 activation */
+               write_reg(hw, HFCUSB_STATES, 0x04);
+               break;
+
+       case HFC_L1_FORCE_DEACTIVATE_TE:
+               write_reg(hw, HFCUSB_STATES, 0x10);
+               write_reg(hw, HFCUSB_STATES, 0x03);
+               break;
+
+       case HFC_L1_ACTIVATE_NT:
+               if (hw->dch.state == 3)
+                       _queue_data(&hw->dch.dev.D, PH_ACTIVATE_IND,
+                               MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+               else
+                       write_reg(hw, HFCUSB_STATES, HFCUSB_ACTIVATE |
+                               HFCUSB_DO_ACTION | HFCUSB_NT_G2_G3);
+               break;
+
+       case HFC_L1_DEACTIVATE_NT:
+               write_reg(hw, HFCUSB_STATES,
+                       HFCUSB_DO_ACTION);
+               break;
+       }
+}
+
+/*
+ * Layer 1 B-channel hardware access
+ */
+static int
+channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+       int     ret = 0;
+
+       switch (cq->op) {
+       case MISDN_CTRL_GETOP:
+               cq->op = MISDN_CTRL_FILL_EMPTY;
+               break;
+       case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
+               test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+               if (debug & DEBUG_HW_OPEN)
+                       printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
+                               "off=%d)\n", __func__, bch->nr, !!cq->p1);
+               break;
+       default:
+               printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+/* collect data from incoming interrupt or isochronous USB data */
+static void
+hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
+       int finish)
+{
+       struct hfcsusb  *hw = fifo->hw;
+       struct sk_buff  *rx_skb = NULL;
+       int             maxlen = 0;
+       int             fifon = fifo->fifonum;
+       int             i;
+       int             hdlc = 0;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
+                   "dch(%p) bch(%p) ech(%p)\n",
+                   hw->name, __func__, fifon, len,
+                   fifo->dch, fifo->bch, fifo->ech);
+
+       if (!len)
+               return;
+
+       if ((!!fifo->dch + !!fifo->bch + !!fifo->ech) != 1) {
+               printk(KERN_DEBUG "%s: %s: undefined channel\n",
+                      hw->name, __func__);
+               return;
+       }
+
+       spin_lock(&hw->lock);
+       if (fifo->dch) {
+               rx_skb = fifo->dch->rx_skb;
+               maxlen = fifo->dch->maxlen;
+               hdlc = 1;
+       }
+       if (fifo->bch) {
+               rx_skb = fifo->bch->rx_skb;
+               maxlen = fifo->bch->maxlen;
+               hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
+       }
+       if (fifo->ech) {
+               rx_skb = fifo->ech->rx_skb;
+               maxlen = fifo->ech->maxlen;
+               hdlc = 1;
+       }
+
+       if (!rx_skb) {
+               rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
+               if (rx_skb) {
+                       if (fifo->dch)
+                               fifo->dch->rx_skb = rx_skb;
+                       if (fifo->bch)
+                               fifo->bch->rx_skb = rx_skb;
+                       if (fifo->ech)
+                               fifo->ech->rx_skb = rx_skb;
+                       skb_trim(rx_skb, 0);
+               } else {
+                       printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
+                           hw->name, __func__);
+                       spin_unlock(&hw->lock);
+                       return;
+               }
+       }
+
+       if (fifo->dch || fifo->ech) {
+               /* D/E-Channel SKB range check */
+               if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
+                       printk(KERN_DEBUG "%s: %s: skb mem exceeded "
+                           "for fifo(%d) HFCUSB_D_RX\n",
+                           hw->name, __func__, fifon);
+                       skb_trim(rx_skb, 0);
+                       spin_unlock(&hw->lock);
+                       return;
+               }
+       } else if (fifo->bch) {
+               /* B-Channel SKB range check */
+               if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) {
+                       printk(KERN_DEBUG "%s: %s: skb mem exceeded "
+                           "for fifo(%d) HFCUSB_B_RX\n",
+                           hw->name, __func__, fifon);
+                       skb_trim(rx_skb, 0);
+                       spin_unlock(&hw->lock);
+                       return;
+               }
+       }
+
+       memcpy(skb_put(rx_skb, len), data, len);
+
+       if (hdlc) {
+               /* we have a complete hdlc packet */
+               if (finish) {
+                       if ((rx_skb->len > 3) &&
+                          (!(rx_skb->data[rx_skb->len - 1]))) {
+                               if (debug & DBG_HFC_FIFO_VERBOSE) {
+                                       printk(KERN_DEBUG "%s: %s: fifon(%i)"
+                                           " new RX len(%i): ",
+                                           hw->name, __func__, fifon,
+                                           rx_skb->len);
+                                       i = 0;
+                                       while (i < rx_skb->len)
+                                               printk("%02x ",
+                                                   rx_skb->data[i++]);
+                                       printk("\n");
+                               }
+
+                               /* remove CRC & status */
+                               skb_trim(rx_skb, rx_skb->len - 3);
+
+                               if (fifo->dch)
+                                       recv_Dchannel(fifo->dch);
+                               if (fifo->bch)
+                                       recv_Bchannel(fifo->bch);
+                               if (fifo->ech)
+                                       recv_Echannel(fifo->ech,
+                                                    &hw->dch);
+                       } else {
+                               if (debug & DBG_HFC_FIFO_VERBOSE) {
+                                       printk(KERN_DEBUG
+                                           "%s: CRC or minlen ERROR fifon(%i) "
+                                           "RX len(%i): ",
+                                           hw->name, fifon, rx_skb->len);
+                                       i = 0;
+                                       while (i < rx_skb->len)
+                                               printk("%02x ",
+                                                   rx_skb->data[i++]);
+                                       printk("\n");
+                               }
+                               skb_trim(rx_skb, 0);
+                       }
+               }
+       } else {
+               /* deliver transparent data to layer2 */
+               if (rx_skb->len >= poll)
+                       recv_Bchannel(fifo->bch);
+       }
+       spin_unlock(&hw->lock);
+}
+
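+/*
+ * the USB core has no iso variant of usb_fill_bulk_urb(), so fill the
+ * common URB fields with it and set up the isochronous frame descriptors
+ * by hand
+ */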
+void
+fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
+             void *buf, int num_packets, int packet_size, int interval,
+             usb_complete_t complete, void *context)
+{
+       int k;
+
+       usb_fill_bulk_urb(urb, dev, pipe, buf, packet_size * num_packets,
+           complete, context);
+
+       urb->number_of_packets = num_packets;
+       urb->transfer_flags = URB_ISO_ASAP;
+       urb->actual_length = 0;
+       urb->interval = interval;
+
+       for (k = 0; k < num_packets; k++) {
+               urb->iso_frame_desc[k].offset = packet_size * k;
+               urb->iso_frame_desc[k].length = packet_size;
+               urb->iso_frame_desc[k].actual_length = 0;
+       }
+}
+
+/* receive completion routine for all ISO rx fifos */
+static void
+rx_iso_complete(struct urb *urb)
+{
+       struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
+       struct usb_fifo *fifo = context_iso_urb->owner_fifo;
+       struct hfcsusb *hw = fifo->hw;
+       int k, len, errcode, offset, num_isoc_packets, fifon, maxlen,
+           status, iso_status, i;
+       __u8 *buf;
+       static __u8 eof[8];
+       __u8 s0_state;
+
+       fifon = fifo->fifonum;
+       status = urb->status;
+
+       spin_lock(&hw->lock);
+       if (fifo->stop_gracefull) {
+               fifo->stop_gracefull = 0;
+               fifo->active = 0;
+               spin_unlock(&hw->lock);
+               return;
+       }
+       spin_unlock(&hw->lock);
+
+       /*
+        * ISO transfer only partially completed,
+        * look at individual frame status for details
+        */
+       if (status == -EXDEV) {
+               if (debug & DEBUG_HW)
+                       printk(KERN_DEBUG "%s: %s: with -EXDEV "
+                           "urb->status %d, fifonum %d\n",
+                           hw->name, __func__,  status, fifon);
+
+               /* clear status, so go on with ISO transfers */
+               status = 0;
+       }
+
+       s0_state = 0;
+       if (fifo->active && !status) {
+               num_isoc_packets = iso_packets[fifon];
+               maxlen = fifo->usb_packet_maxlen;
+
+               for (k = 0; k < num_isoc_packets; ++k) {
+                       len = urb->iso_frame_desc[k].actual_length;
+                       offset = urb->iso_frame_desc[k].offset;
+                       buf = context_iso_urb->buffer + offset;
+                       iso_status = urb->iso_frame_desc[k].status;
+
+                       if (iso_status && (debug & DBG_HFC_FIFO_VERBOSE)) {
+                               printk(KERN_DEBUG "%s: %s: "
+                                   "ISO packet %i, status: %i\n",
+                                   hw->name, __func__, k, iso_status);
+                       }
+
+                       /* USB data log for every D ISO in */
+                       if ((fifon == HFCUSB_D_RX) &&
+                           (debug & DBG_HFC_USB_VERBOSE)) {
+                               printk(KERN_DEBUG
+                                   "%s: %s: %d (%d/%d) len(%d) ",
+                                   hw->name, __func__, urb->start_frame,
+                                   k, num_isoc_packets-1,
+                                   len);
+                               for (i = 0; i < len; i++)
+                                       printk("%x ", buf[i]);
+                               printk("\n");
+                       }
+
+                       if (!iso_status) {
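+                               /*
+                                * a previous short (non-maxlen) packet means
+                                * this one starts a new chunk with two status
+                                * bytes: buf[0] holds the S0 state (high
+                                * nibble) and the frame end flag (bit 0),
+                                * buf[1] the FIFO fill threshold mask
+                                */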
+                               if (fifo->last_urblen != maxlen) {
+                                       /*
+                                        * save fifo fill-level threshold bits
+                                        * to use them later in TX ISO URB
+                                        * completions
+                                        */
+                                       hw->threshold_mask = buf[1];
+
+                                       if (fifon == HFCUSB_D_RX)
+                                               s0_state = (buf[0] >> 4);
+
+                                       eof[fifon] = buf[0] & 1;
+                                       if (len > 2)
+                                               hfcsusb_rx_frame(fifo, buf + 2,
+                                                       len - 2, (len < maxlen)
+                                                       ? eof[fifon] : 0);
+                               } else
+                                       hfcsusb_rx_frame(fifo, buf, len,
+                                               (len < maxlen) ?
+                                               eof[fifon] : 0);
+                               fifo->last_urblen = len;
+                       }
+               }
+
+               /* signal S0 layer1 state change */
+               if ((s0_state) && (hw->initdone) &&
+                   (s0_state != hw->dch.state)) {
+                       hw->dch.state = s0_state;
+                       schedule_event(&hw->dch, FLG_PHCHANGE);
+               }
+
+               fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
+                             context_iso_urb->buffer, num_isoc_packets,
+                             fifo->usb_packet_maxlen, fifo->intervall,
+                             (usb_complete_t)rx_iso_complete, urb->context);
+               errcode = usb_submit_urb(urb, GFP_ATOMIC);
+               if (errcode < 0) {
+                       if (debug & DEBUG_HW)
+                               printk(KERN_DEBUG "%s: %s: error submitting "
+                                   "ISO URB: %d\n",
+                                   hw->name, __func__, errcode);
+               }
+       } else {
+               if (status && (debug & DBG_HFC_URB_INFO))
+                       printk(KERN_DEBUG "%s: %s: rx_iso_complete : "
+                           "urb->status %d, fifonum %d\n",
+                           hw->name, __func__, status, fifon);
+       }
+}
+
+/* receive completion routine for all interrupt rx fifos */
+static void
+rx_int_complete(struct urb *urb)
+{
+       int len, status, i;
+       __u8 *buf, maxlen, fifon;
+       struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
+       struct hfcsusb *hw = fifo->hw;
+       static __u8 eof[8];
+
+       spin_lock(&hw->lock);
+       if (fifo->stop_gracefull) {
+               fifo->stop_gracefull = 0;
+               fifo->active = 0;
+               spin_unlock(&hw->lock);
+               return;
+       }
+       spin_unlock(&hw->lock);
+
+       fifon = fifo->fifonum;
+       if ((!fifo->active) || (urb->status)) {
+               if (debug & DBG_HFC_URB_ERROR)
+                       printk(KERN_DEBUG
+                           "%s: %s: RX-Fifo %i is going down (%i)\n",
+                           hw->name, __func__, fifon, urb->status);
+
+               fifo->urb->interval = 0; /* cancel automatic rescheduling */
+               return;
+       }
+       len = urb->actual_length;
+       buf = fifo->buffer;
+       maxlen = fifo->usb_packet_maxlen;
+
+       /* USB data log for every D INT in */
+       if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) {
+               printk(KERN_DEBUG "%s: %s: D RX INT len(%d) ",
+                   hw->name, __func__, len);
+               for (i = 0; i < len; i++)
+                       printk("%02x ", buf[i]);
+               printk("\n");
+       }
+
+       if (fifo->last_urblen != fifo->usb_packet_maxlen) {
+               /* the threshold mask is in the 2nd status byte */
+               hw->threshold_mask = buf[1];
+
+               /* signal S0 layer1 state change */
+               if (hw->initdone && ((buf[0] >> 4) != hw->dch.state)) {
+                       hw->dch.state = (buf[0] >> 4);
+                       schedule_event(&hw->dch, FLG_PHCHANGE);
+               }
+
+               eof[fifon] = buf[0] & 1;
+               /* if we have more than the 2 status bytes -> collect data */
+               if (len > 2)
+                       hfcsusb_rx_frame(fifo, buf + 2,
+                          urb->actual_length - 2,
+                          (len < maxlen) ? eof[fifon] : 0);
+       } else {
+               hfcsusb_rx_frame(fifo, buf, urb->actual_length,
+                                (len < maxlen) ? eof[fifon] : 0);
+       }
+       fifo->last_urblen = urb->actual_length;
+
+       status = usb_submit_urb(urb, GFP_ATOMIC);
+       if (status) {
+               if (debug & DEBUG_HW)
+                       printk(KERN_DEBUG "%s: %s: error resubmitting USB\n",
+                           hw->name, __func__);
+       }
+}
+
+/* transmit completion routine for all ISO tx fifos */
+static void
+tx_iso_complete(struct urb *urb)
+{
+       struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
+       struct usb_fifo *fifo = context_iso_urb->owner_fifo;
+       struct hfcsusb *hw = fifo->hw;
+       struct sk_buff *tx_skb;
+       int k, tx_offset, num_isoc_packets, sink, remain, current_len,
+           errcode, hdlc, i;
+       int *tx_idx;
+       int frame_complete, fifon, status;
+       __u8 threshbit;
+
+       spin_lock(&hw->lock);
+       if (fifo->stop_gracefull) {
+               fifo->stop_gracefull = 0;
+               fifo->active = 0;
+               spin_unlock(&hw->lock);
+               return;
+       }
+
+       if (fifo->dch) {
+               tx_skb = fifo->dch->tx_skb;
+               tx_idx = &fifo->dch->tx_idx;
+               hdlc = 1;
+       } else if (fifo->bch) {
+               tx_skb = fifo->bch->tx_skb;
+               tx_idx = &fifo->bch->tx_idx;
+               hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
+       } else {
+               printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
+                   hw->name, __func__);
+               spin_unlock(&hw->lock);
+               return;
+       }
+
+       fifon = fifo->fifonum;
+       status = urb->status;
+
+       tx_offset = 0;
+
+       /*
+        * ISO transfer only partially completed,
+        * look at individual frame status for details
+        */
+       if (status == -EXDEV) {
+               if (debug & DBG_HFC_URB_ERROR)
+                       printk(KERN_DEBUG "%s: %s: "
+                           "-EXDEV (%i) fifon (%d)\n",
+                           hw->name, __func__, status, fifon);
+
+               /* clear status, so go on with ISO transfers */
+               status = 0;
+       }
+
+       if (fifo->active && !status) {
+               /* is FifoFull-threshold set for our channel? */
+               threshbit = (hw->threshold_mask & (1 << fifon));
+               num_isoc_packets = iso_packets[fifon];
+
+               /* predict dataflow to avoid fifo overflow */
+               if (fifon >= HFCUSB_D_TX)
+                       sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
+               else
+                       sink = (threshbit) ? SINK_MIN : SINK_MAX;
+               fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
+                             context_iso_urb->buffer, num_isoc_packets,
+                             fifo->usb_packet_maxlen, fifo->intervall,
+                             (usb_complete_t)tx_iso_complete, urb->context);
+               memset(context_iso_urb->buffer, 0,
+                      sizeof(context_iso_urb->buffer));
+               frame_complete = 0;
+
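+               /*
+                * fifo->bit_line tracks how many bits are queued in the
+                * chip ahead of transmission: each ISO packet drains
+                * 'sink' bits and new data is only topped up far enough
+                * to keep the FIFO from overflowing
+                */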
+               for (k = 0; k < num_isoc_packets; ++k) {
+                       /* analyze tx success of previous ISO packets */
+                       if (debug & DBG_HFC_URB_ERROR) {
+                               errcode = urb->iso_frame_desc[k].status;
+                               if (errcode) {
+                                       printk(KERN_DEBUG "%s: %s: "
+                                           "ISO packet %i, status: %i\n",
+                                            hw->name, __func__, k, errcode);
+                               }
+                       }
+
+                       /* Generate next ISO Packets */
+                       if (tx_skb)
+                               remain = tx_skb->len - *tx_idx;
+                       else
+                               remain = 0;
+
+                       if (remain > 0) {
+                               fifo->bit_line -= sink;
+                               current_len = (0 - fifo->bit_line) / 8;
+                               if (current_len > 14)
+                                       current_len = 14;
+                               if (current_len < 0)
+                                       current_len = 0;
+                               if (remain < current_len)
+                                       current_len = remain;
+
+                               /* how many bits do we put on the line? */
+                               fifo->bit_line += current_len * 8;
+
+                               context_iso_urb->buffer[tx_offset] = 0;
+                               if (current_len == remain) {
+                                       if (hdlc) {
+                                               /* signal frame completion */
+                                               context_iso_urb->
+                                                   buffer[tx_offset] = 1;
+                                               /* add 2 byte flags and 16bit
+                                                * CRC at end of ISDN frame */
+                                               fifo->bit_line += 32;
+                                       }
+                                       frame_complete = 1;
+                               }
+
+                               /* copy tx data to iso-urb buffer */
+                               memcpy(context_iso_urb->buffer + tx_offset + 1,
+                                      (tx_skb->data + *tx_idx), current_len);
+                               *tx_idx += current_len;
+
+                               urb->iso_frame_desc[k].offset = tx_offset;
+                               urb->iso_frame_desc[k].length = current_len + 1;
+
+                               /* USB data log for every D ISO out */
+                               if ((fifon == HFCUSB_D_RX) &&
+                                   (debug & DBG_HFC_USB_VERBOSE)) {
+                                       printk(KERN_DEBUG
+                                           "%s: %s (%d/%d) offs(%d) len(%d) ",
+                                           hw->name, __func__,
+                                           k, num_isoc_packets-1,
+                                           urb->iso_frame_desc[k].offset,
+                                           urb->iso_frame_desc[k].length);
+
+                                       for (i = urb->iso_frame_desc[k].offset;
+                                            i < (urb->iso_frame_desc[k].offset
+                                            + urb->iso_frame_desc[k].length);
+                                            i++)
+                                               printk("%x ",
+                                                   context_iso_urb->buffer[i]);
+
+                                       printk(" skb->len(%i) tx-idx(%d)\n",
+                                           tx_skb->len, *tx_idx);
+                               }
+
+                               tx_offset += (current_len + 1);
+                       } else {
+                               urb->iso_frame_desc[k].offset = tx_offset++;
+                               urb->iso_frame_desc[k].length = 1;
+                               /* we lower data margin every msec */
+                               fifo->bit_line -= sink;
+                               if (fifo->bit_line < BITLINE_INF)
+                                       fifo->bit_line = BITLINE_INF;
+                       }
+
+                       if (frame_complete) {
+                               frame_complete = 0;
+
+                               if (debug & DBG_HFC_FIFO_VERBOSE) {
+                                       printk(KERN_DEBUG  "%s: %s: "
+                                           "fifon(%i) new TX len(%i): ",
+                                           hw->name, __func__,
+                                           fifon, tx_skb->len);
+                                       i = 0;
+                                       while (i < tx_skb->len)
+                                               printk("%02x ",
+                                                   tx_skb->data[i++]);
+                                       printk("\n");
+                               }
+
+                               dev_kfree_skb(tx_skb);
+                               tx_skb = NULL;
+                               if (fifo->dch && get_next_dframe(fifo->dch))
+                                       tx_skb = fifo->dch->tx_skb;
+                               else if (fifo->bch &&
+                                   get_next_bframe(fifo->bch)) {
+                                       if (test_bit(FLG_TRANSPARENT,
+                                           &fifo->bch->Flags))
+                                               confirm_Bsend(fifo->bch);
+                                       tx_skb = fifo->bch->tx_skb;
+                               }
+                       }
+               }
+               errcode = usb_submit_urb(urb, GFP_ATOMIC);
+               if (errcode < 0) {
+                       if (debug & DEBUG_HW)
+                               printk(KERN_DEBUG
+                                   "%s: %s: error submitting ISO URB: %d\n",
+                                   hw->name, __func__, errcode);
+               }
+
+               /*
+                * Abuse the D-channel TX ISO completion to trigger NT mode
+                * state changes; tx_iso_complete is assumed to be called
+                * every fifo->intervall ms.
+                */
+               if ((fifon == HFCUSB_D_TX) && (hw->protocol == ISDN_P_NT_S0)
+                   && (hw->timers & NT_ACTIVATION_TIMER)) {
+                       if ((--hw->nt_timer) < 0)
+                               schedule_event(&hw->dch, FLG_PHCHANGE);
+               }
+
+       } else {
+               if (status && (debug & DBG_HFC_URB_ERROR))
+                       printk(KERN_DEBUG "%s: %s: urb->status %s (%i) "
+                           "fifonum=%d\n",
+                           hw->name, __func__,
+                           symbolic(urb_errlist, status), status, fifon);
+       }
+       spin_unlock(&hw->lock);
+}
+
+/*
+ * allocates URBs and starts the isochronous transfer with two pending
+ * URBs to avoid gaps in the transfer chain
+ */
+static int
+start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
+                usb_complete_t complete, int packet_size)
+{
+       struct hfcsusb *hw = fifo->hw;
+       int i, k, errcode;
+
+       if (debug)
+               printk(KERN_DEBUG "%s: %s: fifo %i\n",
+                   hw->name, __func__, fifo->fifonum);
+
+       /* allocate Memory for Iso out Urbs */
+       for (i = 0; i < 2; i++) {
+               if (!(fifo->iso[i].urb)) {
+                       fifo->iso[i].urb =
+                           usb_alloc_urb(num_packets_per_urb, GFP_KERNEL);
+                       if (!(fifo->iso[i].urb)) {
+                               printk(KERN_DEBUG
+                                   "%s: %s: alloc urb for fifo %i failed\n",
+                                   hw->name, __func__, fifo->fifonum);
+                               continue;
+                       }
+                       fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
+                       fifo->iso[i].indx = i;
+
+                       /* Init the first iso */
+                       if (ISO_BUFFER_SIZE >=
+                           (fifo->usb_packet_maxlen *
+                            num_packets_per_urb)) {
+                               fill_isoc_urb(fifo->iso[i].urb,
+                                   fifo->hw->dev, fifo->pipe,
+                                   fifo->iso[i].buffer,
+                                   num_packets_per_urb,
+                                   fifo->usb_packet_maxlen,
+                                   fifo->intervall, complete,
+                                   &fifo->iso[i]);
+                               memset(fifo->iso[i].buffer, 0,
+                                      sizeof(fifo->iso[i].buffer));
+
+                               for (k = 0; k < num_packets_per_urb; k++) {
+                                       fifo->iso[i].urb->
+                                           iso_frame_desc[k].offset =
+                                           k * packet_size;
+                                       fifo->iso[i].urb->
+                                           iso_frame_desc[k].length =
+                                           packet_size;
+                               }
+                       } else {
+                               printk(KERN_DEBUG
+                                   "%s: %s: ISO buffer size too small!\n",
+                                   hw->name, __func__);
+                       }
+               }
+               fifo->bit_line = BITLINE_INF;
+
+               errcode = usb_submit_urb(fifo->iso[i].urb, GFP_KERNEL);
+               fifo->active = (errcode >= 0) ? 1 : 0;
+               fifo->stop_gracefull = 0;
+               if (errcode < 0) {
+                       printk(KERN_DEBUG "%s: %s: %s URB nr:%d\n",
+                           hw->name, __func__,
+                           symbolic(urb_errlist, errcode), i);
+               }
+       }
+       return fifo->active;
+}
+
+static void
+stop_iso_gracefull(struct usb_fifo *fifo)
+{
+       struct hfcsusb *hw = fifo->hw;
+       int i, timeout;
+       u_long flags;
+
+       for (i = 0; i < 2; i++) {
+               spin_lock_irqsave(&hw->lock, flags);
+               if (debug)
+                       printk(KERN_DEBUG "%s: %s for fifo %i.%i\n",
+                              hw->name, __func__, fifo->fifonum, i);
+               fifo->stop_gracefull = 1;
+               spin_unlock_irqrestore(&hw->lock, flags);
+       }
+
+       for (i = 0; i < 2; i++) {
+               timeout = 3;
+               while (fifo->stop_gracefull && timeout--)
+                       schedule_timeout_interruptible(msecs_to_jiffies(16));
+               if (debug && fifo->stop_gracefull)
+                       printk(KERN_DEBUG "%s: ERROR %s for fifo %i.%i\n",
+                               hw->name, __func__, fifo->fifonum, i);
+       }
+}
+
+static void
+stop_int_gracefull(struct usb_fifo *fifo)
+{
+       struct hfcsusb *hw = fifo->hw;
+       int timeout;
+       u_long flags;
+
+       spin_lock_irqsave(&hw->lock, flags);
+       if (debug)
+               printk(KERN_DEBUG "%s: %s for fifo %i\n",
+                      hw->name, __func__, fifo->fifonum);
+       fifo->stop_gracefull = 1;
+       spin_unlock_irqrestore(&hw->lock, flags);
+
+       timeout = 3;
+       while (fifo->stop_gracefull && timeout--)
+               schedule_timeout_interruptible(msecs_to_jiffies(3));
+       if (debug && fifo->stop_gracefull)
+               printk(KERN_DEBUG "%s: ERROR %s for fifo %i\n",
+                      hw->name, __func__, fifo->fifonum);
+}
+
+/* start the interrupt transfer for the given fifo */
+static void
+start_int_fifo(struct usb_fifo *fifo)
+{
+       struct hfcsusb *hw = fifo->hw;
+       int errcode;
+
+       if (debug)
+               printk(KERN_DEBUG "%s: %s: INT IN fifo:%d\n",
+                   hw->name, __func__, fifo->fifonum);
+
+       if (!fifo->urb) {
+               fifo->urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!fifo->urb)
+                       return;
+       }
+       usb_fill_int_urb(fifo->urb, fifo->hw->dev, fifo->pipe,
+           fifo->buffer, fifo->usb_packet_maxlen,
+           (usb_complete_t)rx_int_complete, fifo, fifo->intervall);
+       fifo->active = 1;
+       fifo->stop_gracefull = 0;
+       errcode = usb_submit_urb(fifo->urb, GFP_KERNEL);
+       if (errcode) {
+               printk(KERN_DEBUG "%s: %s: submit URB: status:%i\n",
+                   hw->name, __func__, errcode);
+               fifo->active = 0;
+       }
+}
+
+static void
+setPortMode(struct hfcsusb *hw)
+{
+       if (debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: %s %s\n", hw->name, __func__,
+                  (hw->protocol == ISDN_P_TE_S0) ? "TE" : "NT");
+
+       if (hw->protocol == ISDN_P_TE_S0) {
+               write_reg(hw, HFCUSB_SCTRL, 0x40);
+               write_reg(hw, HFCUSB_SCTRL_E, 0x00);
+               write_reg(hw, HFCUSB_CLKDEL, CLKDEL_TE);
+               write_reg(hw, HFCUSB_STATES, 3 | 0x10);
+               write_reg(hw, HFCUSB_STATES, 3);
+       } else {
+               write_reg(hw, HFCUSB_SCTRL, 0x44);
+               write_reg(hw, HFCUSB_SCTRL_E, 0x09);
+               write_reg(hw, HFCUSB_CLKDEL, CLKDEL_NT);
+               write_reg(hw, HFCUSB_STATES, 1 | 0x10);
+               write_reg(hw, HFCUSB_STATES, 1);
+       }
+}
+
+static void
+reset_hfcsusb(struct hfcsusb *hw)
+{
+       struct usb_fifo *fifo;
+       int i;
+
+       if (debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       /* do Chip reset */
+       write_reg(hw, HFCUSB_CIRM, 8);
+
+       /* aux = output, reset off */
+       write_reg(hw, HFCUSB_CIRM, 0x10);
+
+       /* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
+       write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) |
+           ((hw->packet_size / 8) << 4));
+
+       /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
+       write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size);
+
+       /* enable PCM/GCI master mode */
+       write_reg(hw, HFCUSB_MST_MODE1, 0);     /* set default values */
+       write_reg(hw, HFCUSB_MST_MODE0, 1);     /* enable master mode */
+
+       /* init the fifos */
+       write_reg(hw, HFCUSB_F_THRES,
+           (HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4));
+
+       fifo = hw->fifos;
+       for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
+               write_reg(hw, HFCUSB_FIFO, i);  /* select the desired fifo */
+               fifo[i].max_size =
+                   (i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN;
+               fifo[i].last_urblen = 0;
+
+               /* set 2 bit for D- & E-channel */
+               write_reg(hw, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2));
+
+               /* enable all fifos */
+               if (i == HFCUSB_D_TX)
+                       write_reg(hw, HFCUSB_CON_HDLC,
+                           (hw->protocol == ISDN_P_NT_S0) ? 0x08 : 0x09);
+               else
+                       write_reg(hw, HFCUSB_CON_HDLC, 0x08);
+               write_reg(hw, HFCUSB_INC_RES_F, 2); /* reset the fifo */
+       }
+
+       write_reg(hw, HFCUSB_SCTRL_R, 0); /* disable both B receivers */
+       handle_led(hw, LED_POWER_ON);
+}
+
+/* start USB data pipes depending on the device's endpoint configuration */
+static void
+hfcsusb_start_endpoint(struct hfcsusb *hw, int channel)
+{
+       /* quick check if endpoint already running */
+       if ((channel == HFC_CHAN_D) && (hw->fifos[HFCUSB_D_RX].active))
+               return;
+       if ((channel == HFC_CHAN_B1) && (hw->fifos[HFCUSB_B1_RX].active))
+               return;
+       if ((channel == HFC_CHAN_B2) && (hw->fifos[HFCUSB_B2_RX].active))
+               return;
+       if ((channel == HFC_CHAN_E) && (hw->fifos[HFCUSB_PCM_RX].active))
+               return;
+
+       /* start rx endpoints using USB INT IN method */
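+       /* (channel*2 + 1 selects the channel's RX fifo: B1/B2/D/E map to
+        *  fifos 1/3/5/7, see the HFCUSB_*_RX indexes) */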
+       if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
+               start_int_fifo(hw->fifos + channel*2 + 1);
+
+       /* start rx endpoints using USB ISO IN method */
+       if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) {
+               switch (channel) {
+               case HFC_CHAN_D:
+                       start_isoc_chain(hw->fifos + HFCUSB_D_RX,
+                               ISOC_PACKETS_D,
+                               (usb_complete_t)rx_iso_complete,
+                               16);
+                       break;
+               case HFC_CHAN_E:
+                       start_isoc_chain(hw->fifos + HFCUSB_PCM_RX,
+                               ISOC_PACKETS_D,
+                               (usb_complete_t)rx_iso_complete,
+                               16);
+                       break;
+               case HFC_CHAN_B1:
+                       start_isoc_chain(hw->fifos + HFCUSB_B1_RX,
+                               ISOC_PACKETS_B,
+                               (usb_complete_t)rx_iso_complete,
+                               16);
+                       break;
+               case HFC_CHAN_B2:
+                       start_isoc_chain(hw->fifos + HFCUSB_B2_RX,
+                               ISOC_PACKETS_B,
+                               (usb_complete_t)rx_iso_complete,
+                               16);
+                       break;
+               }
+       }
+
+       /* start tx endpoints using USB ISO OUT method */
+       switch (channel) {
+       case HFC_CHAN_D:
+               start_isoc_chain(hw->fifos + HFCUSB_D_TX,
+                       ISOC_PACKETS_B,
+                       (usb_complete_t)tx_iso_complete, 1);
+               break;
+       case HFC_CHAN_B1:
+               start_isoc_chain(hw->fifos + HFCUSB_B1_TX,
+                       ISOC_PACKETS_D,
+                       (usb_complete_t)tx_iso_complete, 1);
+               break;
+       case HFC_CHAN_B2:
+               start_isoc_chain(hw->fifos + HFCUSB_B2_TX,
+                       ISOC_PACKETS_B,
+                       (usb_complete_t)tx_iso_complete, 1);
+               break;
+       }
+}
+
+/* stop USB data pipes depending on the device's endpoint configuration */
+static void
+hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
+{
+       /* quick check if endpoint currently running */
+       if ((channel == HFC_CHAN_D) && (!hw->fifos[HFCUSB_D_RX].active))
+               return;
+       if ((channel == HFC_CHAN_B1) && (!hw->fifos[HFCUSB_B1_RX].active))
+               return;
+       if ((channel == HFC_CHAN_B2) && (!hw->fifos[HFCUSB_B2_RX].active))
+               return;
+       if ((channel == HFC_CHAN_E) && (!hw->fifos[HFCUSB_PCM_RX].active))
+               return;
+
+       /* rx endpoints using USB INT IN method */
+       if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
+               stop_int_gracefull(hw->fifos + channel*2 + 1);
+
+       /* rx endpoints using USB ISO IN method */
+       if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO)
+               stop_iso_gracefull(hw->fifos + channel*2 + 1);
+
+       /* tx endpoints using USB ISO OUT method */
+       if (channel != HFC_CHAN_E)
+               stop_iso_gracefull(hw->fifos + channel*2);
+}
+
+
+/* Hardware Initialization */
+int
+setup_hfcsusb(struct hfcsusb *hw)
+{
+       int err;
+       u_char b;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       /* check the chip id */
+       if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+               printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
+                   hw->name, __func__);
+               return 1;
+       }
+       if (b != HFCUSB_CHIPID) {
+               printk(KERN_DEBUG "%s: %s: Invalid chip id 0x%02x\n",
+                   hw->name, __func__, b);
+               return 1;
+       }
+
+       /* first set the needed config, interface and alternate */
+       err = usb_set_interface(hw->dev, hw->if_used, hw->alt_used);
+
+       hw->led_state = 0;
+
+       /* init the background machinery for control requests */
+       hw->ctrl_read.bRequestType = 0xc0;
+       hw->ctrl_read.bRequest = 1;
+       hw->ctrl_read.wLength = cpu_to_le16(1);
+       hw->ctrl_write.bRequestType = 0x40;
+       hw->ctrl_write.bRequest = 0;
+       hw->ctrl_write.wLength = 0;
+       usb_fill_control_urb(hw->ctrl_urb, hw->dev, hw->ctrl_out_pipe,
+           (u_char *)&hw->ctrl_write, NULL, 0,
+           (usb_complete_t)ctrl_complete, hw);
+
+       reset_hfcsusb(hw);
+       return 0;
+}
+
+static void
+release_hw(struct hfcsusb *hw)
+{
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       /*
+        * stop all endpoints gracefully
+        * TODO: mISDN_core should generate CLOSE_CHANNEL
+        *       signals after calling mISDN_unregister_device()
+        */
+       hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
+       hfcsusb_stop_endpoint(hw, HFC_CHAN_B1);
+       hfcsusb_stop_endpoint(hw, HFC_CHAN_B2);
+       if (hw->fifos[HFCUSB_PCM_RX].pipe)
+               hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
+       if (hw->protocol == ISDN_P_TE_S0)
+               l1_event(hw->dch.l1, CLOSE_CHANNEL);
+
+       mISDN_unregister_device(&hw->dch.dev);
+       mISDN_freebchannel(&hw->bch[1]);
+       mISDN_freebchannel(&hw->bch[0]);
+       mISDN_freedchannel(&hw->dch);
+
+       if (hw->ctrl_urb) {
+               usb_kill_urb(hw->ctrl_urb);
+               usb_free_urb(hw->ctrl_urb);
+               hw->ctrl_urb = NULL;
+       }
+
+       if (hw->intf)
+               usb_set_intfdata(hw->intf, NULL);
+       list_del(&hw->list);
+       kfree(hw);
+       hw = NULL;
+}
+
+static void
+deactivate_bchannel(struct bchannel *bch)
+{
+       struct hfcsusb *hw = bch->hw;
+       u_long flags;
+
+       if (bch->debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: %s: bch->nr(%i)\n",
+                   hw->name, __func__, bch->nr);
+
+       spin_lock_irqsave(&hw->lock, flags);
+       if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
+               dev_kfree_skb(bch->next_skb);
+               bch->next_skb = NULL;
+       }
+       if (bch->tx_skb) {
+               dev_kfree_skb(bch->tx_skb);
+               bch->tx_skb = NULL;
+       }
+       bch->tx_idx = 0;
+       if (bch->rx_skb) {
+               dev_kfree_skb(bch->rx_skb);
+               bch->rx_skb = NULL;
+       }
+       clear_bit(FLG_ACTIVE, &bch->Flags);
+       clear_bit(FLG_TX_BUSY, &bch->Flags);
+       spin_unlock_irqrestore(&hw->lock, flags);
+       hfcsusb_setup_bch(bch, ISDN_P_NONE);
+       hfcsusb_stop_endpoint(hw, bch->nr);
+}
+
+/*
+ * Layer 1 B-channel hardware access
+ */
+static int
+hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+       struct bchannel *bch = container_of(ch, struct bchannel, ch);
+       int             ret = -EINVAL;
+
+       if (bch->debug & DEBUG_HW)
+               printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
+
+       switch (cmd) {
+       case HW_TESTRX_RAW:
+       case HW_TESTRX_HDLC:
+       case HW_TESTRX_OFF:
+               ret = -EINVAL;
+               break;
+
+       case CLOSE_CHANNEL:
+               test_and_clear_bit(FLG_OPEN, &bch->Flags);
+               if (test_bit(FLG_ACTIVE, &bch->Flags))
+                       deactivate_bchannel(bch);
+               ch->protocol = ISDN_P_NONE;
+               ch->peer = NULL;
+               module_put(THIS_MODULE);
+               ret = 0;
+               break;
+       case CONTROL_CHANNEL:
+               ret = channel_bctrl(bch, arg);
+               break;
+       default:
+               printk(KERN_WARNING "%s: unknown prim(%x)\n",
+                       __func__, cmd);
+       }
+       return ret;
+}
+
+static int
+setup_instance(struct hfcsusb *hw, struct device *parent)
+{
+       u_long  flags;
+       int     err, i;
+
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+       spin_lock_init(&hw->ctrl_lock);
+       spin_lock_init(&hw->lock);
+
+       mISDN_initdchannel(&hw->dch, MAX_DFRAME_LEN_L1, ph_state);
+       hw->dch.debug = debug & 0xFFFF;
+       hw->dch.hw = hw;
+       hw->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
+       hw->dch.dev.D.send = hfcusb_l2l1D;
+       hw->dch.dev.D.ctrl = hfc_dctrl;
+
+       /* enable E-Channel logging */
+       if (hw->fifos[HFCUSB_PCM_RX].pipe)
+               mISDN_initdchannel(&hw->ech, MAX_DFRAME_LEN_L1, NULL);
+
+       hw->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+           (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+       hw->dch.dev.nrbchan = 2;
+       for (i = 0; i < 2; i++) {
+               hw->bch[i].nr = i + 1;
+               set_channelmap(i + 1, hw->dch.dev.channelmap);
+               hw->bch[i].debug = debug;
+               mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM);
+               hw->bch[i].hw = hw;
+               hw->bch[i].ch.send = hfcusb_l2l1B;
+               hw->bch[i].ch.ctrl = hfc_bctrl;
+               hw->bch[i].ch.nr = i + 1;
+               list_add(&hw->bch[i].ch.list, &hw->dch.dev.bchannels);
+       }
+
+       hw->fifos[HFCUSB_B1_TX].bch = &hw->bch[0];
+       hw->fifos[HFCUSB_B1_RX].bch = &hw->bch[0];
+       hw->fifos[HFCUSB_B2_TX].bch = &hw->bch[1];
+       hw->fifos[HFCUSB_B2_RX].bch = &hw->bch[1];
+       hw->fifos[HFCUSB_D_TX].dch = &hw->dch;
+       hw->fifos[HFCUSB_D_RX].dch = &hw->dch;
+       hw->fifos[HFCUSB_PCM_RX].ech = &hw->ech;
+       hw->fifos[HFCUSB_PCM_TX].ech = &hw->ech;
+
+       err = setup_hfcsusb(hw);
+       if (err)
+               goto out;
+
+       snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s.%d", DRIVER_NAME,
+           hfcsusb_cnt + 1);
+       printk(KERN_INFO "%s: registered as '%s'\n",
+           DRIVER_NAME, hw->name);
+
+       err = mISDN_register_device(&hw->dch.dev, parent, hw->name);
+       if (err)
+               goto out;
+
+       hfcsusb_cnt++;
+       write_lock_irqsave(&HFClock, flags);
+       list_add_tail(&hw->list, &HFClist);
+       write_unlock_irqrestore(&HFClock, flags);
+       return 0;
+
+out:
+       mISDN_freebchannel(&hw->bch[1]);
+       mISDN_freebchannel(&hw->bch[0]);
+       mISDN_freedchannel(&hw->dch);
+       kfree(hw);
+       return err;
+}
+
+static int
+hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+       struct hfcsusb                  *hw;
+       struct usb_device               *dev = interface_to_usbdev(intf);
+       struct usb_host_interface       *iface = intf->cur_altsetting;
+       struct usb_host_interface       *iface_used = NULL;
+       struct usb_host_endpoint        *ep;
+       struct hfcsusb_vdata            *driver_info;
+       int ifnum = iface->desc.bInterfaceNumber, i, idx, alt_idx,
+           probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found,
+           ep_addr, cmptbl[16], small_match, iso_packet_size, packet_size,
+           alt_used = 0;
+
+       vend_idx = 0xffff;
+       for (i = 0; hfcsusb_idtab[i].idVendor; i++) {
+               if ((le16_to_cpu(dev->descriptor.idVendor)
+                      == hfcsusb_idtab[i].idVendor) &&
+                   (le16_to_cpu(dev->descriptor.idProduct)
+                      == hfcsusb_idtab[i].idProduct)) {
+                       vend_idx = i;
+                       continue;
+               }
+       }
+
+       printk(KERN_DEBUG
+           "%s: interface(%d) actalt(%d) minor(%d) vend_idx(%d)\n",
+           __func__, ifnum, iface->desc.bAlternateSetting,
+           intf->minor, vend_idx);
+
+       if (vend_idx == 0xffff) {
+               printk(KERN_WARNING
+                   "%s: no valid vendor found in USB descriptor\n",
+                   __func__);
+               return -EIO;
+       }
+       /* if vendor and product ID is OK, start probing alternate settings */
+       alt_idx = 0;
+       small_match = -1;
+
+       /* default settings */
+       iso_packet_size = 16;
+       packet_size = 64;
+
+       while (alt_idx < intf->num_altsetting) {
+               iface = intf->altsetting + alt_idx;
+               probe_alt_setting = iface->desc.bAlternateSetting;
+               cfg_used = 0;
+
+               while (validconf[cfg_used][0]) {
+                       cfg_found = 1;
+                       vcf = validconf[cfg_used];
+                       ep = iface->endpoint;
+                       memcpy(cmptbl, vcf, 16 * sizeof(int));
+
+                       /* check for all endpoints in this alternate setting */
+                       for (i = 0; i < iface->desc.bNumEndpoints; i++) {
+                               ep_addr = ep->desc.bEndpointAddress;
+
+                               /* get endpoint base */
+                               idx = ((ep_addr & 0x7f) - 1) * 2;
+                               if (ep_addr & 0x80)
+                                       idx++;
+                               attr = ep->desc.bmAttributes;
+
+                               if (cmptbl[idx] != EP_NOP) {
+                                       if (cmptbl[idx] == EP_NUL)
+                                               cfg_found = 0;
+                                       if (attr == USB_ENDPOINT_XFER_INT
+                                               && cmptbl[idx] == EP_INT)
+                                               cmptbl[idx] = EP_NUL;
+                                       if (attr == USB_ENDPOINT_XFER_BULK
+                                               && cmptbl[idx] == EP_BLK)
+                                               cmptbl[idx] = EP_NUL;
+                                       if (attr == USB_ENDPOINT_XFER_ISOC
+                                               && cmptbl[idx] == EP_ISO)
+                                               cmptbl[idx] = EP_NUL;
+
+                                       if (attr == USB_ENDPOINT_XFER_INT &&
+                                               ep->desc.bInterval < vcf[17]) {
+                                               cfg_found = 0;
+                                       }
+                               }
+                               ep++;
+                       }
+
+                       for (i = 0; i < 16; i++)
+                               if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL)
+                                       cfg_found = 0;
+
+                       if (cfg_found) {
+                               if (small_match < cfg_used) {
+                                       small_match = cfg_used;
+                                       alt_used = probe_alt_setting;
+                                       iface_used = iface;
+                               }
+                       }
+                       cfg_used++;
+               }
+               alt_idx++;
+       }       /* (alt_idx < intf->num_altsetting) */
+
+       /* no valid USB TA endpoint configuration found */
+       if (small_match == -1)
+               return -EIO;
+
+       iface = iface_used;
+       hw = kzalloc(sizeof(struct hfcsusb), GFP_KERNEL);
+       if (!hw)
+               return -ENOMEM; /* got no mem */
+       snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s", DRIVER_NAME);
+
+       ep = iface->endpoint;
+       vcf = validconf[small_match];
+
+       for (i = 0; i < iface->desc.bNumEndpoints; i++) {
+               struct usb_fifo *f;
+
+               ep_addr = ep->desc.bEndpointAddress;
+               /* get endpoint base */
+               idx = ((ep_addr & 0x7f) - 1) * 2;
+               if (ep_addr & 0x80)
+                       idx++;
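+               /*
+                * idx selects the usb_fifo slot: endpoint number N maps to
+                * fifo (N-1)*2 for OUT (TX) endpoints and (N-1)*2 + 1 for
+                * IN (RX) endpoints, matching the HFCUSB_*_TX/RX indexes.
+                */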
+               f = &hw->fifos[idx & 7];
+
+               /* init Endpoints */
+               if (vcf[idx] == EP_NOP || vcf[idx] == EP_NUL) {
+                       ep++;
+                       continue;
+               }
+               switch (ep->desc.bmAttributes) {
+               case USB_ENDPOINT_XFER_INT:
+                       f->pipe = usb_rcvintpipe(dev,
+                               ep->desc.bEndpointAddress);
+                       f->usb_transfer_mode = USB_INT;
+                       packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+                       break;
+               case USB_ENDPOINT_XFER_BULK:
+                       if (ep_addr & 0x80)
+                               f->pipe = usb_rcvbulkpipe(dev,
+                                       ep->desc.bEndpointAddress);
+                       else
+                               f->pipe = usb_sndbulkpipe(dev,
+                                       ep->desc.bEndpointAddress);
+                       f->usb_transfer_mode = USB_BULK;
+                       packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+                       break;
+               case USB_ENDPOINT_XFER_ISOC:
+                       if (ep_addr & 0x80)
+                               f->pipe = usb_rcvisocpipe(dev,
+                                       ep->desc.bEndpointAddress);
+                       else
+                               f->pipe = usb_sndisocpipe(dev,
+                                       ep->desc.bEndpointAddress);
+                       f->usb_transfer_mode = USB_ISOC;
+                       iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+                       break;
+               default:
+                       f->pipe = 0;
+               }
+
+               if (f->pipe) {
+                       f->fifonum = idx & 7;
+                       f->hw = hw;
+                       f->usb_packet_maxlen =
+                           le16_to_cpu(ep->desc.wMaxPacketSize);
+                       f->intervall = ep->desc.bInterval;
+               }
+               ep++;
+       }
+       hw->dev = dev; /* save device */
+       hw->if_used = ifnum; /* save used interface */
+       hw->alt_used = alt_used; /* and alternate config */
+       hw->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */
+       hw->cfg_used = vcf[16]; /* store used config */
+       hw->vend_idx = vend_idx; /* store found vendor */
+       hw->packet_size = packet_size;
+       hw->iso_packet_size = iso_packet_size;
+
+       /* create the control pipes needed for register access */
+       hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
+       hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
+       hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+       driver_info =
+               (struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info;
+       printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
+           hw->name, __func__, driver_info->vend_name,
+           conf_str[small_match], ifnum, alt_used);
+
+       if (setup_instance(hw, dev->dev.parent))
+               return -EIO;
+
+       hw->intf = intf;
+       usb_set_intfdata(hw->intf, hw);
+       return 0;
+}
+
+/* function called when an active device is removed */
+static void
+hfcsusb_disconnect(struct usb_interface *intf)
+{
+       struct hfcsusb *hw = usb_get_intfdata(intf);
+       struct hfcsusb *next;
+       int cnt = 0;
+
+       printk(KERN_INFO "%s: device disconnected\n", hw->name);
+
+       handle_led(hw, LED_POWER_OFF);
+       release_hw(hw);
+
+       list_for_each_entry_safe(hw, next, &HFClist, list)
+               cnt++;
+       if (!cnt)
+               hfcsusb_cnt = 0;
+
+       usb_set_intfdata(intf, NULL);
+}
+
+static struct usb_driver hfcsusb_drv = {
+       .name = DRIVER_NAME,
+       .id_table = hfcsusb_idtab,
+       .probe = hfcsusb_probe,
+       .disconnect = hfcsusb_disconnect,
+};
+
+static int __init
+hfcsusb_init(void)
+{
+       printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n",
+           hfcsusb_rev, debug, poll);
+
+       if (usb_register(&hfcsusb_drv)) {
+               printk(KERN_INFO DRIVER_NAME
+                   ": unable to register hfcsusb module with the USB stack\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static void __exit
+hfcsusb_cleanup(void)
+{
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_INFO DRIVER_NAME ": %s\n", __func__);
+
+       /* unregister Hardware */
+       usb_deregister(&hfcsusb_drv);   /* release our driver */
+}
+
+module_init(hfcsusb_init);
+module_exit(hfcsusb_cleanup);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
new file mode 100644 (file)
index 0000000..098486b
--- /dev/null
@@ -0,0 +1,418 @@
+/*
+ * hfcsusb.h, HFC-S USB mISDN driver
+ */
+
+#ifndef __HFCSUSB_H__
+#define __HFCSUSB_H__
+
+
+#define DRIVER_NAME "HFC-S_USB"
+
+#define DBG_HFC_CALL_TRACE     0x00010000
+#define DBG_HFC_FIFO_VERBOSE   0x00020000
+#define DBG_HFC_USB_VERBOSE    0x00100000
+#define DBG_HFC_URB_INFO       0x00200000
+#define DBG_HFC_URB_ERROR      0x00400000
+
+#define DEFAULT_TRANSP_BURST_SZ 128
+
+#define HFC_CTRL_TIMEOUT       20      /* 20ms timeout writing/reading regs */
+#define CLKDEL_TE              0x0f    /* CLKDEL in TE mode */
+#define CLKDEL_NT              0x6c    /* CLKDEL in NT mode */
+
+/* hfcsusb Layer1 commands */
+#define HFC_L1_ACTIVATE_TE             1
+#define HFC_L1_ACTIVATE_NT             2
+#define HFC_L1_DEACTIVATE_NT           3
+#define HFC_L1_FORCE_DEACTIVATE_TE     4
+
+/* cmd FLAGS in HFCUSB_STATES register */
+#define HFCUSB_LOAD_STATE      0x10
+#define HFCUSB_ACTIVATE                0x20
+#define HFCUSB_DO_ACTION       0x40
+#define HFCUSB_NT_G2_G3                0x80
+
+/* timers */
+#define NT_ACTIVATION_TIMER    0x01    /* enables NT mode activation Timer */
+#define NT_T1_COUNT            10
+
+#define MAX_BCH_SIZE           2048    /* allowed B-channel packet size */
+
+#define HFCUSB_RX_THRESHOLD    64      /* threshold for fifo report bit rx */
+#define HFCUSB_TX_THRESHOLD    96      /* threshold for fifo report bit tx */
+
+#define HFCUSB_CHIP_ID         0x16    /* Chip ID register index */
+#define HFCUSB_CIRM            0x00    /* cirm register index */
+#define HFCUSB_USB_SIZE                0x07    /* int length register */
+#define HFCUSB_USB_SIZE_I      0x06    /* iso length register */
+#define HFCUSB_F_CROSS         0x0b    /* bit order register */
+#define HFCUSB_CLKDEL          0x37    /* bit delay register */
+#define HFCUSB_CON_HDLC                0xfa    /* channel connect register */
+#define HFCUSB_HDLC_PAR                0xfb
+#define HFCUSB_SCTRL           0x31    /* S-bus control register (tx) */
+#define HFCUSB_SCTRL_E         0x32    /* same for E and special funcs */
+#define HFCUSB_SCTRL_R         0x33    /* S-bus control register (rx) */
+#define HFCUSB_F_THRES         0x0c    /* threshold register */
+#define HFCUSB_FIFO            0x0f    /* fifo select register */
+#define HFCUSB_F_USAGE         0x1a    /* fifo usage register */
+#define HFCUSB_MST_MODE0       0x14
+#define HFCUSB_MST_MODE1       0x15
+#define HFCUSB_P_DATA          0x1f
+#define HFCUSB_INC_RES_F       0x0e
+#define HFCUSB_B1_SSL          0x20
+#define HFCUSB_B2_SSL          0x21
+#define HFCUSB_B1_RSL          0x24
+#define HFCUSB_B2_RSL          0x25
+#define HFCUSB_STATES          0x30
+
+
+#define HFCUSB_CHIPID          0x40    /* ID value of HFC-S USB */
+
+/* fifo registers */
+#define HFCUSB_NUM_FIFOS       8       /* maximum number of fifos */
+#define HFCUSB_B1_TX           0       /* index for B1 transmit bulk/int */
+#define HFCUSB_B1_RX           1       /* index for B1 receive bulk/int */
+#define HFCUSB_B2_TX           2
+#define HFCUSB_B2_RX           3
+#define HFCUSB_D_TX            4
+#define HFCUSB_D_RX            5
+#define HFCUSB_PCM_TX          6
+#define HFCUSB_PCM_RX          7
+
+
+#define USB_INT                0
+#define USB_BULK       1
+#define USB_ISOC       2
+
+#define ISOC_PACKETS_D 8
+#define ISOC_PACKETS_B 8
+#define ISO_BUFFER_SIZE        128
+
+/* defines how many ISO packets are handled in one URB */
+static int iso_packets[8] =
+    { ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B,
+       ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D
+};
+
+
+/* Fifo flow Control for TX ISO */
+#define SINK_MAX       68
+#define SINK_MIN       48
+#define SINK_DMIN      12
+#define SINK_DMAX      18
+#define BITLINE_INF    (-96*8)
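+/*
+ * bit_line tracks the TX ISO fifo fill level in bit-times: HDLC framing
+ * adds 32 bits per completed frame (2 flag bytes plus the 16-bit CRC),
+ * every millisecond 'sink' bit-times are drained again, and BITLINE_INF
+ * is the lower bound used for an idle line.
+ */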
+
+/* HFC-S USB register access by control URBs */
+#define write_reg_atomic(a, b, c) \
+       usb_control_msg((a)->dev, (a)->ctrl_out_pipe, 0, 0x40, (c), (b), \
+               0, 0, HFC_CTRL_TIMEOUT)
+#define read_reg_atomic(a, b, c) \
+       usb_control_msg((a)->dev, (a)->ctrl_in_pipe, 1, 0xC0, 0, (b), (c), \
+               1, HFC_CTRL_TIMEOUT)
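+/*
+ * Illustration (not part of the driver): a register write such as
+ * write_reg_atomic(hw, HFCUSB_CIRM, 8) issues a vendor OUT control
+ * request (bRequestType 0x40, bRequest 0) with wValue = 8 and
+ * wIndex = HFCUSB_CIRM; register address and value travel in the setup
+ * packet, so no data stage is needed.
+ */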
+#define HFC_CTRL_BUFSIZE 64
+
+struct ctrl_buf {
+       __u8 hfcs_reg;          /* register number */
+       __u8 reg_val;           /* value to be written (or read) */
+};
+
+/*
+ * URB error codes
+ * Used to represent a list of values and their respective symbolic names
+ */
+struct hfcusb_symbolic_list {
+       const int num;
+       const char *name;
+};
+
+static struct hfcusb_symbolic_list urb_errlist[] = {
+       {-ENOMEM, "No memory for allocation of internal structures"},
+       {-ENOSPC, "The host controller's bandwidth is already consumed"},
+       {-ENOENT, "URB was canceled by unlink_urb"},
+       {-EXDEV, "ISO transfer only partially completed"},
+       {-EAGAIN, "Too much scheduled for the future"},
+       {-ENXIO, "URB already queued"},
+       {-EFBIG, "Too many ISO frames requested"},
+       {-ENOSR, "Buffer error (overrun)"},
+       {-EPIPE, "Specified endpoint is stalled (device not responding)"},
+       {-EOVERFLOW, "Babble (bad cable?)"},
+       {-EPROTO, "Bit-stuff error (bad cable?)"},
+       {-EILSEQ, "CRC/Timeout"},
+       {-ETIMEDOUT, "NAK (device does not respond)"},
+       {-ESHUTDOWN, "Device unplugged"},
+       {-1, NULL}
+};
+
+static inline const char *
+symbolic(struct hfcusb_symbolic_list list[], const int num)
+{
+       int i;
+       for (i = 0; list[i].name != NULL; i++)
+               if (list[i].num == num)
+                       return list[i].name;
+       return "<unknown USB error>";
+}
+
+/* The USB descriptor needs to contain one of the following endpoint combinations: */
+#define CNF_4INT3ISO   1       /* 4 INT IN, 3 ISO OUT */
+#define CNF_3INT3ISO   2       /* 3 INT IN, 3 ISO OUT */
+#define CNF_4ISO3ISO   3       /* 4 ISO IN, 3 ISO OUT */
+#define CNF_3ISO3ISO   4       /* 3 ISO IN, 3 ISO OUT */
+
+#define EP_NUL 1       /* Endpoint at this position not allowed */
+#define EP_NOP 2       /* all types of endpoints allowed at this position */
+#define EP_ISO 3       /* Isochron endpoint mandatory at this position */
+#define EP_BLK 4       /* Bulk endpoint mandatory at this position */
+#define EP_INT 5       /* Interrupt endpoint mandatory at this position */
+
+#define HFC_CHAN_B1    0
+#define HFC_CHAN_B2    1
+#define HFC_CHAN_D     2
+#define HFC_CHAN_E     3
+
+
+/*
+ * List of all supported endpoint configuration sets, used to find the
+ * best matching endpoint configuration within a device's USB descriptor.
+ * We need at least 3 RX endpoints and 3 TX endpoints, either
+ * INT-in and ISO-out, or ISO-in and ISO-out.
+ * With 4 RX endpoints even E-channel logging is possible.
+ */
+static int
+validconf[][19] = {
+       /* INT in, ISO out config */
+       {EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NOP, EP_INT,
+        EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
+        CNF_4INT3ISO, 2, 1},
+       {EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_NUL,
+        EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
+        CNF_3INT3ISO, 2, 0},
+       /* ISO in, ISO out config */
+       {EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP,
+        EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NOP, EP_ISO,
+        CNF_4ISO3ISO, 2, 1},
+       {EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL,
+        EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NUL, EP_NUL,
+        CNF_3ISO3ISO, 2, 0},
+       {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} /* EOL element */
+};
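+
+/*
+ * Each validconf row: entries 0..15 give the expected transfer type per
+ * endpoint slot (EP_NUL/EP_NOP/EP_ISO/EP_BLK/EP_INT), entry 16 is the
+ * CNF_* id stored in hw->cfg_used and entry 17 is the minimum allowed
+ * bInterval for interrupt endpoints.
+ */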
+
+/* string description of chosen config */
+char *conf_str[] = {
+       "4 Interrupt IN + 3 Isochron OUT",
+       "3 Interrupt IN + 3 Isochron OUT",
+       "4 Isochron IN + 3 Isochron OUT",
+       "3 Isochron IN + 3 Isochron OUT"
+};
+
+
+#define LED_OFF                0       /* no LED support */
+#define LED_SCHEME1    1       /* LED standard scheme */
+#define LED_SCHEME2    2       /* not used yet... */
+
+#define LED_POWER_ON   1
+#define LED_POWER_OFF  2
+#define LED_S0_ON      3
+#define LED_S0_OFF     4
+#define LED_B1_ON      5
+#define LED_B1_OFF     6
+#define LED_B1_DATA    7
+#define LED_B2_ON      8
+#define LED_B2_OFF     9
+#define LED_B2_DATA    10
+
+#define LED_NORMAL     0       /* LEDs are normal */
+#define LED_INVERTED   1       /* LEDs are inverted */
+
+/* time in ms to flash the LED when the B-channel has traffic */
+#define LED_TIME      250
+
+
+
+struct hfcsusb;
+struct usb_fifo;
+
+/* structure defining input+output fifos (interrupt/bulk mode) */
+struct iso_urb {
+       struct urb *urb;
+       __u8 buffer[ISO_BUFFER_SIZE];   /* buffer rx/tx USB URB data */
+       struct usb_fifo *owner_fifo;    /* pointer to owner fifo */
+       __u8 indx; /* fifo's ISO double buffer 0 or 1? */
+#ifdef ISO_FRAME_START_DEBUG
+       int start_frames[ISO_FRAME_START_RING_COUNT];
+       __u8 iso_frm_strt_pos; /* index in start_frame[] */
+#endif
+};
+
+struct usb_fifo {
+       int fifonum;            /* fifo index attached to this structure */
+       int active;             /* fifo is currently active */
+       struct hfcsusb *hw;     /* pointer to main structure */
+       int pipe;               /* address of endpoint */
+       __u8 usb_packet_maxlen; /* maximum length for usb transfer */
+       unsigned int max_size;  /* maximum size of receive/send packet */
+       __u8 intervall;         /* interrupt interval */
+       struct urb *urb;        /* transfer structure for usb routines */
+       __u8 buffer[128];       /* buffer USB INT OUT URB data */
+       int bit_line;           /* how many bits are in the fifo? */
+
+       __u8 usb_transfer_mode; /* switched between ISO and INT */
+       struct iso_urb  iso[2]; /* two URBs so that one is
+                                        always pending */
+
+       struct dchannel *dch;   /* link to hfcsusb_t->dch */
+       struct bchannel *bch;   /* link to hfcsusb_t->bch */
+       struct dchannel *ech;   /* link to hfcsusb_t->ech, TODO: E-CHANNEL */
+       int last_urblen;        /* remember length of last packet */
+       __u8 stop_gracefull;    /* stops URB retransmission */
+};
+
+struct hfcsusb {
+       struct list_head        list;
+       struct dchannel         dch;
+       struct bchannel         bch[2];
+       struct dchannel         ech; /* TODO : wait for struct echannel ;) */
+
+       struct usb_device       *dev;           /* our device */
+       struct usb_interface    *intf;          /* used interface */
+       int                     if_used;        /* used interface number */
+       int                     alt_used;       /* used alternate config */
+       int                     cfg_used;       /* configuration index used */
+       int                     vend_idx;       /* index in hfcsusb_idtab */
+       int                     packet_size;
+       int                     iso_packet_size;
+       struct usb_fifo         fifos[HFCUSB_NUM_FIFOS];
+
+       /* control pipe background handling */
+       struct ctrl_buf         ctrl_buff[HFC_CTRL_BUFSIZE];
+       int                     ctrl_in_idx, ctrl_out_idx, ctrl_cnt;
+       struct urb              *ctrl_urb;
+       struct usb_ctrlrequest  ctrl_write;
+       struct usb_ctrlrequest  ctrl_read;
+       int                     ctrl_paksize;
+       int                     ctrl_in_pipe, ctrl_out_pipe;
+       spinlock_t              ctrl_lock; /* lock for ctrl */
+       spinlock_t              lock;
+
+       __u8                    threshold_mask;
+       __u8                    led_state;
+
+       __u8                    protocol;
+       int                     nt_timer;
+       int                     open;
+       __u8                    timers;
+       __u8                    initdone;
+       char                    name[MISDN_MAX_IDLEN];
+};
+
+/* private vendor specific data */
+struct hfcsusb_vdata {
+       __u8            led_scheme;  /* led display scheme */
+       signed short    led_bits[8]; /* array of 8 possible LED bitmask */
+       char            *vend_name;  /* device name */
+};
+
+
+#define HFC_MAX_TE_LAYER1_STATE 8
+#define HFC_MAX_NT_LAYER1_STATE 4
+
+const char *HFC_TE_LAYER1_STATES[HFC_MAX_TE_LAYER1_STATE + 1] = {
+       "TE F0 - Reset",
+       "TE F1 - Reset",
+       "TE F2 - Sensing",
+       "TE F3 - Deactivated",
+       "TE F4 - Awaiting signal",
+       "TE F5 - Identifying input",
+       "TE F6 - Synchronized",
+       "TE F7 - Activated",
+       "TE F8 - Lost framing",
+};
+
+const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = {
+       "NT G0 - Reset",
+       "NT G1 - Deactivated",
+       "NT G2 - Pending activation",
+       "NT G3 - Active",
+       "NT G4 - Pending deactivation",
+};
+
+/* supported devices */
+static struct usb_device_id hfcsusb_idtab[] = {
+       {
+        USB_DEVICE(0x0959, 0x2bd0),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_OFF, {4, 0, 2, 1},
+                          "ISDN USB TA (Cologne Chip HFC-S USB based)"}),
+       },
+       {
+        USB_DEVICE(0x0675, 0x1688),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {1, 2, 0, 0},
+                          "DrayTek miniVigor 128 USB ISDN TA"}),
+       },
+       {
+        USB_DEVICE(0x07b0, 0x0007),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {0x80, -64, -32, -16},
+                          "Billion tiny USB ISDN TA 128"}),
+       },
+       {
+        USB_DEVICE(0x0742, 0x2008),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {4, 0, 2, 1},
+                          "Stollmann USB TA"}),
+       },
+       {
+        USB_DEVICE(0x0742, 0x2009),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {4, 0, 2, 1},
+                          "Aceex USB ISDN TA"}),
+       },
+       {
+        USB_DEVICE(0x0742, 0x200A),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {4, 0, 2, 1},
+                          "OEM USB ISDN TA"}),
+       },
+       {
+        USB_DEVICE(0x08e3, 0x0301),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {2, 0, 1, 4},
+                          "Olitec USB RNIS"}),
+       },
+       {
+        USB_DEVICE(0x07fa, 0x0846),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {0x80, -64, -32, -16},
+                          "Bewan Modem RNIS USB"}),
+       },
+       {
+        USB_DEVICE(0x07fa, 0x0847),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {0x80, -64, -32, -16},
+                          "Djinn Numeris USB"}),
+       },
+       {
+        USB_DEVICE(0x07b0, 0x0006),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {0x80, -64, -32, -16},
+                          "Twister ISDN TA"}),
+       },
+       {
+        USB_DEVICE(0x071d, 0x1005),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {0x02, 0, 0x01, 0x04},
+                          "Eicon DIVA USB 4.0"}),
+       },
+       {
+        USB_DEVICE(0x0586, 0x0102),
+        .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+                         {LED_SCHEME1, {0x88, -64, -32, -16},
+                          "ZyXEL OMNI.NET USB II"}),
+       },
+       { }
+};
+
+MODULE_DEVICE_TABLE(usb, hfcsusb_idtab);
+
+#endif /* __HFCSUSB_H__ */
index 1cb5e633cf75b4f22c9341e45cc297cd051d3e97..0a6bd2a9e730d5f0326f4f8c7152a95b890ed912 100644 (file)
@@ -8,6 +8,6 @@ obj-$(CONFIG_MISDN_L1OIP) += l1oip.o
 
 # multi objects
 
-mISDN_core-objs := core.o fsm.o socket.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o
+mISDN_core-objs := core.o fsm.o socket.o clock.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o
 mISDN_dsp-objs := dsp_core.o dsp_cmx.o dsp_tones.o dsp_dtmf.o dsp_audio.o dsp_blowfish.o dsp_pipeline.o dsp_hwec.o
 l1oip-objs := l1oip_core.o l1oip_codec.o
diff --git a/drivers/isdn/mISDN/clock.c b/drivers/isdn/mISDN/clock.c
new file mode 100644 (file)
index 0000000..44d9c3d
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008  by Andreas Eversberg <andreas@eversberg.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Quick API description:
+ *
+ * A clock source registers using mISDN_register_clock:
+ *     name = text string to name clock source
+ *     priority = value to prioritize clock sources (0 = default)
+ *     ctl = callback function to enable/disable clock source
+ *     priv = private pointer of clock source
+ *     return = pointer to clock source structure;
+ *
+ * Note: Callback 'ctl' can be called before mISDN_register_clock returns!
+ *       Also it can be called during mISDN_unregister_clock.
+ *
+ * A clock source calls mISDN_clock_update with the number of samples
+ * elapsed, if enabled. If the function call is delayed, tv must be set
+ * to the timestamp of the actual event.
+ *
+ * A clock source unregisters using mISDN_unregister_clock.
+ *
+ * To get the current clock, call mISDN_clock_get. The 16-bit value
+ * counts the number of samples; the time elapsed since the last clock
+ * event is added on top.
+ *
+ */
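+
+/*
+ * Minimal usage sketch (hypothetical driver, not part of mISDN): a card
+ * driver acting as a clock source registers itself and feeds sample
+ * counts from its interrupt handler, e.g.:
+ *
+ *     static void my_clock_ctl(void *priv, int enable)
+ *     {
+ *             // switch the card's sample counter interrupt on/off
+ *     }
+ *
+ *     card->iclock = mISDN_register_clock("my-card", 0, my_clock_ctl, card);
+ *     ...
+ *     // from the interrupt handler, once enabled via my_clock_ctl():
+ *     mISDN_clock_update(card->iclock, samples_elapsed, NULL);
+ *     ...
+ *     mISDN_unregister_clock(card->iclock);
+ */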
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/mISDNif.h>
+#include "core.h"
+
+static u_int *debug;
+static LIST_HEAD(iclock_list);
+DEFINE_RWLOCK(iclock_lock);
+u16    iclock_count;           /* counter of last clock */
+struct timeval iclock_tv;      /* time stamp of last clock */
+int    iclock_tv_valid;        /* already received one timestamp */
+struct mISDNclock *iclock_current;
+
+void
+mISDN_init_clock(u_int *dp)
+{
+       debug = dp;
+       do_gettimeofday(&iclock_tv);
+}
+
+static void
+select_iclock(void)
+{
+       struct mISDNclock *iclock, *bestclock = NULL, *lastclock = NULL;
+       int pri = -128;
+
+       list_for_each_entry(iclock, &iclock_list, list) {
+               if (iclock->pri > pri) {
+                       pri = iclock->pri;
+                       bestclock = iclock;
+               }
+               if (iclock_current == iclock)
+                       lastclock = iclock;
+       }
+       if (lastclock && bestclock != lastclock) {
+               /* last used clock source still exists but is no longer
+                * selected; disable it */
+               if (*debug & DEBUG_CLOCK)
+                       printk(KERN_DEBUG "Disabling old clock source '%s'.\n",
+                               lastclock->name);
+               lastclock->ctl(lastclock->priv, 0);
+       }
+       if (bestclock && bestclock != iclock_current) {
+               /* new clock source selected, enable */
+               if (*debug & DEBUG_CLOCK)
+                       printk(KERN_DEBUG "Enabling new clock source '%s'.\n",
+                               bestclock->name);
+               bestclock->ctl(bestclock->priv, 1);
+       }
+       if (bestclock != iclock_current) {
+               /* new clock source selected: no timestamp received from it yet */
+               iclock_tv_valid = 0;
+       }
+       iclock_current = bestclock;
+}
+
+struct mISDNclock
+*mISDN_register_clock(char *name, int pri, clockctl_func_t *ctl, void *priv)
+{
+       u_long                  flags;
+       struct mISDNclock       *iclock;
+
+       if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
+               printk(KERN_DEBUG "%s: %s %d\n", __func__, name, pri);
+       iclock = kzalloc(sizeof(struct mISDNclock), GFP_ATOMIC);
+       if (!iclock) {
+               printk(KERN_ERR "%s: No memory for clock entry.\n", __func__);
+               return NULL;
+       }
+       strncpy(iclock->name, name, sizeof(iclock->name)-1);
+       iclock->pri = pri;
+       iclock->priv = priv;
+       iclock->ctl = ctl;
+       write_lock_irqsave(&iclock_lock, flags);
+       list_add_tail(&iclock->list, &iclock_list);
+       select_iclock();
+       write_unlock_irqrestore(&iclock_lock, flags);
+       return iclock;
+}
+EXPORT_SYMBOL(mISDN_register_clock);
+
+void
+mISDN_unregister_clock(struct mISDNclock *iclock)
+{
+       u_long  flags;
+
+       if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
+               printk(KERN_DEBUG "%s: %s %d\n", __func__, iclock->name,
+                       iclock->pri);
+       write_lock_irqsave(&iclock_lock, flags);
+       if (iclock_current == iclock) {
+               if (*debug & DEBUG_CLOCK)
+                       printk(KERN_DEBUG
+                               "Current clock source '%s' unregisters.\n",
+                               iclock->name);
+               iclock->ctl(iclock->priv, 0);
+       }
+       list_del(&iclock->list);
+       select_iclock();
+       write_unlock_irqrestore(&iclock_lock, flags);
+}
+EXPORT_SYMBOL(mISDN_unregister_clock);
+
+void
+mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv)
+{
+       u_long          flags;
+       struct timeval  tv_now;
+       time_t          elapsed_sec;
+       int             elapsed_8000th;
+
+       write_lock_irqsave(&iclock_lock, flags);
+       if (iclock_current != iclock) {
+               printk(KERN_ERR "%s: '%s' sends us clock updates, but we "
+                       "listen to '%s'. This is a bug!\n", __func__,
+                       iclock->name,
+                       iclock_current ? iclock_current->name : "nothing");
+               iclock->ctl(iclock->priv, 0);
+               write_unlock_irqrestore(&iclock_lock, flags);
+               return;
+       }
+       if (iclock_tv_valid) {
+               /* increment sample counter by given samples */
+               iclock_count += samples;
+               if (tv) { /* tv must be set, if function call is delayed */
+                       iclock_tv.tv_sec = tv->tv_sec;
+                       iclock_tv.tv_usec = tv->tv_usec;
+               } else
+                       do_gettimeofday(&iclock_tv);
+       } else {
+               /* calc elapsed time by system clock */
+               if (tv) { /* tv must be set, if function call is delayed */
+                       tv_now.tv_sec = tv->tv_sec;
+                       tv_now.tv_usec = tv->tv_usec;
+               } else
+                       do_gettimeofday(&tv_now);
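+               /* at 8000 samples/s one sample lasts 125 us */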
+               elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
+               elapsed_8000th = (tv_now.tv_usec / 125)
+                       - (iclock_tv.tv_usec / 125);
+               if (elapsed_8000th < 0) {
+                       elapsed_sec -= 1;
+                       elapsed_8000th += 8000;
+               }
+               /* add elapsed time to counter and set new timestamp */
+               iclock_count += elapsed_sec * 8000 + elapsed_8000th;
+               iclock_tv.tv_sec = tv_now.tv_sec;
+               iclock_tv.tv_usec = tv_now.tv_usec;
+               iclock_tv_valid = 1;
+               if (*debug & DEBUG_CLOCK)
+                       printk(KERN_DEBUG
+                           "Received first clock from source '%s'.\n",
+                           iclock_current ? iclock_current->name : "nothing");
+       }
+       write_unlock_irqrestore(&iclock_lock, flags);
+}
+EXPORT_SYMBOL(mISDN_clock_update);
+
+unsigned short
+mISDN_clock_get(void)
+{
+       u_long          flags;
+       struct timeval  tv_now;
+       time_t          elapsed_sec;
+       int             elapsed_8000th;
+       u16             count;
+
+       read_lock_irqsave(&iclock_lock, flags);
+       /* calc elapsed time by system clock */
+       do_gettimeofday(&tv_now);
+       elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
+       elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125);
+       if (elapsed_8000th < 0) {
+               elapsed_sec -= 1;
+               elapsed_8000th += 8000;
+       }
+       /* add elapsed time to counter */
+       count = iclock_count + elapsed_sec * 8000 + elapsed_8000th;
+       read_unlock_irqrestore(&iclock_lock, flags);
+       return count;
+}
+EXPORT_SYMBOL(mISDN_clock_get);
+
index 751665c448d0e9ebe284d6942cf709e4e29e9160..9426c9827e47307c24efa3de435651864cb7eb72 100644 (file)
@@ -25,39 +25,183 @@ MODULE_AUTHOR("Karsten Keil");
 MODULE_LICENSE("GPL");
 module_param(debug, uint, S_IRUGO | S_IWUSR);
 
-static LIST_HEAD(devices);
-static DEFINE_RWLOCK(device_lock);
 static u64             device_ids;
 #define MAX_DEVICE_ID  63
 
 static LIST_HEAD(Bprotocols);
 static DEFINE_RWLOCK(bp_lock);
 
+static void mISDN_dev_release(struct device *dev)
+{
+       /* nothing to do: the device is part of its parent's data structure */
+}
+
+static ssize_t _show_id(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return -ENODEV;
+       return sprintf(buf, "%d\n", mdev->id);
+}
+
+static ssize_t _show_nrbchan(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return -ENODEV;
+       return sprintf(buf, "%d\n", mdev->nrbchan);
+}
+
+static ssize_t _show_d_protocols(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return -ENODEV;
+       return sprintf(buf, "%d\n", mdev->Dprotocols);
+}
+
+static ssize_t _show_b_protocols(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return -ENODEV;
+       return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols());
+}
+
+static ssize_t _show_protocol(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return -ENODEV;
+       return sprintf(buf, "%d\n", mdev->D.protocol);
+}
+
+static ssize_t _show_name(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       strcpy(buf, dev_name(dev));
+       return strlen(buf);
+}
+
+#if 0 /* hangs */
+static ssize_t _set_name(struct device *dev, struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       int err = 0;
+       char *out = kmalloc(count + 1, GFP_KERNEL);
+
+       if (!out)
+               return -ENOMEM;
+
+       memcpy(out, buf, count);
+       if (count && out[count - 1] == '\n')
+               out[--count] = 0;
+       if (count)
+               err = device_rename(dev, out);
+       kfree(out);
+
+       return (err < 0) ? err : count;
+}
+#endif
+
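+/* one character per channel slot: '1' if that B-channel is present in
+ * the device's channelmap, '0' otherwise */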
+static ssize_t _show_channelmap(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+       char *bp = buf;
+       int i;
+
+       for (i = 0; i <= mdev->nrbchan; i++)
+               *bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0';
+
+       return bp - buf;
+}
+
+static struct device_attribute mISDN_dev_attrs[] = {
+       __ATTR(id,          S_IRUGO,         _show_id,          NULL),
+       __ATTR(d_protocols, S_IRUGO,         _show_d_protocols, NULL),
+       __ATTR(b_protocols, S_IRUGO,         _show_b_protocols, NULL),
+       __ATTR(protocol,    S_IRUGO,         _show_protocol,    NULL),
+       __ATTR(channelmap,  S_IRUGO,         _show_channelmap,  NULL),
+       __ATTR(nrbchan,     S_IRUGO,         _show_nrbchan,     NULL),
+       __ATTR(name,        S_IRUGO,         _show_name,        NULL),
+/*     __ATTR(name,        S_IRUGO|S_IWUSR, _show_name,       _set_name), */
+       {}
+};
+
+#ifdef CONFIG_HOTPLUG
+static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return 0;
+
+       if (add_uevent_var(env, "nchans=%d", mdev->nrbchan))
+               return -ENOMEM;
+
+       return 0;
+}
+#endif
+
+static void mISDN_class_release(struct class *cls)
+{
+       /* do nothing, it's static */
+}
+
+static struct class mISDN_class = {
+       .name = "mISDN",
+       .owner = THIS_MODULE,
+#ifdef CONFIG_HOTPLUG
+       .dev_uevent = mISDN_uevent,
+#endif
+       .dev_attrs = mISDN_dev_attrs,
+       .dev_release = mISDN_dev_release,
+       .class_release = mISDN_class_release,
+};
+
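+/* match callback for class_find_device(): compare against the device id */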
+static int
+_get_mdevice(struct device *dev, void *id)
+{
+       struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+       if (!mdev)
+               return 0;
+       if (mdev->id != *(u_int *)id)
+               return 0;
+       return 1;
+}
+
 struct mISDNdevice
 *get_mdevice(u_int id)
 {
-       struct mISDNdevice      *dev;
+       return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id,
+               _get_mdevice));
+}
 
-       read_lock(&device_lock);
-       list_for_each_entry(dev, &devices, D.list)
-               if (dev->id == id) {
-                       read_unlock(&device_lock);
-                       return dev;
-               }
-       read_unlock(&device_lock);
-       return NULL;
+static int
+_get_mdevice_count(struct device *dev, void *cnt)
+{
+       *(int *)cnt += 1;
+       return 0;
 }
 
 int
 get_mdevice_count(void)
 {
-       struct mISDNdevice      *dev;
-       int                     cnt = 0;
+       int cnt = 0;
 
-       read_lock(&device_lock);
-       list_for_each_entry(dev, &devices, D.list)
-               cnt++;
-       read_unlock(&device_lock);
+       class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count);
        return cnt;
 }
 
@@ -68,48 +212,66 @@ get_free_devid(void)
 
        for (i = 0; i <= MAX_DEVICE_ID; i++)
                if (!test_and_set_bit(i, (u_long *)&device_ids))
-                       return i;
-       return -1;
+                       break;
+       if (i > MAX_DEVICE_ID)
+               return -1;
+       return i;
 }
 
 int
-mISDN_register_device(struct mISDNdevice *dev, char *name)
+mISDN_register_device(struct mISDNdevice *dev,
+                       struct device *parent, char *name)
 {
-       u_long  flags;
        int     err;
 
        dev->id = get_free_devid();
+       err = -EBUSY;
        if (dev->id < 0)
-               return -EBUSY;
+               goto error1;
+
+       device_initialize(&dev->dev);
        if (name && name[0])
-               strcpy(dev->name, name);
+               dev_set_name(&dev->dev, "%s", name);
        else
-               sprintf(dev->name, "mISDN%d", dev->id);
+               dev_set_name(&dev->dev, "mISDN%d", dev->id);
        if (debug & DEBUG_CORE)
                printk(KERN_DEBUG "mISDN_register %s %d\n",
-                       dev->name, dev->id);
+                       dev_name(&dev->dev), dev->id);
        err = create_stack(dev);
        if (err)
-               return err;
-       write_lock_irqsave(&device_lock, flags);
-       list_add_tail(&dev->D.list, &devices);
-       write_unlock_irqrestore(&device_lock, flags);
+               goto error1;
+
+       dev->dev.class = &mISDN_class;
+       dev->dev.platform_data = dev;
+       dev->dev.parent = parent;
+       dev_set_drvdata(&dev->dev, dev);
+
+       err = device_add(&dev->dev);
+       if (err)
+               goto error3;
        return 0;
+
+error3:
+       delete_stack(dev);
+       return err;
+error1:
+       return err;
+
 }
 EXPORT_SYMBOL(mISDN_register_device);
 
 void
 mISDN_unregister_device(struct mISDNdevice *dev) {
-       u_long  flags;
-
        if (debug & DEBUG_CORE)
                printk(KERN_DEBUG "mISDN_unregister %s %d\n",
-                       dev->name, dev->id);
-       write_lock_irqsave(&device_lock, flags);
-       list_del(&dev->D.list);
-       write_unlock_irqrestore(&device_lock, flags);
+                       dev_name(&dev->dev), dev->id);
+       /* sysfs_remove_link(&dev->dev.kobj, "device"); */
+       device_del(&dev->dev);
+       dev_set_drvdata(&dev->dev, NULL);
+
        test_and_clear_bit(dev->id, (u_long *)&device_ids);
        delete_stack(dev);
+       put_device(&dev->dev);
 }
 EXPORT_SYMBOL(mISDN_unregister_device);
 
@@ -199,43 +361,45 @@ mISDNInit(void)
 
        printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n",
                MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE);
+       mISDN_init_clock(&debug);
        mISDN_initstack(&debug);
+       err = class_register(&mISDN_class);
+       if (err)
+               goto error1;
        err = mISDN_inittimer(&debug);
        if (err)
-               goto error;
+               goto error2;
        err = l1_init(&debug);
-       if (err) {
-               mISDN_timer_cleanup();
-               goto error;
-       }
+       if (err)
+               goto error3;
        err = Isdnl2_Init(&debug);
-       if (err) {
-               mISDN_timer_cleanup();
-               l1_cleanup();
-               goto error;
-       }
+       if (err)
+               goto error4;
        err = misdn_sock_init(&debug);
-       if (err) {
-               mISDN_timer_cleanup();
-               l1_cleanup();
-               Isdnl2_cleanup();
-       }
-error:
+       if (err)
+               goto error5;
+       return 0;
+
+error5:
+       Isdnl2_cleanup();
+error4:
+       l1_cleanup();
+error3:
+       mISDN_timer_cleanup();
+error2:
+       class_unregister(&mISDN_class);
+error1:
        return err;
 }
 
 static void mISDN_cleanup(void)
 {
        misdn_sock_cleanup();
-       mISDN_timer_cleanup();
-       l1_cleanup();
        Isdnl2_cleanup();
+       l1_cleanup();
+       mISDN_timer_cleanup();
+       class_unregister(&mISDN_class);
 
-       if (!list_empty(&devices))
-               printk(KERN_ERR "%s devices still registered\n", __func__);
-
-       if (!list_empty(&Bprotocols))
-               printk(KERN_ERR "%s Bprotocols still registered\n", __func__);
        printk(KERN_DEBUG "mISDNcore unloaded\n");
 }
 
index 7da7233b4c1a4f611256e4830798c14cea370c2b..7ac2f81a812b0f32eab886684800ab4c937158d4 100644 (file)
@@ -74,4 +74,6 @@ extern void   l1_cleanup(void);
 extern int     Isdnl2_Init(u_int *);
 extern void    Isdnl2_cleanup(void);
 
+extern void    mISDN_init_clock(u_int *);
+
 #endif
index 6c3fed6b8d4f830a7aca4843ce833a2a1d0250ca..98a33c58f0911f4b7482eb40a949a9a2412407f2 100644 (file)
@@ -15,6 +15,7 @@
 #define DEBUG_DSP_TONE         0x0020
 #define DEBUG_DSP_BLOWFISH     0x0040
 #define DEBUG_DSP_DELAY                0x0100
+#define DEBUG_DSP_CLOCK                0x0200
 #define DEBUG_DSP_DTMFCOEFF    0x8000 /* heavy output */
 
 /* options may be:
@@ -198,6 +199,7 @@ struct dsp {
        /* hardware stuff */
        struct dsp_features features;
        int             features_rx_off; /* set if rx_off is featured */
+       int             features_fill_empty; /* set if fill_empty is featured */
        int             pcm_slot_rx; /* current PCM slot (or -1) */
        int             pcm_bank_rx;
        int             pcm_slot_tx;
index c884511e2d49038a465ab9e550a8e04739463de0..0ac67bff303a7cfbe92408f84d2bec4d4ddb29a9 100644 (file)
 /* #define CMX_CONF_DEBUG */
 
 /*#define CMX_DEBUG * massive read/write pointer output */
+/*#define CMX_DELAY_DEBUG * gives rx-buffer delay overview */
 /*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */
 
 static inline int
@@ -744,11 +745,11 @@ conf_software:
                                        if (dsp->pcm_slot_rx >= 0 &&
                                            dsp->pcm_slot_rx <
                                            sizeof(freeslots))
-                                               freeslots[dsp->pcm_slot_tx] = 0;
+                                               freeslots[dsp->pcm_slot_rx] = 0;
                                        if (dsp->pcm_slot_tx >= 0 &&
                                            dsp->pcm_slot_tx <
                                            sizeof(freeslots))
-                                               freeslots[dsp->pcm_slot_rx] = 0;
+                                               freeslots[dsp->pcm_slot_tx] = 0;
                                }
                        }
                        i = 0;
@@ -836,11 +837,11 @@ conf_software:
                                        if (dsp->pcm_slot_rx >= 0 &&
                                            dsp->pcm_slot_rx <
                                            sizeof(freeslots))
-                                               freeslots[dsp->pcm_slot_tx] = 0;
+                                               freeslots[dsp->pcm_slot_rx] = 0;
                                        if (dsp->pcm_slot_tx >= 0 &&
                                            dsp->pcm_slot_tx <
                                            sizeof(freeslots))
-                                               freeslots[dsp->pcm_slot_rx] = 0;
+                                               freeslots[dsp->pcm_slot_tx] = 0;
                                }
                        }
                        i1 = 0;
@@ -926,10 +927,6 @@ conf_software:
 
        /* for more than two members.. */
 
-       /* in case of hdlc, we change to software */
-       if (dsp->hdlc)
-               goto conf_software;
-
        /* if all members already have the same conference */
        if (all_conf)
                return;
@@ -940,6 +937,9 @@ conf_software:
        if (current_conf >= 0) {
 join_members:
                list_for_each_entry(member, &conf->mlist, list) {
+                       /* in case of hdlc, change to software */
+                       if (member->dsp->hdlc)
+                               goto conf_software;
                        /* join to current conference */
                        if (member->dsp->hfc_conf == current_conf)
                                continue;
@@ -1135,6 +1135,25 @@ dsp_cmx_conf(struct dsp *dsp, u32 conf_id)
        return 0;
 }
 
+#ifdef CMX_DELAY_DEBUG
+int delaycount;
+static void
+showdelay(struct dsp *dsp, int samples, int delay)
+{
+       char bar[] = "--------------------------------------------------|";
+       int sdelay;
+
+       delaycount += samples;
+       if (delaycount < 8000)
+               return;
+       delaycount = 0;
+
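+       /* scale the delay to a 50 character bar; full scale is 4 * dsp_poll */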
+       sdelay = delay * 50 / (dsp_poll << 2);
+
+       printk(KERN_DEBUG "DELAY (%s) %3d >%s\n", dsp->name, delay,
+               sdelay > 50 ? "..." : bar + 50 - sdelay);
+}
+#endif
 
 /*
  * audio data is received from card
@@ -1168,11 +1187,18 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
                dsp->rx_init = 0;
                if (dsp->features.unordered) {
                        dsp->rx_R = (hh->id & CMX_BUFF_MASK);
-                       dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
-                               & CMX_BUFF_MASK;
+                       if (dsp->cmx_delay)
+                               dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+                                       & CMX_BUFF_MASK;
+                       else
+                               dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
+                                       & CMX_BUFF_MASK;
                } else {
                        dsp->rx_R = 0;
-                       dsp->rx_W = dsp->cmx_delay;
+                       if (dsp->cmx_delay)
+                               dsp->rx_W = dsp->cmx_delay;
+                       else
+                               dsp->rx_W = dsp_poll >> 1;
                }
        }
        /* if frame contains time code, write directly */
@@ -1185,19 +1211,25 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
         * we set our new read pointer, and write silence to buffer
         */
        if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) {
-               if (dsp_debug & DEBUG_DSP_CMX)
+               if (dsp_debug & DEBUG_DSP_CLOCK)
                        printk(KERN_DEBUG
                            "cmx_receive(dsp=%lx): UNDERRUN (or overrun the "
                            "maximum delay), adjusting read pointer! "
                            "(inst %s)\n", (u_long)dsp, dsp->name);
-               /* flush buffer */
+               /* flush rx buffer and set delay to dsp_poll / 2 */
                if (dsp->features.unordered) {
                        dsp->rx_R = (hh->id & CMX_BUFF_MASK);
-                       dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
-                               & CMX_BUFF_MASK;
+                       if (dsp->cmx_delay)
+                               dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+                                       & CMX_BUFF_MASK;
+                       else
+                               dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
+                                       & CMX_BUFF_MASK;
                } else {
                        dsp->rx_R = 0;
-                       dsp->rx_W = dsp->cmx_delay;
+                       if (dsp->cmx_delay)
+                               dsp->rx_W = dsp->cmx_delay;
+                       else
+                               dsp->rx_W = dsp_poll >> 1;
                }
                memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
        }
@@ -1205,7 +1237,7 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
        if (dsp->cmx_delay)
                if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >=
                    (dsp->cmx_delay << 1)) {
-                       if (dsp_debug & DEBUG_DSP_CMX)
+                       if (dsp_debug & DEBUG_DSP_CLOCK)
                                printk(KERN_DEBUG
                                    "cmx_receive(dsp=%lx): OVERRUN (because "
                                    "twice the delay is reached), adjusting "
@@ -1243,6 +1275,9 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
 
        /* increase write-pointer */
        dsp->rx_W = ((dsp->rx_W+len) & CMX_BUFF_MASK);
+#ifdef CMX_DELAY_DEBUG
+       showdelay(dsp, len, (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK);
+#endif
 }
 
 
@@ -1360,8 +1395,12 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
                                t = (t+1) & CMX_BUFF_MASK;
                                r = (r+1) & CMX_BUFF_MASK;
                        }
-                       if (r != rr)
+                       if (r != rr) {
+                               if (dsp_debug & DEBUG_DSP_CLOCK)
+                                       printk(KERN_DEBUG "%s: RX empty\n",
+                                               __func__);
                                memset(d, dsp_silence, (rr-r)&CMX_BUFF_MASK);
+                       }
                /* -> if echo is enabled */
                } else {
                        /*
@@ -1540,13 +1579,11 @@ send_packet:
        schedule_work(&dsp->workq);
 }
 
-static u32     samplecount;
+static u32     jittercount; /* counter for jitter check */
 struct timer_list dsp_spl_tl;
 u32    dsp_spl_jiffies; /* calculate the next time to fire */
-#ifdef UNUSED
-static u32     dsp_start_jiffies; /* jiffies at the time, the calculation begins */
-#endif /* UNUSED */
-static struct timeval dsp_start_tv; /* time at start of calculation */
+static u16     dsp_count; /* last sample count */
+static int     dsp_count_valid; /* if we have last sample count */
 
 void
 dsp_cmx_send(void *arg)
@@ -1560,38 +1597,32 @@ dsp_cmx_send(void *arg)
        int r, rr;
        int jittercheck = 0, delay, i;
        u_long flags;
-       struct timeval tv;
-       u32 elapsed;
-       s16 length;
+       u16 length, count;
 
        /* lock */
        spin_lock_irqsave(&dsp_lock, flags);
 
-       if (!dsp_start_tv.tv_sec) {
-               do_gettimeofday(&dsp_start_tv);
+       if (!dsp_count_valid) {
+               dsp_count = mISDN_clock_get();
                length = dsp_poll;
+               dsp_count_valid = 1;
        } else {
-               do_gettimeofday(&tv);
-               elapsed = ((tv.tv_sec - dsp_start_tv.tv_sec) * 8000)
-                   + ((s32)(tv.tv_usec / 125) - (dsp_start_tv.tv_usec / 125));
-               dsp_start_tv.tv_sec = tv.tv_sec;
-               dsp_start_tv.tv_usec = tv.tv_usec;
-               length = elapsed;
+               count = mISDN_clock_get();
+               length = count - dsp_count;
+               dsp_count = count;
        }
        if (length > MAX_POLL + 100)
                length = MAX_POLL + 100;
-/* printk(KERN_DEBUG "len=%d dsp_count=0x%x.%04x dsp_poll_diff=0x%x.%04x\n",
- length, dsp_count >> 16, dsp_count & 0xffff, dsp_poll_diff >> 16,
- dsp_poll_diff & 0xffff);
- */
+       /* printk(KERN_DEBUG "len=%d dsp_count=0x%x\n", length, dsp_count); */
 
        /*
-        * check if jitter needs to be checked
-        * (this is about every second = 8192 samples)
+        * check if jitter needs to be checked (this is every second)
         */
-       samplecount += length;
-       if ((samplecount & 8191) < length)
+       jittercount += length;
+       if (jittercount >= 8000) {
+               jittercount -= 8000;
                jittercheck = 1;
+       }
 
        /* loop all members that do not require conference mixing */
        list_for_each_entry(dsp, &dsp_ilist, list) {
@@ -1704,17 +1735,19 @@ dsp_cmx_send(void *arg)
                        }
                        /*
                         * remove rx_delay only if we have delay AND we
-                        * have not preset cmx_delay
+                        * have not preset cmx_delay AND
+                        * the delay is greater than dsp_poll
                         */
-                       if (delay && !dsp->cmx_delay) {
-                               if (dsp_debug & DEBUG_DSP_CMX)
+                       if (delay > dsp_poll && !dsp->cmx_delay) {
+                               if (dsp_debug & DEBUG_DSP_CLOCK)
                                        printk(KERN_DEBUG
                                            "%s lowest rx_delay of %d bytes for"
                                            " dsp %s are now removed.\n",
                                            __func__, delay,
                                            dsp->name);
                                r = dsp->rx_R;
-                               rr = (r + delay) & CMX_BUFF_MASK;
+                               rr = (r + delay - (dsp_poll >> 1))
+                                       & CMX_BUFF_MASK;
                                /* delete rx-data */
                                while (r != rr) {
                                        p[r] = dsp_silence;
@@ -1736,15 +1769,16 @@ dsp_cmx_send(void *arg)
                         * remove delay only if we have delay AND we
                         * have enabled tx_dejitter
                         */
-                       if (delay && dsp->tx_dejitter) {
-                               if (dsp_debug & DEBUG_DSP_CMX)
+                       if (delay > dsp_poll && dsp->tx_dejitter) {
+                               if (dsp_debug & DEBUG_DSP_CLOCK)
                                        printk(KERN_DEBUG
                                            "%s lowest tx_delay of %d bytes for"
                                            " dsp %s are now removed.\n",
                                            __func__, delay,
                                            dsp->name);
                                r = dsp->tx_R;
-                               rr = (r + delay) & CMX_BUFF_MASK;
+                               rr = (r + delay - (dsp_poll >> 1))
+                                       & CMX_BUFF_MASK;
                                /* delete tx-data */
                                while (r != rr) {
                                        q[r] = dsp_silence;
@@ -1797,14 +1831,16 @@ dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb)
        ww = dsp->tx_R;
        p = dsp->tx_buff;
        d = skb->data;
-       space = ww-w;
-       if (space <= 0)
-               space += CMX_BUFF_SIZE;
+       space = (ww - w - 1) & CMX_BUFF_MASK;
        /* write-pointer should not overrun nor reach read pointer */
-       if (space-1 < skb->len)
+       if (space < skb->len) {
                /* write to the space we have left */
-               ww = (ww - 1) & CMX_BUFF_MASK;
-       else
+               ww = (ww - 1) & CMX_BUFF_MASK; /* end one byte prior tx_R */
+               if (dsp_debug & DEBUG_DSP_CLOCK)
+                       printk(KERN_DEBUG "%s: TX overflow space=%d skb->len="
+                           "%d, w=0x%04x, ww=0x%04x\n", __func__, space,
+                           skb->len, w, ww);
+       } else
                /* write until all byte are copied */
                ww = (w + skb->len) & CMX_BUFF_MASK;
        dsp->tx_W = ww;
index 1dc21d8034109f7a71b7f015baae329a994535b4..3083338716b262eb8a730ee4c5fbc4dd3f202e27 100644 (file)
@@ -191,6 +191,8 @@ dsp_rx_off_member(struct dsp *dsp)
        struct mISDN_ctrl_req   cq;
        int rx_off = 1;
 
+       memset(&cq, 0, sizeof(cq));
+
        if (!dsp->features_rx_off)
                return;
 
@@ -249,6 +251,32 @@ dsp_rx_off(struct dsp *dsp)
        }
 }
 
+/* enable "fill empty" feature */
+static void
+dsp_fill_empty(struct dsp *dsp)
+{
+       struct mISDN_ctrl_req   cq;
+
+       memset(&cq, 0, sizeof(cq));
+
+       if (!dsp->ch.peer) {
+               if (dsp_debug & DEBUG_DSP_CORE)
+                       printk(KERN_DEBUG "%s: no peer, no fill_empty\n",
+                               __func__);
+               return;
+       }
+       cq.op = MISDN_CTRL_FILL_EMPTY;
+       cq.p1 = 1;
+       if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
+               printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
+                       __func__);
+               return;
+       }
+       if (dsp_debug & DEBUG_DSP_CORE)
+               printk(KERN_DEBUG "%s: %s set fill_empty = 1\n",
+                       __func__, dsp->name);
+}
+
 static int
 dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
 {
@@ -273,8 +301,9 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
                if (dsp_debug & DEBUG_DSP_CORE)
                        printk(KERN_DEBUG "%s: start dtmf\n", __func__);
                if (len == sizeof(int)) {
-                       printk(KERN_NOTICE "changing DTMF Threshold "
-                               "to %d\n", *((int *)data));
+                       if (dsp_debug & DEBUG_DSP_CORE)
+                               printk(KERN_NOTICE "changing DTMF Threshold "
+                                       "to %d\n", *((int *)data));
                        dsp->dtmf.treshold = (*(int *)data) * 10000;
                }
                /* init goertzel */
@@ -593,8 +622,6 @@ get_features(struct mISDNchannel *ch)
        struct dsp              *dsp = container_of(ch, struct dsp, ch);
        struct mISDN_ctrl_req   cq;
 
-       if (dsp_options & DSP_OPT_NOHARDWARE)
-               return;
        if (!ch->peer) {
                if (dsp_debug & DEBUG_DSP_CORE)
                        printk(KERN_DEBUG "%s: no peer, no features\n",
@@ -610,6 +637,10 @@ get_features(struct mISDNchannel *ch)
        }
        if (cq.op & MISDN_CTRL_RX_OFF)
                dsp->features_rx_off = 1;
+       if (cq.op & MISDN_CTRL_FILL_EMPTY)
+               dsp->features_fill_empty = 1;
+       if (dsp_options & DSP_OPT_NOHARDWARE)
+               return;
        if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) {
                cq.op = MISDN_CTRL_HW_FEATURES;
                *((u_long *)&cq.p1) = (u_long)&dsp->features;
@@ -837,11 +868,14 @@ dsp_function(struct mISDNchannel *ch,  struct sk_buff *skb)
                }
                if (dsp->hdlc) {
                        /* hdlc */
-                       spin_lock_irqsave(&dsp_lock, flags);
-                       if (dsp->b_active) {
-                               skb_queue_tail(&dsp->sendq, skb);
-                               schedule_work(&dsp->workq);
+                       if (!dsp->b_active) {
+                               ret = -EIO;
+                               break;
                        }
+                       hh->prim = PH_DATA_REQ;
+                       spin_lock_irqsave(&dsp_lock, flags);
+                       skb_queue_tail(&dsp->sendq, skb);
+                       schedule_work(&dsp->workq);
                        spin_unlock_irqrestore(&dsp_lock, flags);
                        return 0;
                }
@@ -865,6 +899,9 @@ dsp_function(struct mISDNchannel *ch,  struct sk_buff *skb)
                if (dsp->dtmf.hardware || dsp->dtmf.software)
                        dsp_dtmf_goertzel_init(dsp);
                get_features(ch);
+               /* enable fill_empty feature */
+               if (dsp->features_fill_empty)
+                       dsp_fill_empty(dsp);
                /* send ph_activate */
                hh->prim = PH_ACTIVATE_REQ;
                if (ch->peer)
@@ -1105,7 +1142,7 @@ static int dsp_init(void)
        } else {
                poll = 8;
                while (poll <= MAX_POLL) {
-                       tics = poll * HZ / 8000;
+                       tics = (poll * HZ) / 8000;
                        if (tics * 8000 == poll * HZ) {
                                dsp_tics = tics;
                                dsp_poll = poll;
index 83639be7f7add0a17df3400ae7a8070891ef6154..bf999bdc41c3e87df4a670738b81b90b215c1c97 100644 (file)
@@ -75,6 +75,15 @@ static struct device_attribute element_attributes[] = {
        __ATTR(args, 0444, attr_show_args, NULL),
 };
 
+static void
+mISDN_dsp_dev_release(struct device *dev)
+{
+       struct dsp_element_entry *entry =
+               container_of(dev, struct dsp_element_entry, dev);
+       list_del(&entry->list);
+       kfree(entry);
+}
+
 int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
 {
        struct dsp_element_entry *entry;
@@ -83,13 +92,14 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
        if (!elem)
                return -EINVAL;
 
-       entry = kzalloc(sizeof(struct dsp_element_entry), GFP_KERNEL);
+       entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;
 
        entry->elem = elem;
 
        entry->dev.class = elements_class;
+       entry->dev.release = mISDN_dsp_dev_release;
        dev_set_drvdata(&entry->dev, elem);
        dev_set_name(&entry->dev, elem->name);
        ret = device_register(&entry->dev);
@@ -98,6 +108,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
                        __func__, elem->name);
                goto err1;
        }
+       list_add_tail(&entry->list, &dsp_elements);
 
        for (i = 0; i < (sizeof(element_attributes)
                / sizeof(struct device_attribute)); ++i)
@@ -109,14 +120,15 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
                        goto err2;
                }
 
-       list_add_tail(&entry->list, &dsp_elements);
-
+#ifdef PIPELINE_DEBUG
        printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
+#endif
 
        return 0;
 
 err2:
        device_unregister(&entry->dev);
+       return ret;
 err1:
        kfree(entry);
        return ret;
@@ -132,11 +144,11 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
 
        list_for_each_entry_safe(entry, n, &dsp_elements, list)
                if (entry->elem == elem) {
-                       list_del(&entry->list);
                        device_unregister(&entry->dev);
-                       kfree(entry);
+#ifdef PIPELINE_DEBUG
                        printk(KERN_DEBUG "%s: %s unregistered\n",
                                __func__, elem->name);
+#endif
                        return;
                }
        printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
@@ -173,7 +185,9 @@ void dsp_pipeline_module_exit(void)
                kfree(entry);
        }
 
+#ifdef PIPELINE_DEBUG
        printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
+#endif
 }
 
 int dsp_pipeline_init(struct dsp_pipeline *pipeline)
@@ -239,7 +253,7 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
        if (!len)
                return 0;
 
-       dup = kmalloc(len + 1, GFP_KERNEL);
+       dup = kmalloc(len + 1, GFP_ATOMIC);
        if (!dup)
                return 0;
        strcpy(dup, cfg);
@@ -256,9 +270,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
                                elem = entry->elem;
 
                                pipeline_entry = kmalloc(sizeof(struct
-                                       dsp_pipeline_entry), GFP_KERNEL);
+                                       dsp_pipeline_entry), GFP_ATOMIC);
                                if (!pipeline_entry) {
-                                       printk(KERN_DEBUG "%s: failed to add "
+                                       printk(KERN_ERR "%s: failed to add "
                                            "entry to pipeline: %s (out of "
                                            "memory)\n", __func__, elem->name);
                                        incomplete = 1;
@@ -286,7 +300,7 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
                                                    args : "");
 #endif
                                        } else {
-                                               printk(KERN_DEBUG "%s: failed "
+                                               printk(KERN_ERR "%s: failed "
                                                  "to add entry to pipeline: "
                                                  "%s (new() returned NULL)\n",
                                                  __func__, elem->name);
@@ -301,7 +315,7 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
                if (found)
                        found = 0;
                else {
-                       printk(KERN_DEBUG "%s: element not found, skipping: "
+                       printk(KERN_ERR "%s: element not found, skipping: "
                                "%s\n", __func__, name);
                        incomplete = 1;
                }
index 2596fba4e6145b61e3c442ea483bcd0483ecba41..ab1168a110ae96104c7db2ea8ea12ae214d83fe1 100644 (file)
@@ -50,9 +50,6 @@ bchannel_bh(struct work_struct *ws)
 
        if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
                while ((skb = skb_dequeue(&bch->rqueue))) {
-                       if (bch->rcount >= 64)
-                               printk(KERN_WARNING "B-channel %p receive "
-                                       "queue if full, but empties...\n", bch);
                        bch->rcount--;
                        if (likely(bch->ch.peer)) {
                                err = bch->ch.recv(bch->ch.peer, skb);
@@ -168,6 +165,25 @@ recv_Dchannel(struct dchannel *dch)
 }
 EXPORT_SYMBOL(recv_Dchannel);
 
+void
+recv_Echannel(struct dchannel *ech, struct dchannel *dch)
+{
+       struct mISDNhead *hh;
+
+       if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
+               dev_kfree_skb(ech->rx_skb);
+               ech->rx_skb = NULL;
+               return;
+       }
+       hh = mISDN_HEAD_P(ech->rx_skb);
+       hh->prim = PH_DATA_E_IND;
+       hh->id = get_sapi_tei(ech->rx_skb->data);
+       skb_queue_tail(&dch->rqueue, ech->rx_skb);
+       ech->rx_skb = NULL;
+       schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Echannel);
+
 void
 recv_Bchannel(struct bchannel *bch)
 {
@@ -177,8 +193,10 @@ recv_Bchannel(struct bchannel *bch)
        hh->prim = PH_DATA_IND;
        hh->id = MISDN_ID_ANY;
        if (bch->rcount >= 64) {
-               dev_kfree_skb(bch->rx_skb);
-               bch->rx_skb = NULL;
+               printk(KERN_WARNING "B-channel %p receive queue overflow, "
+                       "flushing!\n", bch);
+               skb_queue_purge(&bch->rqueue);
+               bch->rcount = 0;
                return;
        }
        bch->rcount++;
@@ -200,8 +218,10 @@ void
 recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
 {
        if (bch->rcount >= 64) {
-               dev_kfree_skb(skb);
-               return;
+               printk(KERN_WARNING "B-channel %p receive queue overflow, "
+                       "flushing!\n", bch);
+               skb_queue_purge(&bch->rqueue);
+               bch->rcount = 0;
        }
        bch->rcount++;
        skb_queue_tail(&bch->rqueue, skb);
@@ -245,8 +265,12 @@ confirm_Bsend(struct bchannel *bch)
 {
        struct sk_buff  *skb;
 
-       if (bch->rcount >= 64)
-               return;
+       if (bch->rcount >= 64) {
+               printk(KERN_WARNING "B-channel %p receive queue overflow, "
+                       "flushing!\n", bch);
+               skb_queue_purge(&bch->rqueue);
+               bch->rcount = 0;
+       }
        skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
            0, NULL, GFP_ATOMIC);
        if (!skb) {
index 0884dd6892f813868417a8b54d90bdf7025227f5..abe574989572098bac2b9e0899c99ee9d08e6dd7 100644 (file)
@@ -777,6 +777,8 @@ fail:
 static void
 l1oip_socket_close(struct l1oip *hc)
 {
+       struct dchannel *dch = hc->chan[hc->d_idx].dch;
+
        /* kill thread */
        if (hc->socket_thread) {
                if (debug & DEBUG_L1OIP_SOCKET)
@@ -785,6 +787,16 @@ l1oip_socket_close(struct l1oip *hc)
                send_sig(SIGTERM, hc->socket_thread, 0);
                wait_for_completion(&hc->socket_complete);
        }
+
+       /* if active, we send up a PH_DEACTIVATE and deactivate */
+       if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+               if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+                       printk(KERN_DEBUG "%s: interface becomes deactivated "
+                               "due to timeout\n", __func__);
+               test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+               _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
+                       NULL, GFP_ATOMIC);
+       }
 }
 
 static int
@@ -944,7 +956,8 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
 
        switch (cq->op) {
        case MISDN_CTRL_GETOP:
-               cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER;
+               cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER
+                       | MISDN_CTRL_GETPEER;
                break;
        case MISDN_CTRL_SETPEER:
                hc->remoteip = (u32)cq->p1;
@@ -964,6 +977,13 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
                hc->remoteip = 0;
                l1oip_socket_open(hc);
                break;
+       case MISDN_CTRL_GETPEER:
+               if (debug & DEBUG_L1OIP_SOCKET)
+                       printk(KERN_DEBUG "%s: getting ip address.\n",
+                               __func__);
+               cq->p1 = hc->remoteip;
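+               /* p2: remote port in the lower 16 bits, local port in the upper */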
+               cq->p2 = hc->remoteport | (hc->localport << 16);
+               break;
        default:
                printk(KERN_WARNING "%s: unknown Op %x\n",
                    __func__, cq->op);
@@ -1413,7 +1433,8 @@ init_card(struct l1oip *hc, int pri, int bundle)
                hc->chan[i + ch].bch = bch;
                set_channelmap(bch->nr, dch->dev.channelmap);
        }
-       ret = mISDN_register_device(&dch->dev, hc->name);
+       /* TODO: create a parent device for this driver */
+       ret = mISDN_register_device(&dch->dev, NULL, hc->name);
        if (ret)
                return ret;
        hc->registered = 1;
index b73e952d12cf0674f6eb09782f009b530c9b1720..e826eeb1ecec58a45ef7483df0bb55599a2fbe7c 100644 (file)
@@ -101,7 +101,7 @@ l1m_debug(struct FsmInst *fi, char *fmt, ...)
        va_list va;
 
        va_start(va, fmt);
-       printk(KERN_DEBUG "%s: ", l1->dch->dev.name);
+       printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev));
        vprintk(fmt, va);
        printk("\n");
        va_end(va);
index 37a2de18cfd0f68d13c537a5f0c53ffbb252e509..508945d1b9c1a4b3ac18a31471179f13097a8cc3 100644 (file)
@@ -381,7 +381,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        memcpy(di.channelmap, dev->channelmap,
                                sizeof(di.channelmap));
                        di.nrbchan = dev->nrbchan;
-                       strcpy(di.name, dev->name);
+                       strcpy(di.name, dev_name(&dev->dev));
                        if (copy_to_user((void __user *)arg, &di, sizeof(di)))
                                err = -EFAULT;
                } else
@@ -460,6 +460,8 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
        struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
        struct sock *sk = sock->sk;
+       struct hlist_node *node;
+       struct sock *csk;
        int err = 0;
 
        if (*debug & DEBUG_SOCKET)
@@ -480,6 +482,26 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                err = -ENODEV;
                goto done;
        }
+
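+       /* for D-channel (layer 1/2) sockets: refuse the bind if another
+        * socket already uses this device in the opposite TE/NT mode */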
+       if (sk->sk_protocol < ISDN_P_B_START) {
+               read_lock_bh(&data_sockets.lock);
+               sk_for_each(csk, node, &data_sockets.head) {
+                       if (sk == csk)
+                               continue;
+                       if (_pms(csk)->dev != _pms(sk)->dev)
+                               continue;
+                       if (csk->sk_protocol >= ISDN_P_B_START)
+                               continue;
+                       if (IS_ISDN_P_TE(csk->sk_protocol)
+                                       == IS_ISDN_P_TE(sk->sk_protocol))
+                               continue;
+                       read_unlock_bh(&data_sockets.lock);
+                       err = -EBUSY;
+                       goto done;
+               }
+               read_unlock_bh(&data_sockets.lock);
+       }
+
        _pms(sk)->ch.send = mISDN_send;
        _pms(sk)->ch.ctrl = mISDN_ctrl;
 
@@ -639,12 +661,27 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        memcpy(di.channelmap, dev->channelmap,
                                sizeof(di.channelmap));
                        di.nrbchan = dev->nrbchan;
-                       strcpy(di.name, dev->name);
+                       strcpy(di.name, dev_name(&dev->dev));
                        if (copy_to_user((void __user *)arg, &di, sizeof(di)))
                                err = -EFAULT;
                } else
                        err = -ENODEV;
                break;
+       case IMSETDEVNAME:
+               {
+                       struct mISDN_devrename dn;
+                       if (copy_from_user(&dn, (void __user *)arg,
+                           sizeof(dn))) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       dev = get_mdevice(dn.id);
+                       if (dev)
+                               err = device_rename(&dev->dev, dn.name);
+                       else
+                               err = -ENODEV;
+               }
+               break;
        default:
                err = -EINVAL;
        }
index d55b14ae4e99d4e1ffbf58e76d9bda93df2cfb63..e2f45019ebf0be504bdcd68538cd47f51109c588 100644 (file)
@@ -172,7 +172,8 @@ send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
                else
                        printk(KERN_WARNING
                            "%s: dev(%s) prim(%x) id(%x) no channel\n",
-                           __func__, st->dev->name, hh->prim, hh->id);
+                           __func__, dev_name(&st->dev->dev), hh->prim,
+                           hh->id);
        } else if (lm == 0x8) {
                WARN_ON(lm == 0x8);
                ch = get_channel4id(st, hh->id);
@@ -181,11 +182,12 @@ send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
                else
                        printk(KERN_WARNING
                            "%s: dev(%s) prim(%x) id(%x) no channel\n",
-                           __func__, st->dev->name, hh->prim, hh->id);
+                           __func__, dev_name(&st->dev->dev), hh->prim,
+                           hh->id);
        } else {
                /* broadcast not handled yet */
                printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
-                   __func__, st->dev->name, hh->prim);
+                   __func__, dev_name(&st->dev->dev), hh->prim);
        }
        return -ESRCH;
 }
@@ -209,7 +211,8 @@ mISDNStackd(void *data)
        unlock_kernel();
 #endif
        if (*debug & DEBUG_MSG_THREAD)
-               printk(KERN_DEBUG "mISDNStackd %s started\n", st->dev->name);
+               printk(KERN_DEBUG "mISDNStackd %s started\n",
+                   dev_name(&st->dev->dev));
 
        if (st->notify != NULL) {
                complete(st->notify);
@@ -245,7 +248,7 @@ mISDNStackd(void *data)
                                        printk(KERN_DEBUG
                                            "%s: %s prim(%x) id(%x) "
                                            "send call(%d)\n",
-                                           __func__, st->dev->name,
+                                           __func__, dev_name(&st->dev->dev),
                                            mISDN_HEAD_PRIM(skb),
                                            mISDN_HEAD_ID(skb), err);
                                dev_kfree_skb(skb);
@@ -288,7 +291,7 @@ mISDNStackd(void *data)
                    mISDN_STACK_ACTION_MASK));
                if (*debug & DEBUG_MSG_THREAD)
                        printk(KERN_DEBUG "%s: %s wake status %08lx\n",
-                           __func__, st->dev->name, st->status);
+                           __func__, dev_name(&st->dev->dev), st->status);
                test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
 
                test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
@@ -303,15 +306,16 @@ mISDNStackd(void *data)
 #ifdef MISDN_MSG_STATS
        printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
            "msg %d sleep %d stopped\n",
-           st->dev->name, st->msg_cnt, st->sleep_cnt, st->stopped_cnt);
+           dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
+           st->stopped_cnt);
        printk(KERN_DEBUG
            "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
-           st->dev->name, st->thread->utime, st->thread->stime);
+           dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
        printk(KERN_DEBUG
            "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
-           st->dev->name, st->thread->nvcsw, st->thread->nivcsw);
+           dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
        printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
-           st->dev->name);
+           dev_name(&st->dev->dev));
 #endif
        test_and_set_bit(mISDN_STACK_KILLED, &st->status);
        test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
@@ -401,15 +405,16 @@ create_stack(struct mISDNdevice *dev)
        newst->own.send = mISDN_queue_message;
        newst->own.recv = mISDN_queue_message;
        if (*debug & DEBUG_CORE_FUNC)
-               printk(KERN_DEBUG "%s: st(%s)\n", __func__, newst->dev->name);
+               printk(KERN_DEBUG "%s: st(%s)\n", __func__,
+                   dev_name(&newst->dev->dev));
        newst->notify = &done;
        newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
-               newst->dev->name);
+               dev_name(&newst->dev->dev));
        if (IS_ERR(newst->thread)) {
                err = PTR_ERR(newst->thread);
                printk(KERN_ERR
                        "mISDN:cannot create kernel thread for %s (%d)\n",
-                       newst->dev->name, err);
+                       dev_name(&newst->dev->dev), err);
                delete_teimanager(dev->teimgr);
                kfree(newst);
        } else
@@ -428,29 +433,21 @@ connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
 
        if (*debug &  DEBUG_CORE_FUNC)
                printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-                       __func__, dev->name, protocol, adr->dev, adr->channel,
-                        adr->sapi, adr->tei);
+                       __func__, dev_name(&dev->dev), protocol, adr->dev,
+                       adr->channel, adr->sapi, adr->tei);
        switch (protocol) {
        case ISDN_P_NT_S0:
        case ISDN_P_NT_E1:
        case ISDN_P_TE_S0:
        case ISDN_P_TE_E1:
-#ifdef PROTOCOL_CHECK
-               /* this should be enhanced */
-               if (!list_empty(&dev->D.st->layer2)
-                       && dev->D.protocol != protocol)
-                       return -EBUSY;
-               if (!hlist_empty(&dev->D.st->l1sock.head)
-                       && dev->D.protocol != protocol)
-                       return -EBUSY;
-#endif
                ch->recv = mISDN_queue_message;
                ch->peer = &dev->D.st->own;
                ch->st = dev->D.st;
                rq.protocol = protocol;
-               rq.adr.channel = 0;
+               rq.adr.channel = adr->channel;
                err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
-               printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
+               printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
+                       dev->id);
                if (err)
                        return err;
                write_lock_bh(&dev->D.st->l1sock.lock);
@@ -473,7 +470,7 @@ connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
 
        if (*debug &  DEBUG_CORE_FUNC)
                printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-                       __func__, dev->name, protocol,
+                       __func__, dev_name(&dev->dev), protocol,
                        adr->dev, adr->channel, adr->sapi,
                        adr->tei);
        ch->st = dev->D.st;
@@ -529,7 +526,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
 
        if (*debug &  DEBUG_CORE_FUNC)
                printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-                       __func__, dev->name, protocol,
+                       __func__, dev_name(&dev->dev), protocol,
                        adr->dev, adr->channel, adr->sapi,
                        adr->tei);
        rq.protocol = ISDN_P_TE_S0;
@@ -541,15 +538,6 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
                if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
                        rq.protocol = ISDN_P_NT_E1;
        case ISDN_P_LAPD_TE:
-#ifdef PROTOCOL_CHECK
-               /* this should be enhanced */
-               if (!list_empty(&dev->D.st->layer2)
-                       && dev->D.protocol != protocol)
-                       return -EBUSY;
-               if (!hlist_empty(&dev->D.st->l1sock.head)
-                       && dev->D.protocol != protocol)
-                       return -EBUSY;
-#endif
                ch->recv = mISDN_queue_message;
                ch->peer = &dev->D.st->own;
                ch->st = dev->D.st;
@@ -590,7 +578,7 @@ delete_channel(struct mISDNchannel *ch)
        }
        if (*debug & DEBUG_CORE_FUNC)
                printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
-                   ch->st->dev->name, ch->protocol);
+                   dev_name(&ch->st->dev->dev), ch->protocol);
        if (ch->protocol >= ISDN_P_B_START) {
                if (ch->peer) {
                        ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
@@ -643,7 +631,7 @@ delete_stack(struct mISDNdevice *dev)
 
        if (*debug & DEBUG_CORE_FUNC)
                printk(KERN_DEBUG "%s: st(%s)\n", __func__,
-                   st->dev->name);
+                   dev_name(&st->dev->dev));
        if (dev->teimgr)
                delete_teimanager(dev->teimgr);
        if (st->thread) {
index 5c43d19e7c11676716e32be9357452a5f4db8a8c..b452dead8fd04e3fc782ec81fe68914c75f7b6b5 100644 (file)
@@ -968,9 +968,9 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
 
        if (*debug & DEBUG_L2_TEI)
                printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-                       __func__, mgr->ch.st->dev->name, crq->protocol,
-                       crq->adr.dev, crq->adr.channel, crq->adr.sapi,
-                       crq->adr.tei);
+                       __func__, dev_name(&mgr->ch.st->dev->dev),
+                       crq->protocol, crq->adr.dev, crq->adr.channel,
+                       crq->adr.sapi, crq->adr.tei);
        if (crq->adr.sapi != 0) /* not supported yet */
                return -EINVAL;
        if (crq->adr.tei > GROUP_TEI)
index e7fb7d2fcbfc4792abc91993337620a562a3323c..a4a1ae2146300b0f87ba8dc6526d46d5db16606a 100644 (file)
@@ -63,6 +63,12 @@ config LEDS_WRAP
        help
          This option enables support for the PCEngines WRAP programmable LEDs.
 
+config LEDS_ALIX2
+       tristate "LED Support for ALIX.2 and ALIX.3 series"
+       depends on LEDS_CLASS && X86 && EXPERIMENTAL
+       help
+         This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+
 config LEDS_H1940
        tristate "LED Support for iPAQ H1940 device"
        depends on LEDS_CLASS && ARCH_H1940
@@ -77,7 +83,7 @@ config LEDS_COBALT_QUBE
 
 config LEDS_COBALT_RAQ
        bool "LED Support for the Cobalt Raq series"
-       depends on LEDS_CLASS && MIPS_COBALT
+       depends on LEDS_CLASS=y && MIPS_COBALT
        select LEDS_TRIGGERS
        help
          This option enables support for the Cobalt Raq series LEDs.
@@ -158,6 +164,13 @@ config LEDS_PCA955X
          LED driver chips accessed via the I2C bus.  Supported
          devices include PCA9550, PCA9551, PCA9552, and PCA9553.
 
+config LEDS_WM8350
+       tristate "LED Support for WM8350 AudioPlus PMIC"
+       depends on LEDS_CLASS && MFD_WM8350
+       help
+         This option enables support for LEDs driven by the Wolfson
+         Microelectronics WM8350 AudioPlus PMIC.
+
 config LEDS_DA903X
        tristate "LED Support for DA9030/DA9034 PMIC"
        depends on LEDS_CLASS && PMIC_DA903X
index e1967a29850e8b1c4ef446af8030ddfe1a547658..bc247cb02e8269857d71b4fca63de1d99801e22b 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_S3C24XX)            += leds-s3c24xx.o
 obj-$(CONFIG_LEDS_AMS_DELTA)           += leds-ams-delta.o
 obj-$(CONFIG_LEDS_NET48XX)             += leds-net48xx.o
 obj-$(CONFIG_LEDS_WRAP)                        += leds-wrap.o
+obj-$(CONFIG_LEDS_ALIX2)               += leds-alix2.o
 obj-$(CONFIG_LEDS_H1940)               += leds-h1940.o
 obj-$(CONFIG_LEDS_COBALT_QUBE)         += leds-cobalt-qube.o
 obj-$(CONFIG_LEDS_COBALT_RAQ)          += leds-cobalt-raq.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_LEDS_FSG)                        += leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)             += leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)              += leds-da903x.o
 obj-$(CONFIG_LEDS_HP_DISK)             += leds-hp-disk.o
+obj-$(CONFIG_LEDS_WM8350)              += leds-wm8350.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)       += ledtrig-timer.o
index 6c4a326176d7be57d724d994a7ff5e971f50dc93..52f82e3ea13aae5b04a603938fba9dbbd0654712 100644 (file)
@@ -91,9 +91,29 @@ void led_classdev_resume(struct led_classdev *led_cdev)
 }
 EXPORT_SYMBOL_GPL(led_classdev_resume);
 
+static int led_suspend(struct device *dev, pm_message_t state)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+       if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+               led_classdev_suspend(led_cdev);
+
+       return 0;
+}
+
+static int led_resume(struct device *dev)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+       if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+               led_classdev_resume(led_cdev);
+
+       return 0;
+}
+
 /**
  * led_classdev_register - register a new object of led_classdev class.
- * @dev: The device to register.
+ * @parent: The device to register.
  * @led_cdev: the led_classdev structure for this device.
  */
 int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
@@ -174,6 +194,8 @@ static int __init leds_init(void)
        leds_class = class_create(THIS_MODULE, "leds");
        if (IS_ERR(leds_class))
                return PTR_ERR(leds_class);
+       leds_class->suspend = led_suspend;
+       leds_class->resume = led_resume;
        return 0;
 }
 
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
new file mode 100644 (file)
index 0000000..ddbd773
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * LEDs driver for PCEngines ALIX.2 and ALIX.3
+ *
+ * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+static int force = 0;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+
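+/* each LED is switched by writing its on or off bit mask to an I/O port */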
+struct alix_led {
+       struct led_classdev cdev;
+       unsigned short port;
+       unsigned int on_value;
+       unsigned int off_value;
+};
+
+static void alix_led_set(struct led_classdev *led_cdev,
+                        enum led_brightness brightness)
+{
+       struct alix_led *led_dev =
+               container_of(led_cdev, struct alix_led, cdev);
+
+       if (brightness)
+               outl(led_dev->on_value, led_dev->port);
+       else
+               outl(led_dev->off_value, led_dev->port);
+}
+
+static struct alix_led alix_leds[] = {
+       {
+               .cdev = {
+                       .name = "alix:1",
+                       .brightness_set = alix_led_set,
+               },
+               .port = 0x6100,
+               .on_value = 1 << 22,
+               .off_value = 1 << 6,
+       },
+       {
+               .cdev = {
+                       .name = "alix:2",
+                       .brightness_set = alix_led_set,
+               },
+               .port = 0x6180,
+               .on_value = 1 << 25,
+               .off_value = 1 << 9,
+       },
+       {
+               .cdev = {
+                       .name = "alix:3",
+                       .brightness_set = alix_led_set,
+               },
+               .port = 0x6180,
+               .on_value = 1 << 27,
+               .off_value = 1 << 11,
+       },
+};
+
+static int __init alix_led_probe(struct platform_device *pdev)
+{
+       int i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(alix_leds); i++) {
+               alix_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
+               ret = led_classdev_register(&pdev->dev, &alix_leds[i].cdev);
+               if (ret < 0)
+                       goto fail;
+       }
+       return 0;
+
+fail:
+       while (--i >= 0)
+               led_classdev_unregister(&alix_leds[i].cdev);
+       return ret;
+}
+
+static int alix_led_remove(struct platform_device *pdev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(alix_leds); i++)
+               led_classdev_unregister(&alix_leds[i].cdev);
+       return 0;
+}
+
+static struct platform_driver alix_led_driver = {
+       .remove = alix_led_remove,
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+       },
+};
+
+static int __init alix_present(void)
+{
+       const unsigned long bios_phys = 0x000f0000;
+       const size_t bios_len = 0x00010000;
+       const char alix_sig[] = "PC Engines ALIX.";
+       const size_t alix_sig_len = sizeof(alix_sig) - 1;
+
+       const char *bios_virt;
+       const char *scan_end;
+       const char *p;
+       int ret = 0;
+
+       if (force) {
+               printk(KERN_NOTICE "%s: forced to skip BIOS test, "
+                      "assume system has ALIX.2 style LEDs\n",
+                      KBUILD_MODNAME);
+               ret = 1;
+               goto out;
+       }
+
+       bios_virt = phys_to_virt(bios_phys);
+       scan_end = bios_virt + bios_len - (alix_sig_len + 2);
+       for (p = bios_virt; p < scan_end; p++) {
+               const char *tail;
+
+               if (memcmp(p, alix_sig, alix_sig_len) != 0) {
+                       continue;
+               }
+
+               tail = p + alix_sig_len;
+               if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') {
+                       printk(KERN_INFO
+                              "%s: system is recognized as \"%s\"\n",
+                              KBUILD_MODNAME, p);
+                       ret = 1;
+                       break;
+               }
+       }
+
+out:
+       return ret;
+}
+
+static struct platform_device *pdev;
+
+static int __init alix_led_init(void)
+{
+       int ret;
+
+       if (!alix_present()) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+       if (!IS_ERR(pdev)) {
+               ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
+               if (ret)
+                       platform_device_unregister(pdev);
+       } else
+               ret = PTR_ERR(pdev);
+
+out:
+       return ret;
+}
+
+static void __exit alix_led_exit(void)
+{
+       platform_device_unregister(pdev);
+       platform_driver_unregister(&alix_led_driver);
+}
+
+module_init(alix_led_init);
+module_exit(alix_led_exit);
+
+MODULE_AUTHOR("Constantin Baranov <const@mimas.ru>");
+MODULE_DESCRIPTION("PCEngines ALIX.2 and ALIX.3 LED driver");
+MODULE_LICENSE("GPL");
index 1bd590bb3a6e8edce5aae8b672da8fe2a89bdb8a..446050759b4dfb93e18a7a3dcd5905cf2886785e 100644 (file)
@@ -79,37 +79,12 @@ static struct ams_delta_led ams_delta_leds[] = {
        },
 };
 
-#ifdef CONFIG_PM
-static int ams_delta_led_suspend(struct platform_device *dev,
-               pm_message_t state)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
-               led_classdev_suspend(&ams_delta_leds[i].cdev);
-
-       return 0;
-}
-
-static int ams_delta_led_resume(struct platform_device *dev)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
-               led_classdev_resume(&ams_delta_leds[i].cdev);
-
-       return 0;
-}
-#else
-#define ams_delta_led_suspend NULL
-#define ams_delta_led_resume NULL
-#endif
-
 static int ams_delta_led_probe(struct platform_device *pdev)
 {
        int i, ret;
 
        for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) {
+               ams_delta_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
                ret = led_classdev_register(&pdev->dev,
                                &ams_delta_leds[i].cdev);
                if (ret < 0)
@@ -127,7 +102,7 @@ static int ams_delta_led_remove(struct platform_device *pdev)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i--)
+       for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
                led_classdev_unregister(&ams_delta_leds[i].cdev);
 
        return 0;
@@ -136,8 +111,6 @@ static int ams_delta_led_remove(struct platform_device *pdev)
 static struct platform_driver ams_delta_led_driver = {
        .probe          = ams_delta_led_probe,
        .remove         = ams_delta_led_remove,
-       .suspend        = ams_delta_led_suspend,
-       .resume         = ams_delta_led_resume,
        .driver         = {
                .name = "ams-delta-led",
                .owner = THIS_MODULE,
@@ -151,7 +124,7 @@ static int __init ams_delta_led_init(void)
 
 static void __exit ams_delta_led_exit(void)
 {
-       return platform_driver_unregister(&ams_delta_led_driver);
+       platform_driver_unregister(&ams_delta_led_driver);
 }
 
 module_init(ams_delta_led_init);
index eb3415e88f43e217fb8faa87a00b9c082773ce0f..1813c84ea5fccb10293e499a3721dacf94c54290 100644 (file)
@@ -142,6 +142,7 @@ static struct led_classdev clevo_mail_led = {
        .name                   = "clevo::mail",
        .brightness_set         = clevo_mail_led_set,
        .blink_set              = clevo_mail_led_blink,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static int __init clevo_mail_led_probe(struct platform_device *pdev)
@@ -155,29 +156,9 @@ static int clevo_mail_led_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int clevo_mail_led_suspend(struct platform_device *dev,
-                                 pm_message_t state)
-{
-       led_classdev_suspend(&clevo_mail_led);
-       return 0;
-}
-
-static int clevo_mail_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&clevo_mail_led);
-       return 0;
-}
-#else
-#define clevo_mail_led_suspend    NULL
-#define clevo_mail_led_resume     NULL
-#endif
-
 static struct platform_driver clevo_mail_led_driver = {
        .probe          = clevo_mail_led_probe,
        .remove         = clevo_mail_led_remove,
-       .suspend        = clevo_mail_led_suspend,
-       .resume         = clevo_mail_led_resume,
        .driver         = {
                .name           = KBUILD_MODNAME,
                .owner          = THIS_MODULE,
index 34935155c1c00077b6e94201cb4c5e6a534f1fad..5f7c9c5c09b1306c35211ed024aed989c1592b61 100644 (file)
@@ -99,64 +99,43 @@ static void fsg_led_ring_set(struct led_classdev *led_cdev,
 }
 
 
-
 static struct led_classdev fsg_wlan_led = {
        .name                   = "fsg:blue:wlan",
        .brightness_set         = fsg_led_wlan_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_wan_led = {
        .name                   = "fsg:blue:wan",
        .brightness_set         = fsg_led_wan_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_sata_led = {
        .name                   = "fsg:blue:sata",
        .brightness_set         = fsg_led_sata_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_usb_led = {
        .name                   = "fsg:blue:usb",
        .brightness_set         = fsg_led_usb_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_sync_led = {
        .name                   = "fsg:blue:sync",
        .brightness_set         = fsg_led_sync_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_ring_led = {
        .name                   = "fsg:blue:ring",
        .brightness_set         = fsg_led_ring_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 
-
-#ifdef CONFIG_PM
-static int fsg_led_suspend(struct platform_device *dev, pm_message_t state)
-{
-       led_classdev_suspend(&fsg_wlan_led);
-       led_classdev_suspend(&fsg_wan_led);
-       led_classdev_suspend(&fsg_sata_led);
-       led_classdev_suspend(&fsg_usb_led);
-       led_classdev_suspend(&fsg_sync_led);
-       led_classdev_suspend(&fsg_ring_led);
-       return 0;
-}
-
-static int fsg_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&fsg_wlan_led);
-       led_classdev_resume(&fsg_wan_led);
-       led_classdev_resume(&fsg_sata_led);
-       led_classdev_resume(&fsg_usb_led);
-       led_classdev_resume(&fsg_sync_led);
-       led_classdev_resume(&fsg_ring_led);
-       return 0;
-}
-#endif
-
-
 static int fsg_led_probe(struct platform_device *pdev)
 {
        int ret;
@@ -232,10 +211,6 @@ static int fsg_led_remove(struct platform_device *pdev)
 static struct platform_driver fsg_led_driver = {
        .probe          = fsg_led_probe,
        .remove         = fsg_led_remove,
-#ifdef CONFIG_PM
-       .suspend        = fsg_led_suspend,
-       .resume         = fsg_led_resume,
-#endif
        .driver         = {
                .name           = "fsg-led",
        },
index b13bd2950e956347b98bac9ed76e18efcabed612..2e3df08b649b6c2a3a2dd83b62233bc3505a1459 100644 (file)
@@ -105,6 +105,7 @@ static int gpio_led_probe(struct platform_device *pdev)
                }
                led_dat->cdev.brightness_set = gpio_led_set;
                led_dat->cdev.brightness = LED_OFF;
+               led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
                gpio_direction_output(led_dat->gpio, led_dat->active_low);
 
@@ -154,44 +155,9 @@ static int __devexit gpio_led_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int gpio_led_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-       struct gpio_led_data *leds_data;
-       int i;
-
-       leds_data = platform_get_drvdata(pdev);
-
-       for (i = 0; i < pdata->num_leds; i++)
-               led_classdev_suspend(&leds_data[i].cdev);
-
-       return 0;
-}
-
-static int gpio_led_resume(struct platform_device *pdev)
-{
-       struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-       struct gpio_led_data *leds_data;
-       int i;
-
-       leds_data = platform_get_drvdata(pdev);
-
-       for (i = 0; i < pdata->num_leds; i++)
-               led_classdev_resume(&leds_data[i].cdev);
-
-       return 0;
-}
-#else
-#define gpio_led_suspend NULL
-#define gpio_led_resume NULL
-#endif
-
 static struct platform_driver gpio_led_driver = {
        .probe          = gpio_led_probe,
        .remove         = __devexit_p(gpio_led_remove),
-       .suspend        = gpio_led_suspend,
-       .resume         = gpio_led_resume,
        .driver         = {
                .name   = "leds-gpio",
                .owner  = THIS_MODULE,
index 44fa757d82547ff82b705a29ee5d54bf8a3b179a..d786adc8c5e3616ecb8131f2b1d58d68d19a30c8 100644 (file)
@@ -68,25 +68,9 @@ static struct led_classdev hpled_led = {
        .name                   = "hp:red:hddprotection",
        .default_trigger        = "heartbeat",
        .brightness_set         = hpled_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int hpled_suspend(struct acpi_device *dev, pm_message_t state)
-{
-       led_classdev_suspend(&hpled_led);
-       return 0;
-}
-
-static int hpled_resume(struct acpi_device *dev)
-{
-       led_classdev_resume(&hpled_led);
-       return 0;
-}
-#else
-#define hpled_suspend NULL
-#define hpled_resume NULL
-#endif
-
 static int hpled_add(struct acpi_device *device)
 {
        int ret;
@@ -121,8 +105,6 @@ static struct acpi_driver leds_hp_driver = {
        .ops = {
                .add     = hpled_add,
                .remove  = hpled_remove,
-               .suspend = hpled_suspend,
-               .resume  = hpled_resume,
        }
 };
 
index e8fb1baf8a5072e3d3454b3366e32d1c2c015140..e4ce1fd46338122b6e93b39f102a81be4ea621cb 100644 (file)
@@ -45,30 +45,16 @@ static struct led_classdev hp6xx_red_led = {
        .name                   = "hp6xx:red",
        .default_trigger        = "hp6xx-charge",
        .brightness_set         = hp6xxled_red_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev hp6xx_green_led = {
        .name                   = "hp6xx:green",
        .default_trigger        = "ide-disk",
        .brightness_set         = hp6xxled_green_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int hp6xxled_suspend(struct platform_device *dev, pm_message_t state)
-{
-       led_classdev_suspend(&hp6xx_red_led);
-       led_classdev_suspend(&hp6xx_green_led);
-       return 0;
-}
-
-static int hp6xxled_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&hp6xx_red_led);
-       led_classdev_resume(&hp6xx_green_led);
-       return 0;
-}
-#endif
-
 static int hp6xxled_probe(struct platform_device *pdev)
 {
        int ret;
@@ -98,10 +84,6 @@ MODULE_ALIAS("platform:hp6xx-led");
 static struct platform_driver hp6xxled_driver = {
        .probe          = hp6xxled_probe,
        .remove         = hp6xxled_remove,
-#ifdef CONFIG_PM
-       .suspend        = hp6xxled_suspend,
-       .resume         = hp6xxled_resume,
-#endif
        .driver         = {
                .name           = "hp6xx-led",
                .owner          = THIS_MODULE,
index 054360473c9490ea6e4251188dcbf4cad92fa310..93987a12da494594881dde8c375d94a246cd2254 100644 (file)
@@ -33,26 +33,9 @@ static void net48xx_error_led_set(struct led_classdev *led_cdev,
 static struct led_classdev net48xx_error_led = {
        .name           = "net48xx::error",
        .brightness_set = net48xx_error_led_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int net48xx_led_suspend(struct platform_device *dev,
-               pm_message_t state)
-{
-       led_classdev_suspend(&net48xx_error_led);
-       return 0;
-}
-
-static int net48xx_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&net48xx_error_led);
-       return 0;
-}
-#else
-#define net48xx_led_suspend NULL
-#define net48xx_led_resume NULL
-#endif
-
 static int net48xx_led_probe(struct platform_device *pdev)
 {
        return led_classdev_register(&pdev->dev, &net48xx_error_led);
@@ -67,8 +50,6 @@ static int net48xx_led_remove(struct platform_device *pdev)
 static struct platform_driver net48xx_led_driver = {
        .probe          = net48xx_led_probe,
        .remove         = net48xx_led_remove,
-       .suspend        = net48xx_led_suspend,
-       .resume         = net48xx_led_resume,
        .driver         = {
                .name           = DRVNAME,
                .owner          = THIS_MODULE,
index 4064d4f6b33b41a99d093d3e05c65f281ed60588..76ec7498e2d5eee3e8d762ec71f2c8c7e54bafd7 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/leds.h>
 #include <linux/input.h>
 #include <linux/mutex.h>
+#include <linux/workqueue.h>
 #include <linux/leds-pca9532.h>
 
 static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
@@ -34,6 +35,7 @@ struct pca9532_data {
        struct pca9532_led leds[16];
        struct mutex update_lock;
        struct input_dev    *idev;
+       struct work_struct work;
        u8 pwm[2];
        u8 psc[2];
 };
@@ -63,7 +65,7 @@ static struct i2c_driver pca9532_driver = {
  * as a compromise we average one pwm to the values requested by all
  * leds that are not ON/OFF.
  * */
-static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
+static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
        enum led_brightness value)
 {
        int a = 0, b = 0, i = 0;
@@ -84,11 +86,17 @@ static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
        b = b/a;
        if (b > 0xFF)
                return -EINVAL;
-       mutex_lock(&data->update_lock);
        data->pwm[pwm] = b;
+       data->psc[pwm] = blink;
+       return 0;
+}
+
+static int pca9532_setpwm(struct i2c_client *client, int pwm)
+{
+       struct pca9532_data *data = i2c_get_clientdata(client);
+       mutex_lock(&data->update_lock);
        i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
                data->pwm[pwm]);
-       data->psc[pwm] = blink;
        i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
                data->psc[pwm]);
        mutex_unlock(&data->update_lock);
@@ -124,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
                led->state = PCA9532_ON;
        else {
                led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
-               err = pca9532_setpwm(led->client, 0, 0, value);
+               err = pca9532_calcpwm(led->client, 0, 0, value);
                if (err)
                        return; /* XXX: led api doesn't allow error code? */
        }
-       pca9532_setled(led);
+       schedule_work(&led->work);
 }
 
 static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -137,6 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
        struct pca9532_led *led = ldev_to_led(led_cdev);
        struct i2c_client *client = led->client;
        int psc;
+       int err = 0;
 
        if (*delay_on == 0 && *delay_off == 0) {
        /* led subsystem ask us for a blink rate */
@@ -148,11 +157,15 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
 
        /* Thecus specific: only use PSC/PWM 0 */
        psc = (*delay_on * 152-1)/1000;
-       return pca9532_setpwm(client, 0, psc, led_cdev->brightness);
+       err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+       if (err)
+               return err;
+       schedule_work(&led->work);
+       return 0;
 }
 
-int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
-       int value)
+static int pca9532_event(struct input_dev *dev, unsigned int type,
+       unsigned int code, int value)
 {
        struct pca9532_data *data = input_get_drvdata(dev);
 
@@ -165,13 +178,28 @@ int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
        else
                data->pwm[1] = 0;
 
-       dev_info(&dev->dev, "setting beep to %d \n", data->pwm[1]);
+       schedule_work(&data->work);
+
+       return 0;
+}
+
+static void pca9532_input_work(struct work_struct *work)
+{
+       struct pca9532_data *data;
+       data = container_of(work, struct pca9532_data, work);
        mutex_lock(&data->update_lock);
        i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
                data->pwm[1]);
        mutex_unlock(&data->update_lock);
+}
 
-       return 0;
+static void pca9532_led_work(struct work_struct *work)
+{
+       struct pca9532_led *led;
+       led = container_of(work, struct pca9532_led, work);
+       if (led->state == PCA9532_PWM0)
+               pca9532_setpwm(led->client, 0);
+       pca9532_setled(led);
 }
 
 static int pca9532_configure(struct i2c_client *client,
@@ -204,8 +232,9 @@ static int pca9532_configure(struct i2c_client *client,
                        led->ldev.brightness = LED_OFF;
                        led->ldev.brightness_set = pca9532_set_brightness;
                        led->ldev.blink_set = pca9532_set_blink;
-                       if (led_classdev_register(&client->dev,
-                               &led->ldev) < 0)        {
+                       INIT_WORK(&led->work, pca9532_led_work);
+                       err = led_classdev_register(&client->dev, &led->ldev);
+                       if (err < 0) {
                                dev_err(&client->dev,
                                        "couldn't register LED %s\n",
                                        led->name);
@@ -233,9 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
                                                BIT_MASK(SND_TONE);
                        data->idev->event = pca9532_event;
                        input_set_drvdata(data->idev, data);
+                       INIT_WORK(&data->work, pca9532_input_work);
                        err = input_register_device(data->idev);
                        if (err) {
                                input_free_device(data->idev);
+                               cancel_work_sync(&data->work);
                                data->idev = NULL;
                                goto exit;
                        }
@@ -252,18 +283,19 @@ exit:
                                break;
                        case PCA9532_TYPE_LED:
                                led_classdev_unregister(&data->leds[i].ldev);
+                               cancel_work_sync(&data->leds[i].work);
                                break;
                        case PCA9532_TYPE_N2100_BEEP:
                                if (data->idev != NULL) {
                                        input_unregister_device(data->idev);
                                        input_free_device(data->idev);
+                                       cancel_work_sync(&data->work);
                                        data->idev = NULL;
                                }
                                break;
                        }
 
        return err;
-
 }
 
 static int pca9532_probe(struct i2c_client *client,
@@ -271,12 +303,16 @@ static int pca9532_probe(struct i2c_client *client,
 {
        struct pca9532_data *data = i2c_get_clientdata(client);
        struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
+       int err;
+
+       if (!pca9532_pdata)
+               return -EIO;
 
        if (!i2c_check_functionality(client->adapter,
                I2C_FUNC_SMBUS_BYTE_DATA))
                return -EIO;
 
-       data = kzalloc(sizeof(struct pca9532_data), GFP_KERNEL);
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -285,12 +321,13 @@ static int pca9532_probe(struct i2c_client *client,
        data->client = client;
        mutex_init(&data->update_lock);
 
-       if (pca9532_pdata == NULL)
-               return -EIO;
-
-       pca9532_configure(client, data, pca9532_pdata);
-       return 0;
+       err = pca9532_configure(client, data, pca9532_pdata);
+       if (err) {
+               kfree(data);
+               i2c_set_clientdata(client, NULL);
+       }
 
+       return err;
 }
 
 static int pca9532_remove(struct i2c_client *client)
@@ -303,11 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
                        break;
                case PCA9532_TYPE_LED:
                        led_classdev_unregister(&data->leds[i].ldev);
+                       cancel_work_sync(&data->leds[i].work);
                        break;
                case PCA9532_TYPE_N2100_BEEP:
                        if (data->idev != NULL) {
                                input_unregister_device(data->idev);
                                input_free_device(data->idev);
+                               cancel_work_sync(&data->work);
                                data->idev = NULL;
                        }
                        break;
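The pca9532 rework above splits the SMBus register writes out of the brightness and blink callbacks, which can be called from contexts where the driver must not sleep, and defers them to work items (pca9532_led_work/pca9532_input_work), with cancel_work_sync() added on every teardown path. A generic sketch of that deferral pattern, with hypothetical names and a made-up register address:

    #include <linux/i2c.h>
    #include <linux/kernel.h>
    #include <linux/leds.h>
    #include <linux/workqueue.h>

    struct demo_led {
            struct led_classdev cdev;
            struct i2c_client *client;
            struct work_struct work;
            enum led_brightness brightness;
    };

    static void demo_led_work(struct work_struct *work)
    {
            struct demo_led *led = container_of(work, struct demo_led, work);

            /* process context: sleeping bus access is allowed here */
            i2c_smbus_write_byte_data(led->client, 0x05 /* made-up register */,
                                      led->brightness);
    }

    static void demo_led_set(struct led_classdev *cdev, enum led_brightness value)
    {
            struct demo_led *led = container_of(cdev, struct demo_led, cdev);

            /* possibly atomic context: record the value and defer the I/O */
            led->brightness = value;
            schedule_work(&led->work);
    }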
index 25a07f2643ade687245f5f3e929207325e094da0..4d81131542ae68e71e203dae1f9e63194547ffef 100644 (file)
@@ -82,6 +82,7 @@ static int s3c24xx_led_probe(struct platform_device *dev)
        led->cdev.brightness_set = s3c24xx_led_set;
        led->cdev.default_trigger = pdata->def_trigger;
        led->cdev.name = pdata->name;
+       led->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
        led->pdata = pdata;
 
@@ -111,33 +112,9 @@ static int s3c24xx_led_probe(struct platform_device *dev)
        return ret;
 }
 
-
-#ifdef CONFIG_PM
-static int s3c24xx_led_suspend(struct platform_device *dev, pm_message_t state)
-{
-       struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-       led_classdev_suspend(&led->cdev);
-       return 0;
-}
-
-static int s3c24xx_led_resume(struct platform_device *dev)
-{
-       struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-       led_classdev_resume(&led->cdev);
-       return 0;
-}
-#else
-#define s3c24xx_led_suspend NULL
-#define s3c24xx_led_resume NULL
-#endif
-
 static struct platform_driver s3c24xx_led_driver = {
        .probe          = s3c24xx_led_probe,
        .remove         = s3c24xx_led_remove,
-       .suspend        = s3c24xx_led_suspend,
-       .resume         = s3c24xx_led_resume,
        .driver         = {
                .name           = "s3c24xx_led",
                .owner          = THIS_MODULE,
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
new file mode 100644 (file)
index 0000000..38c6bcb
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * LED driver for WM8350 driven LEDS.
+ *
+ * Copyright(C) 2007, 2008 Wolfson Microelectronics PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/mfd/wm8350/pmic.h>
+#include <linux/regulator/consumer.h>
+
+/* Microamps */
+static const int isink_cur[] = {
+       4,
+       5,
+       6,
+       7,
+       8,
+       10,
+       11,
+       14,
+       16,
+       19,
+       23,
+       27,
+       32,
+       39,
+       46,
+       54,
+       65,
+       77,
+       92,
+       109,
+       130,
+       154,
+       183,
+       218,
+       259,
+       308,
+       367,
+       436,
+       518,
+       616,
+       733,
+       872,
+       1037,
+       1233,
+       1466,
+       1744,
+       2073,
+       2466,
+       2933,
+       3487,
+       4147,
+       4932,
+       5865,
+       6975,
+       8294,
+       9864,
+       11730,
+       13949,
+       16589,
+       19728,
+       23460,
+       27899,
+       33178,
+       39455,
+       46920,
+       55798,
+       66355,
+       78910,
+       93840,
+       111596,
+       132710,
+       157820,
+       187681,
+       223191
+};
+
+#define to_wm8350_led(led_cdev) \
+       container_of(led_cdev, struct wm8350_led, cdev)
+
+static void wm8350_led_enable(struct wm8350_led *led)
+{
+       int ret;
+
+       if (led->enabled)
+               return;
+
+       ret = regulator_enable(led->isink);
+       if (ret != 0) {
+               dev_err(led->cdev.dev, "Failed to enable ISINK: %d\n", ret);
+               return;
+       }
+
+       ret = regulator_enable(led->dcdc);
+       if (ret != 0) {
+               dev_err(led->cdev.dev, "Failed to enable DCDC: %d\n", ret);
+               regulator_disable(led->isink);
+               return;
+       }
+
+       led->enabled = 1;
+}
+
+static void wm8350_led_disable(struct wm8350_led *led)
+{
+       int ret;
+
+       if (!led->enabled)
+               return;
+
+       ret = regulator_disable(led->dcdc);
+       if (ret != 0) {
+               dev_err(led->cdev.dev, "Failed to disable DCDC: %d\n", ret);
+               return;
+       }
+
+       ret = regulator_disable(led->isink);
+       if (ret != 0) {
+               dev_err(led->cdev.dev, "Failed to disable ISINK: %d\n", ret);
+               regulator_enable(led->dcdc);
+               return;
+       }
+
+       led->enabled = 0;
+}
+
+static void led_work(struct work_struct *work)
+{
+       struct wm8350_led *led = container_of(work, struct wm8350_led, work);
+       int ret;
+       int uA;
+       unsigned long flags;
+
+       mutex_lock(&led->mutex);
+
+       spin_lock_irqsave(&led->value_lock, flags);
+
+       if (led->value == LED_OFF) {
+               spin_unlock_irqrestore(&led->value_lock, flags);
+               wm8350_led_disable(led);
+               goto out;
+       }
+
+       /* This scales linearly into the index of valid current
+        * settings which results in a linear scaling of perceived
+        * brightness due to the non-linear current settings provided
+        * by the hardware.
+        */
+       uA = (led->max_uA_index * led->value) / LED_FULL;
+       spin_unlock_irqrestore(&led->value_lock, flags);
+       BUG_ON(uA >= ARRAY_SIZE(isink_cur));
+
+       ret = regulator_set_current_limit(led->isink, isink_cur[uA],
+                                         isink_cur[uA]);
+       if (ret != 0)
+               dev_err(led->cdev.dev, "Failed to set %duA: %d\n",
+                       isink_cur[uA], ret);
+
+       wm8350_led_enable(led);
+
+out:
+       mutex_unlock(&led->mutex);
+}
+
+static void wm8350_led_set(struct led_classdev *led_cdev,
+                          enum led_brightness value)
+{
+       struct wm8350_led *led = to_wm8350_led(led_cdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&led->value_lock, flags);
+       led->value = value;
+       schedule_work(&led->work);
+       spin_unlock_irqrestore(&led->value_lock, flags);
+}
+
+static void wm8350_led_shutdown(struct platform_device *pdev)
+{
+       struct wm8350_led *led = platform_get_drvdata(pdev);
+
+       mutex_lock(&led->mutex);
+       led->value = LED_OFF;
+       wm8350_led_disable(led);
+       mutex_unlock(&led->mutex);
+}
+
+static int wm8350_led_probe(struct platform_device *pdev)
+{
+       struct regulator *isink, *dcdc;
+       struct wm8350_led *led;
+       struct wm8350_led_platform_data *pdata = pdev->dev.platform_data;
+       int ret, i;
+
+       if (pdata == NULL) {
+               dev_err(&pdev->dev, "no platform data\n");
+               return -ENODEV;
+       }
+
+       if (pdata->max_uA < isink_cur[0]) {
+               dev_err(&pdev->dev, "Invalid maximum current %duA\n",
+                       pdata->max_uA);
+               return -EINVAL;
+       }
+
+       isink = regulator_get(&pdev->dev, "led_isink");
+       if (IS_ERR(isink)) {
+               printk(KERN_ERR "%s: cant get ISINK\n", __func__);
+               return PTR_ERR(isink);
+       }
+
+       dcdc = regulator_get(&pdev->dev, "led_vcc");
+       if (IS_ERR(dcdc)) {
+               printk(KERN_ERR "%s: cant get DCDC\n", __func__);
+               ret = PTR_ERR(dcdc);
+               goto err_isink;
+       }
+
+       led = kzalloc(sizeof(*led), GFP_KERNEL);
+       if (led == NULL) {
+               ret = -ENOMEM;
+               goto err_dcdc;
+       }
+
+       led->cdev.brightness_set = wm8350_led_set;
+       led->cdev.default_trigger = pdata->default_trigger;
+       led->cdev.name = pdata->name;
+       led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+       led->enabled = regulator_is_enabled(isink);
+       led->isink = isink;
+       led->dcdc = dcdc;
+
+       for (i = 0; i < ARRAY_SIZE(isink_cur) - 1; i++)
+               if (isink_cur[i] >= pdata->max_uA)
+                       break;
+       led->max_uA_index = i;
+       if (pdata->max_uA != isink_cur[i])
+               dev_warn(&pdev->dev,
+                        "Maximum current %duA is not directly supported,"
+                        " check platform data\n",
+                        pdata->max_uA);
+
+       spin_lock_init(&led->value_lock);
+       mutex_init(&led->mutex);
+       INIT_WORK(&led->work, led_work);
+       led->value = LED_OFF;
+       platform_set_drvdata(pdev, led);
+
+       ret = led_classdev_register(&pdev->dev, &led->cdev);
+       if (ret < 0)
+               goto err_led;
+
+       return 0;
+
+ err_led:
+       kfree(led);
+ err_dcdc:
+       regulator_put(dcdc);
+ err_isink:
+       regulator_put(isink);
+       return ret;
+}
+
+static int wm8350_led_remove(struct platform_device *pdev)
+{
+       struct wm8350_led *led = platform_get_drvdata(pdev);
+
+       led_classdev_unregister(&led->cdev);
+       flush_scheduled_work();
+       wm8350_led_disable(led);
+       regulator_put(led->dcdc);
+       regulator_put(led->isink);
+       kfree(led);
+       return 0;
+}
+
+static struct platform_driver wm8350_led_driver = {
+       .driver = {
+                  .name = "wm8350-led",
+                  .owner = THIS_MODULE,
+                  },
+       .probe = wm8350_led_probe,
+       .remove = wm8350_led_remove,
+       .shutdown = wm8350_led_shutdown,
+};
+
+static int __devinit wm8350_led_init(void)
+{
+       return platform_driver_register(&wm8350_led_driver);
+}
+module_init(wm8350_led_init);
+
+static void wm8350_led_exit(void)
+{
+       platform_driver_unregister(&wm8350_led_driver);
+}
+module_exit(wm8350_led_exit);
+
+MODULE_AUTHOR("Mark Brown");
+MODULE_DESCRIPTION("WM8350 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8350-led");
index 2f3aa87f2a1f60d86ab7c7d4ada902462c992ed2..2982c86ac4cffb9e93da0a252b232a573c02ac22 100644 (file)
@@ -56,40 +56,21 @@ static struct led_classdev wrap_power_led = {
        .name                   = "wrap::power",
        .brightness_set         = wrap_power_led_set,
        .default_trigger        = "default-on",
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev wrap_error_led = {
        .name           = "wrap::error",
        .brightness_set = wrap_error_led_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev wrap_extra_led = {
        .name           = "wrap::extra",
        .brightness_set = wrap_extra_led_set,
+       .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int wrap_led_suspend(struct platform_device *dev,
-               pm_message_t state)
-{
-       led_classdev_suspend(&wrap_power_led);
-       led_classdev_suspend(&wrap_error_led);
-       led_classdev_suspend(&wrap_extra_led);
-       return 0;
-}
-
-static int wrap_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&wrap_power_led);
-       led_classdev_resume(&wrap_error_led);
-       led_classdev_resume(&wrap_extra_led);
-       return 0;
-}
-#else
-#define wrap_led_suspend NULL
-#define wrap_led_resume NULL
-#endif
-
 static int wrap_led_probe(struct platform_device *pdev)
 {
        int ret;
@@ -127,8 +108,6 @@ static int wrap_led_remove(struct platform_device *pdev)
 static struct platform_driver wrap_led_driver = {
        .probe          = wrap_led_probe,
        .remove         = wrap_led_remove,
-       .suspend        = wrap_led_suspend,
-       .resume         = wrap_led_resume,
        .driver         = {
                .name           = DRVNAME,
                .owner          = THIS_MODULE,
index db681962d7bb14742d53dea274ea3c49e424b6dd..3d6531396dda094e41eb7f00b80d20acbbd244cd 100644 (file)
@@ -199,6 +199,7 @@ err_out:
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
        struct timer_trig_data *timer_data = led_cdev->trigger_data;
+       unsigned long on = 0, off = 0;
 
        if (timer_data) {
                device_remove_file(led_cdev->dev, &dev_attr_delay_on);
@@ -206,6 +207,10 @@ static void timer_trig_deactivate(struct led_classdev *led_cdev)
                del_timer_sync(&timer_data->timer);
                kfree(timer_data);
        }
+
+       /* If there is hardware support for blinking, stop it */
+       if (led_cdev->blink_set)
+               led_cdev->blink_set(led_cdev, &on, &off);
 }
 
 static struct led_trigger timer_led_trigger = {
index 3a273ccef3f2660b127ab426755650950377827c..f92595c8f165b7223d66958f648ba183dbb5519e 100644 (file)
@@ -1453,6 +1453,9 @@ void wm8350_device_exit(struct wm8350 *wm8350)
 {
        int i;
 
+       for (i = 0; i < ARRAY_SIZE(wm8350->pmic.led); i++)
+               platform_device_unregister(wm8350->pmic.led[i].pdev);
+
        for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++)
                platform_device_unregister(wm8350->pmic.pdev[i]);
 
index c68c496b2c499c30316e86bcb3f4237f79c9c4ff..7aa35248181bd4b021e5d638343e1bfd57459030 100644 (file)
@@ -1412,6 +1412,97 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
 }
 EXPORT_SYMBOL_GPL(wm8350_register_regulator);
 
+/**
+ * wm8350_register_led - Register a WM8350 LED output
+ *
+ * @param wm8350 The WM8350 device to configure.
+ * @param lednum LED device index to create.
+ * @param dcdc The DCDC to use for the LED.
+ * @param isink The ISINK to use for the LED.
+ * @param pdata Configuration for the LED.
+ *
+ * The WM8350 supports the use of an ISINK together with a DCDC to
+ * provide a power-efficient LED driver.  This function registers the
+ * regulators and instantiates the platform device for a LED.  The
+ * operating modes for the LED regulators must be configured using
+ * wm8350_isink_set_flash(), wm8350_dcdc25_set_mode() and
+ * wm8350_dcdc_set_slot() prior to calling this function.
+ */
+int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
+                       struct wm8350_led_platform_data *pdata)
+{
+       struct wm8350_led *led;
+       struct platform_device *pdev;
+       int ret;
+
+       if (lednum > ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) {
+               dev_err(wm8350->dev, "Invalid LED index %d\n", lednum);
+               return -ENODEV;
+       }
+
+       led = &wm8350->pmic.led[lednum];
+
+       if (led->pdev) {
+               dev_err(wm8350->dev, "LED %d already allocated\n", lednum);
+               return -EINVAL;
+       }
+
+       pdev = platform_device_alloc("wm8350-led", lednum);
+       if (pdev == NULL) {
+               dev_err(wm8350->dev, "Failed to allocate LED %d\n", lednum);
+               return -ENOMEM;
+       }
+
+       led->isink_consumer.dev = &pdev->dev;
+       led->isink_consumer.supply = "led_isink";
+       led->isink_init.num_consumer_supplies = 1;
+       led->isink_init.consumer_supplies = &led->isink_consumer;
+       led->isink_init.constraints.min_uA = 0;
+       led->isink_init.constraints.max_uA = pdata->max_uA;
+       led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
+       led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
+       ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
+       if (ret != 0) {
+               platform_device_put(pdev);
+               return ret;
+       }
+
+       led->dcdc_consumer.dev = &pdev->dev;
+       led->dcdc_consumer.supply = "led_vcc";
+       led->dcdc_init.num_consumer_supplies = 1;
+       led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
+       led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
+       ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
+       if (ret != 0) {
+               platform_device_put(pdev);
+               return ret;
+       }
+
+       switch (isink) {
+       case WM8350_ISINK_A:
+               wm8350->pmic.isink_A_dcdc = dcdc;
+               break;
+       case WM8350_ISINK_B:
+               wm8350->pmic.isink_B_dcdc = dcdc;
+               break;
+       }
+
+       pdev->dev.platform_data = pdata;
+       pdev->dev.parent = wm8350->dev;
+       ret = platform_device_add(pdev);
+       if (ret != 0) {
+               dev_err(wm8350->dev, "Failed to register LED %d: %d\n",
+                       lednum, ret);
+               platform_device_put(pdev);
+               return ret;
+       }
+
+       led->pdev = pdev;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_register_led);
+
 static struct platform_driver wm8350_regulator_driver = {
        .probe = wm8350_regulator_probe,
        .remove = wm8350_regulator_remove,
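wm8350_register_led() above is meant to be called from board/machine setup code once the ISINK and DCDC operating modes have been configured. A hedged sketch of such a call, using only the signature and platform-data fields visible in this diff; the LED name, the requested current and the WM8350_DCDC_5 constant are assumptions standing in for real board values:

    #include <linux/mfd/wm8350/pmic.h>

    static struct wm8350_led_platform_data board_led_pdata = {
            .name            = "board:white:status",  /* made-up LED name */
            .default_trigger = "heartbeat",
            .max_uA          = 27899,                 /* one of the isink_cur steps */
    };

    static int board_setup_led(struct wm8350 *wm8350)
    {
            /* ISINK flash mode and DCDC mode/slot are assumed to have been
             * configured already, e.g. via wm8350_isink_set_flash() and
             * wm8350_dcdc_set_slot() as the comment above requires.
             * WM8350_DCDC_5 is an assumed name for the boost supply. */
            return wm8350_register_led(wm8350, 0, WM8350_DCDC_5, WM8350_ISINK_A,
                                       &board_led_pdata);
    }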
index 570ae59c1d5eb5aa0841c1ee661134d93922ea4c..bd5914994142707556b36b95fcb8b3e7042899fd 100644 (file)
@@ -336,6 +336,9 @@ static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
        int rc;
+       struct gendisk *disk;
+       struct disk_part_iter piter;
+       struct hd_struct *part;
 
        if (device->discipline->ready_to_online) {
                rc = device->discipline->ready_to_online(device);
@@ -343,8 +346,14 @@ dasd_state_ready_to_online(struct dasd_device * device)
                        return rc;
        }
        device->state = DASD_STATE_ONLINE;
-       if (device->block)
+       if (device->block) {
                dasd_schedule_block_bh(device->block);
+               disk = device->block->bdev->bd_disk;
+               disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+               while ((part = disk_part_iter_next(&piter)))
+                       kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+               disk_part_iter_exit(&piter);
+       }
        return 0;
 }
 
@@ -354,6 +363,9 @@ dasd_state_ready_to_online(struct dasd_device * device)
 static int dasd_state_online_to_ready(struct dasd_device *device)
 {
        int rc;
+       struct gendisk *disk;
+       struct disk_part_iter piter;
+       struct hd_struct *part;
 
        if (device->discipline->online_to_ready) {
                rc = device->discipline->online_to_ready(device);
@@ -361,6 +373,13 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
                        return rc;
        }
        device->state = DASD_STATE_READY;
+       if (device->block) {
+               disk = device->block->bdev->bd_disk;
+               disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+               while ((part = disk_part_iter_next(&piter)))
+                       kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+               disk_part_iter_exit(&piter);
+       }
        return 0;
 }
 
index 2ef25731d197ed0f6a9ce2fdfdc1fc4358b9e460..300e28a531f80c938f880b68c246f921686c63bb 100644 (file)
@@ -206,6 +206,8 @@ dasd_feature_list(char *str, char **endp)
                        features |= DASD_FEATURE_USEDIAG;
                else if (len == 6 && !strncmp(str, "erplog", 6))
                        features |= DASD_FEATURE_ERPLOG;
+               else if (len == 8 && !strncmp(str, "failfast", 8))
+                       features |= DASD_FEATURE_FAILFAST;
                else {
                        MESSAGE(KERN_WARNING,
                                "unsupported feature: %*s, "
@@ -666,6 +668,51 @@ dasd_device_from_cdev(struct ccw_device *cdev)
  * SECTION: files in sysfs
  */
 
+/*
+ * failfast controls the behaviour if no path is available

+ */
+static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct dasd_devmap *devmap;
+       int ff_flag;
+
+       devmap = dasd_find_busid(dev->bus_id);
+       if (!IS_ERR(devmap))
+               ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
+       else
+               ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
+       return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+}
+
+static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
+             const char *buf, size_t count)
+{
+       struct dasd_devmap *devmap;
+       int val;
+       char *endp;
+
+       devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+       if (IS_ERR(devmap))
+               return PTR_ERR(devmap);
+
+       val = simple_strtoul(buf, &endp, 0);
+       if (((endp + 1) < (buf + count)) || (val > 1))
+               return -EINVAL;
+
+       spin_lock(&dasd_devmap_lock);
+       if (val)
+               devmap->features |= DASD_FEATURE_FAILFAST;
+       else
+               devmap->features &= ~DASD_FEATURE_FAILFAST;
+       if (devmap->device)
+               devmap->device->features = devmap->features;
+       spin_unlock(&dasd_devmap_lock);
+       return count;
+}
+
+static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
+
 /*
  * readonly controls the readonly status of a dasd
  */
@@ -1020,6 +1067,7 @@ static struct attribute * dasd_attrs[] = {
        &dev_attr_use_diag.attr,
        &dev_attr_eer_enabled.attr,
        &dev_attr_erplog.attr,
+       &dev_attr_failfast.attr,
        NULL,
 };
 
index 7844461a995b7caf0b0b27ee32dfaf53f2d6cf69..ef2a5695205442a05c4ff817444066076df5d6e7 100644 (file)
@@ -544,7 +544,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        }
        cqr->retries = DIAG_MAX_RETRIES;
        cqr->buildclk = get_clock();
-       if (blk_noretry_request(req))
+       if (blk_noretry_request(req) ||
+           block->base->features & DASD_FEATURE_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->startdev = memdev;
        cqr->memdev = memdev;
index bd2c52e20762e3593cb531be9b77552db0a2089a..bdb87998f364f276067470e672a0db49bfc21e26 100644 (file)
@@ -1700,7 +1700,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
                        recid++;
                }
        }
-       if (blk_noretry_request(req))
+       if (blk_noretry_request(req) ||
+           block->base->features & DASD_FEATURE_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->startdev = startdev;
        cqr->memdev = startdev;
index 7d442aeff3d1163c6f6b728cdd0b9d2a1f2017e5..f1d176021694886fc1b6c2ba1f30ee6318c322c8 100644 (file)
@@ -355,7 +355,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
                        recid++;
                }
        }
-       if (blk_noretry_request(req))
+       if (blk_noretry_request(req) ||
+           block->base->features & DASD_FEATURE_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->startdev = memdev;
        cqr->memdev = memdev;
index 643033890e341d28188a3769ea35dcc56cf66fe0..0769ced52dbd64c3b5583f7c447971fd2e8eeb4d 100644 (file)
@@ -100,7 +100,7 @@ comment "S/390 tape interface support"
 
 config S390_TAPE_BLOCK
        bool "Support for tape block devices"
-       depends on S390_TAPE
+       depends on S390_TAPE && BLOCK
        help
          Select this option if you want to access your channel-attached tape
          devices using the block device interface.  This interface is similar
index f8a3b6967f691aa504711cf6b49867c8879ace5c..da7afb04e71ff8003c0b6139d169590a0b1caf2e 100644 (file)
@@ -169,6 +169,8 @@ static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
                 q->nr);
        debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
                                                debugfs_root, q, &debugfs_fops);
+       if (IS_ERR(debugfs_queues[i]))
+               debugfs_queues[i] = NULL;
 }
 
 void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
index 4a4dd9adc328cfc63f797cd2d785c14ed8082259..72facb9eb7db7661fbf8ef77cc4c66aca50b3184 100644 (file)
@@ -52,11 +52,11 @@ config LCD_ILI9320
          then say y to include a power driver for it.
 
 config LCD_TDO24M
-       tristate "Toppoly TDO24M LCD Panels support"
+       tristate "Toppoly TDO24M  and TDO35S LCD Panels support"
        depends on LCD_CLASS_DEVICE && SPI_MASTER
        default n
        help
-         If you have a Toppoly TDO24M series LCD panel, say y here to
+         If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
          include the support for it.
 
 config LCD_VGG2432A4
@@ -123,17 +123,14 @@ config BACKLIGHT_ATMEL_PWM
          To compile this driver as a module, choose M here: the module will be
          called atmel-pwm-bl.
 
-config BACKLIGHT_CORGI
-       tristate "Generic (aka Sharp Corgi) Backlight Driver (DEPRECATED)"
+config BACKLIGHT_GENERIC
+       tristate "Generic (aka Sharp Corgi) Backlight Driver"
        depends on BACKLIGHT_CLASS_DEVICE
-       default n
+       default y
        help
          Say y to enable the generic platform backlight driver previously
          known as the Corgi backlight driver. If you have a Sharp Zaurus
-         SL-C7xx, SL-Cxx00 or SL-6000x say y. Most users can say n.
-
-         Note: this driver is marked as deprecated, try enable SPI and
-         use the new corgi_lcd driver with integrated backlight control
+         SL-C7xx, SL-Cxx00 or SL-6000x say y.
 
 config BACKLIGHT_LOCOMO
        tristate "Sharp LOCOMO LCD/Backlight Driver"
index 103427de670357f4f974fd598b37ee41cd5f6ef2..363b3cb2f01b263ffd2b14b50c8bbdd694b7656f 100644 (file)
@@ -11,7 +11,7 @@ obj-$(CONFIG_LCD_TOSA)                   += tosa_lcd.o
 
 obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
 obj-$(CONFIG_BACKLIGHT_ATMEL_PWM)    += atmel-pwm-bl.o
-obj-$(CONFIG_BACKLIGHT_CORGI)  += corgi_bl.o
+obj-$(CONFIG_BACKLIGHT_GENERIC)        += generic_bl.o
 obj-$(CONFIG_BACKLIGHT_HP680)  += hp680_bl.o
 obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)  += omap1_bl.o
index 0664fc032235d37903e14f607e0e798214e89abf..157057c79ca3760fc80d6df28236dc0bbf106044 100644 (file)
@@ -40,6 +40,10 @@ static int fb_notifier_callback(struct notifier_block *self,
                if (!bd->ops->check_fb ||
                    bd->ops->check_fb(evdata->info)) {
                        bd->props.fb_blank = *(int *)evdata->data;
+                       if (bd->props.fb_blank == FB_BLANK_UNBLANK)
+                               bd->props.state &= ~BL_CORE_FBBLANK;
+                       else
+                               bd->props.state |= BL_CORE_FBBLANK;
                        backlight_update_status(bd);
                }
        mutex_unlock(&bd->ops_lock);
@@ -80,20 +84,18 @@ static ssize_t backlight_show_power(struct device *dev,
 static ssize_t backlight_store_power(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       int rc = -ENXIO;
-       char *endp;
+       int rc;
        struct backlight_device *bd = to_backlight_device(dev);
-       int power = simple_strtoul(buf, &endp, 0);
-       size_t size = endp - buf;
+       unsigned long power;
 
-       if (*endp && isspace(*endp))
-               size++;
-       if (size != count)
-               return -EINVAL;
+       rc = strict_strtoul(buf, 0, &power);
+       if (rc)
+               return rc;
 
+       rc = -ENXIO;
        mutex_lock(&bd->ops_lock);
        if (bd->ops) {
-               pr_debug("backlight: set power to %d\n", power);
+               pr_debug("backlight: set power to %lu\n", power);
                if (bd->props.power != power) {
                        bd->props.power = power;
                        backlight_update_status(bd);
@@ -116,28 +118,25 @@ static ssize_t backlight_show_brightness(struct device *dev,
 static ssize_t backlight_store_brightness(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       int rc = -ENXIO;
-       char *endp;
+       int rc;
        struct backlight_device *bd = to_backlight_device(dev);
-       int brightness = simple_strtoul(buf, &endp, 0);
-       size_t size = endp - buf;
+       unsigned long brightness;
+
+       rc = strict_strtoul(buf, 0, &brightness);
+       if (rc)
+               return rc;
 
-       if (*endp && isspace(*endp))
-               size++;
-       if (size != count)
-               return -EINVAL;
+       rc = -ENXIO;
 
        mutex_lock(&bd->ops_lock);
        if (bd->ops) {
                if (brightness > bd->props.max_brightness)
                        rc = -EINVAL;
                else {
-                       pr_debug("backlight: set brightness to %d\n",
+                       pr_debug("backlight: set brightness to %lu\n",
                                 brightness);
-                       if (bd->props.brightness != brightness) {
-                               bd->props.brightness = brightness;
-                               backlight_update_status(bd);
-                       }
+                       bd->props.brightness = brightness;
+                       backlight_update_status(bd);
                        rc = count;
                }
        }
@@ -170,6 +169,34 @@ static ssize_t backlight_show_actual_brightness(struct device *dev,
 
 static struct class *backlight_class;
 
+static int backlight_suspend(struct device *dev, pm_message_t state)
+{
+       struct backlight_device *bd = to_backlight_device(dev);
+
+       if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
+               mutex_lock(&bd->ops_lock);
+               bd->props.state |= BL_CORE_SUSPENDED;
+               backlight_update_status(bd);
+               mutex_unlock(&bd->ops_lock);
+       }
+
+       return 0;
+}
+
+static int backlight_resume(struct device *dev)
+{
+       struct backlight_device *bd = to_backlight_device(dev);
+
+       if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
+               mutex_lock(&bd->ops_lock);
+               bd->props.state &= ~BL_CORE_SUSPENDED;
+               backlight_update_status(bd);
+               mutex_unlock(&bd->ops_lock);
+       }
+
+       return 0;
+}
+
 static void bl_device_release(struct device *dev)
 {
        struct backlight_device *bd = to_backlight_device(dev);
@@ -286,6 +313,8 @@ static int __init backlight_class_init(void)
        }
 
        backlight_class->dev_attrs = bl_device_attributes;
+       backlight_class->suspend = backlight_suspend;
+       backlight_class->resume = backlight_resume;
        return 0;
 }
 
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
deleted file mode 100644 (file)
index 4d4d037..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- *  Backlight Driver for Sharp Zaurus Handhelds (various models)
- *
- *  Copyright (c) 2004-2006 Richard Purdie
- *
- *  Based on Sharp's 2.4 Backlight Driver
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-
-static int corgibl_intensity;
-static struct backlight_properties corgibl_data;
-static struct backlight_device *corgi_backlight_device;
-static struct generic_bl_info *bl_machinfo;
-
-static unsigned long corgibl_flags;
-#define CORGIBL_SUSPENDED     0x01
-#define CORGIBL_BATTLOW       0x02
-
-static int corgibl_send_intensity(struct backlight_device *bd)
-{
-       int intensity = bd->props.brightness;
-
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               intensity = 0;
-       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-               intensity = 0;
-       if (corgibl_flags & CORGIBL_SUSPENDED)
-               intensity = 0;
-       if (corgibl_flags & CORGIBL_BATTLOW)
-               intensity &= bl_machinfo->limit_mask;
-
-       bl_machinfo->set_bl_intensity(intensity);
-
-       corgibl_intensity = intensity;
-
-       if (bl_machinfo->kick_battery)
-               bl_machinfo->kick_battery();
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int corgibl_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       struct backlight_device *bd = platform_get_drvdata(pdev);
-
-       corgibl_flags |= CORGIBL_SUSPENDED;
-       backlight_update_status(bd);
-       return 0;
-}
-
-static int corgibl_resume(struct platform_device *pdev)
-{
-       struct backlight_device *bd = platform_get_drvdata(pdev);
-
-       corgibl_flags &= ~CORGIBL_SUSPENDED;
-       backlight_update_status(bd);
-       return 0;
-}
-#else
-#define corgibl_suspend        NULL
-#define corgibl_resume NULL
-#endif
-
-static int corgibl_get_intensity(struct backlight_device *bd)
-{
-       return corgibl_intensity;
-}
-
-/*
- * Called when the battery is low to limit the backlight intensity.
- * If limit==0 clear any limit, otherwise limit the intensity
- */
-void corgibl_limit_intensity(int limit)
-{
-       if (limit)
-               corgibl_flags |= CORGIBL_BATTLOW;
-       else
-               corgibl_flags &= ~CORGIBL_BATTLOW;
-       backlight_update_status(corgi_backlight_device);
-}
-EXPORT_SYMBOL(corgibl_limit_intensity);
-
-
-static struct backlight_ops corgibl_ops = {
-       .get_brightness = corgibl_get_intensity,
-       .update_status  = corgibl_send_intensity,
-};
-
-static int corgibl_probe(struct platform_device *pdev)
-{
-       struct generic_bl_info *machinfo = pdev->dev.platform_data;
-       const char *name = "generic-bl";
-
-       bl_machinfo = machinfo;
-       if (!machinfo->limit_mask)
-               machinfo->limit_mask = -1;
-
-       if (machinfo->name)
-               name = machinfo->name;
-
-       corgi_backlight_device = backlight_device_register (name,
-               &pdev->dev, NULL, &corgibl_ops);
-       if (IS_ERR (corgi_backlight_device))
-               return PTR_ERR (corgi_backlight_device);
-
-       platform_set_drvdata(pdev, corgi_backlight_device);
-
-       corgi_backlight_device->props.max_brightness = machinfo->max_intensity;
-       corgi_backlight_device->props.power = FB_BLANK_UNBLANK;
-       corgi_backlight_device->props.brightness = machinfo->default_intensity;
-       backlight_update_status(corgi_backlight_device);
-
-       printk("Corgi Backlight Driver Initialized.\n");
-       return 0;
-}
-
-static int corgibl_remove(struct platform_device *pdev)
-{
-       struct backlight_device *bd = platform_get_drvdata(pdev);
-
-       corgibl_data.power = 0;
-       corgibl_data.brightness = 0;
-       backlight_update_status(bd);
-
-       backlight_device_unregister(bd);
-
-       printk("Corgi Backlight Driver Unloaded\n");
-       return 0;
-}
-
-static struct platform_driver corgibl_driver = {
-       .probe          = corgibl_probe,
-       .remove         = corgibl_remove,
-       .suspend        = corgibl_suspend,
-       .resume         = corgibl_resume,
-       .driver         = {
-               .name   = "generic-bl",
-       },
-};
-
-static int __init corgibl_init(void)
-{
-       return platform_driver_register(&corgibl_driver);
-}
-
-static void __exit corgibl_exit(void)
-{
-       platform_driver_unregister(&corgibl_driver);
-}
-
-module_init(corgibl_init);
-module_exit(corgibl_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi Backlight Driver");
-MODULE_LICENSE("GPL");
index 26add8898605e0bc08789e6bdf4a7961886e575b..b9fe62b475c63a3fa953ea182958496cd4037d52 100644 (file)
@@ -259,22 +259,18 @@ static int __init cr_backlight_init(void)
 {
        int ret = platform_driver_register(&cr_backlight_driver);
 
-       if (!ret) {
-               crp = platform_device_alloc("cr_backlight", -1);
-               if (!crp)
-                       return -ENOMEM;
+       if (ret)
+               return ret;
 
-               ret = platform_device_add(crp);
-
-               if (ret) {
-                       platform_device_put(crp);
-                       platform_driver_unregister(&cr_backlight_driver);
-               }
+       crp = platform_device_register_simple("cr_backlight", -1, NULL, 0);
+       if (IS_ERR(crp)) {
+               platform_driver_unregister(&cr_backlight_driver);
+               return PTR_ERR(crp);
        }
 
        printk("Carillo Ranch Backlight Driver Initialized.\n");
 
-       return ret;
+       return 0;
 }
 
 static void __exit cr_backlight_exit(void)
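
With platform_device_register_simple() owning the device, the exit path stays equally simple: unregister the device first, then the driver. The existing exit routine (truncated above) is expected to amount to something like this sketch:

static void __exit cr_backlight_exit(void)
{
	platform_device_unregister(crp);
	platform_driver_unregister(&cr_backlight_driver);
}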
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
new file mode 100644 (file)
index 0000000..6d27f62
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ *  Generic Backlight Driver
+ *
+ *  Copyright (c) 2004-2008 Richard Purdie
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+
+static int genericbl_intensity;
+static struct backlight_device *generic_backlight_device;
+static struct generic_bl_info *bl_machinfo;
+
+/* Flag to signal when the battery is low */
+#define GENERICBL_BATTLOW       BL_CORE_DRIVER1
+
+static int genericbl_send_intensity(struct backlight_device *bd)
+{
+       int intensity = bd->props.brightness;
+
+       if (bd->props.power != FB_BLANK_UNBLANK)
+               intensity = 0;
+       if (bd->props.state & BL_CORE_FBBLANK)
+               intensity = 0;
+       if (bd->props.state & BL_CORE_SUSPENDED)
+               intensity = 0;
+       if (bd->props.state & GENERICBL_BATTLOW)
+               intensity &= bl_machinfo->limit_mask;
+
+       bl_machinfo->set_bl_intensity(intensity);
+
+       genericbl_intensity = intensity;
+
+       if (bl_machinfo->kick_battery)
+               bl_machinfo->kick_battery();
+
+       return 0;
+}
+
+static int genericbl_get_intensity(struct backlight_device *bd)
+{
+       return genericbl_intensity;
+}
+
+/*
+ * Called when the battery is low to limit the backlight intensity.
+ * If limit==0 clear any limit, otherwise limit the intensity
+ */
+void corgibl_limit_intensity(int limit)
+{
+       struct backlight_device *bd = generic_backlight_device;
+
+       mutex_lock(&bd->ops_lock);
+       if (limit)
+               bd->props.state |= GENERICBL_BATTLOW;
+       else
+               bd->props.state &= ~GENERICBL_BATTLOW;
+       backlight_update_status(generic_backlight_device);
+       mutex_unlock(&bd->ops_lock);
+}
+EXPORT_SYMBOL(corgibl_limit_intensity);
+
+static struct backlight_ops genericbl_ops = {
+       .options = BL_CORE_SUSPENDRESUME,
+       .get_brightness = genericbl_get_intensity,
+       .update_status  = genericbl_send_intensity,
+};
+
+static int genericbl_probe(struct platform_device *pdev)
+{
+       struct generic_bl_info *machinfo = pdev->dev.platform_data;
+       const char *name = "generic-bl";
+       struct backlight_device *bd;
+
+       bl_machinfo = machinfo;
+       if (!machinfo->limit_mask)
+               machinfo->limit_mask = -1;
+
+       if (machinfo->name)
+               name = machinfo->name;
+
+       bd = backlight_device_register (name,
+               &pdev->dev, NULL, &genericbl_ops);
+       if (IS_ERR (bd))
+               return PTR_ERR (bd);
+
+       platform_set_drvdata(pdev, bd);
+
+       bd->props.max_brightness = machinfo->max_intensity;
+       bd->props.power = FB_BLANK_UNBLANK;
+       bd->props.brightness = machinfo->default_intensity;
+       backlight_update_status(bd);
+
+       generic_backlight_device = bd;
+
+       printk("Generic Backlight Driver Initialized.\n");
+       return 0;
+}
+
+static int genericbl_remove(struct platform_device *pdev)
+{
+       struct backlight_device *bd = platform_get_drvdata(pdev);
+
+       bd->props.power = 0;
+       bd->props.brightness = 0;
+       backlight_update_status(bd);
+
+       backlight_device_unregister(bd);
+
+       printk("Generic Backlight Driver Unloaded\n");
+       return 0;
+}
+
+static struct platform_driver genericbl_driver = {
+       .probe          = genericbl_probe,
+       .remove         = genericbl_remove,
+       .driver         = {
+               .name   = "generic-bl",
+       },
+};
+
+static int __init genericbl_init(void)
+{
+       return platform_driver_register(&genericbl_driver);
+}
+
+static void __exit genericbl_exit(void)
+{
+       platform_driver_unregister(&genericbl_driver);
+}
+
+module_init(genericbl_init);
+module_exit(genericbl_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Generic Backlight Driver");
+MODULE_LICENSE("GPL");
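
The renamed driver is still wired up from board code through platform data: a machine file fills in a struct generic_bl_info (picked up via <linux/backlight.h>) and hangs it off a "generic-bl" platform device. A rough sketch, with the intensity hook and the numbers invented for illustration; the field names follow how this driver dereferences machinfo:

#include <linux/platform_device.h>
#include <linux/backlight.h>

static void board_set_bl_intensity(int intensity)
{
	/* program the board's PWM or brightness register here */
}

static struct generic_bl_info board_bl_info = {
	.name			= "board-backlight",
	.max_intensity		= 255,
	.default_intensity	= 128,
	.limit_mask		= 0x7f,	/* cap applied when corgibl_limit_intensity() reports low battery */
	.set_bl_intensity	= board_set_bl_intensity,
};

static struct platform_device board_bl_device = {
	.name	= "generic-bl",
	.id	= -1,
	.dev	= {
		.platform_data = &board_bl_info,
	},
};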
index d4cfed0b26d5f8a83242c73769ca650805a521a5..5be55a20d8c77326510f48a6ad0ba600b5c36435 100644 (file)
@@ -151,19 +151,15 @@ static int __init hp680bl_init(void)
        int ret;
 
        ret = platform_driver_register(&hp680bl_driver);
-       if (!ret) {
-               hp680bl_device = platform_device_alloc("hp680-bl", -1);
-               if (!hp680bl_device)
-                       return -ENOMEM;
-
-               ret = platform_device_add(hp680bl_device);
-
-               if (ret) {
-                       platform_device_put(hp680bl_device);
-                       platform_driver_unregister(&hp680bl_driver);
-               }
+       if (ret)
+               return ret;
+       hp680bl_device = platform_device_register_simple("hp680-bl", -1,
+                                                       NULL, 0);
+       if (IS_ERR(hp680bl_device)) {
+               platform_driver_unregister(&hp680bl_driver);
+               return PTR_ERR(hp680bl_device);
        }
-       return ret;
+       return 0;
 }
 
 static void __exit hp680bl_exit(void)
index 06964af761c630295851142dac7690fc1bf11ab5..65864c500455677a1ef540579c0817f32fd62478 100644 (file)
@@ -70,6 +70,7 @@ static int mbp_get_intensity(struct backlight_device *bd)
 }
 
 static struct backlight_ops mbp_ops = {
+       .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = mbp_get_intensity,
        .update_status  = mbp_send_intensity,
 };
index 15fb4d58b5bcce35f4e8c0a9a00bd0d76246370e..9edaf24fd82d4e49f0bf439162fa156088cbc25e 100644 (file)
@@ -119,20 +119,16 @@ static int __init progearbl_init(void)
 {
        int ret = platform_driver_register(&progearbl_driver);
 
-       if (!ret) {
-               progearbl_device = platform_device_alloc("progear-bl", -1);
-               if (!progearbl_device)
-                       return -ENOMEM;
-
-               ret = platform_device_add(progearbl_device);
-
-               if (ret) {
-                       platform_device_put(progearbl_device);
-                       platform_driver_unregister(&progearbl_driver);
-               }
+       if (ret)
+               return ret;
+       progearbl_device = platform_device_register_simple("progear-bl", -1,
+                                                               NULL, 0);
+       if (IS_ERR(progearbl_device)) {
+               platform_driver_unregister(&progearbl_driver);
+               return PTR_ERR(progearbl_device);
        }
 
-       return ret;
+       return 0;
 }
 
 static void __exit progearbl_exit(void)
index 8427669162ea494b7f9630bc4ea0842a1d0efbd1..1dae7f8f3c6b47c7f4861ff824fff0a4d4dcf8f3 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/tdo24m.h>
 #include <linux/fb.h>
 #include <linux/lcd.h>
 
@@ -31,6 +32,9 @@ struct tdo24m {
        struct spi_transfer     xfer;
        uint8_t                 *buf;
 
+       int (*adj_mode)(struct tdo24m *lcd, int mode);
+       int color_invert;
+
        int                     power;
        int                     mode;
 };
@@ -66,7 +70,7 @@ static uint32_t lcd_panel_off[] = {
        CMD_NULL,
 };
 
-static uint32_t lcd_vga_pass_through[] = {
+static uint32_t lcd_vga_pass_through_tdo24m[] = {
        CMD1(0xB0, 0x16),
        CMD1(0xBC, 0x80),
        CMD1(0xE1, 0x00),
@@ -75,7 +79,7 @@ static uint32_t lcd_vga_pass_through[] = {
        CMD_NULL,
 };
 
-static uint32_t lcd_qvga_pass_through[] = {
+static uint32_t lcd_qvga_pass_through_tdo24m[] = {
        CMD1(0xB0, 0x16),
        CMD1(0xBC, 0x81),
        CMD1(0xE1, 0x00),
@@ -84,7 +88,7 @@ static uint32_t lcd_qvga_pass_through[] = {
        CMD_NULL,
 };
 
-static uint32_t lcd_vga_transfer[] = {
+static uint32_t lcd_vga_transfer_tdo24m[] = {
        CMD1(0xcf, 0x02),       /* Blanking period control (1) */
        CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
        CMD1(0xd1, 0x01),       /* CKV timing control on/off */
@@ -110,6 +114,35 @@ static uint32_t lcd_qvga_transfer[] = {
        CMD_NULL,
 };
 
+static uint32_t lcd_vga_pass_through_tdo35s[] = {
+       CMD1(0xB0, 0x16),
+       CMD1(0xBC, 0x80),
+       CMD1(0xE1, 0x00),
+       CMD1(0x3B, 0x00),
+       CMD_NULL,
+};
+
+static uint32_t lcd_qvga_pass_through_tdo35s[] = {
+       CMD1(0xB0, 0x16),
+       CMD1(0xBC, 0x81),
+       CMD1(0xE1, 0x00),
+       CMD1(0x3B, 0x22),
+       CMD_NULL,
+};
+
+static uint32_t lcd_vga_transfer_tdo35s[] = {
+       CMD1(0xcf, 0x02),       /* Blanking period control (1) */
+       CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
+       CMD1(0xd1, 0x01),       /* CKV timing control on/off */
+       CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */
+       CMD2(0xd3, 0x14, 0x28), /* OEV timing control */
+       CMD2(0xd4, 0x28, 0x64), /* ASW timing control (1) */
+       CMD1(0xd5, 0x28),       /* ASW timing control (2) */
+       CMD0(0x21),             /* Invert for normally black display */
+       CMD0(0x29),             /* Display on */
+       CMD_NULL,
+};
+
 static uint32_t lcd_panel_config[] = {
        CMD2(0xb8, 0xff, 0xf9), /* Output control */
        CMD0(0x11),             /* sleep out */
@@ -148,6 +181,8 @@ static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array)
        int nparams, err = 0;
 
        for (; *p != CMD_NULL; p++) {
+               if (!lcd->color_invert && *p == CMD0(0x21))
+                       continue;
 
                nparams = (*p >> 30) & 0x3;
 
@@ -184,12 +219,33 @@ static int tdo24m_adj_mode(struct tdo24m *lcd, int mode)
 {
        switch (mode) {
        case MODE_VGA:
-               tdo24m_writes(lcd, lcd_vga_pass_through);
+               tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m);
                tdo24m_writes(lcd, lcd_panel_config);
-               tdo24m_writes(lcd, lcd_vga_transfer);
+               tdo24m_writes(lcd, lcd_vga_transfer_tdo24m);
                break;
        case MODE_QVGA:
-               tdo24m_writes(lcd, lcd_qvga_pass_through);
+               tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m);
+               tdo24m_writes(lcd, lcd_panel_config);
+               tdo24m_writes(lcd, lcd_qvga_transfer);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       lcd->mode = mode;
+       return 0;
+}
+
+static int tdo35s_adj_mode(struct tdo24m *lcd, int mode)
+{
+       switch (mode) {
+       case MODE_VGA:
+               tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s);
+               tdo24m_writes(lcd, lcd_panel_config);
+               tdo24m_writes(lcd, lcd_vga_transfer_tdo35s);
+               break;
+       case MODE_QVGA:
+               tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s);
                tdo24m_writes(lcd, lcd_panel_config);
                tdo24m_writes(lcd, lcd_qvga_transfer);
                break;
@@ -213,7 +269,7 @@ static int tdo24m_power_on(struct tdo24m *lcd)
        if (err)
                goto out;
 
-       err = tdo24m_adj_mode(lcd, lcd->mode);
+       err = lcd->adj_mode(lcd, lcd->mode);
 out:
        return err;
 }
@@ -262,7 +318,7 @@ static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m)
        if (lcd->mode == mode)
                return 0;
 
-       return tdo24m_adj_mode(lcd, mode);
+       return lcd->adj_mode(lcd, mode);
 }
 
 static struct lcd_ops tdo24m_ops = {
@@ -276,8 +332,16 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
        struct tdo24m *lcd;
        struct spi_message *m;
        struct spi_transfer *x;
+       struct tdo24m_platform_data *pdata;
+       enum tdo24m_model model;
        int err;
 
+       pdata = spi->dev.platform_data;
+       if (pdata)
+               model = pdata->model;
+       else
+               model = TDO24M;
+
        spi->bits_per_word = 8;
        spi->mode = SPI_MODE_3;
        err = spi_setup(spi);
@@ -306,6 +370,20 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
        x->tx_buf = &lcd->buf[0];
        spi_message_add_tail(x, m);
 
+       switch (model) {
+       case TDO24M:
+               lcd->color_invert = 1;
+               lcd->adj_mode = tdo24m_adj_mode;
+               break;
+       case TDO35S:
+               lcd->adj_mode = tdo35s_adj_mode;
+               lcd->color_invert = 0;
+               break;
+       default:
+               dev_err(&spi->dev, "Unsupported model");
+               goto out_free;
+       }
+
        lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
                                        lcd, &tdo24m_ops);
        if (IS_ERR(lcd->lcd_dev)) {
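
Panel selection now comes from SPI board info rather than from the driver itself: the machine file attaches a struct tdo24m_platform_data (from the new <linux/spi/tdo24m.h>, field name follows how the probe reads pdata->model) before the SPI device is registered. A hedged sketch, with the modalias, bus number, chip select and clock rate as placeholders:

#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>

static struct tdo24m_platform_data board_lcd_pdata = {
	.model	= TDO35S,	/* TDO24M remains the default when no platform data is given */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "tdo24m",
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &board_lcd_pdata,
	},
};

/* registered from machine init with
 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */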
index 57a26649f1a5ed6eeb4d6eae7517c79534090cad..b7fbc75a62fc7ec6c0dd6097af8a355d677724a8 100644 (file)
@@ -39,6 +39,7 @@ struct tosa_lcd_data {
        struct i2c_client *i2c;
 
        int lcd_power;
+       bool is_vga;
 };
 
 static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
@@ -81,8 +82,12 @@ static void tosa_lcd_tg_init(struct tosa_lcd_data *data)
 static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
 {
        struct spi_device *spi = data->spi;
-       const int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
-       tosa_tg_send(spi, TG_PNLCTL, value | TG_REG0_VQV); /* this depends on mode */
+       int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
+
+       if (data->is_vga)
+               value |= TG_REG0_VQV;
+
+       tosa_tg_send(spi, TG_PNLCTL, value);
 
        /* TG LCD panel power up */
        tosa_tg_send(spi, TG_PINICTL,0x4);
@@ -142,9 +147,25 @@ static int tosa_lcd_get_power(struct lcd_device *lcd)
        return data->lcd_power;
 }
 
+static int tosa_lcd_set_mode(struct lcd_device *lcd, struct fb_videomode *mode)
+{
+       struct tosa_lcd_data *data = lcd_get_data(lcd);
+
+       if (mode->xres == 320 || mode->yres == 320)
+               data->is_vga = false;
+       else
+               data->is_vga = true;
+
+       if (POWER_IS_ON(data->lcd_power))
+               tosa_lcd_tg_on(data);
+
+       return 0;
+}
+
 static struct lcd_ops tosa_lcd_ops = {
        .set_power = tosa_lcd_set_power,
        .get_power = tosa_lcd_get_power,
+       .set_mode = tosa_lcd_set_mode,
 };
 
 static int __devinit tosa_lcd_probe(struct spi_device *spi)
@@ -156,6 +177,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
        if (!data)
                return -ENOMEM;
 
+       data->is_vga = true; /* default to VGA mode */
+
        /*
         * bits_per_word cannot be configured in platform data
         */
index 593c7687d54ae2f487fad7c5c47e45530c314043..8e653b8a6f17dcbdaa46f2942393e48c6d398a8b 100644 (file)
@@ -137,7 +137,7 @@ static int vgg2432a4_lcd_init(struct ili9320 *lcd,
 
        ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
        ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
-       ili9320_write(lcd, ILI9320_RGB_IF2, ILI9320_RGBIF2_DPL);
+       ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);
 
        ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
        if (ret != 0)
index 02cff86af1b4fac046dee991217e131a99cb8336..51307b0fdf0f6f8c4c7022c91f9fa47618c82771 100644 (file)
@@ -932,6 +932,58 @@ config CRAMFS
 
          If unsure, say N.
 
+config SQUASHFS
+       tristate "SquashFS 4.0 - Squashed file system support"
+       depends on BLOCK
+       select ZLIB_INFLATE
+       help
+         Saying Y here includes support for SquashFS 4.0 (a Compressed
+         Read-Only File System).  Squashfs is a highly compressed read-only
+         filesystem for Linux.  It uses zlib compression to compress
+         files, inodes and directories.  Inodes in the system are very small
+         and all blocks are packed to minimise data overhead. Block sizes
+         greater than 4K are supported up to a maximum of 1 Mbytes (default
+         block size 128K).  SquashFS 4.0 supports 64 bit filesystems and files
+         (larger than 4GB), full uid/gid information, hard links and
+         timestamps.  
+
+         Squashfs is intended for general read-only filesystem use, for
+         archival use (i.e. in cases where a .tar.gz file may be used), and in
+         embedded systems where low overhead is needed.  Further information
+         and tools are available from http://squashfs.sourceforge.net.
+
+         If you want to compile this as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/modules.txt>.  The module
+         will be called squashfs.  Note that the root file system (the one
+         containing the directory /) cannot be compiled as a module.
+
+         If unsure, say N.
+
+config SQUASHFS_EMBEDDED
+
+       bool "Additional option for memory-constrained systems" 
+       depends on SQUASHFS
+       default n
+       help
+         Saying Y here allows you to specify cache size.
+
+         If unsure, say N.
+
+config SQUASHFS_FRAGMENT_CACHE_SIZE
+       int "Number of fragments cached" if SQUASHFS_EMBEDDED
+       depends on SQUASHFS
+       default "3"
+       help
+         By default SquashFS caches the last 3 fragments read from
+         the filesystem.  Increasing this amount may mean SquashFS
+         has to re-read fragments less often from disk, at the expense
+         of extra system memory.  Decreasing this amount will mean
+         SquashFS uses less memory at the expense of extra reads from disk.
+
+         Note there must be at least one cached fragment.  Anything
+         much more than three will probably not make much difference.
+
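+         As a concrete example of this trade-off, a memory-constrained
+         embedded configuration might enable SQUASHFS, say Y to
+         SQUASHFS_EMBEDDED, and lower SQUASHFS_FRAGMENT_CACHE_SIZE from the
+         default 3 towards 1, accepting extra fragment reads from the medium
+         in exchange for a smaller cache footprint.
+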
 config VXFS_FS
        tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
        depends on BLOCK
index bc4e14df1082319979ff6b52b604bccaca0f2636..38bc735c67ad2ebfe79a4d64d35e6ad01117c8d6 100644 (file)
@@ -74,6 +74,7 @@ obj-$(CONFIG_JBD)             += jbd/
 obj-$(CONFIG_JBD2)             += jbd2/
 obj-$(CONFIG_EXT2_FS)          += ext2/
 obj-$(CONFIG_CRAMFS)           += cramfs/
+obj-$(CONFIG_SQUASHFS)         += squashfs/
 obj-y                          += ramfs/
 obj-$(CONFIG_HUGETLBFS)                += hugetlbfs/
 obj-$(CONFIG_CODA_FS)          += coda/
index aa5b43205e3732a622e849d157b6227bb722ca81..f3e72c5c19f56ec0deee0614e1a625df511a58e0 100644 (file)
@@ -168,9 +168,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
        struct elf_fdpic_params exec_params, interp_params;
        struct elf_phdr *phdr;
        unsigned long stack_size, entryaddr;
-#ifndef CONFIG_MMU
-       unsigned long fullsize;
-#endif
 #ifdef ELF_FDPIC_PLAT_INIT
        unsigned long dynaddr;
 #endif
@@ -390,11 +387,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
                goto error_kill;
        }
 
-       /* expand the stack mapping to use up the entire allocation granule */
-       fullsize = kobjsize((char *) current->mm->start_brk);
-       if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size,
-                                   fullsize, 0, 0)))
-               stack_size = fullsize;
        up_write(&current->mm->mmap_sem);
 
        current->mm->brk = current->mm->start_brk;
@@ -1567,11 +1559,9 @@ end_coredump:
 static int elf_fdpic_dump_segments(struct file *file, size_t *size,
                           unsigned long *limit, unsigned long mm_flags)
 {
-       struct vm_list_struct *vml;
-
-       for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
-       struct vm_area_struct *vma = vml->vma;
+       struct vm_area_struct *vma;
 
+       for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                if (!maydump(vma, mm_flags))
                        continue;
 
@@ -1617,9 +1607,6 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
        elf_fpxregset_t *xfpu = NULL;
 #endif
        int thread_status_size = 0;
-#ifndef CONFIG_MMU
-       struct vm_list_struct *vml;
-#endif
        elf_addr_t *auxv;
        unsigned long mm_flags;
 
@@ -1685,13 +1672,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
        fill_prstatus(prstatus, current, signr);
        elf_core_copy_regs(&prstatus->pr_reg, regs);
 
-#ifdef CONFIG_MMU
        segs = current->mm->map_count;
-#else
-       segs = 0;
-       for (vml = current->mm->context.vmlist; vml; vml = vml->next)
-           segs++;
-#endif
 #ifdef ELF_CORE_EXTRA_PHDRS
        segs += ELF_CORE_EXTRA_PHDRS;
 #endif
@@ -1766,20 +1747,10 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
        mm_flags = current->mm->flags;
 
        /* write program headers for segments dump */
-       for (
-#ifdef CONFIG_MMU
-               vma = current->mm->mmap; vma; vma = vma->vm_next
-#else
-                       vml = current->mm->context.vmlist; vml; vml = vml->next
-#endif
-            ) {
+       for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                struct elf_phdr phdr;
                size_t sz;
 
-#ifndef CONFIG_MMU
-               vma = vml->vma;
-#endif
-
                sz = vma->vm_end - vma->vm_start;
 
                phdr.p_type = PT_LOAD;
index 7bbd5c6b37257ad5beba8180f9249d6f4eb1a6f3..5cebf0b37798422ff097260f657dc128ee2b0b58 100644 (file)
@@ -417,8 +417,8 @@ static int load_flat_file(struct linux_binprm * bprm,
        unsigned long textpos = 0, datapos = 0, result;
        unsigned long realdatastart = 0;
        unsigned long text_len, data_len, bss_len, stack_len, flags;
-       unsigned long len, reallen, memp = 0;
-       unsigned long extra, rlim;
+       unsigned long len, memp = 0;
+       unsigned long memp_size, extra, rlim;
        unsigned long *reloc = 0, *rp;
        struct inode *inode;
        int i, rev, relocs = 0;
@@ -543,17 +543,10 @@ static int load_flat_file(struct linux_binprm * bprm,
                }
 
                len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+               len = PAGE_ALIGN(len);
                down_write(&current->mm->mmap_sem);
                realdatastart = do_mmap(0, 0, len,
                        PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
-               /* Remap to use all availabe slack region space */
-               if (realdatastart && (realdatastart < (unsigned long)-4096)) {
-                       reallen = kobjsize((void *)realdatastart);
-                       if (reallen > len) {
-                               realdatastart = do_mremap(realdatastart, len,
-                                       reallen, MREMAP_FIXED, realdatastart);
-                       }
-               }
                up_write(&current->mm->mmap_sem);
 
                if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) {
@@ -591,21 +584,14 @@ static int load_flat_file(struct linux_binprm * bprm,
 
                reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
                memp = realdatastart;
-
+               memp_size = len;
        } else {
 
                len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+               len = PAGE_ALIGN(len);
                down_write(&current->mm->mmap_sem);
                textpos = do_mmap(0, 0, len,
                        PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
-               /* Remap to use all availabe slack region space */
-               if (textpos && (textpos < (unsigned long) -4096)) {
-                       reallen = kobjsize((void *)textpos);
-                       if (reallen > len) {
-                               textpos = do_mremap(textpos, len, reallen,
-                                       MREMAP_FIXED, textpos);
-                       }
-               }
                up_write(&current->mm->mmap_sem);
 
                if (!textpos  || textpos >= (unsigned long) -4096) {
@@ -622,7 +608,7 @@ static int load_flat_file(struct linux_binprm * bprm,
                reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) +
                                MAX_SHARED_LIBS * sizeof(unsigned long));
                memp = textpos;
-
+               memp_size = len;
 #ifdef CONFIG_BINFMT_ZFLAT
                /*
                 * load it all in and treat it like a RAM load from now on
@@ -680,10 +666,12 @@ static int load_flat_file(struct linux_binprm * bprm,
                 * set up the brk stuff, uses any slack left in data/bss/stack
                 * allocation.  We put the brk after the bss (between the bss
                 * and stack) like other platforms.
+                * Userspace code relies on the stack pointer starting out at
+                * an address right at the end of a page.
                 */
                current->mm->start_brk = datapos + data_len + bss_len;
                current->mm->brk = (current->mm->start_brk + 3) & ~3;
-               current->mm->context.end_brk = memp + kobjsize((void *) memp) - stack_len;
+               current->mm->context.end_brk = memp + memp_size - stack_len;
        }
 
        if (flags & FLAT_FLAG_KTRACE)
@@ -790,8 +778,8 @@ static int load_flat_file(struct linux_binprm * bprm,
 
        /* zero the BSS,  BRK and stack areas */
        memset((void*)(datapos + data_len), 0, bss_len + 
-                       (memp + kobjsize((void *) memp) - stack_len -   /* end brk */
-                       libinfo->lib_list[id].start_brk) +              /* start brk */
+                       (memp + memp_size - stack_len -         /* end brk */
+                       libinfo->lib_list[id].start_brk) +      /* start brk */
                        stack_len);
 
        return 0;
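
The net effect of the flat-binary changes is that the old kobjsize()/do_mremap() slack-space games are replaced by a single PAGE_ALIGN() of the mapping length, remembered in memp_size, which is what lets end_brk (and hence the initial stack pointer) land exactly at the end of a page. A tiny userspace-style illustration of that rounding, assuming 4 KiB pages (the macro mirrors the kernel's PAGE_ALIGN definition):

#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_PAGE_ALIGN(x)	(((x) + EX_PAGE_SIZE - 1) & EX_PAGE_MASK)

int main(void)
{
	unsigned long len = 0x2f31;	/* arbitrary text+data+bss+extra length */

	printf("mapping length %#lx rounds up to %#lx\n", len, EX_PAGE_ALIGN(len));
	return 0;
}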
index 1750445556c313dbb9ddf716a52cce14fe6deac9..507ed6ec1847cfdf7d3ae0377816228f249f42a0 100644 (file)
@@ -366,9 +366,6 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c);
 void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
 struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
 void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
-struct rb_node *rb_next(struct rb_node *);
-struct rb_node *rb_prev(struct rb_node *);
-void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
 int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
 uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
 struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
index 3e8aeb8b61ce251601f8795d3f6bd4fd423beed0..cd53ff838498de03973494d6a70ddae9f193a1ce 100644 (file)
@@ -41,8 +41,6 @@ do {                                          \
        (vmi)->used = 0;                        \
        (vmi)->largest_chunk = 0;               \
 } while(0)
-
-extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
 #endif
 
 extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
index b1675c4e66dae952d5b1dfbf0eade72f83077b7f..43d23948384addca6930aa65acbf49804226827f 100644 (file)
@@ -73,6 +73,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                "HighFree:       %8lu kB\n"
                "LowTotal:       %8lu kB\n"
                "LowFree:        %8lu kB\n"
+#endif
+#ifndef CONFIG_MMU
+               "MmapCopy:       %8lu kB\n"
 #endif
                "SwapTotal:      %8lu kB\n"
                "SwapFree:       %8lu kB\n"
@@ -115,6 +118,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freehigh),
                K(i.totalram-i.totalhigh),
                K(i.freeram-i.freehigh),
+#endif
+#ifndef CONFIG_MMU
+               K((unsigned long) atomic_read(&mmap_pages_allocated)),
 #endif
                K(i.totalswap),
                K(i.freeswap),
index 3f87d2632947371b47df580c32d041fb39bb91ba..b446d7ad0b0da469f02521c451b98ec8cc1bed32 100644 (file)
 #include "internal.h"
 
 /*
- * display a single VMA to a sequenced file
+ * display a single region to a sequenced file
  */
-int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 {
        unsigned long ino = 0;
        struct file *file;
        dev_t dev = 0;
        int flags, len;
 
-       flags = vma->vm_flags;
-       file = vma->vm_file;
+       flags = region->vm_flags;
+       file = region->vm_file;
 
        if (file) {
-               struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+               struct inode *inode = region->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }
 
        seq_printf(m,
                   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
-                  vma->vm_start,
-                  vma->vm_end,
+                  region->vm_start,
+                  region->vm_end,
                   flags & VM_READ ? 'r' : '-',
                   flags & VM_WRITE ? 'w' : '-',
                   flags & VM_EXEC ? 'x' : '-',
                   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
-                  ((loff_t)vma->vm_pgoff) << PAGE_SHIFT,
+                  ((loff_t)region->vm_pgoff) << PAGE_SHIFT,
                   MAJOR(dev), MINOR(dev), ino, &len);
 
        if (file) {
@@ -75,61 +75,54 @@ int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
 }
 
 /*
- * display a list of all the VMAs the kernel knows about
+ * display a list of all the REGIONs the kernel knows about
  * - nommu kernels have a single flat list
  */
-static int nommu_vma_list_show(struct seq_file *m, void *v)
+static int nommu_region_list_show(struct seq_file *m, void *_p)
 {
-       struct vm_area_struct *vma;
+       struct rb_node *p = _p;
 
-       vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
-       return nommu_vma_show(m, vma);
+       return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb));
 }
 
-static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
+static void *nommu_region_list_start(struct seq_file *m, loff_t *_pos)
 {
-       struct rb_node *_rb;
+       struct rb_node *p;
        loff_t pos = *_pos;
-       void *next = NULL;
 
-       down_read(&nommu_vma_sem);
+       down_read(&nommu_region_sem);
 
-       for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
-               if (pos == 0) {
-                       next = _rb;
-                       break;
-               }
-               pos--;
-       }
-
-       return next;
+       for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))
+               if (pos-- == 0)
+                       return p;
+       return NULL;
 }
 
-static void nommu_vma_list_stop(struct seq_file *m, void *v)
+static void nommu_region_list_stop(struct seq_file *m, void *v)
 {
-       up_read(&nommu_vma_sem);
+       up_read(&nommu_region_sem);
 }
 
-static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
+static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos)
 {
        (*pos)++;
        return rb_next((struct rb_node *) v);
 }
 
-static const struct seq_operations proc_nommu_vma_list_seqop = {
-       .start  = nommu_vma_list_start,
-       .next   = nommu_vma_list_next,
-       .stop   = nommu_vma_list_stop,
-       .show   = nommu_vma_list_show
+static struct seq_operations proc_nommu_region_list_seqop = {
+       .start  = nommu_region_list_start,
+       .next   = nommu_region_list_next,
+       .stop   = nommu_region_list_stop,
+       .show   = nommu_region_list_show
 };
 
-static int proc_nommu_vma_list_open(struct inode *inode, struct file *file)
+static int proc_nommu_region_list_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &proc_nommu_vma_list_seqop);
+       return seq_open(file, &proc_nommu_region_list_seqop);
 }
 
-static const struct file_operations proc_nommu_vma_list_operations = {
-       .open    = proc_nommu_vma_list_open,
+static const struct file_operations proc_nommu_region_list_operations = {
+       .open    = proc_nommu_region_list_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
@@ -137,7 +130,7 @@ static const struct file_operations proc_nommu_vma_list_operations = {
 
 static int __init proc_nommu_init(void)
 {
-       proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations);
+       proc_create("maps", S_IRUGO, NULL, &proc_nommu_region_list_operations);
        return 0;
 }
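
Both the global region list here and the per-mm VMA walks in the following hunks use the same rb-tree idiom: rb_first()/rb_next() traverse the tree in order and rb_entry() recovers the structure that embeds the rb_node. Boiled down to a generic sketch (the example structure is made up):

#include <linux/rbtree.h>

struct example_span {
	struct rb_node	rb;		/* linkage inside the tree */
	unsigned long	start;
	unsigned long	end;
};

/* in-order walk over every node, summing the span sizes */
static unsigned long example_total(struct rb_root *root)
{
	struct rb_node *p;
	unsigned long total = 0;

	for (p = rb_first(root); p; p = rb_next(p)) {
		struct example_span *s = rb_entry(p, struct example_span, rb);

		total += s->end - s->start;
	}
	return total;
}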
 
index d4a8be32b902279a3f2c84b1d4d137636dd3f7de..343ea1216bc8e05ddc84e75af47cfbab73fee529 100644 (file)
  */
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-       struct vm_list_struct *vml;
-       unsigned long bytes = 0, sbytes = 0, slack = 0;
+       struct vm_area_struct *vma;
+       struct vm_region *region;
+       struct rb_node *p;
+       unsigned long bytes = 0, sbytes = 0, slack = 0, size;
         
        down_read(&mm->mmap_sem);
-       for (vml = mm->context.vmlist; vml; vml = vml->next) {
-               if (!vml->vma)
-                       continue;
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+               vma = rb_entry(p, struct vm_area_struct, vm_rb);
+
+               bytes += kobjsize(vma);
+
+               region = vma->vm_region;
+               if (region) {
+                       size = kobjsize(region);
+                       size += region->vm_end - region->vm_start;
+               } else {
+                       size = vma->vm_end - vma->vm_start;
+               }
 
-               bytes += kobjsize(vml);
                if (atomic_read(&mm->mm_count) > 1 ||
-                   atomic_read(&vml->vma->vm_usage) > 1
-                   ) {
-                       sbytes += kobjsize((void *) vml->vma->vm_start);
-                       sbytes += kobjsize(vml->vma);
+                   vma->vm_flags & VM_MAYSHARE) {
+                       sbytes += size;
                } else {
-                       bytes += kobjsize((void *) vml->vma->vm_start);
-                       bytes += kobjsize(vml->vma);
-                       slack += kobjsize((void *) vml->vma->vm_start) -
-                               (vml->vma->vm_end - vml->vma->vm_start);
+                       bytes += size;
+                       if (region)
+                               slack = region->vm_end - vma->vm_end;
                }
        }
 
@@ -70,13 +77,14 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
-       struct vm_list_struct *tbp;
+       struct vm_area_struct *vma;
+       struct rb_node *p;
        unsigned long vsize = 0;
 
        down_read(&mm->mmap_sem);
-       for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-               if (tbp->vma)
-                       vsize += kobjsize((void *) tbp->vma->vm_start);
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+               vma = rb_entry(p, struct vm_area_struct, vm_rb);
+               vsize += vma->vm_end - vma->vm_start;
        }
        up_read(&mm->mmap_sem);
        return vsize;
@@ -85,15 +93,19 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
 {
-       struct vm_list_struct *tbp;
+       struct vm_area_struct *vma;
+       struct vm_region *region;
+       struct rb_node *p;
        int size = kobjsize(mm);
 
        down_read(&mm->mmap_sem);
-       for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-               size += kobjsize(tbp);
-               if (tbp->vma) {
-                       size += kobjsize(tbp->vma);
-                       size += kobjsize((void *) tbp->vma->vm_start);
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+               vma = rb_entry(p, struct vm_area_struct, vm_rb);
+               size += kobjsize(vma);
+               region = vma->vm_region;
+               if (region) {
+                       size += kobjsize(region);
+                       size += region->vm_end - region->vm_start;
                }
        }
 
@@ -104,21 +116,63 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
        return size;
 }
 
+/*
+ * display a single VMA to a sequenced file
+ */
+static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+{
+       unsigned long ino = 0;
+       struct file *file;
+       dev_t dev = 0;
+       int flags, len;
+
+       flags = vma->vm_flags;
+       file = vma->vm_file;
+
+       if (file) {
+               struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+               dev = inode->i_sb->s_dev;
+               ino = inode->i_ino;
+       }
+
+       seq_printf(m,
+                  "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+                  vma->vm_start,
+                  vma->vm_end,
+                  flags & VM_READ ? 'r' : '-',
+                  flags & VM_WRITE ? 'w' : '-',
+                  flags & VM_EXEC ? 'x' : '-',
+                  flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+                  vma->vm_pgoff << PAGE_SHIFT,
+                  MAJOR(dev), MINOR(dev), ino, &len);
+
+       if (file) {
+               len = 25 + sizeof(void *) * 6 - len;
+               if (len < 1)
+                       len = 1;
+               seq_printf(m, "%*c", len, ' ');
+               seq_path(m, &file->f_path, "");
+       }
+
+       seq_putc(m, '\n');
+       return 0;
+}
+
 /*
  * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *_vml)
+static int show_map(struct seq_file *m, void *_p)
 {
-       struct vm_list_struct *vml = _vml;
+       struct rb_node *p = _p;
 
-       return nommu_vma_show(m, vml->vma);
+       return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
        struct proc_maps_private *priv = m->private;
-       struct vm_list_struct *vml;
        struct mm_struct *mm;
+       struct rb_node *p;
        loff_t n = *pos;
 
        /* pin the task and mm whilst we play with them */
@@ -134,9 +188,9 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        }
 
        /* start from the Nth VMA */
-       for (vml = mm->context.vmlist; vml; vml = vml->next)
+       for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
                if (n-- == 0)
-                       return vml;
+                       return p;
        return NULL;
 }
 
@@ -152,12 +206,12 @@ static void m_stop(struct seq_file *m, void *_vml)
        }
 }
 
-static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 {
-       struct vm_list_struct *vml = _vml;
+       struct rb_node *p = _p;
 
        (*pos)++;
-       return vml ? vml->next : NULL;
+       return p ? rb_next(p) : NULL;
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
index 76acdbc3461144512208755d4b69946d7eda76e3..b9b567a28376313a33b74ad9668eda99134b70bc 100644 (file)
@@ -262,11 +262,11 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
        ret = -ENOMEM;
        pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
-               goto out;
+               goto out_free;
 
        nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
        if (nr != lpages)
-               goto out; /* leave if some pages were missing */
+               goto out_free_pages; /* leave if some pages were missing */
 
        /* check the pages for physical adjacency */
        ptr = pages;
@@ -274,19 +274,18 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
        page++;
        for (loop = lpages; loop > 1; loop--)
                if (*ptr++ != page++)
-                       goto out;
+                       goto out_free_pages;
 
        /* okay - all conditions fulfilled */
        ret = (unsigned long) page_address(pages[0]);
 
- out:
-       if (pages) {
-               ptr = pages;
-               for (loop = lpages; loop > 0; loop--)
-                       put_page(*ptr++);
-               kfree(pages);
-       }
-
+out_free_pages:
+       ptr = pages;
+       for (loop = nr; loop > 0; loop--)
+               put_page(*ptr++);
+out_free:
+       kfree(pages);
+out:
        return ret;
 }
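
The ramfs fix above is the standard staged-unwind shape: one label per acquired resource, released in reverse order, so an early failure never touches pages that were never taken, while the success path can fall through the same labels because the allocations are only needed temporarily. Reduced to a skeleton with invented names:

#include <linux/errno.h>
#include <linux/slab.h>

static int example_scan(void)
{
	char *a, *b;
	int ret = -ENOMEM;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		goto out;		/* nothing allocated yet */

	b = kzalloc(64, GFP_KERNEL);
	if (!b)
		goto out_free_a;	/* undo only what already exists */

	/* ... do the actual work with a and b ... */
	ret = 0;

	kfree(b);
out_free_a:
	kfree(a);
out:
	return ret;
}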
 
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
new file mode 100644 (file)
index 0000000..8258cf9
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux squashfs routines.
+#
+
+obj-$(CONFIG_SQUASHFS) += squashfs.o
+squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
+squashfs-y += namei.o super.o symlink.o
+#squashfs-y += squashfs2_0.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
new file mode 100644 (file)
index 0000000..c837dfc
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * block.c
+ */
+
+/*
+ * This file implements the low-level routines to read and decompress
+ * datablocks and metadata blocks.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Read the metadata block length; this is stored in the first two
+ * bytes of the metadata block.
+ */
+static struct buffer_head *get_block_length(struct super_block *sb,
+                       u64 *cur_index, int *offset, int *length)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       struct buffer_head *bh;
+
+       bh = sb_bread(sb, *cur_index);
+       if (bh == NULL)
+               return NULL;
+
+       if (msblk->devblksize - *offset == 1) {
+               *length = (unsigned char) bh->b_data[*offset];
+               put_bh(bh);
+               bh = sb_bread(sb, ++(*cur_index));
+               if (bh == NULL)
+                       return NULL;
+               *length |= (unsigned char) bh->b_data[0] << 8;
+               *offset = 1;
+       } else {
+               *length = (unsigned char) bh->b_data[*offset] |
+                       (unsigned char) bh->b_data[*offset + 1] << 8;
+               *offset += 2;
+       }
+
+       return bh;
+}
+
+
+/*
+ * Read and decompress a metadata block or datablock.  Length is non-zero
+ * if a datablock is being read (the size is stored elsewhere in the
+ * filesystem), otherwise the length is obtained from the first two bytes of
+ * the metadata block.  A bit in the length field indicates if the block
+ * is stored uncompressed in the filesystem (usually because compression
+ * generated a larger block - this does occasionally happen with zlib).
+ */
+int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
+                       int length, u64 *next_index, int srclength)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       struct buffer_head **bh;
+       int offset = index & ((1 << msblk->devblksize_log2) - 1);
+       u64 cur_index = index >> msblk->devblksize_log2;
+       int bytes, compressed, b = 0, k = 0, page = 0, avail;
+
+
+       bh = kcalloc((msblk->block_size >> msblk->devblksize_log2) + 1,
+                               sizeof(*bh), GFP_KERNEL);
+       if (bh == NULL)
+               return -ENOMEM;
+
+       if (length) {
+               /*
+                * Datablock.
+                */
+               bytes = -offset;
+               compressed = SQUASHFS_COMPRESSED_BLOCK(length);
+               length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
+               if (next_index)
+                       *next_index = index + length;
+
+               TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
+                       index, compressed ? "" : "un", length, srclength);
+
+               if (length < 0 || length > srclength ||
+                               (index + length) > msblk->bytes_used)
+                       goto read_failure;
+
+               for (b = 0; bytes < length; b++, cur_index++) {
+                       bh[b] = sb_getblk(sb, cur_index);
+                       if (bh[b] == NULL)
+                               goto block_release;
+                       bytes += msblk->devblksize;
+               }
+               ll_rw_block(READ, b, bh);
+       } else {
+               /*
+                * Metadata block.
+                */
+               if ((index + 2) > msblk->bytes_used)
+                       goto read_failure;
+
+               bh[0] = get_block_length(sb, &cur_index, &offset, &length);
+               if (bh[0] == NULL)
+                       goto read_failure;
+               b = 1;
+
+               bytes = msblk->devblksize - offset;
+               compressed = SQUASHFS_COMPRESSED(length);
+               length = SQUASHFS_COMPRESSED_SIZE(length);
+               if (next_index)
+                       *next_index = index + length + 2;
+
+               TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
+                               compressed ? "" : "un", length);
+
+               if (length < 0 || length > srclength ||
+                                       (index + length) > msblk->bytes_used)
+                       goto block_release;
+
+               for (; bytes < length; b++) {
+                       bh[b] = sb_getblk(sb, ++cur_index);
+                       if (bh[b] == NULL)
+                               goto block_release;
+                       bytes += msblk->devblksize;
+               }
+               ll_rw_block(READ, b - 1, bh + 1);
+       }
+
+       if (compressed) {
+               int zlib_err = 0, zlib_init = 0;
+
+               /*
+                * Uncompress block.
+                */
+
+               mutex_lock(&msblk->read_data_mutex);
+
+               msblk->stream.avail_out = 0;
+               msblk->stream.avail_in = 0;
+
+               bytes = length;
+               do {
+                       if (msblk->stream.avail_in == 0 && k < b) {
+                               avail = min(bytes, msblk->devblksize - offset);
+                               bytes -= avail;
+                               wait_on_buffer(bh[k]);
+                               if (!buffer_uptodate(bh[k]))
+                                       goto release_mutex;
+
+                               if (avail == 0) {
+                                       offset = 0;
+                                       put_bh(bh[k++]);
+                                       continue;
+                               }
+
+                               msblk->stream.next_in = bh[k]->b_data + offset;
+                               msblk->stream.avail_in = avail;
+                               offset = 0;
+                       }
+
+                       if (msblk->stream.avail_out == 0) {
+                               msblk->stream.next_out = buffer[page++];
+                               msblk->stream.avail_out = PAGE_CACHE_SIZE;
+                       }
+
+                       if (!zlib_init) {
+                               zlib_err = zlib_inflateInit(&msblk->stream);
+                               if (zlib_err != Z_OK) {
+                                       ERROR("zlib_inflateInit returned"
+                                               " unexpected result 0x%x,"
+                                               " srclength %d\n", zlib_err,
+                                               srclength);
+                                       goto release_mutex;
+                               }
+                               zlib_init = 1;
+                       }
+
+                       zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH);
+
+                       if (msblk->stream.avail_in == 0 && k < b)
+                               put_bh(bh[k++]);
+               } while (zlib_err == Z_OK);
+
+               if (zlib_err != Z_STREAM_END) {
+                       ERROR("zlib_inflate returned unexpected result"
+                               " 0x%x, srclength %d, avail_in %d,"
+                               " avail_out %d\n", zlib_err, srclength,
+                               msblk->stream.avail_in,
+                               msblk->stream.avail_out);
+                       goto release_mutex;
+               }
+
+               zlib_err = zlib_inflateEnd(&msblk->stream);
+               if (zlib_err != Z_OK) {
+                       ERROR("zlib_inflateEnd returned unexpected result 0x%x,"
+                               " srclength %d\n", zlib_err, srclength);
+                       goto release_mutex;
+               }
+               length = msblk->stream.total_out;
+               mutex_unlock(&msblk->read_data_mutex);
+       } else {
+               /*
+                * Block is uncompressed.
+                */
+               int i, in, pg_offset = 0;
+
+               for (i = 0; i < b; i++) {
+                       wait_on_buffer(bh[i]);
+                       if (!buffer_uptodate(bh[i]))
+                               goto block_release;
+               }
+
+               for (bytes = length; k < b; k++) {
+                       in = min(bytes, msblk->devblksize - offset);
+                       bytes -= in;
+                       while (in) {
+                               if (pg_offset == PAGE_CACHE_SIZE) {
+                                       page++;
+                                       pg_offset = 0;
+                               }
+                               avail = min_t(int, in, PAGE_CACHE_SIZE -
+                                               pg_offset);
+                               memcpy(buffer[page] + pg_offset,
+                                               bh[k]->b_data + offset, avail);
+                               in -= avail;
+                               pg_offset += avail;
+                               offset += avail;
+                       }
+                       offset = 0;
+                       put_bh(bh[k]);
+               }
+       }
+
+       kfree(bh);
+       return length;
+
+release_mutex:
+       mutex_unlock(&msblk->read_data_mutex);
+
+block_release:
+       for (; k < b; k++)
+               put_bh(bh[k]);
+
+read_failure:
+       ERROR("sb_bread failed reading block 0x%llx\n", cur_index);
+       kfree(bh);
+       return -EIO;
+}
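
get_block_length() above only assembles the two-byte little-endian header; the SQUASHFS_COMPRESSED*() macros then split it into a size and an "is it compressed" flag, as the comment before squashfs_read_data() describes. A purely illustrative decoder of that header (the bit position is an assumption here; squashfs_fs.h holds the authoritative macros):

#include <stdio.h>
#include <stdint.h>

#define EX_UNCOMPRESSED_BIT	(1 << 15)	/* assumed flag bit of the 16-bit header */

int main(void)
{
	uint16_t header = 0x801c;	/* arbitrary example value */
	int stored_uncompressed = header & EX_UNCOMPRESSED_BIT;
	int on_disk_length = header & ~EX_UNCOMPRESSED_BIT;

	printf("on-disk length %d bytes, stored %scompressed\n",
	       on_disk_length, stored_uncompressed ? "un" : "");
	return 0;
}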
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
new file mode 100644 (file)
index 0000000..f29eda1
--- /dev/null
@@ -0,0 +1,412 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * cache.c
+ */
+
+/*
+ * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
+ * recently accessed data Squashfs uses two small metadata and fragment caches.
+ *
+ * This file implements a generic cache implementation used for both caches,
+ * plus functions layered on top of the generic cache implementation to
+ * access the metadata and fragment caches.
+ *
+ * To avoid out-of-memory and fragmentation issues with vmalloc, the cache
+ * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ *
+ * It should be noted that the cache is not used for file datablocks; these
+ * are decompressed and cached in the page-cache in the normal way.  The
+ * cache is only used to temporarily cache fragment and metadata blocks
+ * which have been read as as a result of a metadata (i.e. inode or
+ * directory) or fragment access.  Because metadata and fragments are packed
+ * together into blocks (to gain greater compression) the read of a particular
+ * piece of metadata or fragment will retrieve other metadata/fragments which
+ * have been packed with it, these because of locality-of-reference may be read
+ * in the near future. Temporarily caching them ensures they are available for
+ * near future access without requiring an additional read and decompress.
+ */
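+
+/*
+ * A minimal usage sketch of the cache API implemented below (illustrative
+ * only; 'blk', 'len' and 'buf' are placeholder names, and 'buf' is assumed
+ * to be at least entry->length bytes):
+ *
+ *     entry = squashfs_cache_get(sb, msblk->block_cache, blk, len);
+ *     if (!entry->error)
+ *             squashfs_copy_data(buf, entry, 0, entry->length);
+ *     squashfs_cache_put(entry);
+ */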
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/zlib.h>
+#include <linux/pagemap.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look-up block in cache, and increment usage count.  If not in cache, read
+ * and decompress it from disk.
+ */
+struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
+       struct squashfs_cache *cache, u64 block, int length)
+{
+       int i, n;
+       struct squashfs_cache_entry *entry;
+
+       spin_lock(&cache->lock);
+
+       while (1) {
+               for (i = 0; i < cache->entries; i++)
+                       if (cache->entry[i].block == block)
+                               break;
+
+               if (i == cache->entries) {
+                       /*
+                        * Block not in cache.  If all cache entries are used,
+                        * go to sleep waiting for one to become available.
+                        */
+                       if (cache->unused == 0) {
+                               cache->num_waiters++;
+                               spin_unlock(&cache->lock);
+                               wait_event(cache->wait_queue, cache->unused);
+                               spin_lock(&cache->lock);
+                               cache->num_waiters--;
+                               continue;
+                       }
+
+                       /*
+                        * At least one unused cache entry.  A simple
+                        * round-robin strategy is used to choose the entry to
+                        * be evicted from the cache.
+                        */
+                       i = cache->next_blk;
+                       for (n = 0; n < cache->entries; n++) {
+                               if (cache->entry[i].refcount == 0)
+                                       break;
+                               i = (i + 1) % cache->entries;
+                       }
+
+                       cache->next_blk = (i + 1) % cache->entries;
+                       entry = &cache->entry[i];
+
+                       /*
+                        * Initialise the chosen cache entry, and fill it in from
+                        * disk.
+                        */
+                       cache->unused--;
+                       entry->block = block;
+                       entry->refcount = 1;
+                       entry->pending = 1;
+                       entry->num_waiters = 0;
+                       entry->error = 0;
+                       spin_unlock(&cache->lock);
+
+                       entry->length = squashfs_read_data(sb, entry->data,
+                               block, length, &entry->next_index,
+                               cache->block_size);
+
+                       spin_lock(&cache->lock);
+
+                       if (entry->length < 0)
+                               entry->error = entry->length;
+
+                       entry->pending = 0;
+
+                       /*
+                        * While we have been filling this entry, one or more
+                        * other processes may have looked it up in the cache
+                        * and slept waiting for it to become available.
+                        */
+                       if (entry->num_waiters) {
+                               spin_unlock(&cache->lock);
+                               wake_up_all(&entry->wait_queue);
+                       } else
+                               spin_unlock(&cache->lock);
+
+                       goto out;
+               }
+
+               /*
+                * Block already in cache.  Increment refcount so it doesn't
+                * get reused until we're finished with it.  If it was
+                * previously unused there's now one less cache entry available
+                * for reuse.
+                */
+               entry = &cache->entry[i];
+               if (entry->refcount == 0)
+                       cache->unused--;
+               entry->refcount++;
+
+               /*
+                * If the entry is currently being filled in by another
+                * process, go to sleep waiting for it to become available.
+                */
+               if (entry->pending) {
+                       entry->num_waiters++;
+                       spin_unlock(&cache->lock);
+                       wait_event(entry->wait_queue, !entry->pending);
+               } else
+                       spin_unlock(&cache->lock);
+
+               goto out;
+       }
+
+out:
+       TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
+               cache->name, i, entry->block, entry->refcount, entry->error);
+
+       if (entry->error)
+               ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
+                                                       block);
+       return entry;
+}
+
+
+/*
+ * Release a cache entry; once its usage count reaches zero it can be reused.
+ */
+void squashfs_cache_put(struct squashfs_cache_entry *entry)
+{
+       struct squashfs_cache *cache = entry->cache;
+
+       spin_lock(&cache->lock);
+       entry->refcount--;
+       if (entry->refcount == 0) {
+               cache->unused++;
+               /*
+                * If there are any processes waiting for a block to become
+                * available, wake one up.
+                */
+               if (cache->num_waiters) {
+                       spin_unlock(&cache->lock);
+                       wake_up(&cache->wait_queue);
+                       return;
+               }
+       }
+       spin_unlock(&cache->lock);
+}
+
+/*
+ * Delete the cache, reclaiming all kmalloced buffers.
+ */
+void squashfs_cache_delete(struct squashfs_cache *cache)
+{
+       int i, j;
+
+       if (cache == NULL)
+               return;
+
+       for (i = 0; i < cache->entries; i++) {
+               if (cache->entry[i].data) {
+                       for (j = 0; j < cache->pages; j++)
+                               kfree(cache->entry[i].data[j]);
+                       kfree(cache->entry[i].data);
+               }
+       }
+
+       kfree(cache->entry);
+       kfree(cache);
+}
+
+
+/*
+ * Initialise the cache, allocating the specified number of entries, each of
+ * size block_size.  To avoid vmalloc fragmentation issues, each entry
+ * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ */
+struct squashfs_cache *squashfs_cache_init(char *name, int entries,
+       int block_size)
+{
+       int i, j;
+       struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+
+       if (cache == NULL) {
+               ERROR("Failed to allocate %s cache\n", name);
+               return NULL;
+       }
+
+       cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
+       if (cache->entry == NULL) {
+               ERROR("Failed to allocate %s cache\n", name);
+               goto cleanup;
+       }
+
+       cache->next_blk = 0;
+       cache->unused = entries;
+       cache->entries = entries;
+       cache->block_size = block_size;
+       cache->pages = block_size >> PAGE_CACHE_SHIFT;
+       cache->name = name;
+       cache->num_waiters = 0;
+       spin_lock_init(&cache->lock);
+       init_waitqueue_head(&cache->wait_queue);
+
+       for (i = 0; i < entries; i++) {
+               struct squashfs_cache_entry *entry = &cache->entry[i];
+
+               init_waitqueue_head(&cache->entry[i].wait_queue);
+               entry->cache = cache;
+               entry->block = SQUASHFS_INVALID_BLK;
+               entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
+               if (entry->data == NULL) {
+                       ERROR("Failed to allocate %s cache entry\n", name);
+                       goto cleanup;
+               }
+
+               for (j = 0; j < cache->pages; j++) {
+                       entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+                       if (entry->data[j] == NULL) {
+                               ERROR("Failed to allocate %s buffer\n", name);
+                               goto cleanup;
+                       }
+               }
+       }
+
+       return cache;
+
+cleanup:
+       squashfs_cache_delete(cache);
+       return NULL;
+}
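+
+/*
+ * Sizing example (assuming 4 KiB pages, i.e. PAGE_CACHE_SIZE == 4096): with
+ * the default 128 KiB block size, cache->pages = 131072 >> PAGE_CACHE_SHIFT
+ * = 32, so each cache entry is backed by 32 separately kmalloced 4 KiB
+ * buffers rather than by one large vmalloc allocation.
+ */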
+
+
+/*
+ * Copy up to length bytes from the cache entry to the buffer, starting at
+ * offset bytes into the cache entry.  If there are fewer than length bytes
+ * available, copy what is there.  In all cases return the number of bytes
+ * copied.
+ */
+int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
+               int offset, int length)
+{
+       int remaining = length;
+
+       if (length == 0)
+               return 0;
+       else if (buffer == NULL)
+               return min(length, entry->length - offset);
+
+       while (offset < entry->length) {
+               void *buff = entry->data[offset / PAGE_CACHE_SIZE]
+                               + (offset % PAGE_CACHE_SIZE);
+               int bytes = min_t(int, entry->length - offset,
+                               PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
+
+               if (bytes >= remaining) {
+                       memcpy(buffer, buff, remaining);
+                       remaining = 0;
+                       break;
+               }
+
+               memcpy(buffer, buff, bytes);
+               buffer += bytes;
+               remaining -= bytes;
+               offset += bytes;
+       }
+
+       return length - remaining;
+}
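+
+/*
+ * Worked example of the squashfs_copy_data() return value: for an entry with
+ * entry->length of 5000 bytes, a call with offset 4000 and length 2000 copies
+ * the 1000 bytes left after offset 4000 and returns 1000; with a NULL buffer
+ * the same call copies nothing and simply returns min(2000, 5000 - 4000).
+ */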
+
+
+/*
+ * Read length bytes from metadata position <block, offset> (block is the
+ * start of the compressed block on disk, and offset is the offset into
+ * the block once decompressed).  Data is packed into consecutive blocks,
+ * and length bytes may require reading more than one block.
+ */
+int squashfs_read_metadata(struct super_block *sb, void *buffer,
+               u64 *block, int *offset, int length)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int bytes, copied = length;
+       struct squashfs_cache_entry *entry;
+
+       TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+
+       while (length) {
+               entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+               if (entry->error)
+                       return entry->error;
+               else if (*offset >= entry->length)
+                       return -EIO;
+
+               bytes = squashfs_copy_data(buffer, entry, *offset, length);
+               if (buffer)
+                       buffer += bytes;
+               length -= bytes;
+               *offset += bytes;
+
+               if (*offset == entry->length) {
+                       *block = entry->next_index;
+                       *offset = 0;
+               }
+
+               squashfs_cache_put(entry);
+       }
+
+       return copied;
+}
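+
+/*
+ * Usage sketch (illustrative only; 'first' and 'second' are placeholder
+ * structures, not names used in this file).  The <block, offset> cursor is
+ * passed by reference and advanced past the bytes read, so items packed
+ * consecutively into the metadata can be read back to back:
+ *
+ *     err = squashfs_read_metadata(sb, &first, &block, &offset,
+ *                     sizeof(first));
+ *     if (err >= 0)
+ *             err = squashfs_read_metadata(sb, &second, &block, &offset,
+ *                             sizeof(second));
+ */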
+
+
+/*
+ * Look up in the fragment cache the fragment located at <start_block> in the
+ * filesystem.  If necessary, read and decompress it from disk.
+ */
+struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
+                               u64 start_block, int length)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+       return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
+               length);
+}
+
+
+/*
+ * Read and decompress the datablock located at <start_block> in the
+ * filesystem.  The cache is used here to avoid duplicating locking and
+ * read/decompress code.
+ */
+struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
+                               u64 start_block, int length)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+       return squashfs_cache_get(sb, msblk->read_page, start_block, length);
+}
+
+
+/*
+ * Read a filesystem table (uncompressed sequence of bytes) from disk
+ */
+int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
+       int length)
+{
+       int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       int i, res;
+       void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
+               data[i] = buffer;
+       res = squashfs_read_data(sb, data, block, length |
+               SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length);
+       kfree(data);
+       return res;
+}
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
new file mode 100644 (file)
index 0000000..566b0ea
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * dir.c
+ */
+
+/*
+ * This file implements code to read directories from disk.
+ *
+ * See namei.c for a description of directory organisation on disk.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static const unsigned char squashfs_filetype_table[] = {
+       DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
+};
+
+/*
+ * Lookup offset (f_pos) in the directory index, returning the
+ * metadata block containing it.
+ *
+ * If we get an error reading the index then return the part of the index
+ * (if any) we have managed to read - the index isn't essential, just
+ * quicker.
+ */
+static int get_dir_index_using_offset(struct super_block *sb,
+       u64 *next_block, int *next_offset, u64 index_start, int index_offset,
+       int i_count, u64 f_pos)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int err, i, index, length = 0;
+       struct squashfs_dir_index dir_index;
+
+       TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
+                                       i_count, f_pos);
+
+       /*
+        * Translate from external f_pos to the internal f_pos.  This
+        * is offset by 3 because we invent "." and ".." entries which are
+        * not actually stored in the directory.
+        */
+       if (f_pos < 3)
+               return f_pos;
+       f_pos -= 3;
+
+       for (i = 0; i < i_count; i++) {
+               err = squashfs_read_metadata(sb, &dir_index, &index_start,
+                               &index_offset, sizeof(dir_index));
+               if (err < 0)
+                       break;
+
+               index = le32_to_cpu(dir_index.index);
+               if (index > f_pos)
+                       /*
+                        * Found the index we're looking for.
+                        */
+                       break;
+
+               err = squashfs_read_metadata(sb, NULL, &index_start,
+                               &index_offset, le32_to_cpu(dir_index.size) + 1);
+               if (err < 0)
+                       break;
+
+               length = index;
+               *next_block = le32_to_cpu(dir_index.start_block) +
+                                       msblk->directory_table;
+       }
+
+       *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+
+       /*
+        * Translate back from internal f_pos to external f_pos.
+        */
+       return length + 3;
+}
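+
+/*
+ * Worked example of the f_pos translation above: an external f_pos of 3
+ * corresponds to on-disk position 0 (the first real entry after the invented
+ * "." and ".." entries), so the function subtracts 3 before scanning the
+ * index and adds 3 back onto the value it returns.
+ */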
+
+
+static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+       struct inode *inode = file->f_dentry->d_inode;
+       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+       u64 block = squashfs_i(inode)->start + msblk->directory_table;
+       int offset = squashfs_i(inode)->offset, length = 0, dir_count, size,
+                               type, err;
+       unsigned int inode_number;
+       struct squashfs_dir_header dirh;
+       struct squashfs_dir_entry *dire;
+
+       TRACE("Entered squashfs_readdir [%llx:%x]\n", block, offset);
+
+       dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
+       if (dire == NULL) {
+               ERROR("Failed to allocate squashfs_dir_entry\n");
+               goto finish;
+       }
+
+       /*
+        * Return "." and  ".." entries as the first two filenames in the
+        * directory.  To maximise compression these two entries are not
+        * stored in the directory, and so we invent them here.
+        *
+        * It also means that the external f_pos is offset by 3 from the
+        * on-disk directory f_pos.
+        */
+       while (file->f_pos < 3) {
+               char *name;
+               int i_ino;
+
+               if (file->f_pos == 0) {
+                       name = ".";
+                       size = 1;
+                       i_ino = inode->i_ino;
+               } else {
+                       name = "..";
+                       size = 2;
+                       i_ino = squashfs_i(inode)->parent;
+               }
+
+               TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n",
+                               dirent, name, size, file->f_pos, i_ino,
+                               squashfs_filetype_table[1]);
+
+               if (filldir(dirent, name, size, file->f_pos, i_ino,
+                               squashfs_filetype_table[1]) < 0) {
+                       TRACE("Filldir returned less than 0\n");
+                       goto finish;
+               }
+
+               file->f_pos += size;
+       }
+
+       length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
+                               squashfs_i(inode)->dir_idx_start,
+                               squashfs_i(inode)->dir_idx_offset,
+                               squashfs_i(inode)->dir_idx_cnt,
+                               file->f_pos);
+
+       while (length < i_size_read(inode)) {
+               /*
+                * Read directory header
+                */
+               err = squashfs_read_metadata(inode->i_sb, &dirh, &block,
+                                       &offset, sizeof(dirh));
+               if (err < 0)
+                       goto failed_read;
+
+               length += sizeof(dirh);
+
+               dir_count = le32_to_cpu(dirh.count) + 1;
+               while (dir_count--) {
+                       /*
+                        * Read directory entry.
+                        */
+                       err = squashfs_read_metadata(inode->i_sb, dire, &block,
+                                       &offset, sizeof(*dire));
+                       if (err < 0)
+                               goto failed_read;
+
+                       size = le16_to_cpu(dire->size) + 1;
+
+                       err = squashfs_read_metadata(inode->i_sb, dire->name,
+                                       &block, &offset, size);
+                       if (err < 0)
+                               goto failed_read;
+
+                       length += sizeof(*dire) + size;
+
+                       if (file->f_pos >= length)
+                               continue;
+
+                       dire->name[size] = '\0';
+                       inode_number = le32_to_cpu(dirh.inode_number) +
+                               ((short) le16_to_cpu(dire->inode_number));
+                       type = le16_to_cpu(dire->type);
+
+                       TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)"
+                                       "\n", dirent, dire->name, size,
+                                       file->f_pos,
+                                       le32_to_cpu(dirh.start_block),
+                                       le16_to_cpu(dire->offset),
+                                       inode_number,
+                                       squashfs_filetype_table[type]);
+
+                       if (filldir(dirent, dire->name, size, file->f_pos,
+                                       inode_number,
+                                       squashfs_filetype_table[type]) < 0) {
+                               TRACE("Filldir returned less than 0\n");
+                               goto finish;
+                       }
+
+                       file->f_pos = length;
+               }
+       }
+
+finish:
+       kfree(dire);
+       return 0;
+
+failed_read:
+       ERROR("Unable to read directory block [%llx:%x]\n", block, offset);
+       kfree(dire);
+       return 0;
+}
+
+
+const struct file_operations squashfs_dir_ops = {
+       .read = generic_read_dir,
+       .readdir = squashfs_readdir
+};
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
new file mode 100644 (file)
index 0000000..69e971d
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * export.c
+ */
+
+/*
+ * This file implements code to make Squashfs filesystems exportable (NFS etc.)
+ *
+ * The export code uses an inode lookup table to map inode numbers passed in
+ * filehandles to an inode location on disk.  This table is stored compressed
+ * into metadata blocks.  A second index table is used to locate these.  For
+ * speed of access (and because it is small) this second index table is read
+ * at mount time and cached in memory.
+ *
+ * The inode lookup table is used only by the export code; inode disk
+ * locations are directly encoded in directories, enabling direct access
+ * without an intermediate lookup for all operations except the export ops.
+ */
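+
+/*
+ * Index arithmetic sketch (assuming the usual squashfs values of 8 KiB
+ * metadata blocks and 8-byte lookup entries, which are defined in
+ * squashfs_fs.h rather than here): for inode number N,
+ * SQUASHFS_LOOKUP_BLOCK(N - 1) selects the index entry (and hence the
+ * metadata block) holding the location, and SQUASHFS_LOOKUP_BLOCK_OFFSET(N - 1)
+ * gives the byte offset of the 8-byte location within that block; inode 1,
+ * for example, maps to the first metadata block at offset 0.
+ */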
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/dcache.h>
+#include <linux/exportfs.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look-up inode number (ino) in table, returning the inode location.
+ */
+static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
+       int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
+       u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+       __le64 ino;
+       int err;
+
+       TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+
+       err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
+       if (err < 0)
+               return err;
+
+       TRACE("squashfs_inode_lookup, inode = 0x%llx\n",
+               (u64) le64_to_cpu(ino));
+
+       return le64_to_cpu(ino);
+}
+
+
+static struct dentry *squashfs_export_iget(struct super_block *sb,
+       unsigned int ino_num)
+{
+       long long ino;
+       struct dentry *dentry = ERR_PTR(-ENOENT);
+
+       TRACE("Entered squashfs_export_iget\n");
+
+       ino = squashfs_inode_lookup(sb, ino_num);
+       if (ino >= 0)
+               dentry = d_obtain_alias(squashfs_iget(sb, ino, ino_num));
+
+       return dentry;
+}
+
+
+static struct dentry *squashfs_fh_to_dentry(struct super_block *sb,
+               struct fid *fid, int fh_len, int fh_type)
+{
+       if ((fh_type != FILEID_INO32_GEN && fh_type != FILEID_INO32_GEN_PARENT)
+                       || fh_len < 2)
+               return NULL;
+
+       return squashfs_export_iget(sb, fid->i32.ino);
+}
+
+
+static struct dentry *squashfs_fh_to_parent(struct super_block *sb,
+               struct fid *fid, int fh_len, int fh_type)
+{
+       if (fh_type != FILEID_INO32_GEN_PARENT || fh_len < 4)
+               return NULL;
+
+       return squashfs_export_iget(sb, fid->i32.parent_ino);
+}
+
+
+static struct dentry *squashfs_get_parent(struct dentry *child)
+{
+       struct inode *inode = child->d_inode;
+       unsigned int parent_ino = squashfs_i(inode)->parent;
+
+       return squashfs_export_iget(inode->i_sb, parent_ino);
+}
+
+
+/*
+ * Read uncompressed inode lookup table indexes off disk into memory
+ */
+__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+               u64 lookup_table_start, unsigned int inodes)
+{
+       unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+       __le64 *inode_lookup_table;
+       int err;
+
+       TRACE("In read_inode_lookup_table, length %d\n", length);
+
+       /* Allocate inode lookup table indexes */
+       inode_lookup_table = kmalloc(length, GFP_KERNEL);
+       if (inode_lookup_table == NULL) {
+               ERROR("Failed to allocate inode lookup table\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start,
+                       length);
+       if (err < 0) {
+               ERROR("unable to read inode lookup table\n");
+               kfree(inode_lookup_table);
+               return ERR_PTR(err);
+       }
+
+       return inode_lookup_table;
+}
+
+
+const struct export_operations squashfs_export_ops = {
+       .fh_to_dentry = squashfs_fh_to_dentry,
+       .fh_to_parent = squashfs_fh_to_parent,
+       .get_parent = squashfs_get_parent
+};
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
new file mode 100644 (file)
index 0000000..717767d
--- /dev/null
@@ -0,0 +1,502 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * file.c
+ */
+
+/*
+ * This file contains code for handling regular files.  A regular file
+ * consists of a sequence of contiguous compressed blocks, and/or a
+ * compressed fragment block (tail-end packed block).   The compressed size
+ * of each datablock is stored in a block list contained within the
+ * file inode (itself stored in one or more compressed metadata blocks).
+ *
+ * To speed up access to datablocks when reading 'large' files (256 Mbytes or
+ * larger), the code implements an index cache that caches the mapping from
+ * block index to datablock location on disk.
+ *
+ * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
+ * retaining a simple and space-efficient block list on disk.  The cache
+ * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
+ * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
+ * The index cache is designed to be memory efficient, and by default uses
+ * 16 KiB.
+ */
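+
+/*
+ * Where the figures above come from (assuming the default constants of 8
+ * cache slots, 127 + 1 entries per slot, 2048 block indexes per entry and a
+ * maximum skip of 7 -- values taken from the squashfs headers, not defined
+ * in this file):
+ *
+ *     blocks per slot = 128 * 7 * 2048 = 1835008
+ *     bytes per slot  = 1835008 * 128 KiB = 224 GiB
+ *     file size limit = 8 slots * 224 GiB = 1.75 TiB
+ */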
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Locate cache slot in range [offset, index] for specified inode.  If
+ * there's more than one, return the slot closest to index.
+ */
+static struct meta_index *locate_meta_index(struct inode *inode, int offset,
+                               int index)
+{
+       struct meta_index *meta = NULL;
+       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+       int i;
+
+       mutex_lock(&msblk->meta_index_mutex);
+
+       TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
+
+       if (msblk->meta_index == NULL)
+               goto not_allocated;
+
+       for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
+               if (msblk->meta_index[i].inode_number == inode->i_ino &&
+                               msblk->meta_index[i].offset >= offset &&
+                               msblk->meta_index[i].offset <= index &&
+                               msblk->meta_index[i].locked == 0) {
+                       TRACE("locate_meta_index: entry %d, offset %d\n", i,
+                                       msblk->meta_index[i].offset);
+                       meta = &msblk->meta_index[i];
+                       offset = meta->offset;
+               }
+       }
+
+       if (meta)
+               meta->locked = 1;
+
+not_allocated:
+       mutex_unlock(&msblk->meta_index_mutex);
+
+       return meta;
+}
+
+
+/*
+ * Find and initialise an empty cache slot for index offset.
+ */
+static struct meta_index *empty_meta_index(struct inode *inode, int offset,
+                               int skip)
+{
+       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+       struct meta_index *meta = NULL;
+       int i;
+
+       mutex_lock(&msblk->meta_index_mutex);
+
+       TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
+
+       if (msblk->meta_index == NULL) {
+               /*
+                * First time cache index has been used, allocate and
+                * initialise.  The cache index could be allocated at
+                * mount time but doing it here means it is allocated only
+                * if a 'large' file is read.
+                */
+               msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
+                       sizeof(*(msblk->meta_index)), GFP_KERNEL);
+               if (msblk->meta_index == NULL) {
+                       ERROR("Failed to allocate meta_index\n");
+                       goto failed;
+               }
+               for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
+                       msblk->meta_index[i].inode_number = 0;
+                       msblk->meta_index[i].locked = 0;
+               }
+               msblk->next_meta_index = 0;
+       }
+
+       for (i = SQUASHFS_META_SLOTS; i &&
+                       msblk->meta_index[msblk->next_meta_index].locked; i--)
+               msblk->next_meta_index = (msblk->next_meta_index + 1) %
+                       SQUASHFS_META_SLOTS;
+
+       if (i == 0) {
+               TRACE("empty_meta_index: failed!\n");
+               goto failed;
+       }
+
+       TRACE("empty_meta_index: returned meta entry %d, %p\n",
+                       msblk->next_meta_index,
+                       &msblk->meta_index[msblk->next_meta_index]);
+
+       meta = &msblk->meta_index[msblk->next_meta_index];
+       msblk->next_meta_index = (msblk->next_meta_index + 1) %
+                       SQUASHFS_META_SLOTS;
+
+       meta->inode_number = inode->i_ino;
+       meta->offset = offset;
+       meta->skip = skip;
+       meta->entries = 0;
+       meta->locked = 1;
+
+failed:
+       mutex_unlock(&msblk->meta_index_mutex);
+       return meta;
+}
+
+
+static void release_meta_index(struct inode *inode, struct meta_index *meta)
+{
+       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+       mutex_lock(&msblk->meta_index_mutex);
+       meta->locked = 0;
+       mutex_unlock(&msblk->meta_index_mutex);
+}
+
+
+/*
+ * Read the next n blocks from the block list, starting from
+ * metadata block <start_block, offset>.
+ */
+static long long read_indexes(struct super_block *sb, int n,
+                               u64 *start_block, int *offset)
+{
+       int err, i;
+       long long block = 0;
+       __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+
+       if (blist == NULL) {
+               ERROR("read_indexes: Failed to allocate block_list\n");
+               return -ENOMEM;
+       }
+
+       while (n) {
+               int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2);
+
+               err = squashfs_read_metadata(sb, blist, start_block,
+                               offset, blocks << 2);
+               if (err < 0) {
+                       ERROR("read_indexes: reading block [%llx:%x]\n",
+                               *start_block, *offset);
+                       goto failure;
+               }
+
+               for (i = 0; i < blocks; i++) {
+                       int size = le32_to_cpu(blist[i]);
+                       block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+               }
+               n -= blocks;
+       }
+
+       kfree(blist);
+       return block;
+
+failure:
+       kfree(blist);
+       return err;
+}
+
+
+/*
+ * Each cache index slot has SQUASHFS_META_ENTRIES entries, each of which
+ * can cache one index -> datablock/blocklist-block mapping.  We wish
+ * to distribute these over the length of the file: entry[0] maps index x,
+ * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
+ * The larger the file, the greater the skip factor.  The skip factor is
+ * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
+ * the number of metadata blocks that need to be read fits into the cache.
+ * If the skip factor is limited in this way then the file will use multiple
+ * slots.
+ */
+static inline int calculate_skip(int blocks)
+{
+       int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+                * SQUASHFS_META_INDEXES);
+       return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
+}
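+
+/*
+ * Example (using the same assumed constants as the comment at the top of
+ * this file): a 100000 block file gives 100000 / (128 * 2048) = 0, so
+ * calculate_skip() returns min(7, 0 + 1) = 1; only files of 262144 blocks
+ * (32 GiB at the default 128 KiB block size) or more start to see a skip
+ * factor greater than one.
+ */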
+
+
+/*
+ * Search and grow the index cache for the specified inode, returning the
+ * on-disk locations of the datablock and block list metadata block
+ * <index_block, index_offset> for index (scaled to nearest cache index).
+ */
+static int fill_meta_index(struct inode *inode, int index,
+               u64 *index_block, int *index_offset, u64 *data_block)
+{
+       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+       int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
+       int offset = 0;
+       struct meta_index *meta;
+       struct meta_entry *meta_entry;
+       u64 cur_index_block = squashfs_i(inode)->block_list_start;
+       int cur_offset = squashfs_i(inode)->offset;
+       u64 cur_data_block = squashfs_i(inode)->start;
+       int err, i;
+
+       /*
+        * Scale index to cache index (cache slot entry)
+        */
+       index /= SQUASHFS_META_INDEXES * skip;
+
+       while (offset < index) {
+               meta = locate_meta_index(inode, offset + 1, index);
+
+               if (meta == NULL) {
+                       meta = empty_meta_index(inode, offset + 1, skip);
+                       if (meta == NULL)
+                               goto all_done;
+               } else {
+                       offset = index < meta->offset + meta->entries ? index :
+                               meta->offset + meta->entries - 1;
+                       meta_entry = &meta->meta_entry[offset - meta->offset];
+                       cur_index_block = meta_entry->index_block +
+                               msblk->inode_table;
+                       cur_offset = meta_entry->offset;
+                       cur_data_block = meta_entry->data_block;
+                       TRACE("get_meta_index: offset %d, meta->offset %d, "
+                               "meta->entries %d\n", offset, meta->offset,
+                               meta->entries);
+                       TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
+                               " data_block 0x%llx\n", cur_index_block,
+                               cur_offset, cur_data_block);
+               }
+
+               /*
+                * If necessary grow cache slot by reading block list.  Cache
+                * slot is extended up to index or to the end of the slot, in
+                * which case further slots will be used.
+                */
+               for (i = meta->offset + meta->entries; i <= index &&
+                               i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
+                       int blocks = skip * SQUASHFS_META_INDEXES;
+                       long long res = read_indexes(inode->i_sb, blocks,
+                                       &cur_index_block, &cur_offset);
+
+                       if (res < 0) {
+                               if (meta->entries == 0)
+                                       /*
+                                        * Don't leave an empty slot on read
+                                        * error allocated to this inode...
+                                        */
+                                       meta->inode_number = 0;
+                               err = res;
+                               goto failed;
+                       }
+
+                       cur_data_block += res;
+                       meta_entry = &meta->meta_entry[i - meta->offset];
+                       meta_entry->index_block = cur_index_block -
+                               msblk->inode_table;
+                       meta_entry->offset = cur_offset;
+                       meta_entry->data_block = cur_data_block;
+                       meta->entries++;
+                       offset++;
+               }
+
+               TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
+                               meta->offset, meta->entries);
+
+               release_meta_index(inode, meta);
+       }
+
+all_done:
+       *index_block = cur_index_block;
+       *index_offset = cur_offset;
+       *data_block = cur_data_block;
+
+       /*
+        * Scale cache index (cache slot entry) to index
+        */
+       return offset * SQUASHFS_META_INDEXES * skip;
+
+failed:
+       release_meta_index(inode, meta);
+       return err;
+}
+
+
+/*
+ * Get the on-disk location and compressed size of the datablock
+ * specified by index.  Fill_meta_index() does most of the work.
+ */
+static int read_blocklist(struct inode *inode, int index, u64 *block)
+{
+       u64 start;
+       long long blks;
+       int offset;
+       __le32 size;
+       int res = fill_meta_index(inode, index, &start, &offset, block);
+
+       TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
+                      " 0x%x, block 0x%llx\n", res, index, start, offset,
+                       *block);
+
+       if (res < 0)
+               return res;
+
+       /*
+        * res contains the index of the mapping returned by fill_meta_index();
+        * this will likely be less than the desired index (because the
+        * meta_index cache works at a higher granularity).  Read any
+        * extra block indexes needed.
+        */
+       if (res < index) {
+               blks = read_indexes(inode->i_sb, index - res, &start, &offset);
+               if (blks < 0)
+                       return (int) blks;
+               *block += blks;
+       }
+
+       /*
+        * Read length of block specified by index.
+        */
+       res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
+                       sizeof(size));
+       if (res < 0)
+               return res;
+       return le32_to_cpu(size);
+}
+
+
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+       struct inode *inode = page->mapping->host;
+       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+       int bytes, i, offset = 0, sparse = 0;
+       struct squashfs_cache_entry *buffer = NULL;
+       void *pageaddr;
+
+       int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+       int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+       int start_index = page->index & ~mask;
+       int end_index = start_index | mask;
+       int file_end = i_size_read(inode) >> msblk->block_log;
+
+       TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+                               page->index, squashfs_i(inode)->start);
+
+       if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+                                       PAGE_CACHE_SHIFT))
+               goto out;
+
+       if (index < file_end || squashfs_i(inode)->fragment_block ==
+                                       SQUASHFS_INVALID_BLK) {
+               /*
+                * Reading a datablock from disk.  Need to read block list
+                * to get location and block size.
+                */
+               u64 block = 0;
+               int bsize = read_blocklist(inode, index, &block);
+               if (bsize < 0)
+                       goto error_out;
+
+               if (bsize == 0) { /* hole */
+                       bytes = index == file_end ?
+                               (i_size_read(inode) & (msblk->block_size - 1)) :
+                                msblk->block_size;
+                       sparse = 1;
+               } else {
+                       /*
+                        * Read and decompress datablock.
+                        */
+                       buffer = squashfs_get_datablock(inode->i_sb,
+                                                               block, bsize);
+                       if (buffer->error) {
+                               ERROR("Unable to read page, block %llx, size %x"
+                                       "\n", block, bsize);
+                               squashfs_cache_put(buffer);
+                               goto error_out;
+                       }
+                       bytes = buffer->length;
+               }
+       } else {
+               /*
+                * Datablock is stored inside a fragment (tail-end packed
+                * block).
+                */
+               buffer = squashfs_get_fragment(inode->i_sb,
+                               squashfs_i(inode)->fragment_block,
+                               squashfs_i(inode)->fragment_size);
+
+               if (buffer->error) {
+                       ERROR("Unable to read page, block %llx, size %x\n",
+                               squashfs_i(inode)->fragment_block,
+                               squashfs_i(inode)->fragment_size);
+                       squashfs_cache_put(buffer);
+                       goto error_out;
+               }
+               bytes = i_size_read(inode) & (msblk->block_size - 1);
+               offset = squashfs_i(inode)->fragment_offset;
+       }
+
+       /*
+        * Loop copying datablock into pages.  As the datablock likely covers
+        * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
+        * grab the pages from the page cache, except for the page that we've
+        * been called to fill.
+        */
+       for (i = start_index; i <= end_index && bytes > 0; i++,
+                       bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+               struct page *push_page;
+               int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE);
+
+               TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
+
+               push_page = (i == page->index) ? page :
+                       grab_cache_page_nowait(page->mapping, i);
+
+               if (!push_page)
+                       continue;
+
+               if (PageUptodate(push_page))
+                       goto skip_page;
+
+               pageaddr = kmap_atomic(push_page, KM_USER0);
+               squashfs_copy_data(pageaddr, buffer, offset, avail);
+               memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+               kunmap_atomic(pageaddr, KM_USER0);
+               flush_dcache_page(push_page);
+               SetPageUptodate(push_page);
+skip_page:
+               unlock_page(push_page);
+               if (i != page->index)
+                       page_cache_release(push_page);
+       }
+
+       if (!sparse)
+               squashfs_cache_put(buffer);
+
+       return 0;
+
+error_out:
+       SetPageError(page);
+out:
+       pageaddr = kmap_atomic(page, KM_USER0);
+       memset(pageaddr, 0, PAGE_CACHE_SIZE);
+       kunmap_atomic(pageaddr, KM_USER0);
+       flush_dcache_page(page);
+       if (!PageError(page))
+               SetPageUptodate(page);
+       unlock_page(page);
+
+       return 0;
+}
+
+
+const struct address_space_operations squashfs_aops = {
+       .readpage = squashfs_readpage
+};
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
new file mode 100644 (file)
index 0000000..b5a2c15
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * fragment.c
+ */
+
+/*
+ * This file implements code to handle compressed fragments (tail-end packed
+ * datablocks).
+ *
+ * Regular files contain a fragment index which is mapped to a fragment
+ * location on disk and its compressed size using a fragment lookup table.
+ * Like everything in Squashfs, this fragment lookup table is itself stored
+ * compressed into metadata blocks.  A second index table is used to locate
+ * these.  For speed of access (and because it is small) this second index
+ * table is read at mount time and cached in memory.
+ */
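+
+/*
+ * Usage sketch (illustrative; this mirrors the inode and readpage code added
+ * elsewhere in this patch): the fragment index stored in a file's inode is
+ * first resolved to a disk location and compressed size, which are then
+ * handed to the fragment cache:
+ *
+ *     size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ *     if (size >= 0)
+ *             entry = squashfs_get_fragment(sb, frag_blk, size);
+ */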
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look up a fragment using the fragment index table.  Return the on-disk
+ * location of the fragment and its compressed size.
+ */
+int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
+                               u64 *fragment_block)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int block = SQUASHFS_FRAGMENT_INDEX(fragment);
+       int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+       u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+       struct squashfs_fragment_entry fragment_entry;
+       int size;
+
+       size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
+                                       &offset, sizeof(fragment_entry));
+       if (size < 0)
+               return size;
+
+       *fragment_block = le64_to_cpu(fragment_entry.start_block);
+       size = le32_to_cpu(fragment_entry.size);
+
+       return size;
+}
+
+
+/*
+ * Read the uncompressed fragment lookup table indexes off disk into memory
+ */
+__le64 *squashfs_read_fragment_index_table(struct super_block *sb,
+       u64 fragment_table_start, unsigned int fragments)
+{
+       unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
+       __le64 *fragment_index;
+       int err;
+
+       /* Allocate fragment lookup table indexes */
+       fragment_index = kmalloc(length, GFP_KERNEL);
+       if (fragment_index == NULL) {
+               ERROR("Failed to allocate fragment index table\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       err = squashfs_read_table(sb, fragment_index, fragment_table_start,
+                       length);
+       if (err < 0) {
+               ERROR("unable to read fragment index table\n");
+               kfree(fragment_index);
+               return ERR_PTR(err);
+       }
+
+       return fragment_index;
+}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
new file mode 100644 (file)
index 0000000..3795b83
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * id.c
+ */
+
+/*
+ * This file implements code to handle uids and gids.
+ *
+ * For space efficiency regular files store uid and gid indexes, which are
+ * converted to 32-bit uids/gids using an id look up table.  This table is
+ * stored compressed into metadata blocks.  A second index table is used to
+ * locate these.  For speed of access (and because it is small) this second
+ * index table is read at mount time and cached in memory.
+ */
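+
+/*
+ * Usage sketch (this is how squashfs_new_inode() in inode.c, added by this
+ * patch, resolves file ownership): the 16-bit index stored in the on-disk
+ * inode is mapped to a full 32-bit uid or gid:
+ *
+ *     err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid);
+ */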
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Map uid/gid index into real 32-bit uid/gid using the id look up table
+ */
+int squashfs_get_id(struct super_block *sb, unsigned int index,
+                                       unsigned int *id)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int block = SQUASHFS_ID_BLOCK(index);
+       int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
+       u64 start_block = le64_to_cpu(msblk->id_table[block]);
+       __le32 disk_id;
+       int err;
+
+       err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
+                                                       sizeof(disk_id));
+       if (err < 0)
+               return err;
+
+       *id = le32_to_cpu(disk_id);
+       return 0;
+}
+
+
+/*
+ * Read uncompressed id lookup table indexes from disk into memory
+ */
+__le64 *squashfs_read_id_index_table(struct super_block *sb,
+                       u64 id_table_start, unsigned short no_ids)
+{
+       unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+       __le64 *id_table;
+       int err;
+
+       TRACE("In read_id_index_table, length %d\n", length);
+
+       /* Allocate id lookup table indexes */
+       id_table = kmalloc(length, GFP_KERNEL);
+       if (id_table == NULL) {
+               ERROR("Failed to allocate id index table\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       err = squashfs_read_table(sb, id_table, id_table_start, length);
+       if (err < 0) {
+               ERROR("unable to read id index table\n");
+               kfree(id_table);
+               return ERR_PTR(err);
+       }
+
+       return id_table;
+}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
new file mode 100644 (file)
index 0000000..7a63398
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * inode.c
+ */
+
+/*
+ * This file implements code to create and read inodes from disk.
+ *
+ * Inodes in Squashfs are identified by a 48-bit inode which encodes the
+ * location of the compressed metadata block containing the inode, and the byte
+ * offset into that block where the inode is placed (<block, offset>).
+ *
+ * To maximise compression there are different inodes for each file type
+ * (regular file, directory, device, etc.), the inode contents and length
+ * varying with the type.
+ *
+ * To further maximise compression, two types of regular file inode and
+ * directory inode are defined: inodes optimised for frequently occurring
+ * regular files and directories, and extended types where extra
+ * information has to be stored.
+ */
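+
+/*
+ * Decoding sketch (assuming the SQUASHFS_INODE_BLK()/SQUASHFS_INODE_OFFSET()
+ * macros from squashfs_fs.h split the value as block = ino >> 16 and
+ * offset = ino & 0xffff):
+ *
+ *     block  = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+ *     offset = SQUASHFS_INODE_OFFSET(ino);
+ */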
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Initialise VFS inode with the base inode information common to all
+ * Squashfs inode types.  Sqsh_ino contains the unswapped base inode
+ * off disk.
+ */
+static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
+                               struct squashfs_base_inode *sqsh_ino)
+{
+       int err;
+
+       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid);
+       if (err)
+               return err;
+
+       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &inode->i_gid);
+       if (err)
+               return err;
+
+       inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+       inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
+       inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
+       inode->i_ctime.tv_sec = inode->i_mtime.tv_sec;
+       inode->i_mode = le16_to_cpu(sqsh_ino->mode);
+       inode->i_size = 0;
+
+       return err;
+}
+
+
+struct inode *squashfs_iget(struct super_block *sb, long long ino,
+                               unsigned int ino_number)
+{
+       struct inode *inode = iget_locked(sb, ino_number);
+       int err;
+
+       TRACE("Entered squashfs_iget\n");
+
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
+       if (!(inode->i_state & I_NEW))
+               return inode;
+
+       err = squashfs_read_inode(inode, ino);
+       if (err) {
+               iget_failed(inode);
+               return ERR_PTR(err);
+       }
+
+       unlock_new_inode(inode);
+       return inode;
+}
+
+
+/*
+ * Initialise VFS inode by reading inode from inode table (compressed
+ * metadata).  The format and amount of data read depends on type.
+ */
+int squashfs_read_inode(struct inode *inode, long long ino)
+{
+       struct super_block *sb = inode->i_sb;
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+       int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
+       union squashfs_inode squashfs_ino;
+       struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
+
+       TRACE("Entered squashfs_read_inode\n");
+
+       /*
+        * Read inode base common to all inode types.
+        */
+       err = squashfs_read_metadata(sb, sqshb_ino, &block,
+                               &offset, sizeof(*sqshb_ino));
+       if (err < 0)
+               goto failed_read;
+
+       err = squashfs_new_inode(sb, inode, sqshb_ino);
+       if (err)
+               goto failed_read;
+
+       block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+       offset = SQUASHFS_INODE_OFFSET(ino);
+
+       type = le16_to_cpu(sqshb_ino->inode_type);
+       switch (type) {
+       case SQUASHFS_REG_TYPE: {
+               unsigned int frag_offset, frag_size, frag;
+               u64 frag_blk;
+               struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                                                       sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
+                       frag_offset = le32_to_cpu(sqsh_ino->offset);
+                       frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+                       if (frag_size < 0) {
+                               err = frag_size;
+                               goto failed_read;
+                       }
+               } else {
+                       frag_blk = SQUASHFS_INVALID_BLK;
+                       frag_size = 0;
+                       frag_offset = 0;
+               }
+
+               inode->i_nlink = 1;
+               inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+               inode->i_fop = &generic_ro_fops;
+               inode->i_mode |= S_IFREG;
+               inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+               squashfs_i(inode)->fragment_block = frag_blk;
+               squashfs_i(inode)->fragment_size = frag_size;
+               squashfs_i(inode)->fragment_offset = frag_offset;
+               squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+               squashfs_i(inode)->block_list_start = block;
+               squashfs_i(inode)->offset = offset;
+               inode->i_data.a_ops = &squashfs_aops;
+
+               TRACE("File inode %x:%x, start_block %llx, block_list_start "
+                       "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
+                       offset, squashfs_i(inode)->start, block, offset);
+               break;
+       }
+       case SQUASHFS_LREG_TYPE: {
+               unsigned int frag_offset, frag_size, frag;
+               u64 frag_blk;
+               struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                                                       sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
+                       frag_offset = le32_to_cpu(sqsh_ino->offset);
+                       frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+                       if (frag_size < 0) {
+                               err = frag_size;
+                               goto failed_read;
+                       }
+               } else {
+                       frag_blk = SQUASHFS_INVALID_BLK;
+                       frag_size = 0;
+                       frag_offset = 0;
+               }
+
+               inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+               inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+               inode->i_fop = &generic_ro_fops;
+               inode->i_mode |= S_IFREG;
+               inode->i_blocks = ((inode->i_size -
+                               le64_to_cpu(sqsh_ino->sparse) - 1) >> 9) + 1;
+
+               squashfs_i(inode)->fragment_block = frag_blk;
+               squashfs_i(inode)->fragment_size = frag_size;
+               squashfs_i(inode)->fragment_offset = frag_offset;
+               squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
+               squashfs_i(inode)->block_list_start = block;
+               squashfs_i(inode)->offset = offset;
+               inode->i_data.a_ops = &squashfs_aops;
+
+               TRACE("File inode %x:%x, start_block %llx, block_list_start "
+                       "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
+                       offset, squashfs_i(inode)->start, block, offset);
+               break;
+       }
+       case SQUASHFS_DIR_TYPE: {
+               struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                               sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+               inode->i_size = le16_to_cpu(sqsh_ino->file_size);
+               inode->i_op = &squashfs_dir_inode_ops;
+               inode->i_fop = &squashfs_dir_ops;
+               inode->i_mode |= S_IFDIR;
+               squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+               squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
+               squashfs_i(inode)->dir_idx_cnt = 0;
+               squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
+
+               TRACE("Directory inode %x:%x, start_block %llx, offset %x\n",
+                               SQUASHFS_INODE_BLK(ino), offset,
+                               squashfs_i(inode)->start,
+                               le16_to_cpu(sqsh_ino->offset));
+               break;
+       }
+       case SQUASHFS_LDIR_TYPE: {
+               struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                               sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+               inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+               inode->i_op = &squashfs_dir_inode_ops;
+               inode->i_fop = &squashfs_dir_ops;
+               inode->i_mode |= S_IFDIR;
+               squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+               squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
+               squashfs_i(inode)->dir_idx_start = block;
+               squashfs_i(inode)->dir_idx_offset = offset;
+               squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count);
+               squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
+
+               TRACE("Long directory inode %x:%x, start_block %llx, offset "
+                               "%x\n", SQUASHFS_INODE_BLK(ino), offset,
+                               squashfs_i(inode)->start,
+                               le16_to_cpu(sqsh_ino->offset));
+               break;
+       }
+       case SQUASHFS_SYMLINK_TYPE:
+       case SQUASHFS_LSYMLINK_TYPE: {
+               struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                               sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+               inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+               inode->i_op = &page_symlink_inode_operations;
+               inode->i_data.a_ops = &squashfs_symlink_aops;
+               inode->i_mode |= S_IFLNK;
+               squashfs_i(inode)->start = block;
+               squashfs_i(inode)->offset = offset;
+
+               TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
+                               "%x\n", SQUASHFS_INODE_BLK(ino), offset,
+                               block, offset);
+               break;
+       }
+       case SQUASHFS_BLKDEV_TYPE:
+       case SQUASHFS_CHRDEV_TYPE:
+       case SQUASHFS_LBLKDEV_TYPE:
+       case SQUASHFS_LCHRDEV_TYPE: {
+               struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
+               unsigned int rdev;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                               sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               if (type == SQUASHFS_CHRDEV_TYPE)
+                       inode->i_mode |= S_IFCHR;
+               else
+                       inode->i_mode |= S_IFBLK;
+               inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+               rdev = le32_to_cpu(sqsh_ino->rdev);
+               init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+
+               TRACE("Device inode %x:%x, rdev %x\n",
+                               SQUASHFS_INODE_BLK(ino), offset, rdev);
+               break;
+       }
+       case SQUASHFS_FIFO_TYPE:
+       case SQUASHFS_SOCKET_TYPE:
+       case SQUASHFS_LFIFO_TYPE:
+       case SQUASHFS_LSOCKET_TYPE: {
+               struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
+
+               err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+                               sizeof(*sqsh_ino));
+               if (err < 0)
+                       goto failed_read;
+
+               if (type == SQUASHFS_FIFO_TYPE)
+                       inode->i_mode |= S_IFIFO;
+               else
+                       inode->i_mode |= S_IFSOCK;
+               inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+               init_special_inode(inode, inode->i_mode, 0);
+               break;
+       }
+       default:
+               ERROR("Unknown inode type %d in squashfs_iget!\n", type);
+               return -EINVAL;
+       }
+
+       return 0;
+
+failed_read:
+       ERROR("Unable to read inode 0x%llx\n", ino);
+       return err;
+}
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
new file mode 100644 (file)
index 0000000..9e39865
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * namei.c
+ */
+
+/*
+ * This file implements code to do filename lookup in directories.
+ *
+ * Like inodes, directories are packed into compressed metadata blocks, stored
+ * in a directory table.  Directories are accessed using the start address of
+ * the metablock containing the directory and the offset into the
+ * decompressed block (<block, offset>).
+ *
+ * Directories are organised in a slightly complex way, and are not simply
+ * a list of file names.  The organisation takes advantage of the
+ * fact that (in most cases) the inodes of the files will be in the same
+ * compressed metadata block, and therefore, can share the start block.
+ * Directories are therefore organised as a two-level list: a directory
+ * header containing the shared start block value, followed by a sequence of
+ * directory entries, each of which shares that start block.  A new directory
+ * header is written whenever the inode start block changes.  The directory
+ * header/directory entry list is repeated as many times as necessary.
+ *
+ * Directories are sorted, and can contain a directory index to speed up
+ * file lookup.  Directory indexes store one entry per metablock, each entry
+ * storing the index/filename mapping to the first directory header
+ * in each metadata block.  Directories are sorted in alphabetical order,
+ * and at lookup the index is scanned linearly looking for the first filename
+ * alphabetically larger than the filename being looked up.  At this point the
+ * location of the metadata block the filename is in has been found.
+ * The general idea of the index is to ensure only one metadata block needs
+ * to be decompressed to do a lookup irrespective of the length of the
+ * directory.
+ * This scheme has the advantage that it doesn't require extra memory overhead
+ * and doesn't require much extra storage on disk.
+ */
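+
+/*
+ * As an illustrative example (names and values chosen arbitrarily), a
+ * directory whose three entries have their inodes in the same metadata
+ * block is stored as a single header followed by three entries:
+ *
+ *   squashfs_dir_header { count = 2, start_block = B, inode_number = N }
+ *     squashfs_dir_entry { offset, inode_number = +1, type, size = 2, "aaa" }
+ *     squashfs_dir_entry { offset, inode_number = +2, type, size = 2, "bbb" }
+ *     squashfs_dir_entry { offset, inode_number = +3, type, size = 2, "ccc" }
+ *
+ * count is stored as the number of entries minus one, size as the name
+ * length minus one, and each entry's inode_number is a signed delta added
+ * to the header's inode_number (see squashfs_lookup() below).
+ */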
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dcache.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Lookup name in the directory index, returning the location of the metadata
+ * block containing it, and the directory index this represents.
+ *
+ * If we get an error reading the index then return the part of the index
+ * (if any) we have managed to read - the index isn't essential, just
+ * quicker.
+ */
+static int get_dir_index_using_name(struct super_block *sb,
+                       u64 *next_block, int *next_offset, u64 index_start,
+                       int index_offset, int i_count, const char *name,
+                       int len)
+{
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int i, size, length = 0, err;
+       struct squashfs_dir_index *index;
+       char *str;
+
+       TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
+
+       index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
+       if (index == NULL) {
+               ERROR("Failed to allocate squashfs_dir_index\n");
+               goto out;
+       }
+
+       str = &index->name[SQUASHFS_NAME_LEN + 1];
+       strncpy(str, name, len);
+       str[len] = '\0';
+
+       for (i = 0; i < i_count; i++) {
+               err = squashfs_read_metadata(sb, index, &index_start,
+                                       &index_offset, sizeof(*index));
+               if (err < 0)
+                       break;
+
+
+               size = le32_to_cpu(index->size) + 1;
+
+               err = squashfs_read_metadata(sb, index->name, &index_start,
+                                       &index_offset, size);
+               if (err < 0)
+                       break;
+
+               index->name[size] = '\0';
+
+               if (strcmp(index->name, str) > 0)
+                       break;
+
+               length = le32_to_cpu(index->index);
+               *next_block = le32_to_cpu(index->start_block) +
+                                       msblk->directory_table;
+       }
+
+       *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+       kfree(index);
+
+out:
+       /*
+        * Return index (f_pos) of the looked up metadata block.  Translate
+        * from internal f_pos to external f_pos which is offset by 3 because
+        * we invent "." and ".." entries which are not actually stored in the
+        * directory.
+        */
+       return length + 3;
+}
+
+
+static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
+                                struct nameidata *nd)
+{
+       const unsigned char *name = dentry->d_name.name;
+       int len = dentry->d_name.len;
+       struct inode *inode = NULL;
+       struct squashfs_sb_info *msblk = dir->i_sb->s_fs_info;
+       struct squashfs_dir_header dirh;
+       struct squashfs_dir_entry *dire;
+       u64 block = squashfs_i(dir)->start + msblk->directory_table;
+       int offset = squashfs_i(dir)->offset;
+       int err, length = 0, dir_count, size;
+
+       TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
+
+       dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
+       if (dire == NULL) {
+               ERROR("Failed to allocate squashfs_dir_entry\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       if (len > SQUASHFS_NAME_LEN) {
+               err = -ENAMETOOLONG;
+               goto failed;
+       }
+
+       length = get_dir_index_using_name(dir->i_sb, &block, &offset,
+                               squashfs_i(dir)->dir_idx_start,
+                               squashfs_i(dir)->dir_idx_offset,
+                               squashfs_i(dir)->dir_idx_cnt, name, len);
+
+       while (length < i_size_read(dir)) {
+               /*
+                * Read directory header.
+                */
+               err = squashfs_read_metadata(dir->i_sb, &dirh, &block,
+                               &offset, sizeof(dirh));
+               if (err < 0)
+                       goto read_failure;
+
+               length += sizeof(dirh);
+
+               dir_count = le32_to_cpu(dirh.count) + 1;
+               while (dir_count--) {
+                       /*
+                        * Read directory entry.
+                        */
+                       err = squashfs_read_metadata(dir->i_sb, dire, &block,
+                                       &offset, sizeof(*dire));
+                       if (err < 0)
+                               goto read_failure;
+
+                       size = le16_to_cpu(dire->size) + 1;
+
+                       err = squashfs_read_metadata(dir->i_sb, dire->name,
+                                       &block, &offset, size);
+                       if (err < 0)
+                               goto read_failure;
+
+                       length += sizeof(*dire) + size;
+
+                       if (name[0] < dire->name[0])
+                               goto exit_lookup;
+
+                       if (len == size && !strncmp(name, dire->name, len)) {
+                               unsigned int blk, off, ino_num;
+                               long long ino;
+                               blk = le32_to_cpu(dirh.start_block);
+                               off = le16_to_cpu(dire->offset);
+                               ino_num = le32_to_cpu(dirh.inode_number) +
+                                       (short) le16_to_cpu(dire->inode_number);
+                               ino = SQUASHFS_MKINODE(blk, off);
+
+                               TRACE("calling squashfs_iget for directory "
+                                       "entry %s, inode  %x:%x, %d\n", name,
+                                       blk, off, ino_num);
+
+                               inode = squashfs_iget(dir->i_sb, ino, ino_num);
+                               if (IS_ERR(inode)) {
+                                       err = PTR_ERR(inode);
+                                       goto failed;
+                               }
+
+                               goto exit_lookup;
+                       }
+               }
+       }
+
+exit_lookup:
+       kfree(dire);
+       if (inode)
+               return d_splice_alias(inode, dentry);
+       d_add(dentry, inode);
+       return ERR_PTR(0);
+
+read_failure:
+       ERROR("Unable to read directory block [%llx:%x]\n",
+               squashfs_i(dir)->start + msblk->directory_table,
+               squashfs_i(dir)->offset);
+failed:
+       kfree(dire);
+       return ERR_PTR(err);
+}
+
+
+const struct inode_operations squashfs_dir_inode_ops = {
+       .lookup = squashfs_lookup
+};
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
new file mode 100644 (file)
index 0000000..6b2515d
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs.h
+ */
+
+#define TRACE(s, args...)      pr_debug("SQUASHFS: "s, ## args)
+
+#define ERROR(s, args...)      pr_err("SQUASHFS error: "s, ## args)
+
+#define WARNING(s, args...)    pr_warning("SQUASHFS: "s, ## args)
+
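+/*
+ * list_entry() here is just container_of(): it maps the VFS inode embedded
+ * as vfs_inode inside struct squashfs_inode_info (see squashfs_fs_i.h) back
+ * to the containing Squashfs inode, which is the object allocated by
+ * squashfs_alloc_inode() in super.c.
+ */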
+static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
+{
+       return list_entry(inode, struct squashfs_inode_info, vfs_inode);
+}
+
+/* block.c */
+extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
+                               int);
+
+/* cache.c */
+extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
+extern void squashfs_cache_delete(struct squashfs_cache *);
+extern struct squashfs_cache_entry *squashfs_cache_get(struct super_block *,
+                               struct squashfs_cache *, u64, int);
+extern void squashfs_cache_put(struct squashfs_cache_entry *);
+extern int squashfs_copy_data(void *, struct squashfs_cache_entry *, int, int);
+extern int squashfs_read_metadata(struct super_block *, void *, u64 *,
+                               int *, int);
+extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
+                               u64, int);
+extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
+                               u64, int);
+extern int squashfs_read_table(struct super_block *, void *, u64, int);
+
+/* export.c */
+extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
+                               unsigned int);
+
+/* fragment.c */
+extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
+extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
+                               u64, unsigned int);
+
+/* id.c */
+extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
+extern __le64 *squashfs_read_id_index_table(struct super_block *, u64,
+                               unsigned short);
+
+/* inode.c */
+extern struct inode *squashfs_iget(struct super_block *, long long,
+                               unsigned int);
+extern int squashfs_read_inode(struct inode *, long long);
+
+/*
+ * Inodes and files operations
+ */
+
+/* dir.c */
+extern const struct file_operations squashfs_dir_ops;
+
+/* export.c */
+extern const struct export_operations squashfs_export_ops;
+
+/* file.c */
+extern const struct address_space_operations squashfs_aops;
+
+/* namei.c */
+extern const struct inode_operations squashfs_dir_inode_ops;
+
+/* symlink.c */
+extern const struct address_space_operations squashfs_symlink_aops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
new file mode 100644 (file)
index 0000000..6840da1
--- /dev/null
@@ -0,0 +1,381 @@
+#ifndef SQUASHFS_FS
+#define SQUASHFS_FS
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs_fs.h
+ */
+
+#define SQUASHFS_CACHED_FRAGMENTS      CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
+#define SQUASHFS_MAJOR                 4
+#define SQUASHFS_MINOR                 0
+#define SQUASHFS_MAGIC                 0x73717368
+#define SQUASHFS_START                 0
+
+/* size of metadata (inode and directory) blocks */
+#define SQUASHFS_METADATA_SIZE         8192
+#define SQUASHFS_METADATA_LOG          13
+
+/* default size of data blocks */
+#define SQUASHFS_FILE_SIZE             131072
+#define SQUASHFS_FILE_LOG              17
+
+#define SQUASHFS_FILE_MAX_SIZE         1048576
+#define SQUASHFS_FILE_MAX_LOG          20
+
+/* Max number of uids and gids */
+#define SQUASHFS_IDS                   65536
+
+/* Max length of filename (not 255) */
+#define SQUASHFS_NAME_LEN              256
+
+#define SQUASHFS_INVALID_FRAG          (0xffffffffU)
+#define SQUASHFS_INVALID_BLK           (-1LL)
+
+/* Filesystem flags */
+#define SQUASHFS_NOI                   0
+#define SQUASHFS_NOD                   1
+#define SQUASHFS_NOF                   3
+#define SQUASHFS_NO_FRAG               4
+#define SQUASHFS_ALWAYS_FRAG           5
+#define SQUASHFS_DUPLICATE             6
+#define SQUASHFS_EXPORT                        7
+
+#define SQUASHFS_BIT(flag, bit)                ((flag >> bit) & 1)
+
+#define SQUASHFS_UNCOMPRESSED_INODES(flags)    SQUASHFS_BIT(flags, \
+                                               SQUASHFS_NOI)
+
+#define SQUASHFS_UNCOMPRESSED_DATA(flags)      SQUASHFS_BIT(flags, \
+                                               SQUASHFS_NOD)
+
+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+                                               SQUASHFS_NOF)
+
+#define SQUASHFS_NO_FRAGMENTS(flags)           SQUASHFS_BIT(flags, \
+                                               SQUASHFS_NO_FRAG)
+
+#define SQUASHFS_ALWAYS_FRAGMENTS(flags)       SQUASHFS_BIT(flags, \
+                                               SQUASHFS_ALWAYS_FRAG)
+
+#define SQUASHFS_DUPLICATES(flags)             SQUASHFS_BIT(flags, \
+                                               SQUASHFS_DUPLICATE)
+
+#define SQUASHFS_EXPORTABLE(flags)             SQUASHFS_BIT(flags, \
+                                               SQUASHFS_EXPORT)
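+
+/*
+ * Illustrative example: a superblock flags value of 0xc0 has the
+ * SQUASHFS_DUPLICATE (6) and SQUASHFS_EXPORT (7) bits set, so
+ * SQUASHFS_DUPLICATES(0xc0) and SQUASHFS_EXPORTABLE(0xc0) evaluate to 1
+ * while SQUASHFS_UNCOMPRESSED_INODES(0xc0) evaluates to 0.
+ */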
+
+/* Inode type values, one for each kind of file (plus extended variants) */
+#define SQUASHFS_DIR_TYPE              1
+#define SQUASHFS_REG_TYPE              2
+#define SQUASHFS_SYMLINK_TYPE          3
+#define SQUASHFS_BLKDEV_TYPE           4
+#define SQUASHFS_CHRDEV_TYPE           5
+#define SQUASHFS_FIFO_TYPE             6
+#define SQUASHFS_SOCKET_TYPE           7
+#define SQUASHFS_LDIR_TYPE             8
+#define SQUASHFS_LREG_TYPE             9
+#define SQUASHFS_LSYMLINK_TYPE         10
+#define SQUASHFS_LBLKDEV_TYPE          11
+#define SQUASHFS_LCHRDEV_TYPE          12
+#define SQUASHFS_LFIFO_TYPE            13
+#define SQUASHFS_LSOCKET_TYPE          14
+
+/* Flag whether block is compressed or uncompressed; the bit is set if the
+ * block is uncompressed */
+#define SQUASHFS_COMPRESSED_BIT                (1 << 15)
+
+#define SQUASHFS_COMPRESSED_SIZE(B)    (((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
+               (B) & ~SQUASHFS_COMPRESSED_BIT :  SQUASHFS_COMPRESSED_BIT)
+
+#define SQUASHFS_COMPRESSED(B)         (!((B) & SQUASHFS_COMPRESSED_BIT))
+
+#define SQUASHFS_COMPRESSED_BIT_BLOCK  (1 << 24)
+
+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B)      ((B) & \
+                                               ~SQUASHFS_COMPRESSED_BIT_BLOCK)
+
+#define SQUASHFS_COMPRESSED_BLOCK(B)   (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
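+
+/*
+ * Worked example: a metadata length field of 0x8064 has the uncompressed
+ * bit set, so SQUASHFS_COMPRESSED(0x8064) is 0 and
+ * SQUASHFS_COMPRESSED_SIZE(0x8064) is 0x64 (100 bytes stored as-is); a
+ * field of 0x0064 describes a compressed block occupying 100 bytes on
+ * disk.  Data blocks use the same scheme with bit 24 instead of bit 15.
+ */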
+
+/*
+ * Inode number ops.  Inodes consist of a compressed block number, and an
+ * uncompressed offset within that block
+ */
+#define SQUASHFS_INODE_BLK(A)          ((unsigned int) ((A) >> 16))
+
+#define SQUASHFS_INODE_OFFSET(A)       ((unsigned int) ((A) & 0xffff))
+
+#define SQUASHFS_MKINODE(A, B)         ((long long)(((long long) (A)\
+                                       << 16) + (B)))
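+
+/*
+ * Worked example: SQUASHFS_MKINODE(0x1234, 0xab) packs block 0x1234 and
+ * offset 0xab into 0x123400ab; SQUASHFS_INODE_BLK() and
+ * SQUASHFS_INODE_OFFSET() recover 0x1234 and 0xab respectively.
+ */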
+
+/* Translate between VFS mode and squashfs mode */
+#define SQUASHFS_MODE(A)               ((A) & 0xfff)
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES(A)     \
+                               ((A) * sizeof(struct squashfs_fragment_entry))
+
+#define SQUASHFS_FRAGMENT_INDEX(A)     (SQUASHFS_FRAGMENT_BYTES(A) / \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A)      (SQUASHFS_FRAGMENT_BYTES(A) % \
+                                               SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES(A)   ((SQUASHFS_FRAGMENT_BYTES(A) + \
+                                       SQUASHFS_METADATA_SIZE - 1) / \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A)       (SQUASHFS_FRAGMENT_INDEXES(A) *\
+                                               sizeof(u64))
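+
+/*
+ * Worked example: with 1000 fragments SQUASHFS_FRAGMENT_BYTES(1000) is
+ * 16000 (each squashfs_fragment_entry is 16 bytes), which spans
+ * SQUASHFS_FRAGMENT_INDEXES(1000) == 2 metadata blocks of 8192 bytes, so
+ * the fragment index read at mount time is 2 * sizeof(u64) == 16 bytes.
+ */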
+
+/* inode lookup table defines */
+#define SQUASHFS_LOOKUP_BYTES(A)       ((A) * sizeof(u64))
+
+#define SQUASHFS_LOOKUP_BLOCK(A)       (SQUASHFS_LOOKUP_BYTES(A) / \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_OFFSET(A)        (SQUASHFS_LOOKUP_BYTES(A) % \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCKS(A)      ((SQUASHFS_LOOKUP_BYTES(A) + \
+                                       SQUASHFS_METADATA_SIZE - 1) / \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_BYTES(A) (SQUASHFS_LOOKUP_BLOCKS(A) *\
+                                       sizeof(u64))
+
+/* uid/gid lookup table defines */
+#define SQUASHFS_ID_BYTES(A)           ((A) * sizeof(unsigned int))
+
+#define SQUASHFS_ID_BLOCK(A)           (SQUASHFS_ID_BYTES(A) / \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_OFFSET(A)    (SQUASHFS_ID_BYTES(A) % \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCKS(A)          ((SQUASHFS_ID_BYTES(A) + \
+                                       SQUASHFS_METADATA_SIZE - 1) / \
+                                       SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_BYTES(A)     (SQUASHFS_ID_BLOCKS(A) *\
+                                       sizeof(u64))
+
+/* cached data constants for filesystem */
+#define SQUASHFS_CACHED_BLKS           8
+
+#define SQUASHFS_MAX_FILE_SIZE_LOG     64
+
+#define SQUASHFS_MAX_FILE_SIZE         (1LL << \
+                                       (SQUASHFS_MAX_FILE_SIZE_LOG - 2))
+
+#define SQUASHFS_MARKER_BYTE           0xff
+
+/* meta index cache */
+#define SQUASHFS_META_INDEXES  (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
+#define SQUASHFS_META_ENTRIES  127
+#define SQUASHFS_META_SLOTS    8
+
+struct meta_entry {
+       u64                     data_block;
+       unsigned int            index_block;
+       unsigned short          offset;
+       unsigned short          pad;
+};
+
+struct meta_index {
+       unsigned int            inode_number;
+       unsigned int            offset;
+       unsigned short          entries;
+       unsigned short          skip;
+       unsigned short          locked;
+       unsigned short          pad;
+       struct meta_entry       meta_entry[SQUASHFS_META_ENTRIES];
+};
+
+
+/*
+ * definitions for structures on disk
+ */
+#define ZLIB_COMPRESSION        1
+
+struct squashfs_super_block {
+       __le32                  s_magic;
+       __le32                  inodes;
+       __le32                  mkfs_time;
+       __le32                  block_size;
+       __le32                  fragments;
+       __le16                  compression;
+       __le16                  block_log;
+       __le16                  flags;
+       __le16                  no_ids;
+       __le16                  s_major;
+       __le16                  s_minor;
+       __le64                  root_inode;
+       __le64                  bytes_used;
+       __le64                  id_table_start;
+       __le64                  xattr_table_start;
+       __le64                  inode_table_start;
+       __le64                  directory_table_start;
+       __le64                  fragment_table_start;
+       __le64                  lookup_table_start;
+};
+
+struct squashfs_dir_index {
+       __le32                  index;
+       __le32                  start_block;
+       __le32                  size;
+       unsigned char           name[0];
+};
+
+struct squashfs_base_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+};
+
+struct squashfs_ipc_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le32                  nlink;
+};
+
+struct squashfs_dev_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le32                  nlink;
+       __le32                  rdev;
+};
+
+struct squashfs_symlink_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le32                  nlink;
+       __le32                  symlink_size;
+       char                    symlink[0];
+};
+
+struct squashfs_reg_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le32                  start_block;
+       __le32                  fragment;
+       __le32                  offset;
+       __le32                  file_size;
+       __le16                  block_list[0];
+};
+
+struct squashfs_lreg_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le64                  start_block;
+       __le64                  file_size;
+       __le64                  sparse;
+       __le32                  nlink;
+       __le32                  fragment;
+       __le32                  offset;
+       __le32                  xattr;
+       __le16                  block_list[0];
+};
+
+struct squashfs_dir_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le32                  start_block;
+       __le32                  nlink;
+       __le16                  file_size;
+       __le16                  offset;
+       __le32                  parent_inode;
+};
+
+struct squashfs_ldir_inode {
+       __le16                  inode_type;
+       __le16                  mode;
+       __le16                  uid;
+       __le16                  guid;
+       __le32                  mtime;
+       __le32                  inode_number;
+       __le32                  nlink;
+       __le32                  file_size;
+       __le32                  start_block;
+       __le32                  parent_inode;
+       __le16                  i_count;
+       __le16                  offset;
+       __le32                  xattr;
+       struct squashfs_dir_index       index[0];
+};
+
+union squashfs_inode {
+       struct squashfs_base_inode              base;
+       struct squashfs_dev_inode               dev;
+       struct squashfs_symlink_inode           symlink;
+       struct squashfs_reg_inode               reg;
+       struct squashfs_lreg_inode              lreg;
+       struct squashfs_dir_inode               dir;
+       struct squashfs_ldir_inode              ldir;
+       struct squashfs_ipc_inode               ipc;
+};
+
+struct squashfs_dir_entry {
+       __le16                  offset;
+       __le16                  inode_number;
+       __le16                  type;
+       __le16                  size;
+       char                    name[0];
+};
+
+struct squashfs_dir_header {
+       __le32                  count;
+       __le32                  start_block;
+       __le32                  inode_number;
+};
+
+struct squashfs_fragment_entry {
+       __le64                  start_block;
+       __le32                  size;
+       unsigned int            unused;
+};
+
+#endif
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
new file mode 100644 (file)
index 0000000..fbfca30
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef SQUASHFS_FS_I
+#define SQUASHFS_FS_I
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs_fs_i.h
+ */
+
+struct squashfs_inode_info {
+       u64             start;
+       int             offset;
+       union {
+               struct {
+                       u64             fragment_block;
+                       int             fragment_size;
+                       int             fragment_offset;
+                       u64             block_list_start;
+               };
+               struct {
+                       u64             dir_idx_start;
+                       int             dir_idx_offset;
+                       int             dir_idx_cnt;
+                       int             parent;
+               };
+       };
+       struct inode    vfs_inode;
+};
+#endif
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
new file mode 100644 (file)
index 0000000..c8c6561
--- /dev/null
@@ -0,0 +1,76 @@
+#ifndef SQUASHFS_FS_SB
+#define SQUASHFS_FS_SB
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs_fs_sb.h
+ */
+
+#include "squashfs_fs.h"
+
+struct squashfs_cache {
+       char                    *name;
+       int                     entries;
+       int                     next_blk;
+       int                     num_waiters;
+       int                     unused;
+       int                     block_size;
+       int                     pages;
+       spinlock_t              lock;
+       wait_queue_head_t       wait_queue;
+       struct squashfs_cache_entry *entry;
+};
+
+struct squashfs_cache_entry {
+       u64                     block;
+       int                     length;
+       int                     refcount;
+       u64                     next_index;
+       int                     pending;
+       int                     error;
+       int                     num_waiters;
+       wait_queue_head_t       wait_queue;
+       struct squashfs_cache   *cache;
+       void                    **data;
+};
+
+struct squashfs_sb_info {
+       int                     devblksize;
+       int                     devblksize_log2;
+       struct squashfs_cache   *block_cache;
+       struct squashfs_cache   *fragment_cache;
+       struct squashfs_cache   *read_page;
+       int                     next_meta_index;
+       __le64                  *id_table;
+       __le64                  *fragment_index;
+       unsigned int            *fragment_index_2;
+       struct mutex            read_data_mutex;
+       struct mutex            meta_index_mutex;
+       struct meta_index       *meta_index;
+       z_stream                stream;
+       __le64                  *inode_lookup_table;
+       u64                     inode_table;
+       u64                     directory_table;
+       unsigned int            block_size;
+       unsigned short          block_log;
+       long long               bytes_used;
+       unsigned int            inodes;
+};
+#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
new file mode 100644 (file)
index 0000000..a0466d7
--- /dev/null
@@ -0,0 +1,440 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * super.c
+ */
+
+/*
+ * This file implements code to read the superblock, read and initialise
+ * in-memory structures at mount time, and all the VFS glue code to register
+ * the filesystem.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static struct file_system_type squashfs_fs_type;
+static struct super_operations squashfs_super_ops;
+
+static int supported_squashfs_filesystem(short major, short minor, short comp)
+{
+       if (major < SQUASHFS_MAJOR) {
+               ERROR("Major/Minor mismatch, older Squashfs %d.%d "
+                       "filesystems are unsupported\n", major, minor);
+               return -EINVAL;
+       } else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
+               ERROR("Major/Minor mismatch, trying to mount newer "
+                       "%d.%d filesystem\n", major, minor);
+               ERROR("Please update your kernel\n");
+               return -EINVAL;
+       }
+
+       if (comp != ZLIB_COMPRESSION)
+               return -EINVAL;
+
+       return 0;
+}
+
+
+static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+       struct squashfs_sb_info *msblk;
+       struct squashfs_super_block *sblk = NULL;
+       char b[BDEVNAME_SIZE];
+       struct inode *root;
+       long long root_inode;
+       unsigned short flags;
+       unsigned int fragments;
+       u64 lookup_table_start;
+       int err;
+
+       TRACE("Entered squashfs_fill_superblock\n");
+
+       sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
+       if (sb->s_fs_info == NULL) {
+               ERROR("Failed to allocate squashfs_sb_info\n");
+               return -ENOMEM;
+       }
+       msblk = sb->s_fs_info;
+
+       msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(),
+               GFP_KERNEL);
+       if (msblk->stream.workspace == NULL) {
+               ERROR("Failed to allocate zlib workspace\n");
+               goto failure;
+       }
+
+       sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+       if (sblk == NULL) {
+               ERROR("Failed to allocate squashfs_super_block\n");
+               goto failure;
+       }
+
+       msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
+       msblk->devblksize_log2 = ffz(~msblk->devblksize);
+
+       mutex_init(&msblk->read_data_mutex);
+       mutex_init(&msblk->meta_index_mutex);
+
+       /*
+        * msblk->bytes_used is checked in squashfs_read_table to ensure reads
+        * are not beyond filesystem end.  But as we're using
+        * squashfs_read_table here to read the superblock (including the value
+        * of bytes_used) we need to set it to an initial sensible dummy value
+        */
+       msblk->bytes_used = sizeof(*sblk);
+       err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk));
+
+       if (err < 0) {
+               ERROR("unable to read squashfs_super_block\n");
+               goto failed_mount;
+       }
+
+       /* Check it is a SQUASHFS superblock */
+       sb->s_magic = le32_to_cpu(sblk->s_magic);
+       if (sb->s_magic != SQUASHFS_MAGIC) {
+               if (!silent)
+                       ERROR("Can't find a SQUASHFS superblock on %s\n",
+                                               bdevname(sb->s_bdev, b));
+               err = -EINVAL;
+               goto failed_mount;
+       }
+
+       /* Check the MAJOR & MINOR versions and compression type */
+       err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major),
+                       le16_to_cpu(sblk->s_minor),
+                       le16_to_cpu(sblk->compression));
+       if (err < 0)
+               goto failed_mount;
+
+       err = -EINVAL;
+
+       /*
+        * Check if there are xattrs in the filesystem.  These are not
+        * supported in this version, so warn that they will be ignored.
+        */
+       if (le64_to_cpu(sblk->xattr_table_start) != SQUASHFS_INVALID_BLK)
+               ERROR("Xattrs in filesystem, these will be ignored\n");
+
+       /* Check the filesystem does not extend beyond the end of the
+          block device */
+       msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
+       if (msblk->bytes_used < 0 || msblk->bytes_used >
+                       i_size_read(sb->s_bdev->bd_inode))
+               goto failed_mount;
+
+       /* Check block size for sanity */
+       msblk->block_size = le32_to_cpu(sblk->block_size);
+       if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
+               goto failed_mount;
+
+       msblk->block_log = le16_to_cpu(sblk->block_log);
+       if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
+               goto failed_mount;
+
+       /* Check the root inode for sanity */
+       root_inode = le64_to_cpu(sblk->root_inode);
+       if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
+               goto failed_mount;
+
+       msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
+       msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
+       msblk->inodes = le32_to_cpu(sblk->inodes);
+       flags = le16_to_cpu(sblk->flags);
+
+       TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
+       TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
+                               ? "un" : "");
+       TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
+                               ? "un" : "");
+       TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
+       TRACE("Block size %d\n", msblk->block_size);
+       TRACE("Number of inodes %d\n", msblk->inodes);
+       TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+       TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
+       TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
+       TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
+       TRACE("sblk->fragment_table_start %llx\n",
+               (u64) le64_to_cpu(sblk->fragment_table_start));
+       TRACE("sblk->id_table_start %llx\n",
+               (u64) le64_to_cpu(sblk->id_table_start));
+
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+       sb->s_flags |= MS_RDONLY;
+       sb->s_op = &squashfs_super_ops;
+
+       err = -ENOMEM;
+
+       msblk->block_cache = squashfs_cache_init("metadata",
+                       SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
+       if (msblk->block_cache == NULL)
+               goto failed_mount;
+
+       /* Allocate read_page block */
+       msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
+       if (msblk->read_page == NULL) {
+               ERROR("Failed to allocate read_page block\n");
+               goto failed_mount;
+       }
+
+       /* Allocate and read id index table */
+       msblk->id_table = squashfs_read_id_index_table(sb,
+               le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
+       if (IS_ERR(msblk->id_table)) {
+               err = PTR_ERR(msblk->id_table);
+               msblk->id_table = NULL;
+               goto failed_mount;
+       }
+
+       fragments = le32_to_cpu(sblk->fragments);
+       if (fragments == 0)
+               goto allocate_lookup_table;
+
+       msblk->fragment_cache = squashfs_cache_init("fragment",
+               SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
+       if (msblk->fragment_cache == NULL) {
+               err = -ENOMEM;
+               goto failed_mount;
+       }
+
+       /* Allocate and read fragment index table */
+       msblk->fragment_index = squashfs_read_fragment_index_table(sb,
+               le64_to_cpu(sblk->fragment_table_start), fragments);
+       if (IS_ERR(msblk->fragment_index)) {
+               err = PTR_ERR(msblk->fragment_index);
+               msblk->fragment_index = NULL;
+               goto failed_mount;
+       }
+
+allocate_lookup_table:
+       lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
+       if (lookup_table_start == SQUASHFS_INVALID_BLK)
+               goto allocate_root;
+
+       /* Allocate and read inode lookup table */
+       msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
+               lookup_table_start, msblk->inodes);
+       if (IS_ERR(msblk->inode_lookup_table)) {
+               err = PTR_ERR(msblk->inode_lookup_table);
+               msblk->inode_lookup_table = NULL;
+               goto failed_mount;
+       }
+
+       sb->s_export_op = &squashfs_export_ops;
+
+allocate_root:
+       root = new_inode(sb);
+       if (!root) {
+               err = -ENOMEM;
+               goto failed_mount;
+       }
+
+       err = squashfs_read_inode(root, root_inode);
+       if (err) {
+               iget_failed(root);
+               goto failed_mount;
+       }
+       insert_inode_hash(root);
+
+       sb->s_root = d_alloc_root(root);
+       if (sb->s_root == NULL) {
+               ERROR("Root inode create failed\n");
+               err = -ENOMEM;
+               iput(root);
+               goto failed_mount;
+       }
+
+       TRACE("Leaving squashfs_fill_super\n");
+       kfree(sblk);
+       return 0;
+
+failed_mount:
+       squashfs_cache_delete(msblk->block_cache);
+       squashfs_cache_delete(msblk->fragment_cache);
+       squashfs_cache_delete(msblk->read_page);
+       kfree(msblk->inode_lookup_table);
+       kfree(msblk->fragment_index);
+       kfree(msblk->id_table);
+       kfree(msblk->stream.workspace);
+       kfree(sb->s_fs_info);
+       sb->s_fs_info = NULL;
+       kfree(sblk);
+       return err;
+
+failure:
+       kfree(msblk->stream.workspace);
+       kfree(sb->s_fs_info);
+       sb->s_fs_info = NULL;
+       return -ENOMEM;
+}
+
+
+static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+       struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
+
+       TRACE("Entered squashfs_statfs\n");
+
+       buf->f_type = SQUASHFS_MAGIC;
+       buf->f_bsize = msblk->block_size;
+       buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1;
+       buf->f_bfree = buf->f_bavail = 0;
+       buf->f_files = msblk->inodes;
+       buf->f_ffree = 0;
+       buf->f_namelen = SQUASHFS_NAME_LEN;
+
+       return 0;
+}
+
+
+static int squashfs_remount(struct super_block *sb, int *flags, char *data)
+{
+       *flags |= MS_RDONLY;
+       return 0;
+}
+
+
+static void squashfs_put_super(struct super_block *sb)
+{
+       if (sb->s_fs_info) {
+               struct squashfs_sb_info *sbi = sb->s_fs_info;
+               squashfs_cache_delete(sbi->block_cache);
+               squashfs_cache_delete(sbi->fragment_cache);
+               squashfs_cache_delete(sbi->read_page);
+               kfree(sbi->id_table);
+               kfree(sbi->fragment_index);
+               kfree(sbi->meta_index);
+               kfree(sbi->stream.workspace);
+               kfree(sb->s_fs_info);
+               sb->s_fs_info = NULL;
+       }
+}
+
+
+static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
+                               const char *dev_name, void *data,
+                               struct vfsmount *mnt)
+{
+       return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
+                               mnt);
+}
+
+
+static struct kmem_cache *squashfs_inode_cachep;
+
+
+static void init_once(void *foo)
+{
+       struct squashfs_inode_info *ei = foo;
+
+       inode_init_once(&ei->vfs_inode);
+}
+
+
+static int __init init_inodecache(void)
+{
+       squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
+               sizeof(struct squashfs_inode_info), 0,
+               SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once);
+
+       return squashfs_inode_cachep ? 0 : -ENOMEM;
+}
+
+
+static void destroy_inodecache(void)
+{
+       kmem_cache_destroy(squashfs_inode_cachep);
+}
+
+
+static int __init init_squashfs_fs(void)
+{
+       int err = init_inodecache();
+
+       if (err)
+               return err;
+
+       err = register_filesystem(&squashfs_fs_type);
+       if (err) {
+               destroy_inodecache();
+               return err;
+       }
+
+       printk(KERN_INFO "squashfs: version 4.0 (2009/01/03) "
+               "Phillip Lougher\n");
+
+       return 0;
+}
+
+
+static void __exit exit_squashfs_fs(void)
+{
+       unregister_filesystem(&squashfs_fs_type);
+       destroy_inodecache();
+}
+
+
+static struct inode *squashfs_alloc_inode(struct super_block *sb)
+{
+       struct squashfs_inode_info *ei =
+               kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
+
+       return ei ? &ei->vfs_inode : NULL;
+}
+
+
+static void squashfs_destroy_inode(struct inode *inode)
+{
+       kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
+}
+
+
+static struct file_system_type squashfs_fs_type = {
+       .owner = THIS_MODULE,
+       .name = "squashfs",
+       .get_sb = squashfs_get_sb,
+       .kill_sb = kill_block_super,
+       .fs_flags = FS_REQUIRES_DEV
+};
+
+static struct super_operations squashfs_super_ops = {
+       .alloc_inode = squashfs_alloc_inode,
+       .destroy_inode = squashfs_destroy_inode,
+       .statfs = squashfs_statfs,
+       .put_super = squashfs_put_super,
+       .remount_fs = squashfs_remount
+};
+
+module_init(init_squashfs_fs);
+module_exit(exit_squashfs_fs);
+MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
+MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>");
+MODULE_LICENSE("GPL");
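The filesystem type registered above is consumed from user space through an ordinary mount of a block device; squashfs_fs_type sets FS_REQUIRES_DEV and squashfs_remount forces MS_RDONLY, so the image is only ever mounted read-only. A minimal user-space sketch (not part of this commit; the loop device and mount point are assumptions):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* mount a squashfs image exposed on a block device, read-only */
	if (mount("/dev/loop0", "/mnt/squash", "squashfs", MS_RDONLY, NULL) == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}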
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
new file mode 100644 (file)
index 0000000..83d8788
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * symlink.c
+ */
+
+/*
+ * This file implements code to handle symbolic links.
+ *
+ * The data contents of symbolic links are stored inside the symbolic
+ * link inode within the inode table.  This allows the normally small symbolic
+ * link to be compressed as part of the inode table, achieving much greater
+ * compression than if the symbolic link was compressed individually.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static int squashfs_symlink_readpage(struct file *file, struct page *page)
+{
+       struct inode *inode = page->mapping->host;
+       struct super_block *sb = inode->i_sb;
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       int index = page->index << PAGE_CACHE_SHIFT;
+       u64 block = squashfs_i(inode)->start;
+       int offset = squashfs_i(inode)->offset;
+       int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
+       int bytes, copied;
+       void *pageaddr;
+       struct squashfs_cache_entry *entry;
+
+       TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
+                       "%llx, offset %x\n", page->index, block, offset);
+
+       /*
+        * Skip index bytes into symlink metadata.
+        */
+       if (index) {
+               bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
+                                                               index);
+               if (bytes < 0) {
+                       ERROR("Unable to read symlink [%llx:%x]\n",
+                               squashfs_i(inode)->start,
+                               squashfs_i(inode)->offset);
+                       goto error_out;
+               }
+       }
+
+       /*
+        * Read length bytes from symlink metadata.  Squashfs_read_metadata
+        * is not used here because it can sleep and we want to use
+        * kmap_atomic to map the page.  Instead call the underlying
+        * squashfs_cache_get routine.  As length bytes may overlap metadata
+        * blocks, we may need to call squashfs_cache_get multiple times.
+        */
+       for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
+               entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
+               if (entry->error) {
+                       ERROR("Unable to read symlink [%llx:%x]\n",
+                               squashfs_i(inode)->start,
+                               squashfs_i(inode)->offset);
+                       squashfs_cache_put(entry);
+                       goto error_out;
+               }
+
+               pageaddr = kmap_atomic(page, KM_USER0);
+               copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
+                                                               length - bytes);
+               if (copied == length - bytes)
+                       memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
+               else
+                       block = entry->next_index;
+               kunmap_atomic(pageaddr, KM_USER0);
+               squashfs_cache_put(entry);
+       }
+
+       flush_dcache_page(page);
+       SetPageUptodate(page);
+       unlock_page(page);
+       return 0;
+
+error_out:
+       SetPageError(page);
+       unlock_page(page);
+       return 0;
+}
+
+
+const struct address_space_operations squashfs_symlink_aops = {
+       .readpage = squashfs_symlink_readpage
+};
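The address space operations above only fill pages; resolving the link target is left to the VFS. A minimal sketch of the inode setup this relies on (the helper name is hypothetical, and this is an assumption about the companion inode code, which is not shown here), using the generic page-cache symlink operations:

static void example_setup_symlink_inode(struct inode *inode)
{
	/* let page_follow_link_light()/page_put_link() read the target
	 * through squashfs_symlink_aops's readpage */
	inode->i_op = &page_symlink_inode_operations;
	inode->i_data.a_ops = &squashfs_symlink_aops;
	inode->i_mode = S_IFLNK | S_IRWXUGO;
}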
index 22c03714fb144d30b4c1bfd76fb30556eac373fa..86ca0e86e7d2579c0c096f7a57037627cf9dfa94 100644 (file)
@@ -22,7 +22,6 @@ typedef struct {
        unsigned long   dtlb_ptd_mapping;       /* [DAMR5] PTD mapping for dtlb cached PGE */
 
 #else
-       struct vm_list_struct   *vmlist;
        unsigned long           end_brk;
 
 #endif
index d9bd724479cf0a078ea9b92fa4f73045468a8234..150cb92bb6664e0ead1795a81e93ebed8f33f579 100644 (file)
@@ -4,7 +4,6 @@
 #if !defined(CONFIG_MMU)
 
 typedef struct {
-       struct vm_list_struct   *vmlist;
        unsigned long           end_brk;
 } mm_context_t;
 
index 1ee9488ca2e40589adf8634c5131c00424a7696f..79ca2da81c87a9710f790241dc494f67ed003dae 100644 (file)
@@ -31,6 +31,10 @@ struct backlight_device;
 struct fb_info;
 
 struct backlight_ops {
+       unsigned int options;
+
+#define BL_CORE_SUSPENDRESUME  (1 << 0)
+
        /* Notify the backlight driver some property has changed */
        int (*update_status)(struct backlight_device *);
        /* Return the current backlight brightness (accounting for power,
@@ -51,7 +55,19 @@ struct backlight_properties {
           modes; 4: full off), see FB_BLANK_XXX */
        int power;
        /* FB Blanking active? (values as for power) */
+       /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
        int fb_blank;
+       /* Flags used to signal drivers of state changes */
+       /* Upper 4 bits are reserved for driver internal use */
+       unsigned int state;
+
+#define BL_CORE_SUSPENDED      (1 << 0)        /* backlight is suspended */
+#define BL_CORE_FBBLANK                (1 << 1)        /* backlight is under an fb blank event */
+#define BL_CORE_DRIVER4                (1 << 28)       /* reserved for driver specific use */
+#define BL_CORE_DRIVER3                (1 << 29)       /* reserved for driver specific use */
+#define BL_CORE_DRIVER2                (1 << 30)       /* reserved for driver specific use */
+#define BL_CORE_DRIVER1                (1 << 31)       /* reserved for driver specific use */
+
 };
 
 struct backlight_device {
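As a sketch of how a driver would use the options field and state flags added above (all example_* names are hypothetical), it opts in to core-managed suspend/resume and masks brightness while suspended or fb-blanked:

static int example_bl_update_status(struct backlight_device *bd)
{
	int brightness = bd->props.brightness;

	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
		brightness = 0;

	return example_bl_set_hw(brightness);	/* hypothetical hardware hook */
}

static struct backlight_ops example_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= example_bl_update_status,
};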
index 81b4207deb957dcdcacafefa3d26cd89343f95c3..96eea90f01a87711440b6fd3162e2d40d38269b4 100644 (file)
@@ -15,6 +15,7 @@
 #define __LINUX_PCA9532_H
 
 #include <linux/leds.h>
+#include <linux/workqueue.h>
 
 enum pca9532_state {
        PCA9532_OFF  = 0x0,
@@ -31,6 +32,7 @@ struct pca9532_led {
        struct i2c_client *client;
        char *name;
        struct led_classdev ldev;
+       struct work_struct work;
        enum pca9532_type type;
        enum pca9532_state state;
 };
index d3a73f5a48c3f0514508da1395914e7846b488c5..24489da701e331b1a4d1810b40afd7209775e265 100644 (file)
@@ -32,7 +32,10 @@ struct led_classdev {
        int                      brightness;
        int                      flags;
 
+       /* Lower 16 bits reflect status */
 #define LED_SUSPENDED          (1 << 0)
+       /* Upper 16 bits reflect control information */
+#define LED_CORE_SUSPENDRESUME (1 << 16)
 
        /* Set LED brightness level */
        /* Must not sleep, use a workqueue if needed */
@@ -62,7 +65,7 @@ struct led_classdev {
 
 extern int led_classdev_register(struct device *parent,
                                 struct led_classdev *led_cdev);
-extern void led_classdev_unregister(struct led_classdev *lcd);
+extern void led_classdev_unregister(struct led_classdev *led_cdev);
 extern void led_classdev_suspend(struct led_classdev *led_cdev);
 extern void led_classdev_resume(struct led_classdev *led_cdev);
 
index e794dfb87504666042a1fa8e634ea6657fe06bdf..97ffdc1d344245c99085fbc2d1007188a50091b0 100644 (file)
 #define FLG_L2DATA             14      /* channel uses L2 DATA primitives */
 #define FLG_ORIGIN             15      /* channel is on origin site */
 /* channel specific stuff */
+#define FLG_FILLEMPTY          16      /* fill fifo on first frame (empty) */
 /* arcofi specific */
-#define FLG_ARCOFI_TIMER       16
-#define FLG_ARCOFI_ERROR       17
+#define FLG_ARCOFI_TIMER       17
+#define FLG_ARCOFI_ERROR       18
 /* isar specific */
-#define FLG_INITIALIZED                16
-#define FLG_DLEETX             17
-#define FLG_LASTDLE            18
-#define FLG_FIRST              19
-#define FLG_LASTDATA           20
-#define FLG_NMD_DATA           21
-#define FLG_FTI_RUN            22
-#define FLG_LL_OK              23
-#define FLG_LL_CONN            24
-#define FLG_DTMFSEND           25
+#define FLG_INITIALIZED                17
+#define FLG_DLEETX             18
+#define FLG_LASTDLE            19
+#define FLG_FIRST              20
+#define FLG_LASTDATA           21
+#define FLG_NMD_DATA           22
+#define FLG_FTI_RUN            23
+#define FLG_LL_OK              24
+#define FLG_LL_CONN            25
+#define FLG_DTMFSEND           26
 
 /* workq events */
 #define FLG_RECVQUEUE          30
@@ -183,6 +184,7 @@ extern void queue_ch_frame(struct mISDNchannel *, u_int,
 extern int     dchannel_senddata(struct dchannel *, struct sk_buff *);
 extern int     bchannel_senddata(struct bchannel *, struct sk_buff *);
 extern void    recv_Dchannel(struct dchannel *);
+extern void    recv_Echannel(struct dchannel *, struct dchannel *);
 extern void    recv_Bchannel(struct bchannel *);
 extern void    recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
 extern void    recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
index 8f2d60da04e757f1e1c5a9bbd26f486916a419da..557477ac3d5b509969060d748173626efddfe873 100644 (file)
@@ -36,8 +36,8 @@
  *              - should be incremented on every checkin
  */
 #define        MISDN_MAJOR_VERSION     1
-#define        MISDN_MINOR_VERSION     0
-#define MISDN_RELEASE          19
+#define        MISDN_MINOR_VERSION     1
+#define MISDN_RELEASE          20
 
 /* primitives for information exchange
  * general format
@@ -80,6 +80,7 @@
 #define PH_DEACTIVATE_IND      0x0202
 #define PH_DEACTIVATE_CNF      0x4202
 #define PH_DATA_IND            0x2002
+#define PH_DATA_E_IND          0x3002
 #define MPH_ACTIVATE_IND       0x0502
 #define MPH_DEACTIVATE_IND     0x0602
 #define MPH_INFORMATION_IND    0x0702
 #define ISDN_P_NT_S0           0x02
 #define ISDN_P_TE_E1           0x03
 #define ISDN_P_NT_E1           0x04
+#define ISDN_P_TE_UP0          0x05
+#define ISDN_P_NT_UP0          0x06
+
+#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \
+                               (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE))
+#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \
+                               (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT))
+#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0))
+#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1))
+#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0))
+
+
 #define ISDN_P_LAPD_TE         0x10
 #define        ISDN_P_LAPD_NT          0x11
 
@@ -255,16 +268,6 @@ struct sockaddr_mISDN {
        unsigned char   tei;
 };
 
-/* timer device ioctl */
-#define IMADDTIMER     _IOR('I', 64, int)
-#define IMDELTIMER     _IOR('I', 65, int)
-/* socket ioctls */
-#define        IMGETVERSION    _IOR('I', 66, int)
-#define        IMGETCOUNT      _IOR('I', 67, int)
-#define IMGETDEVINFO   _IOR('I', 68, int)
-#define IMCTRLREQ      _IOR('I', 69, int)
-#define IMCLEAR_L2     _IOR('I', 70, int)
-
 struct mISDNversion {
        unsigned char   major;
        unsigned char   minor;
@@ -281,6 +284,40 @@ struct mISDN_devinfo {
        char                    name[MISDN_MAX_IDLEN];
 };
 
+struct mISDN_devrename {
+       u_int                   id;
+       char                    name[MISDN_MAX_IDLEN]; /* new name */
+};
+
+/* MPH_INFORMATION_REQ payload */
+struct ph_info_ch {
+        __u32 protocol;
+        __u64 Flags;
+};
+
+struct ph_info_dch {
+        struct ph_info_ch ch;
+        __u16 state;
+        __u16 num_bch;
+};
+
+struct ph_info {
+        struct ph_info_dch dch;
+        struct ph_info_ch  bch[];
+};
+
+/* timer device ioctl */
+#define IMADDTIMER     _IOR('I', 64, int)
+#define IMDELTIMER     _IOR('I', 65, int)
+
+/* socket ioctls */
+#define        IMGETVERSION    _IOR('I', 66, int)
+#define        IMGETCOUNT      _IOR('I', 67, int)
+#define IMGETDEVINFO   _IOR('I', 68, int)
+#define IMCTRLREQ      _IOR('I', 69, int)
+#define IMCLEAR_L2     _IOR('I', 70, int)
+#define IMSETDEVNAME   _IOR('I', 71, struct mISDN_devrename)
+
 static inline int
 test_channelmap(u_int nr, u_char *map)
 {
@@ -312,6 +349,8 @@ clear_channelmap(u_int nr, u_char *map)
 #define MISDN_CTRL_SETPEER             0x0040
 #define MISDN_CTRL_UNSETPEER           0x0080
 #define MISDN_CTRL_RX_OFF              0x0100
+#define MISDN_CTRL_FILL_EMPTY          0x0200
+#define MISDN_CTRL_GETPEER             0x0400
 #define MISDN_CTRL_HW_FEATURES_OP      0x2000
 #define MISDN_CTRL_HW_FEATURES         0x2001
 #define MISDN_CTRL_HFC_OP              0x4000
@@ -362,6 +401,7 @@ struct mISDN_ctrl_req {
 #define DEBUG_L2_TEI           0x00100000
 #define DEBUG_L2_TEIFSM                0x00200000
 #define DEBUG_TIMER            0x01000000
+#define DEBUG_CLOCK            0x02000000
 
 #define mISDN_HEAD_P(s)                ((struct mISDNhead *)&s->cb[0])
 #define mISDN_HEAD_PRIM(s)     (((struct mISDNhead *)&s->cb[0])->prim)
@@ -375,6 +415,7 @@ struct mISDN_ctrl_req {
 struct mISDNchannel;
 struct mISDNdevice;
 struct mISDNstack;
+struct mISDNclock;
 
 struct channel_req {
        u_int                   protocol;
@@ -423,7 +464,6 @@ struct mISDN_sock {
 struct mISDNdevice {
        struct mISDNchannel     D;
        u_int                   id;
-       char                    name[MISDN_MAX_IDLEN];
        u_int                   Dprotocols;
        u_int                   Bprotocols;
        u_int                   nrbchan;
@@ -452,6 +492,16 @@ struct mISDNstack {
 #endif
 };
 
+typedef        int     (clockctl_func_t)(void *, int);
+
+struct mISDNclock {
+       struct list_head        list;
+       char                    name[64];
+       int                     pri;
+       clockctl_func_t         *ctl;
+       void                    *priv;
+};
+
 /* global alloc/queue functions */
 
 static inline struct sk_buff *
@@ -498,12 +548,23 @@ _queue_data(struct mISDNchannel *ch, u_int prim,
 
 /* global register/unregister functions */
 
-extern int     mISDN_register_device(struct mISDNdevice *, char *name);
+extern int     mISDN_register_device(struct mISDNdevice *,
+                                       struct device *parent, char *name);
 extern void    mISDN_unregister_device(struct mISDNdevice *);
 extern int     mISDN_register_Bprotocol(struct Bprotocol *);
 extern void    mISDN_unregister_Bprotocol(struct Bprotocol *);
+extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
+                                               void *);
+extern void    mISDN_unregister_clock(struct mISDNclock *);
+
+static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+{
+       return dev_get_drvdata(dev);
+}
 
 extern void    set_channel_address(struct mISDNchannel *, u_int, u_int);
+extern void    mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern unsigned short mISDN_clock_get(void);
 
 #endif /* __KERNEL__ */
 #endif /* mISDNIF_H */
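A hedged sketch of the clock API declared above, as a hardware driver might use it (struct example_card and its members are hypothetical):

static int example_clockctl(void *priv, int enable)
{
	struct example_card *card = priv;	/* hypothetical driver state */

	card->clock_running = enable;		/* start or stop the hardware tick */
	return 0;
}

static int example_register_clock(struct example_card *card)
{
	/* priority 0, control callback, driver private pointer */
	card->iclock = mISDN_register_clock("example-card", 0,
					    example_clockctl, card);
	return card->iclock ? 0 : -EBUSY;
}

static void example_rx_complete(struct example_card *card, int samples)
{
	/* report how many samples the card clock advanced since the last call */
	mISDN_clock_update(card->iclock, samples, NULL);
}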
index 96acbfc8aa12f2c607a97e110af29ce370389297..be3264e286e0199588ee651c772ad16552ac70a5 100644 (file)
 #ifndef __LINUX_MFD_WM8350_PMIC_H
 #define __LINUX_MFD_WM8350_PMIC_H
 
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/regulator/machine.h>
+
 /*
  * Register values.
  */
@@ -700,6 +704,33 @@ struct wm8350;
 struct platform_device;
 struct regulator_init_data;
 
+/*
+ * WM8350 LED platform data
+ */
+struct wm8350_led_platform_data {
+       const char *name;
+       const char *default_trigger;
+       int max_uA;
+};
+
+struct wm8350_led {
+       struct platform_device *pdev;
+       struct mutex mutex;
+       struct work_struct work;
+       spinlock_t value_lock;
+       enum led_brightness value;
+       struct led_classdev cdev;
+       int max_uA_index;
+       int enabled;
+
+       struct regulator *isink;
+       struct regulator_consumer_supply isink_consumer;
+       struct regulator_init_data isink_init;
+       struct regulator *dcdc;
+       struct regulator_consumer_supply dcdc_consumer;
+       struct regulator_init_data dcdc_init;
+};
+
 struct wm8350_pmic {
        /* Number of regulators of each type on this device */
        int max_dcdc;
@@ -717,10 +748,15 @@ struct wm8350_pmic {
 
        /* regulator devices */
        struct platform_device *pdev[NUM_WM8350_REGULATORS];
+
+       /* LED devices */
+       struct wm8350_led led[2];
 };
 
 int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
                              struct regulator_init_data *initdata);
+int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
+                       struct wm8350_led_platform_data *pdata);
 
 /*
  * Additional DCDC control not supported via regulator API
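For orientation, a board-code sketch of the new wm8350_register_led() hook (the names, regulator identifiers and current limit below are assumptions, not taken from this patch):

static struct wm8350_led_platform_data example_led_pdata = {
	.name		 = "example::backlight",
	.default_trigger = "backlight",
	.max_uA		 = 20000,
};

static int example_board_init_led(struct wm8350 *wm8350)
{
	/* WM8350_DCDC_5 and WM8350_ISINK_A are assumed regulator IDs */
	return wm8350_register_led(wm8350, 0, WM8350_DCDC_5, WM8350_ISINK_A,
				   &example_led_pdata);
}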
index 4a3d28c86443f8dfdfe5e4ee49a47ef1d27a0f44..b91a73fd1bcc2d22f64d5387b5c2dfe7473456c8 100644 (file)
@@ -56,19 +56,9 @@ extern unsigned long mmap_min_addr;
 
 extern struct kmem_cache *vm_area_cachep;
 
-/*
- * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
- * disabled, then there's a single shared list of VMAs maintained by the
- * system, and mm's subscribe to these individually
- */
-struct vm_list_struct {
-       struct vm_list_struct   *next;
-       struct vm_area_struct   *vma;
-};
-
 #ifndef CONFIG_MMU
-extern struct rb_root nommu_vma_tree;
-extern struct rw_semaphore nommu_vma_sem;
+extern struct rb_root nommu_region_tree;
+extern struct rw_semaphore nommu_region_sem;
 
 extern unsigned int kobjsize(const void *objp);
 #endif
@@ -1061,6 +1051,7 @@ extern void memmap_init_zone(unsigned long, int, unsigned long,
                                unsigned long, enum memmap_context);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
+extern void __init mmap_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -1072,6 +1063,9 @@ extern void setup_per_cpu_pageset(void);
 static inline void setup_per_cpu_pageset(void) {}
 #endif
 
+/* nommu.c */
+extern atomic_t mmap_pages_allocated;
+
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
 void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
index 9cfc9b627fdd745d4702a903e29db36f2676aa34..92915e81443ff028afc2ac8a188a7bdbf94721ee 100644 (file)
@@ -96,6 +96,23 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
+/*
+ * A region containing a mapping of a non-memory backed file under NOMMU
+ * conditions.  These are held in a global tree and are pinned by the VMAs that
+ * map parts of them.
+ */
+struct vm_region {
+       struct rb_node  vm_rb;          /* link in global region tree */
+       unsigned long   vm_flags;       /* VMA vm_flags */
+       unsigned long   vm_start;       /* start address of region */
+       unsigned long   vm_end;         /* region initialised to here */
+       unsigned long   vm_top;         /* region allocated to here */
+       unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
+       struct file     *vm_file;       /* the backing file or NULL */
+
+       atomic_t        vm_usage;       /* region usage count */
+};
+
 /*
  * This struct defines a VM memory area. There is one of these
  * per VM-area/task.  A VM area is any part of the process virtual memory
@@ -152,7 +169,7 @@ struct vm_area_struct {
        unsigned long vm_truncate_count;/* truncate_count or restart_addr */
 
 #ifndef CONFIG_MMU
-       atomic_t vm_usage;              /* refcount (VMAs shared if !MMU) */
+       struct vm_region *vm_region;    /* NOMMU mapping region */
 #endif
 #ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h
new file mode 100644 (file)
index 0000000..7572d4e
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __TDO24M_H__
+#define __TDO24M_H__
+
+enum tdo24m_model {
+       TDO24M,
+       TDO35S,
+};
+
+struct tdo24m_platform_data {
+       enum tdo24m_model model;
+};
+
+#endif /* __TDO24M_H__ */
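A hypothetical board-file fragment showing how this platform data would be attached to an SPI device entry (bus number, chip select, clock rate and modalias are assumptions):

#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>

static struct tdo24m_platform_data example_tdo24m_pdata = {
	.model = TDO35S,
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "tdo24m",
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &example_tdo24m_pdata,
	},
};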
index a7c748fa977a44c2887ad9fcaee3beddf8d4f45e..0f0f0cf3ba9aa97c34d88824fbf4644ca76b0fac 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/string.h>
 
 #include "do_mounts.h"
+#include "../fs/squashfs/squashfs_fs.h"
 
 int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */
 
@@ -41,6 +42,7 @@ static int __init crd_load(int in_fd, int out_fd);
  *     ext2
  *     romfs
  *     cramfs
+ *     squashfs
  *     gzip
  */
 static int __init 
@@ -51,6 +53,7 @@ identify_ramdisk_image(int fd, int start_block)
        struct ext2_super_block *ext2sb;
        struct romfs_super_block *romfsb;
        struct cramfs_super *cramfsb;
+       struct squashfs_super_block *squashfsb;
        int nblocks = -1;
        unsigned char *buf;
 
@@ -62,6 +65,7 @@ identify_ramdisk_image(int fd, int start_block)
        ext2sb = (struct ext2_super_block *) buf;
        romfsb = (struct romfs_super_block *) buf;
        cramfsb = (struct cramfs_super *) buf;
+       squashfsb = (struct squashfs_super_block *) buf;
        memset(buf, 0xe5, size);
 
        /*
@@ -99,6 +103,16 @@ identify_ramdisk_image(int fd, int start_block)
                goto done;
        }
 
+       /* squashfs is at block zero too */
+       if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) {
+               printk(KERN_NOTICE
+                      "RAMDISK: squashfs filesystem found at block %d\n",
+                      start_block);
+               nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1)
+                        >> BLOCK_SIZE_BITS;
+               goto done;
+       }
+
        /*
         * Read block 1 to test for minix and ext2 superblock
         */
index 4f5ba75aaa7c67d1d80754e6b0be67f68151dfda..d9c941c0c3cacd26fb051059540673b5e011d967 100644 (file)
@@ -317,6 +317,7 @@ static int __init do_name(void)
                        if (wfd >= 0) {
                                sys_fchown(wfd, uid, gid);
                                sys_fchmod(wfd, mode);
+                               sys_ftruncate(wfd, body_len);
                                vcollected = kstrdup(collected, GFP_KERNEL);
                                state = CopyFile;
                        }
index b125b560240ec1ceb204f5ed0ff6f63ba97081d8..d0ab5527bf45607ae5ba8abc66219c218fab694a 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -990,6 +990,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr)
         */
        vma = find_vma(mm, addr);
 
+#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;
 
@@ -1034,6 +1035,17 @@ asmlinkage long sys_shmdt(char __user *shmaddr)
                vma = next;
        }
 
+#else /* CONFIG_MMU */
+       /* under NOMMU conditions, the exact address to be destroyed must be
+        * given */
+       retval = -EINVAL;
+       if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
+               do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+               retval = 0;
+       }
+
+#endif
+
        up_write(&mm->mmap_sem);
        return retval;
 }
index 043f78c133c46ccbfe4a56bcbf92b35e57f92018..3a039189d70748b294082301d9636d467c019c3b 100644 (file)
@@ -372,7 +372,8 @@ int commit_creds(struct cred *new)
            old->fsuid != new->fsuid ||
            old->fsgid != new->fsgid ||
            !cap_issubset(new->cap_permitted, old->cap_permitted)) {
-               set_dumpable(task->mm, suid_dumpable);
+               if (task->mm)
+                       set_dumpable(task->mm, suid_dumpable);
                task->pdeath_signal = 0;
                smp_wmb();
        }
index 4018308048cf8f52db8202cf5969c14a919830d1..1d68f1255dd824cf2fd1c06a1e6464bac718d26f 100644 (file)
@@ -1481,12 +1481,10 @@ void __init proc_caches_init(void)
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-       vm_area_cachep = kmem_cache_create("vm_area_struct",
-                       sizeof(struct vm_area_struct), 0,
-                       SLAB_PANIC, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+       mmap_init();
 }
 
 /*
index 92f6e5bc3c24f2700b88e31a689f642a83a29a4b..89d74436318ce9bd2db36deadfbd7fbe850944da 100644 (file)
@@ -82,6 +82,9 @@ extern int percpu_pagelist_fraction;
 extern int compat_log;
 extern int latencytop_enabled;
 extern int sysctl_nr_open_min, sysctl_nr_open_max;
+#ifndef CONFIG_MMU
+extern int sysctl_nr_trim_pages;
+#endif
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable;
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
@@ -1102,6 +1105,17 @@ static struct ctl_table vm_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
+#else
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "nr_trim_pages",
+               .data           = &sysctl_nr_trim_pages,
+               .maxlen         = sizeof(sysctl_nr_trim_pages),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &zero,
+       },
 #endif
        {
                .ctl_name       = VM_LAPTOP_MODE,
index 2e75478e9c696bc6933ca8952c0a013dd30eb78a..d0a32aab03ff66cbda68d3d60b30980d006d86ba 100644 (file)
@@ -512,6 +512,13 @@ config DEBUG_VIRTUAL
 
          If unsure, say N.
 
+config DEBUG_NOMMU_REGIONS
+       bool "Debug the global anon/private NOMMU mapping region tree"
+       depends on DEBUG_KERNEL && !MMU
+       help
+         This option causes the global tree of anonymous and private mapping
+         regions to be regularly checked for invalid topology.
+
 config DEBUG_WRITECOUNT
        bool "Debug filesystem writers count"
        depends on DEBUG_KERNEL
index a910c045cfd4241601306257c8822bda7f1ee05a..749623196cb96facba001c1dc55ae3ab235ff67b 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2472,3 +2472,13 @@ void mm_drop_all_locks(struct mm_struct *mm)
 
        mutex_unlock(&mm_all_locks_mutex);
 }
+
+/*
+ * initialise the VMA slab
+ */
+void __init mmap_init(void)
+{
+       vm_area_cachep = kmem_cache_create("vm_area_struct",
+                       sizeof(struct vm_area_struct), 0,
+                       SLAB_PANIC, NULL);
+}
index 1c28ea3a4e9c9054975683a19237d2d823f98ead..60ed8375c986f56604e1551836fcf0f48d1273a3 100644 (file)
@@ -6,11 +6,11 @@
  *
  *  See Documentation/nommu-mmap.txt
  *
- *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
+ *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
- *  Copyright (c) 2007      Paul Mundt <lethal@linux-sh.org>
+ *  Copyright (c) 2007-2008 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include "internal.h"
+
+static inline __attribute__((format(printf, 1, 2)))
+void no_printk(const char *fmt, ...)
+{
+}
+
+#if 0
+#define kenter(FMT, ...) \
+       printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+       printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+       printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+       no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+       no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+       no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
 
 #include "internal.h"
 
@@ -40,19 +62,22 @@ void *high_memory;
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
-unsigned long askedalloc, realalloc;
 atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
 int heap_stack_gap = 0;
 
+atomic_t mmap_pages_allocated;
+
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(num_physpages);
 
-/* list of shareable VMAs */
-struct rb_root nommu_vma_tree = RB_ROOT;
-DECLARE_RWSEM(nommu_vma_sem);
+/* list of mapped, potentially shareable regions */
+static struct kmem_cache *vm_region_jar;
+struct rb_root nommu_region_tree = RB_ROOT;
+DECLARE_RWSEM(nommu_region_sem);
 
 struct vm_operations_struct generic_file_vm_ops = {
 };
@@ -123,6 +148,20 @@ unsigned int kobjsize(const void *objp)
        if (PageSlab(page))
                return ksize(objp);
 
+       /*
+        * If it's not a compound page, see if we have a matching VMA
+        * region. This test is intentionally done in reverse order,
+        * so if there's no VMA, we still fall through and hand back
+        * PAGE_SIZE for 0-order pages.
+        */
+       if (!PageCompound(page)) {
+               struct vm_area_struct *vma;
+
+               vma = find_vma(current->mm, (unsigned long)objp);
+               if (vma)
+                       return vma->vm_end - vma->vm_start;
+       }
+
        /*
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
@@ -401,129 +440,178 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
        return mm->brk = brk;
 }
 
-#ifdef DEBUG
-static void show_process_blocks(void)
+/*
+ * initialise the VMA and region record slabs
+ */
+void __init mmap_init(void)
 {
-       struct vm_list_struct *vml;
-
-       printk("Process blocks %d:", current->pid);
-
-       for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
-               printk(" %p: %p", vml, vml->vma);
-               if (vml->vma)
-                       printk(" (%d @%lx #%d)",
-                              kobjsize((void *) vml->vma->vm_start),
-                              vml->vma->vm_start,
-                              atomic_read(&vml->vma->vm_usage));
-               printk(vml->next ? " ->" : ".\n");
-       }
+       vm_region_jar = kmem_cache_create("vm_region_jar",
+                                         sizeof(struct vm_region), 0,
+                                         SLAB_PANIC, NULL);
+       vm_area_cachep = kmem_cache_create("vm_area_struct",
+                                          sizeof(struct vm_area_struct), 0,
+                                          SLAB_PANIC, NULL);
 }
-#endif /* DEBUG */
 
 /*
- * add a VMA into a process's mm_struct in the appropriate place in the list
- * - should be called with mm->mmap_sem held writelocked
+ * validate the region tree
+ * - the caller must hold the region lock
  */
-static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
+#ifdef CONFIG_DEBUG_NOMMU_REGIONS
+static noinline void validate_nommu_regions(void)
 {
-       struct vm_list_struct **ppv;
-
-       for (ppv = &current->mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
-               if ((*ppv)->vma->vm_start > vml->vma->vm_start)
-                       break;
-
-       vml->next = *ppv;
-       *ppv = vml;
+       struct vm_region *region, *last;
+       struct rb_node *p, *lastp;
+
+       lastp = rb_first(&nommu_region_tree);
+       if (!lastp)
+               return;
+
+       last = rb_entry(lastp, struct vm_region, vm_rb);
+       if (unlikely(last->vm_end <= last->vm_start))
+               BUG();
+       if (unlikely(last->vm_top < last->vm_end))
+               BUG();
+
+       while ((p = rb_next(lastp))) {
+               region = rb_entry(p, struct vm_region, vm_rb);
+               last = rb_entry(lastp, struct vm_region, vm_rb);
+
+               if (unlikely(region->vm_end <= region->vm_start))
+                       BUG();
+               if (unlikely(region->vm_top < region->vm_end))
+                       BUG();
+               if (unlikely(region->vm_start < last->vm_top))
+                       BUG();
+
+               lastp = p;
+       }
 }
+#else
+#define validate_nommu_regions() do {} while(0)
+#endif
 
 /*
- * look up the first VMA in which addr resides, NULL if none
- * - should be called with mm->mmap_sem at least held readlocked
+ * add a region into the global tree
  */
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+static void add_nommu_region(struct vm_region *region)
 {
-       struct vm_list_struct *loop, *vml;
+       struct vm_region *pregion;
+       struct rb_node **p, *parent;
 
-       /* search the vm_start ordered list */
-       vml = NULL;
-       for (loop = mm->context.vmlist; loop; loop = loop->next) {
-               if (loop->vma->vm_start > addr)
-                       break;
-               vml = loop;
+       validate_nommu_regions();
+
+       BUG_ON(region->vm_start & ~PAGE_MASK);
+
+       parent = NULL;
+       p = &nommu_region_tree.rb_node;
+       while (*p) {
+               parent = *p;
+               pregion = rb_entry(parent, struct vm_region, vm_rb);
+               if (region->vm_start < pregion->vm_start)
+                       p = &(*p)->rb_left;
+               else if (region->vm_start > pregion->vm_start)
+                       p = &(*p)->rb_right;
+               else if (pregion == region)
+                       return;
+               else
+                       BUG();
        }
 
-       if (vml && vml->vma->vm_end > addr)
-               return vml->vma;
+       rb_link_node(&region->vm_rb, parent, p);
+       rb_insert_color(&region->vm_rb, &nommu_region_tree);
 
-       return NULL;
+       validate_nommu_regions();
 }
-EXPORT_SYMBOL(find_vma);
 
 /*
- * find a VMA
- * - we don't extend stack VMAs under NOMMU conditions
+ * delete a region from the global tree
  */
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+static void delete_nommu_region(struct vm_region *region)
 {
-       return find_vma(mm, addr);
-}
+       BUG_ON(!nommu_region_tree.rb_node);
 
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-       return -ENOMEM;
+       validate_nommu_regions();
+       rb_erase(&region->vm_rb, &nommu_region_tree);
+       validate_nommu_regions();
 }
 
 /*
- * look up the first VMA exactly that exactly matches addr
- * - should be called with mm->mmap_sem at least held readlocked
+ * free a contiguous series of pages
  */
-static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
-                                                   unsigned long addr)
+static void free_page_series(unsigned long from, unsigned long to)
 {
-       struct vm_list_struct *vml;
-
-       /* search the vm_start ordered list */
-       for (vml = mm->context.vmlist; vml; vml = vml->next) {
-               if (vml->vma->vm_start == addr)
-                       return vml->vma;
-               if (vml->vma->vm_start > addr)
-                       break;
+       for (; from < to; from += PAGE_SIZE) {
+               struct page *page = virt_to_page(from);
+
+               kdebug("- free %lx", from);
+               atomic_dec(&mmap_pages_allocated);
+               if (page_count(page) != 1)
+                       kdebug("free page %p [%d]", page, page_count(page));
+               put_page(page);
        }
-
-       return NULL;
 }
 
 /*
- * find a VMA in the global tree
+ * release a reference to a region
+ * - the caller must hold the region semaphore, which this releases
+ * - the region may not have been added to the tree yet, in which case vm_top
+ *   will equal vm_start
  */
-static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
+static void __put_nommu_region(struct vm_region *region)
+       __releases(nommu_region_sem)
 {
-       struct vm_area_struct *vma;
-       struct rb_node *n = nommu_vma_tree.rb_node;
+       kenter("%p{%d}", region, atomic_read(&region->vm_usage));
 
-       while (n) {
-               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+       BUG_ON(!nommu_region_tree.rb_node);
 
-               if (start < vma->vm_start)
-                       n = n->rb_left;
-               else if (start > vma->vm_start)
-                       n = n->rb_right;
-               else
-                       return vma;
+       if (atomic_dec_and_test(&region->vm_usage)) {
+               if (region->vm_top > region->vm_start)
+                       delete_nommu_region(region);
+               up_write(&nommu_region_sem);
+
+               if (region->vm_file)
+                       fput(region->vm_file);
+
+               /* IO memory and memory shared directly out of the pagecache
+                * from ramfs/tmpfs mustn't be released here */
+               if (region->vm_flags & VM_MAPPED_COPY) {
+                       kdebug("free series");
+                       free_page_series(region->vm_start, region->vm_top);
+               }
+               kmem_cache_free(vm_region_jar, region);
+       } else {
+               up_write(&nommu_region_sem);
        }
+}
 
-       return NULL;
+/*
+ * release a reference to a region
+ */
+static void put_nommu_region(struct vm_region *region)
+{
+       down_write(&nommu_region_sem);
+       __put_nommu_region(region);
 }
 
 /*
- * add a VMA in the global tree
+ * add a VMA into a process's mm_struct in the appropriate place in the list
+ * and tree and add to the address space's page tree also if not an anonymous
+ * page
+ * - should be called with mm->mmap_sem held writelocked
  */
-static void add_nommu_vma(struct vm_area_struct *vma)
+static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma;
+       struct vm_area_struct *pvma, **pp;
        struct address_space *mapping;
-       struct rb_node **p = &nommu_vma_tree.rb_node;
-       struct rb_node *parent = NULL;
+       struct rb_node **p, *parent;
+
+       kenter(",%p", vma);
+
+       BUG_ON(!vma->vm_region);
+
+       mm->map_count++;
+       vma->vm_mm = mm;
 
        /* add the VMA to the mapping */
        if (vma->vm_file) {
@@ -534,42 +622,62 @@ static void add_nommu_vma(struct vm_area_struct *vma)
                flush_dcache_mmap_unlock(mapping);
        }
 
-       /* add the VMA to the master list */
+       /* add the VMA to the tree */
+       parent = NULL;
+       p = &mm->mm_rb.rb_node;
        while (*p) {
                parent = *p;
                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 
-               if (vma->vm_start < pvma->vm_start) {
+               /* sort by: start addr, end addr, VMA struct addr in that order
+                * (the latter is necessary as we may get identical VMAs) */
+               if (vma->vm_start < pvma->vm_start)
                        p = &(*p)->rb_left;
-               }
-               else if (vma->vm_start > pvma->vm_start) {
+               else if (vma->vm_start > pvma->vm_start)
                        p = &(*p)->rb_right;
-               }
-               else {
-                       /* mappings are at the same address - this can only
-                        * happen for shared-mem chardevs and shared file
-                        * mappings backed by ramfs/tmpfs */
-                       BUG_ON(!(pvma->vm_flags & VM_SHARED));
-
-                       if (vma < pvma)
-                               p = &(*p)->rb_left;
-                       else if (vma > pvma)
-                               p = &(*p)->rb_right;
-                       else
-                               BUG();
-               }
+               else if (vma->vm_end < pvma->vm_end)
+                       p = &(*p)->rb_left;
+               else if (vma->vm_end > pvma->vm_end)
+                       p = &(*p)->rb_right;
+               else if (vma < pvma)
+                       p = &(*p)->rb_left;
+               else if (vma > pvma)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
        }
 
        rb_link_node(&vma->vm_rb, parent, p);
-       rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
+       rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+
+       /* add VMA to the VMA list also */
+       for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
+               if (pvma->vm_start > vma->vm_start)
+                       break;
+               if (pvma->vm_start < vma->vm_start)
+                       continue;
+               if (pvma->vm_end < vma->vm_end)
+                       break;
+       }
+
+       vma->vm_next = *pp;
+       *pp = vma;
 }
 
 /*
- * delete a VMA from the global list
+ * delete a VMA from its owning mm_struct and address space
  */
-static void delete_nommu_vma(struct vm_area_struct *vma)
+static void delete_vma_from_mm(struct vm_area_struct *vma)
 {
+       struct vm_area_struct **pp;
        struct address_space *mapping;
+       struct mm_struct *mm = vma->vm_mm;
+
+       kenter("%p", vma);
+
+       mm->map_count--;
+       if (mm->mmap_cache == vma)
+               mm->mmap_cache = NULL;
 
        /* remove the VMA from the mapping */
        if (vma->vm_file) {
@@ -580,8 +688,115 @@ static void delete_nommu_vma(struct vm_area_struct *vma)
                flush_dcache_mmap_unlock(mapping);
        }
 
-       /* remove from the master list */
-       rb_erase(&vma->vm_rb, &nommu_vma_tree);
+       /* remove from the MM's tree and list */
+       rb_erase(&vma->vm_rb, &mm->mm_rb);
+       for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
+               if (*pp == vma) {
+                       *pp = vma->vm_next;
+                       break;
+               }
+       }
+
+       vma->vm_mm = NULL;
+}
+
+/*
+ * destroy a VMA record
+ */
+static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+       kenter("%p", vma);
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
+       if (vma->vm_file) {
+               fput(vma->vm_file);
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(mm);
+       }
+       put_nommu_region(vma->vm_region);
+       kmem_cache_free(vm_area_cachep, vma);
+}
+
+/*
+ * look up the first VMA in which addr resides, NULL if none
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+{
+       struct vm_area_struct *vma;
+       struct rb_node *n = mm->mm_rb.rb_node;
+
+       /* check the cache first */
+       vma = mm->mmap_cache;
+       if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+               return vma;
+
+       /* trawl the tree (there may be multiple mappings in which addr
+        * resides) */
+       for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+               if (vma->vm_start > addr)
+                       return NULL;
+               if (vma->vm_end > addr) {
+                       mm->mmap_cache = vma;
+                       return vma;
+               }
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(find_vma);
+
+/*
+ * find a VMA
+ * - we don't extend stack VMAs under NOMMU conditions
+ */
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+       return find_vma(mm, addr);
+}
+
+/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return -ENOMEM;
+}
+
+/*
+ * look up the first VMA that exactly matches addr
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+                                            unsigned long addr,
+                                            unsigned long len)
+{
+       struct vm_area_struct *vma;
+       struct rb_node *n = mm->mm_rb.rb_node;
+       unsigned long end = addr + len;
+
+       /* check the cache first */
+       vma = mm->mmap_cache;
+       if (vma && vma->vm_start == addr && vma->vm_end == end)
+               return vma;
+
+       /* trawl the tree (there may be multiple mappings in which addr
+        * resides) */
+       for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+               if (vma->vm_start < addr)
+                       continue;
+               if (vma->vm_start > addr)
+                       return NULL;
+               if (vma->vm_end == end) {
+                       mm->mmap_cache = vma;
+                       return vma;
+               }
+       }
+
+       return NULL;
 }
 
 /*
@@ -596,7 +811,7 @@ static int validate_mmap_request(struct file *file,
                                 unsigned long pgoff,
                                 unsigned long *_capabilities)
 {
-       unsigned long capabilities;
+       unsigned long capabilities, rlen;
        unsigned long reqprot = prot;
        int ret;
 
@@ -616,12 +831,12 @@ static int validate_mmap_request(struct file *file,
                return -EINVAL;
 
        /* Careful about overflows.. */
-       len = PAGE_ALIGN(len);
-       if (!len || len > TASK_SIZE)
+       rlen = PAGE_ALIGN(len);
+       if (!rlen || rlen > TASK_SIZE)
                return -ENOMEM;
 
        /* offset overflow? */
-       if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+       if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;
 
        if (file) {
@@ -795,13 +1010,18 @@ static unsigned long determine_vm_flags(struct file *file,
 }
 
 /*
- * set up a shared mapping on a file
+ * set up a shared mapping on a file (the driver or filesystem provides and
+ * pins the storage)
  */
-static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_shared_file(struct vm_area_struct *vma)
 {
        int ret;
 
        ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+       if (ret == 0) {
+               vma->vm_region->vm_top = vma->vm_region->vm_end;
+               return ret;
+       }
        if (ret != -ENOSYS)
                return ret;
 
@@ -815,10 +1035,14 @@ static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
 /*
  * set up a private mapping or an anonymous shared mapping
  */
-static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_private(struct vm_area_struct *vma,
+                          struct vm_region *region,
+                          unsigned long len)
 {
+       struct page *pages;
+       unsigned long total, point, n, rlen;
        void *base;
-       int ret;
+       int ret, order;
 
        /* invoke the file's mapping function so that it can keep track of
         * shared mappings on devices or memory
@@ -826,34 +1050,63 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
         */
        if (vma->vm_file) {
                ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
-               if (ret != -ENOSYS) {
+               if (ret == 0) {
                        /* shouldn't return success if we're not sharing */
-                       BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
-                       return ret; /* success or a real error */
+                       BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
+                       vma->vm_region->vm_top = vma->vm_region->vm_end;
+                       return ret;
                }
+               if (ret != -ENOSYS)
+                       return ret;
 
                /* getting an ENOSYS error indicates that direct mmap isn't
                 * possible (as opposed to tried but failed) so we'll try to
                 * make a private copy of the data and map that instead */
        }
 
+       rlen = PAGE_ALIGN(len);
+
        /* allocate some memory to hold the mapping
         * - note that this may not return a page-aligned address if the object
         *   we're allocating is smaller than a page
         */
-       base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
-       if (!base)
+       order = get_order(rlen);
+       kdebug("alloc order %d for %lx", order, len);
+
+       pages = alloc_pages(GFP_KERNEL, order);
+       if (!pages)
                goto enomem;
 
-       vma->vm_start = (unsigned long) base;
-       vma->vm_end = vma->vm_start + len;
-       vma->vm_flags |= VM_MAPPED_COPY;
+       total = 1 << order;
+       atomic_add(total, &mmap_pages_allocated);
+
+       point = rlen >> PAGE_SHIFT;
+
+       /* we allocated a power-of-2 sized page set, so we may want to trim off
+        * the excess */
+       if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+               while (total > point) {
+                       order = ilog2(total - point);
+                       n = 1 << order;
+                       kdebug("shave %lu/%lu @%lu", n, total - point, total);
+                       atomic_sub(n, &mmap_pages_allocated);
+                       total -= n;
+                       set_page_refcounted(pages + total);
+                       __free_pages(pages + total, order);
+               }
+       }
+
+       for (point = 1; point < total; point++)
+               set_page_refcounted(&pages[point]);
 
-#ifdef WARN_ON_SLACK
-       if (len + WARN_ON_SLACK <= kobjsize(result))
-               printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
-                      len, current->pid, kobjsize(result) - len);
-#endif
+       base = page_address(pages);
+       region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+       region->vm_start = (unsigned long) base;
+       region->vm_end   = region->vm_start + rlen;
+       region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
+
+       vma->vm_start = region->vm_start;
+       vma->vm_end   = region->vm_start + len;
 
        if (vma->vm_file) {
                /* read the contents of a file into the copy */
@@ -865,26 +1118,28 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
 
                old_fs = get_fs();
                set_fs(KERNEL_DS);
-               ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
+               ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
                set_fs(old_fs);
 
                if (ret < 0)
                        goto error_free;
 
                /* clear the last little bit */
-               if (ret < len)
-                       memset(base + ret, 0, len - ret);
+               if (ret < rlen)
+                       memset(base + ret, 0, rlen - ret);
 
        } else {
                /* if it's an anonymous mapping, then just clear it */
-               memset(base, 0, len);
+               memset(base, 0, rlen);
        }
 
        return 0;
 
 error_free:
-       kfree(base);
-       vma->vm_start = 0;
+       free_page_series(region->vm_start, region->vm_end);
+       region->vm_start = vma->vm_start = 0;
+       region->vm_end   = vma->vm_end = 0;
+       region->vm_top   = 0;
        return ret;
 
 enomem:
@@ -904,13 +1159,14 @@ unsigned long do_mmap_pgoff(struct file *file,
                            unsigned long flags,
                            unsigned long pgoff)
 {
-       struct vm_list_struct *vml = NULL;
-       struct vm_area_struct *vma = NULL;
+       struct vm_area_struct *vma;
+       struct vm_region *region;
        struct rb_node *rb;
-       unsigned long capabilities, vm_flags;
-       void *result;
+       unsigned long capabilities, vm_flags, result;
        int ret;
 
+       kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
+
        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);
 
@@ -918,73 +1174,120 @@ unsigned long do_mmap_pgoff(struct file *file,
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
-       if (ret < 0)
+       if (ret < 0) {
+               kleave(" = %d [val]", ret);
                return ret;
+       }
 
        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
        vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
-       /* we're going to need to record the mapping if it works */
-       vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
-       if (!vml)
-               goto error_getting_vml;
+       /* we're going to need to record the mapping */
+       region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
+       if (!region)
+               goto error_getting_region;
+
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       if (!vma)
+               goto error_getting_vma;
+
+       atomic_set(&region->vm_usage, 1);
+       region->vm_flags = vm_flags;
+       region->vm_pgoff = pgoff;
+
+       INIT_LIST_HEAD(&vma->anon_vma_node);
+       vma->vm_flags = vm_flags;
+       vma->vm_pgoff = pgoff;
 
-       down_write(&nommu_vma_sem);
+       if (file) {
+               region->vm_file = file;
+               get_file(file);
+               vma->vm_file = file;
+               get_file(file);
+               if (vm_flags & VM_EXECUTABLE) {
+                       added_exe_file_vma(current->mm);
+                       vma->vm_mm = current->mm;
+               }
+       }
 
-       /* if we want to share, we need to check for VMAs created by other
+       down_write(&nommu_region_sem);
+
+       /* if we want to share, we need to check for regions created by other
         * mmap() calls that overlap with our proposed mapping
-        * - we can only share with an exact match on most regular files
+        * - we can only share with a superset match on most regular files
         * - shared mappings on character devices and memory backed files are
         *   permitted to overlap inexactly as far as we are concerned for in
         *   these cases, sharing is handled in the driver or filesystem rather
         *   than here
         */
        if (vm_flags & VM_MAYSHARE) {
-               unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-               unsigned long vmpglen;
+               struct vm_region *pregion;
+               unsigned long pglen, rpglen, pgend, rpgend, start;
 
-               /* suppress VMA sharing for shared regions */
-               if (vm_flags & VM_SHARED &&
-                   capabilities & BDI_CAP_MAP_DIRECT)
-                       goto dont_share_VMAs;
+               pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+               pgend = pgoff + pglen;
 
-               for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
-                       vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+               for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
+                       pregion = rb_entry(rb, struct vm_region, vm_rb);
 
-                       if (!(vma->vm_flags & VM_MAYSHARE))
+                       if (!(pregion->vm_flags & VM_MAYSHARE))
                                continue;
 
                        /* search for overlapping mappings on the same file */
-                       if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
+                       if (pregion->vm_file->f_path.dentry->d_inode !=
+                           file->f_path.dentry->d_inode)
                                continue;
 
-                       if (vma->vm_pgoff >= pgoff + pglen)
+                       if (pregion->vm_pgoff >= pgend)
                                continue;
 
-                       vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
-                       vmpglen >>= PAGE_SHIFT;
-                       if (pgoff >= vma->vm_pgoff + vmpglen)
+                       rpglen = pregion->vm_end - pregion->vm_start;
+                       rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                       rpgend = pregion->vm_pgoff + rpglen;
+                       if (pgoff >= rpgend)
                                continue;
 
-                       /* handle inexactly overlapping matches between mappings */
-                       if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
+                       /* handle inexactly overlapping matches between
+                        * mappings */
+                       if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
+                           !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
+                               /* new mapping is not a subset of the region */
                                if (!(capabilities & BDI_CAP_MAP_DIRECT))
                                        goto sharing_violation;
                                continue;
                        }
 
-                       /* we've found a VMA we can share */
-                       atomic_inc(&vma->vm_usage);
-
-                       vml->vma = vma;
-                       result = (void *) vma->vm_start;
-                       goto shared;
+                       /* we've found a region we can share */
+                       atomic_inc(&pregion->vm_usage);
+                       vma->vm_region = pregion;
+                       start = pregion->vm_start;
+                       start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
+                       vma->vm_start = start;
+                       vma->vm_end = start + len;
+
+                       if (pregion->vm_flags & VM_MAPPED_COPY) {
+                               kdebug("share copy");
+                               vma->vm_flags |= VM_MAPPED_COPY;
+                       } else {
+                               kdebug("share mmap");
+                               ret = do_mmap_shared_file(vma);
+                               if (ret < 0) {
+                                       vma->vm_region = NULL;
+                                       vma->vm_start = 0;
+                                       vma->vm_end = 0;
+                                       atomic_dec(&pregion->vm_usage);
+                                       pregion = NULL;
+                                       goto error_just_free;
+                               }
+                       }
+                       fput(region->vm_file);
+                       kmem_cache_free(vm_region_jar, region);
+                       region = pregion;
+                       result = start;
+                       goto share;
                }
 
-       dont_share_VMAs:
-               vma = NULL;
-
                /* obtain the address at which to make a shared mapping
                 * - this is the hook for quasi-memory character devices to
                 *   tell us the location of a shared mapping
@@ -995,113 +1298,93 @@ unsigned long do_mmap_pgoff(struct file *file,
                        if (IS_ERR((void *) addr)) {
                                ret = addr;
                                if (ret != (unsigned long) -ENOSYS)
-                                       goto error;
+                                       goto error_just_free;
 
                                /* the driver refused to tell us where to site
                                 * the mapping so we'll have to attempt to copy
                                 * it */
                                ret = (unsigned long) -ENODEV;
                                if (!(capabilities & BDI_CAP_MAP_COPY))
-                                       goto error;
+                                       goto error_just_free;
 
                                capabilities &= ~BDI_CAP_MAP_DIRECT;
+                       } else {
+                               vma->vm_start = region->vm_start = addr;
+                               vma->vm_end = region->vm_end = addr + len;
                        }
                }
        }
 
-       /* we're going to need a VMA struct as well */
-       vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
-       if (!vma)
-               goto error_getting_vma;
-
-       INIT_LIST_HEAD(&vma->anon_vma_node);
-       atomic_set(&vma->vm_usage, 1);
-       if (file) {
-               get_file(file);
-               if (vm_flags & VM_EXECUTABLE) {
-                       added_exe_file_vma(current->mm);
-                       vma->vm_mm = current->mm;
-               }
-       }
-       vma->vm_file    = file;
-       vma->vm_flags   = vm_flags;
-       vma->vm_start   = addr;
-       vma->vm_end     = addr + len;
-       vma->vm_pgoff   = pgoff;
-
-       vml->vma = vma;
+       vma->vm_region = region;
 
        /* set up the mapping */
        if (file && vma->vm_flags & VM_SHARED)
-               ret = do_mmap_shared_file(vma, len);
+               ret = do_mmap_shared_file(vma);
        else
-               ret = do_mmap_private(vma, len);
+               ret = do_mmap_private(vma, region, len);
        if (ret < 0)
-               goto error;
-
-       /* okay... we have a mapping; now we have to register it */
-       result = (void *) vma->vm_start;
+               goto error_put_region;
 
-       if (vma->vm_flags & VM_MAPPED_COPY) {
-               realalloc += kobjsize(result);
-               askedalloc += len;
-       }
+       add_nommu_region(region);
 
-       realalloc += kobjsize(vma);
-       askedalloc += sizeof(*vma);
+       /* okay... we have a mapping; now we have to register it */
+       result = vma->vm_start;
 
        current->mm->total_vm += len >> PAGE_SHIFT;
 
-       add_nommu_vma(vma);
-
- shared:
-       realalloc += kobjsize(vml);
-       askedalloc += sizeof(*vml);
-
-       add_vma_to_mm(current->mm, vml);
+share:
+       add_vma_to_mm(current->mm, vma);
 
-       up_write(&nommu_vma_sem);
+       up_write(&nommu_region_sem);
 
        if (prot & PROT_EXEC)
-               flush_icache_range((unsigned long) result,
-                                  (unsigned long) result + len);
+               flush_icache_range(result, result + len);
 
-#ifdef DEBUG
-       printk("do_mmap:\n");
-       show_process_blocks();
-#endif
-
-       return (unsigned long) result;
+       kleave(" = %lx", result);
+       return result;
 
- error:
-       up_write(&nommu_vma_sem);
-       kfree(vml);
+error_put_region:
+       __put_nommu_region(region);
        if (vma) {
                if (vma->vm_file) {
                        fput(vma->vm_file);
                        if (vma->vm_flags & VM_EXECUTABLE)
                                removed_exe_file_vma(vma->vm_mm);
                }
-               kfree(vma);
+               kmem_cache_free(vm_area_cachep, vma);
        }
+       kleave(" = %d [pr]", ret);
        return ret;
 
- sharing_violation:
-       up_write(&nommu_vma_sem);
-       printk("Attempt to share mismatched mappings\n");
-       kfree(vml);
-       return -EINVAL;
+error_just_free:
+       up_write(&nommu_region_sem);
+error:
+       fput(region->vm_file);
+       kmem_cache_free(vm_region_jar, region);
+       fput(vma->vm_file);
+       if (vma->vm_flags & VM_EXECUTABLE)
+               removed_exe_file_vma(vma->vm_mm);
+       kmem_cache_free(vm_area_cachep, vma);
+       kleave(" = %d", ret);
+       return ret;
 
- error_getting_vma:
-       up_write(&nommu_vma_sem);
-       kfree(vml);
-       printk("Allocation of vma for %lu byte allocation from process %d failed\n",
+sharing_violation:
+       up_write(&nommu_region_sem);
+       printk(KERN_WARNING "Attempt to share mismatched mappings\n");
+       ret = -EINVAL;
+       goto error;
+
+error_getting_vma:
+       kmem_cache_free(vm_region_jar, region);
+       printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
+              " from process %d failed\n",
               len, current->pid);
        show_free_areas();
        return -ENOMEM;
 
- error_getting_vml:
-       printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+error_getting_region:
+       printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
+              " from process %d failed\n",
               len, current->pid);
        show_free_areas();
        return -ENOMEM;
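The VM_MAYSHARE loop earlier in this function decides whether a new shared mapping can reuse an existing vm_region: both lengths are rounded up to whole pages and, unless the backing device can map directly (BDI_CAP_MAP_DIRECT), the request must be a subset of the candidate region. A minimal user-space sketch of that subset test, using a hypothetical helper name, for illustration only:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* hypothetical helper mirroring the sharing check: the request
	 * [pgoff, pgoff + pglen) must lie within the candidate region
	 * [r_pgoff, r_pgoff + rpglen), with both lengths rounded up to pages */
	static bool request_fits_region(unsigned long pgoff, unsigned long len,
					unsigned long r_pgoff, unsigned long r_bytes)
	{
		unsigned long pglen  = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long rpglen = (r_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

		return pgoff >= r_pgoff && pgoff + pglen <= r_pgoff + rpglen;
	}

	int main(void)
	{
		/* a 3-page request at file page 1 fits a 16KiB region at page 0 ... */
		printf("%d\n", request_fits_region(1, 3 * PAGE_SIZE, 0, 16384)); /* 1 */
		/* ... but not an 8KiB region at page 0 */
		printf("%d\n", request_fits_region(1, 3 * PAGE_SIZE, 0, 8192));  /* 0 */
		return 0;
	}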
@@ -1109,85 +1392,183 @@ unsigned long do_mmap_pgoff(struct file *file,
 EXPORT_SYMBOL(do_mmap_pgoff);
 
 /*
- * handle mapping disposal for uClinux
+ * split a vma into two pieces at address 'addr'; a new vma is allocated for
+ * either the first part or the tail.
  */
-static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+             unsigned long addr, int new_below)
 {
-       if (vma) {
-               down_write(&nommu_vma_sem);
+       struct vm_area_struct *new;
+       struct vm_region *region;
+       unsigned long npages;
 
-               if (atomic_dec_and_test(&vma->vm_usage)) {
-                       delete_nommu_vma(vma);
+       kenter("");
 
-                       if (vma->vm_ops && vma->vm_ops->close)
-                               vma->vm_ops->close(vma);
+       /* we're only permitted to split anonymous regions that have a single
+        * owner */
+       if (vma->vm_file ||
+           atomic_read(&vma->vm_region->vm_usage) != 1)
+               return -ENOMEM;
 
-                       /* IO memory and memory shared directly out of the pagecache from
-                        * ramfs/tmpfs mustn't be released here */
-                       if (vma->vm_flags & VM_MAPPED_COPY) {
-                               realalloc -= kobjsize((void *) vma->vm_start);
-                               askedalloc -= vma->vm_end - vma->vm_start;
-                               kfree((void *) vma->vm_start);
-                       }
+       if (mm->map_count >= sysctl_max_map_count)
+               return -ENOMEM;
 
-                       realalloc -= kobjsize(vma);
-                       askedalloc -= sizeof(*vma);
+       region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
+       if (!region)
+               return -ENOMEM;
 
-                       if (vma->vm_file) {
-                               fput(vma->vm_file);
-                               if (vma->vm_flags & VM_EXECUTABLE)
-                                       removed_exe_file_vma(mm);
-                       }
-                       kfree(vma);
-               }
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       if (!new) {
+               kmem_cache_free(vm_region_jar, region);
+               return -ENOMEM;
+       }
+
+       /* most fields are the same, copy all, and then fixup */
+       *new = *vma;
+       *region = *vma->vm_region;
+       new->vm_region = region;
+
+       npages = (addr - vma->vm_start) >> PAGE_SHIFT;
 
-               up_write(&nommu_vma_sem);
+       if (new_below) {
+               region->vm_top = region->vm_end = new->vm_end = addr;
+       } else {
+               region->vm_start = new->vm_start = addr;
+               region->vm_pgoff = new->vm_pgoff += npages;
+       }
+
+       if (new->vm_ops && new->vm_ops->open)
+               new->vm_ops->open(new);
+
+       delete_vma_from_mm(vma);
+       down_write(&nommu_region_sem);
+       delete_nommu_region(vma->vm_region);
+       if (new_below) {
+               vma->vm_region->vm_start = vma->vm_start = addr;
+               vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
+       } else {
+               vma->vm_region->vm_end = vma->vm_end = addr;
+               vma->vm_region->vm_top = addr;
        }
+       add_nommu_region(vma->vm_region);
+       add_nommu_region(new->vm_region);
+       up_write(&nommu_region_sem);
+       add_vma_to_mm(mm, vma);
+       add_vma_to_mm(mm, new);
+       return 0;
 }
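split_vma() duplicates both the VMA and its backing region and then fixes up the boundaries; new_below selects whether the freshly allocated VMA takes the head or the tail of the original range. A worked example of the arithmetic above, for illustration only:

	/* splitting a 4-page anonymous VMA [0x10000, 0x14000) at addr = 0x12000
	 * with new_below = 0 (PAGE_SHIFT = 12):
	 *
	 *   npages       = (0x12000 - 0x10000) >> PAGE_SHIFT = 2
	 *   new VMA      = [0x12000, 0x14000), vm_pgoff += 2   (the tail)
	 *   original VMA = [0x10000, 0x12000), vm_top = vm_end = 0x12000
	 *
	 * with new_below = 1 the roles swap: the new VMA takes the head and the
	 * original keeps the tail, with its vm_pgoff advanced by npages instead
	 */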
 
 /*
- * release a mapping
- * - under NOMMU conditions the parameters must match exactly to the mapping to
- *   be removed
+ * shrink a VMA by removing the specified chunk from either the beginning or
+ * the end
  */
-int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
+static int shrink_vma(struct mm_struct *mm,
+                     struct vm_area_struct *vma,
+                     unsigned long from, unsigned long to)
 {
-       struct vm_list_struct *vml, **parent;
-       unsigned long end = addr + len;
+       struct vm_region *region;
 
-#ifdef DEBUG
-       printk("do_munmap:\n");
-#endif
+       kenter("");
 
-       for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
-               if ((*parent)->vma->vm_start > addr)
-                       break;
-               if ((*parent)->vma->vm_start == addr &&
-                   ((len == 0) || ((*parent)->vma->vm_end == end)))
-                       goto found;
+       /* adjust the VMA's pointers, which may reposition it in the MM's tree
+        * and list */
+       delete_vma_from_mm(vma);
+       if (from > vma->vm_start)
+               vma->vm_end = from;
+       else
+               vma->vm_start = to;
+       add_vma_to_mm(mm, vma);
+
+       /* cut the backing region down to size */
+       region = vma->vm_region;
+       BUG_ON(atomic_read(&region->vm_usage) != 1);
+
+       down_write(&nommu_region_sem);
+       delete_nommu_region(region);
+       if (from > region->vm_start) {
+               to = region->vm_top;
+               region->vm_top = region->vm_end = from;
+       } else {
+               region->vm_start = to;
        }
+       add_nommu_region(region);
+       up_write(&nommu_region_sem);
 
-       printk("munmap of non-mmaped memory by process %d (%s): %p\n",
-              current->pid, current->comm, (void *) addr);
-       return -EINVAL;
+       free_page_series(from, to);
+       return 0;
+}
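shrink_vma() trims an already-split VMA from whichever end the unmapped chunk touches and returns the excess pages to the allocator. A worked example, for illustration only:

	/* trimming one page off the end of a VMA [0x10000, 0x14000) whose
	 * region has vm_top = 0x14000:
	 *
	 *   shrink_vma(mm, vma, from = 0x13000, to = 0x14000)
	 *     from > vm_start, so the tail is cut:
	 *       vma->vm_end = 0x13000
	 *       region->vm_end = region->vm_top = 0x13000
	 *       free_page_series(0x13000, 0x14000)
	 *
	 * trimming the head instead sets vma->vm_start = to and frees [from, to)
	 */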
 
- found:
-       vml = *parent;
+/*
+ * release a mapping
+ * - under NOMMU conditions the chunk to be unmapped must be backed by a single
+ *   VMA, though it need not cover the whole VMA
+ */
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+       struct vm_area_struct *vma;
+       struct rb_node *rb;
+       unsigned long end = start + len;
+       int ret;
 
-       put_vma(mm, vml->vma);
+       kenter(",%lx,%zx", start, len);
 
-       *parent = vml->next;
-       realalloc -= kobjsize(vml);
-       askedalloc -= sizeof(*vml);
-       kfree(vml);
+       if (len == 0)
+               return -EINVAL;
 
-       update_hiwater_vm(mm);
-       mm->total_vm -= len >> PAGE_SHIFT;
+       /* find the first potentially overlapping VMA */
+       vma = find_vma(mm, start);
+       if (!vma) {
+               printk(KERN_WARNING
+                      "munmap of memory not mmapped by process %d (%s):"
+                      " 0x%lx-0x%lx\n",
+                      current->pid, current->comm, start, start + len - 1);
+               return -EINVAL;
+       }
 
-#ifdef DEBUG
-       show_process_blocks();
-#endif
+       /* we're allowed to split an anonymous VMA but not a file-backed one */
+       if (vma->vm_file) {
+               do {
+                       if (start > vma->vm_start) {
+                               kleave(" = -EINVAL [miss]");
+                               return -EINVAL;
+                       }
+                       if (end == vma->vm_end)
+                               goto erase_whole_vma;
+                       rb = rb_next(&vma->vm_rb);
+                       vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+               } while (rb);
+               kleave(" = -EINVAL [split file]");
+               return -EINVAL;
+       } else {
+               /* the chunk must be a subset of the VMA found */
+               if (start == vma->vm_start && end == vma->vm_end)
+                       goto erase_whole_vma;
+               if (start < vma->vm_start || end > vma->vm_end) {
+                       kleave(" = -EINVAL [superset]");
+                       return -EINVAL;
+               }
+               if (start & ~PAGE_MASK) {
+                       kleave(" = -EINVAL [unaligned start]");
+                       return -EINVAL;
+               }
+               if (end != vma->vm_end && end & ~PAGE_MASK) {
+                       kleave(" = -EINVAL [unaligned split]");
+                       return -EINVAL;
+               }
+               if (start != vma->vm_start && end != vma->vm_end) {
+                       ret = split_vma(mm, vma, start, 1);
+                       if (ret < 0) {
+                               kleave(" = %d [split]", ret);
+                               return ret;
+                       }
+               }
+               return shrink_vma(mm, vma, start, end);
+       }
 
+erase_whole_vma:
+       delete_vma_from_mm(vma);
+       delete_vma(mm, vma);
+       kleave(" = 0");
        return 0;
 }
 EXPORT_SYMBOL(do_munmap);
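As the checks above show, a file-backed VMA can only be unmapped as a whole on NOMMU, while an anonymous VMA may be split and shrunk around a page-aligned hole. A small user-space sketch of the permitted case, for illustration only (it behaves the same way on an MMU kernel):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);

		/* four anonymous pages */
		char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* punching a page-aligned hole in an anonymous VMA is allowed:
		 * the kernel splits the VMA and shrinks the pieces; a
		 * file-backed VMA would have to be unmapped whole */
		if (munmap(p + page, 2 * page) != 0)
			perror("munmap");

		munmap(p, page);		/* remaining head */
		munmap(p + 3 * page, page);	/* remaining tail */
		return 0;
	}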
@@ -1204,32 +1585,26 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
 }
 
 /*
- * Release all mappings
+ * release all the mappings made in a process's VM space
  */
-void exit_mmap(struct mm_struct * mm)
+void exit_mmap(struct mm_struct *mm)
 {
-       struct vm_list_struct *tmp;
-
-       if (mm) {
-#ifdef DEBUG
-               printk("Exit_mmap:\n");
-#endif
+       struct vm_area_struct *vma;
 
-               mm->total_vm = 0;
+       if (!mm)
+               return;
 
-               while ((tmp = mm->context.vmlist)) {
-                       mm->context.vmlist = tmp->next;
-                       put_vma(mm, tmp->vma);
+       kenter("");
 
-                       realalloc -= kobjsize(tmp);
-                       askedalloc -= sizeof(*tmp);
-                       kfree(tmp);
-               }
+       mm->total_vm = 0;
 
-#ifdef DEBUG
-               show_process_blocks();
-#endif
+       while ((vma = mm->mmap)) {
+               mm->mmap = vma->vm_next;
+               delete_vma_from_mm(vma);
+               delete_vma(mm, vma);
        }
+
+       kleave("");
 }
 
 unsigned long do_brk(unsigned long addr, unsigned long len)
@@ -1242,8 +1617,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
  *
  * under NOMMU conditions, we only permit changing a mapping's size, and only
- * as long as it stays within the hole allocated by the kmalloc() call in
- * do_mmap_pgoff() and the block is not shareable
+ * as long as it stays within the region allocated by do_mmap_private() and the
+ * block is not shareable
  *
  * MREMAP_FIXED is not supported under NOMMU conditions
  */
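Under these constraints mremap() on NOMMU is essentially an in-place resize: the mapping must stay inside the space its region already owns, shared mappings cannot be resized, and relocation is refused. A user-space sketch, for illustration only (the failure noted in the second call applies to a NOMMU kernel; an MMU kernel would simply move the mapping):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* shrinking in place stays within the region that was allocated
		 * for the mapping, so this is permitted on NOMMU */
		if (mremap(p, 4 * page, 2 * page, 0) == MAP_FAILED)
			perror("mremap shrink");

		/* growing past the originally allocated region (or asking to
		 * move it) fails on NOMMU with ENOMEM/EINVAL */
		if (mremap(p, 2 * page, 8 * page, MREMAP_MAYMOVE) == MAP_FAILED)
			perror("mremap grow");

		return 0;
	}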
@@ -1254,13 +1629,16 @@ unsigned long do_mremap(unsigned long addr,
        struct vm_area_struct *vma;
 
        /* insanity checks first */
-       if (new_len == 0)
+       if (old_len == 0 || new_len == 0)
                return (unsigned long) -EINVAL;
 
+       if (addr & ~PAGE_MASK)
+               return -EINVAL;
+
        if (flags & MREMAP_FIXED && new_addr != addr)
                return (unsigned long) -EINVAL;
 
-       vma = find_vma_exact(current->mm, addr);
+       vma = find_vma_exact(current->mm, addr, old_len);
        if (!vma)
                return (unsigned long) -EINVAL;
 
@@ -1270,22 +1648,19 @@ unsigned long do_mremap(unsigned long addr,
        if (vma->vm_flags & VM_MAYSHARE)
                return (unsigned long) -EPERM;
 
-       if (new_len > kobjsize((void *) addr))
+       if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
                return (unsigned long) -ENOMEM;
 
        /* all checks complete - do it */
        vma->vm_end = vma->vm_start + new_len;
-
-       askedalloc -= old_len;
-       askedalloc += new_len;
-
        return vma->vm_start;
 }
 EXPORT_SYMBOL(do_mremap);
 
-asmlinkage unsigned long sys_mremap(unsigned long addr,
-       unsigned long old_len, unsigned long new_len,
-       unsigned long flags, unsigned long new_addr)
+asmlinkage
+unsigned long sys_mremap(unsigned long addr,
+                        unsigned long old_len, unsigned long new_len,
+                        unsigned long flags, unsigned long new_addr)
 {
        unsigned long ret;