www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge current mainline tree into linux-omap tree
author Tony Lindgren <tony@atomide.com>
Wed, 5 Mar 2008 08:33:15 +0000 (10:33 +0200)
committer Tony Lindgren <tony@atomide.com>
Wed, 5 Mar 2008 08:33:15 +0000 (10:33 +0200)
Merge branches 'master' and 'linus'

Conflicts:

drivers/rtc/Kconfig
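
The conflict above was resolved by hand before this merge commit was recorded.
As a rough sketch of the usual workflow (the branch and file names are taken
from the message above; the remote setup and the actual resolution steps are
assumed, not taken from this tree's history), a conflicted merge like this is
typically handled as follows:

    git merge linus                 # pull current mainline into the omap tree; stops on the conflict
    git status                      # lists drivers/rtc/Kconfig as an unmerged path
    $EDITOR drivers/rtc/Kconfig     # resolve the conflict markers by hand
    git add drivers/rtc/Kconfig     # mark the conflict as resolved
    git commit                      # record the merge; git adds the "Conflicts:" list shown above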

598 files changed:
.gitignore
Documentation/DocBook/kernel-api.tmpl
Documentation/controllers/memory.txt
Documentation/debugging-via-ohci1394.txt
Documentation/feature-removal-schedule.txt
Documentation/gpio.txt
Documentation/ide.txt
Documentation/kprobes.txt
Documentation/pci.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/pci_iommu.c
arch/arm/Kconfig
arch/arm/mach-pxa/cpu-pxa.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mach-pxa/zylonite.c
arch/arm/mm/mmap.c
arch/arm/mm/pgd.c
arch/avr32/boards/atstk1000/atstk1004.c
arch/avr32/kernel/process.c
arch/avr32/mm/fault.c
arch/blackfin/Makefile
arch/blackfin/configs/BF527-EZKIT_defconfig
arch/blackfin/configs/BF533-EZKIT_defconfig
arch/blackfin/configs/BF533-STAMP_defconfig
arch/blackfin/configs/BF537-STAMP_defconfig
arch/blackfin/configs/BF548-EZKIT_defconfig
arch/blackfin/configs/BF561-EZKIT_defconfig
arch/blackfin/kernel/bfin_dma_5xx.c
arch/blackfin/kernel/gptimers.c
arch/blackfin/kernel/setup.c
arch/blackfin/kernel/vmlinux.lds.S
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf533/boards/ezkit.c
arch/blackfin/mach-bf533/boards/stamp.c
arch/blackfin/mach-bf537/boards/generic_board.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf548/dma.c
arch/blackfin/mach-bf548/head.S
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-common/dpmc.S
arch/blackfin/mach-common/ints-priority.c
arch/blackfin/mm/init.c
arch/cris/arch-v10/kernel/time.c
arch/cris/arch-v10/lib/string.c
arch/cris/arch-v10/lib/usercopy.c
arch/cris/arch-v32/lib/string.c
arch/cris/arch-v32/lib/usercopy.c
arch/ia64/Kconfig
arch/ia64/Makefile
arch/ia64/configs/generic_defconfig [moved from arch/ia64/defconfig with 100% similarity]
arch/ia64/ia32/ia32_signal.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/kprobes.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/sal.c
arch/ia64/kernel/signal.c
arch/m68k/kernel/entry.S
arch/m68knommu/defconfig
arch/m68knommu/kernel/syscalltable.S
arch/m68knommu/platform/68328/timers.c
arch/powerpc/Kconfig
arch/powerpc/boot/cuboot-bamboo.c
arch/powerpc/boot/cuboot-ebony.c
arch/powerpc/boot/cuboot-katmai.c
arch/powerpc/boot/cuboot-taishan.c
arch/powerpc/boot/cuboot-warp.c
arch/powerpc/boot/dts/haleakala.dts
arch/powerpc/boot/dts/katmai.dts
arch/powerpc/oprofile/op_model_cell.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/cell/setup.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spufs/context.c
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/cell/spufs/sputrace.c
arch/powerpc/platforms/cell/spufs/switch.c
arch/powerpc/platforms/celleb/beat.h
arch/s390/Kconfig
arch/sh/Kconfig
arch/sh/drivers/dma/dma-sh.c
arch/sh/drivers/heartbeat.c
arch/sh/drivers/pci/ops-dreamcast.c
arch/sh/kernel/cpu/sh2/setup-sh7619.c
arch/sh/kernel/cpu/sh2a/clock-sh7203.c
arch/sh/kernel/cpu/sh2a/setup-sh7203.c
arch/sh/kernel/cpu/sh2a/setup-sh7206.c
arch/sh/kernel/cpu/sh3/probe.c
arch/sh/kernel/cpu/sh3/setup-sh7705.c
arch/sh/kernel/cpu/sh3/setup-sh770x.c
arch/sh/kernel/cpu/sh3/setup-sh7710.c
arch/sh/kernel/cpu/sh3/setup-sh7720.c
arch/sh/kernel/cpu/sh4/setup-sh4-202.c
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/cpu/sh4/setup-sh7760.c
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
arch/sh/kernel/cpu/sh4a/setup-shx3.c
arch/sparc/kernel/Makefile
arch/sparc/kernel/cpu.c
arch/sparc/kernel/ebus.c
arch/sparc/kernel/led.c
arch/sparc/kernel/process.c
arch/sparc/kernel/una_asm.S [new file with mode: 0644]
arch/sparc/kernel/unaligned.c
arch/sparc64/Kconfig
arch/sparc64/kernel/cpu.c
arch/sparc64/kernel/ds.c
arch/sparc64/kernel/mdesc.c
arch/sparc64/kernel/process.c
arch/sparc64/mm/fault.c
arch/sparc64/mm/init.c
arch/sparc64/solaris/conv.h
arch/sparc64/solaris/timod.c
arch/um/kernel/process.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/boot/memory.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/transmeta.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/hpet.c
arch/x86/kernel/i387.c
arch/x86/kernel/init_task.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/setup_64.c
arch/x86/kernel/smpboot_64.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/tls.c
arch/x86/kernel/tsc_32.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/numa_64.c
arch/x86/mm/pageattr.c
arch/x86/vdso/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/setup.c
block/blk-barrier.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/blk-settings.c
block/blk-tag.c
block/blk.h
block/bsg.c
block/genhd.c
block/scsi_ioctl.c
drivers/acorn/char/defkeymap-l7200.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/sata_svw.c
drivers/base/core.c
drivers/base/power/main.c
drivers/base/transport_class.c
drivers/block/cciss.c
drivers/block/cciss_scsi.c
drivers/block/pktcdvd.c
drivers/cdrom/cdrom.c
drivers/char/defkeymap.c_shipped
drivers/char/isicom.c
drivers/char/pcmcia/ipwireless/network.c
drivers/char/rtc.c
drivers/char/specialix.c
drivers/char/vt.c
drivers/char/xilinx_hwicap/buffer_icap.c
drivers/char/xilinx_hwicap/fifo_icap.c
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/char/xilinx_hwicap/xilinx_hwicap.h
drivers/connector/connector.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/fsldma.c [new file with mode: 0644]
drivers/dma/fsldma.h [new file with mode: 0644]
drivers/dma/ioat_dma.c
drivers/firewire/fw-card.c
drivers/firewire/fw-cdev.c
drivers/firewire/fw-device.c
drivers/firewire/fw-device.h
drivers/firewire/fw-sbp2.c
drivers/firewire/fw-topology.c
drivers/firewire/fw-transaction.h
drivers/ide/ide-cd.c
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-probe.c
drivers/ide/ide-tape.c
drivers/ide/ide.c
drivers/ide/legacy/qd65xx.c
drivers/ide/pci/cmd640.c
drivers/ide/pci/hpt366.c
drivers/ieee1394/sbp2.c
drivers/ieee1394/sbp2.h
drivers/infiniband/hw/cxgb3/iwch_mem.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_verbs.c
drivers/input/misc/Kconfig
drivers/isdn/hisax/hisax_fcpcipnp.c
drivers/isdn/i4l/isdn_ttyfax.c
drivers/isdn/isdnloop/isdnloop.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptscsih.c
drivers/mfd/sm501.c
drivers/misc/thinkpad_acpi.c
drivers/net/Kconfig
drivers/net/bnx2x.c
drivers/net/bnx2x.h
drivers/net/bnx2x_fw_defs.h
drivers/net/bnx2x_hsi.h
drivers/net/bnx2x_init.h
drivers/net/bnx2x_reg.h
drivers/net/cs89x0.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/e1000.h
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/lib.c
drivers/net/e1000e/netdev.c
drivers/net/e1000e/phy.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/fec.c
drivers/net/fs_enet/fs_enet-main.c
drivers/net/gianfar.c
drivers/net/igb/igb_main.c
drivers/net/ixgb/ixgb_ethtool.c
drivers/net/macb.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/phy/mdio_bus.c
drivers/net/pppol2tp.c
drivers/net/ps3_gelic_wireless.c
drivers/net/sis190.c
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/tlan.c
drivers/net/tulip/uli526x.c
drivers/net/tun.c
drivers/net/via-rhine.c
drivers/net/virtio_net.c
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43legacy/Kconfig
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/bcm43xx/Kconfig
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/cmdresp.c
drivers/net/wireless/libertas/decl.h
drivers/net/wireless/libertas/main.c
drivers/net/wireless/p54common.c
drivers/net/wireless/p54common.h
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2x00config.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00reg.h
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/parisc/Kconfig
drivers/parisc/ccio-dma.c
drivers/parisc/iommu-helpers.h
drivers/parisc/sba_iommu.c
drivers/pci/bus.c
drivers/pci/hotplug-pci.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/cpci_hotplug_pci.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/pciehp_pci.c
drivers/pci/hotplug/shpchp_pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/rom.c
drivers/rapidio/rio-driver.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-s35390a.c [new file with mode: 0644]
drivers/s390/char/defkeymap.c
drivers/s390/net/claw.c
drivers/scsi/scsi.c
drivers/scsi/scsi_scan.c
drivers/serial/8250_pnp.c
drivers/serial/Kconfig
drivers/serial/bfin_5xx.c
drivers/serial/m32r_sio.c
drivers/serial/sh-sci.c
drivers/sh/maple/maple.c
drivers/spi/mpc52xx_psc_spi.c
drivers/ssb/Kconfig
drivers/ssb/Makefile
drivers/ssb/driver_pcicore.c
drivers/ssb/ssb_private.h
drivers/usb/core/Kconfig
drivers/usb/core/quirks.c
drivers/usb/core/usb.c
drivers/usb/gadget/printer.c
drivers/usb/gadget/pxa2xx_udc.c
drivers/usb/gadget/pxa2xx_udc.h
drivers/usb/host/ehci-q.c
drivers/usb/host/isp116x-hcd.c
drivers/usb/host/isp116x.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/storage/protocol.c
drivers/video/sm501fb.c
drivers/video/tridentfb.c
drivers/w1/masters/ds1wm.c
fs/binfmt_elf.c
fs/buffer.c
fs/cifs/CHANGES
fs/cifs/README
fs/cifs/cifs_debug.c
fs/cifs/cifs_debug.h
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifs_spnego.c
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsacl.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/dns_resolve.h
fs/cifs/fcntl.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/md4.c
fs/cifs/md5.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smbdes.c
fs/cifs/transport.c
fs/cifs/xattr.c
fs/debugfs/inode.c
fs/ecryptfs/mmap.c
fs/exec.c
fs/ext3/super.c
fs/ext4/dir.c
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/jbd/transaction.c
fs/mpage.c
fs/ocfs2/aops.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlmglue.c
fs/ocfs2/dlmglue.h
fs/ocfs2/heartbeat.c
fs/ocfs2/heartbeat.h
fs/ocfs2/localalloc.c
fs/proc/base.c
fs/proc/proc_misc.c
fs/reiserfs/super.c
fs/splice.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_bit.c
fs/xfs/xfs_bit.h
fs/xfs/xfs_clnt.h
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_vfsops.c
include/asm-arm/arch-pxa/entry-macro.S
include/asm-arm/arch-pxa/pxa-regs.h
include/asm-arm/kexec.h
include/asm-arm/kprobes.h
include/asm-arm/unaligned.h
include/asm-avr32/pgtable.h
include/asm-blackfin/gptimers.h
include/asm-blackfin/irq.h
include/asm-blackfin/mach-bf527/bfin_serial_5xx.h
include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
include/asm-blackfin/mach-bf548/bfin_serial_5xx.h
include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
include/asm-blackfin/mach-bf561/blackfin.h
include/asm-blackfin/mach-bf561/cdefBF561.h
include/asm-cris/uaccess.h
include/asm-cris/unistd.h
include/asm-ia64/Kbuild
include/asm-ia64/hw_irq.h
include/asm-ia64/kprobes.h
include/asm-ia64/sal.h
include/asm-m68k/unistd.h
include/asm-m68knommu/machdep.h
include/asm-m68knommu/unistd.h
include/asm-powerpc/kprobes.h
include/asm-powerpc/reg.h
include/asm-s390/kprobes.h
include/asm-sh/cpu-sh3/cache.h
include/asm-sh/entry-macros.S
include/asm-sparc/system.h
include/asm-sparc64/kprobes.h
include/asm-sparc64/system.h
include/asm-x86/futex.h
include/asm-x86/kprobes.h
include/asm-x86/lguest.h
include/asm-x86/nops.h
include/asm-x86/page_64.h
include/asm-x86/ptrace-abi.h
include/linux/Kbuild
include/linux/blkdev.h
include/linux/cgroup_subsys.h
include/linux/compiler.h
include/linux/connector.h
include/linux/debugfs.h
include/linux/delay.h
include/linux/dmaengine.h
include/linux/elfcore-compat.h
include/linux/ext4_fs_extents.h
include/linux/genhd.h
include/linux/gpio.h [new file with mode: 0644]
include/linux/hardirq.h
include/linux/iommu-helper.h
include/linux/kprobes.h
include/linux/kvm.h
include/linux/kvm_host.h
include/linux/maple.h
include/linux/marker.h
include/linux/memcontrol.h
include/linux/mm_types.h
include/linux/netfilter.h
include/linux/netpoll.h
include/linux/pci.h
include/linux/raid/bitmap.h
include/linux/raid/md_k.h
include/linux/rcuclassic.h
include/linux/rcupreempt.h
include/linux/sched.h
include/linux/serial_sci.h [moved from include/asm-sh/sci.h with 73% similarity]
include/linux/slub_def.h
include/linux/sm501-regs.h
include/linux/sm501.h
include/linux/usb.h
include/linux/vmstat.h
include/net/inet_sock.h
include/net/sctp/user.h
init/Kconfig
init/main.c
kernel/audit.c
kernel/auditsc.c
kernel/cgroup.c
kernel/exit.c
kernel/kprobes.c
kernel/lockdep.c
kernel/marker.c
kernel/module.c
kernel/power/process.c
kernel/printk.c
kernel/rcupreempt.c
kernel/res_counter.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/signal.c
kernel/softirq.c
kernel/softlockup.c
kernel/sysctl.c
kernel/time/tick-sched.c
lib/iommu-helper.c
lib/kobject.c
mm/Makefile
mm/allocpercpu.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/oom_kill.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slub.c
mm/swap.c
mm/truncate.c
mm/vmscan.c
net/8021q/vlanproc.c
net/appletalk/atalk_proc.c
net/atm/br2684.c
net/atm/clip.c
net/atm/lec.c
net/atm/mpoa_proc.c
net/atm/proc.c
net/bluetooth/l2cap.c
net/core/neighbour.c
net/core/netpoll.c
net/core/pktgen.c
net/ipv4/Kconfig
net/ipv4/devinet.c
net/ipv4/ip_gre.c
net/ipv4/ipcomp.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/route.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_input.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipcomp6.c
net/ipv6/proc.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/sysctl_net_ipv6.c
net/ipx/ipx_proc.c
net/irda/ircomm/ircomm_core.c
net/irda/irlan/irlan_common.c
net/irda/irproc.c
net/iucv/iucv.c
net/key/af_key.c
net/llc/llc_proc.c
net/mac80211/ieee80211_sta.c
net/mac80211/rc80211_pid_algo.c
net/netfilter/nf_conntrack_core.c
net/netfilter/xt_conntrack.c
net/sctp/auth.c
net/sctp/ipv6.c
net/sctp/objcnt.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/ulpevent.c
net/sunrpc/cache.c
net/sunrpc/stats.c
net/tipc/cluster.c
net/tipc/link.c
net/tipc/ref.c
net/tipc/zone.c
net/wanrouter/wanproc.c
net/x25/x25_proc.c
samples/Kconfig
samples/Makefile
samples/kprobes/Makefile [new file with mode: 0644]
samples/kprobes/jprobe_example.c [new file with mode: 0644]
samples/kprobes/kprobe_example.c [new file with mode: 0644]
samples/kprobes/kretprobe_example.c [new file with mode: 0644]
scripts/checkpatch.pl
sound/isa/sb/sb8_main.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/phase.c
sound/pci/ice1712/revo.c
sound/pci/intel8x0.c
sound/pci/oxygen/hifier.c
sound/pci/oxygen/virtuoso.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm9712.c
sound/soc/pxa/corgi.c
sound/soc/pxa/poodle.c
sound/soc/pxa/spitz.c
sound/soc/pxa/tosa.c
sound/usb/usbaudio.c
virt/kvm/ioapic.c
virt/kvm/kvm_main.c

index 8363e48cdcdc67bad8834a08ec8dd57b8cb41635..fdcce40226d7d4273a08cc4ef84bb25755a710a4 100644 (file)
--- a/.gitignore
+++ b/.gitignore
@@ -53,3 +53,5 @@ cscope.*
 
 *.orig
 *.rej
+*~
+\#*#
index f31601e8bd89acba6eaef4d27c5bd45eb2928edf..dc0f30c3e5715d6801e482dc51988fe992d7bf82 100644 (file)
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -361,12 +361,14 @@ X!Edrivers/pnp/system.c
   <chapter id="blkdev">
      <title>Block Devices</title>
 !Eblock/blk-core.c
+!Iblock/blk-core.c
 !Eblock/blk-map.c
 !Iblock/blk-sysfs.c
 !Eblock/blk-settings.c
 !Eblock/blk-exec.c
 !Eblock/blk-barrier.c
 !Eblock/blk-tag.c
+!Iblock/blk-tag.c
   </chapter>
 
   <chapter id="chrdev">
index 6015347b41e2676378be5fc029444b35a2ae3664..866b9cd9a9590d6b6b8c3d577038e8d51234082b 100644 (file)
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -1,4 +1,8 @@
-Memory Controller
+Memory Resource Controller
+
+NOTE: The Memory Resource Controller has been generically been referred
+to as the memory controller in this document. Do not confuse memory controller
+used here with the memory controller that is used in hardware.
 
 Salient features
 
@@ -152,7 +156,7 @@ The memory controller uses the following hierarchy
 
 a. Enable CONFIG_CGROUPS
 b. Enable CONFIG_RESOURCE_COUNTERS
-c. Enable CONFIG_CGROUP_MEM_CONT
+c. Enable CONFIG_CGROUP_MEM_RES_CTLR
 
 1. Prepare the cgroups
 # mkdir -p /cgroups
@@ -164,7 +168,7 @@ c. Enable CONFIG_CGROUP_MEM_CONT
 
 Since now we're in the 0 cgroup,
 We can alter the memory limit:
-# echo -n 4M > /cgroups/0/memory.limit_in_bytes
+# echo 4M > /cgroups/0/memory.limit_in_bytes
 
 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
 mega or gigabytes.
@@ -185,7 +189,7 @@ number of factors, such as rounding up to page boundaries or the total
 availability of memory on the system.  The user is required to re-read
 this file after a write to guarantee the value committed by the kernel.
 
-# echo -n 1 > memory.limit_in_bytes
+# echo 1 > memory.limit_in_bytes
 # cat memory.limit_in_bytes
 4096
 
@@ -197,7 +201,7 @@ caches, RSS and Active pages/Inactive pages are shown.
 
 The memory.force_empty gives an interface to drop *all* charges by force.
 
-# echo -n 1 > memory.force_empty
+# echo 1 > memory.force_empty
 
 will drop all charges in cgroup. Currently, this is maintained for test.
 
index de4804e8b396eb29000b37dd8fa575f5cd7dee8d..c360d4e91b4892d6be5a6ed5efcb981d4a3ad19f 100644 (file)
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -36,14 +36,15 @@ available (notebooks) or too slow for extensive debug information (like ACPI).
 Drivers
 -------
 
-The OHCI-1394 drivers in drivers/firewire and drivers/ieee1394 initialize
-the OHCI-1394 controllers to a working state and can be used to enable
-physical DMA. By default you only have to load the driver, and physical
-DMA access will be granted to all remote nodes, but it can be turned off
-when using the ohci1394 driver.
-
-Because these drivers depend on the PCI enumeration to be completed, an
-initialization routine which can runs pretty early (long before console_init(),
+The ohci1394 driver in drivers/ieee1394 initializes the OHCI-1394 controllers
+to a working state and enables physical DMA by default for all remote nodes.
+This can be turned off by ohci1394's module parameter phys_dma=0.
+
+The alternative firewire-ohci driver in drivers/firewire uses filtered physical
+DMA, hence is not yet suitable for remote debugging.
+
+Because ohci1394 depends on the PCI enumeration to be completed, an
+initialization routine which runs pretty early (long before console_init()
 which makes the printk buffer appear on the console can be called) was written.
 
 To activate it, enable CONFIG_PROVIDE_OHCI1394_DMA_INIT (Kernel hacking menu:
index 4d3aa519eadfc5cebea01e5244f0435702264319..c1d1fd0c299b9c1b175532c494e3aefb15dbe60c 100644 (file)
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -172,6 +172,16 @@ Who:       Len Brown <len.brown@intel.com>
 
 ---------------------------
 
+What:  ide-tape driver
+When:  July 2008
+Files: drivers/ide/ide-tape.c
+Why:   This driver might not have any users anymore and maintaining it for no
+       reason is an effort no one wants to make.
+Who:   Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>, Borislav Petkov
+       <petkovbb@googlemail.com>
+
+---------------------------
+
 What: libata spindown skipping and warning
 When: Dec 2008
 Why:  Some halt(8) implementations synchronize caches for and spin
@@ -306,3 +316,15 @@ Why:       Largely unmaintained and almost entirely unused.  File system
        is largely pointless as without a lot of work only the most
        trivial of Solaris binaries can work with the emulation code.
 Who:   David S. Miller <davem@davemloft.net>
+
+---------------------------
+
+What:  init_mm export
+When:  2.6.26
+Why:   Not used in-tree. The current out-of-tree users used it to
+       work around problems in the CPA code which should be resolved
+       by now. One usecase was described to provide verification code
+       of the CPA operation. That's a good idea in general, but such
+       code / infrastructure should be in the kernel and not in some
+       out-of-tree driver.
+Who:   Thomas Gleixner <tglx@linutronix.de>
index 8da724e2a0ff795d450e8b0b7b61118d7bf604a6..54630095aa3c8f841c097e723eef518f660a30a7 100644 (file)
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -2,6 +2,9 @@ GPIO Interfaces
 
 This provides an overview of GPIO access conventions on Linux.
 
+These calls use the gpio_* naming prefix.  No other calls should use that
+prefix, or the related __gpio_* prefix.
+
 
 What is a GPIO?
 ===============
@@ -69,11 +72,13 @@ in this document, but drivers acting as clients to the GPIO interface must
 not care how it's implemented.)
 
 That said, if the convention is supported on their platform, drivers should
-use it when possible.  Platforms should declare GENERIC_GPIO support in
-Kconfig (boolean true), which multi-platform drivers can depend on when
-using the include file:
+use it when possible.  Platforms must declare GENERIC_GPIO support in their
+Kconfig (boolean true), and provide an <asm/gpio.h> file.  Drivers that can't
+work without standard GPIO calls should have Kconfig entries which depend
+on GENERIC_GPIO.  The GPIO calls are available, either as "real code" or as
+optimized-away stubs, when drivers use the include file:
 
-       #include <asm/gpio.h>
+       #include <linux/gpio.h>
 
 If you stick to this convention then it'll be easier for other developers to
 see what your code is doing, and help maintain it.
@@ -316,6 +321,9 @@ pulldowns integrated on some platforms.  Not all platforms support them,
 or support them in the same way; and any given board might use external
 pullups (or pulldowns) so that the on-chip ones should not be used.
 (When a circuit needs 5 kOhm, on-chip 100 kOhm resistors won't do.)
+Likewise drive strength (2 mA vs 20 mA) and voltage (1.8V vs 3.3V) is a
+platform-specific issue, as are models like (not) having a one-to-one
+correspondence between configurable pins and GPIOs.
 
 There are other system-specific mechanisms that are not specified here,
 like the aforementioned options for input de-glitching and wire-OR output.
index 94e2e3b9e77f03ebcb0a3d0b146e53397a6fcfc0..bcd7cd1278efd285d456fe058d1774b5181d5a7b 100644 (file)
--- a/Documentation/ide.txt
+++ b/Documentation/ide.txt
@@ -258,8 +258,6 @@ Summary of ide driver parameters for kernel command line
                          As for VLB, it is safest to not specify it.
                          Bigger values are safer than smaller ones.
 
- "idex=noprobe"                : do not attempt to access/use this interface
  "idex=base"           : probe for an interface at the addr specified,
                          where "base" is usually 0x1f0 or 0x170
                          and "ctl" is assumed to be "base"+0x206
@@ -307,53 +305,6 @@ Also for legacy CMD640 host driver (cmd640) you need to use "probe_vlb"
 kernel paremeter to enable probing for VLB version of the chipset (PCI ones
 are detected automatically).
 
-================================================================================
-
-IDE ATAPI streaming tape driver
--------------------------------
-
-This driver is a part of the Linux ide driver and works in co-operation
-with linux/drivers/block/ide.c.
-
-The driver, in co-operation with ide.c, basically traverses the
-request-list for the block device interface. The character device
-interface, on the other hand, creates new requests, adds them
-to the request-list of the block device, and waits for their completion.
-
-Pipelined operation mode is now supported on both reads and writes.
-
-The block device major and minor numbers are determined from the
-tape's relative position in the ide interfaces, as explained in ide.c.
-
-The character device interface consists of the following devices:
-
- ht0           major 37, minor 0       first  IDE tape, rewind on close.
- ht1           major 37, minor 1       second IDE tape, rewind on close.
- ...
- nht0          major 37, minor 128     first  IDE tape, no rewind on close.
- nht1          major 37, minor 129     second IDE tape, no rewind on close.
- ...
-
-Run /dev/MAKEDEV to create the above entries.
-
-The general magnetic tape commands compatible interface, as defined by
-include/linux/mtio.h, is accessible through the character device.
-
-General ide driver configuration options, such as the interrupt-unmask
-flag, can be configured by issuing an ioctl to the block device interface,
-as any other ide device.
-
-Our own ide-tape ioctl's can be issued to either the block device or
-the character device interface.
-
-Maximal throughput with minimal bus load will usually be achieved in the
-following scenario:
-
-       1.      ide-tape is operating in the pipelined operation mode.
-       2.      No buffering is performed by the user backup program.
-
-
-
 ================================================================================
 
 Some Terminology
index 83f515c2905a806d88276ea587da5d9ef5196c52..be89f393274fbd086a4f14b4e56bb82c0a29c76d 100644 (file)
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -192,7 +192,8 @@ code mapping.
 The Kprobes API includes a "register" function and an "unregister"
 function for each type of probe.  Here are terse, mini-man-page
 specifications for these functions and the associated probe handlers
-that you'll write.  See the latter half of this document for examples.
+that you'll write.  See the files in the samples/kprobes/ sub-directory
+for examples.
 
 4.1 register_kprobe
 
@@ -420,249 +421,15 @@ e. Watchpoint probes (which fire on data references).
 
 8. Kprobes Example
 
-Here's a sample kernel module showing the use of kprobes to dump a
-stack trace and selected i386 registers when do_fork() is called.
------ cut here -----
-/*kprobe_example.c*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/sched.h>
-
-/*For each probe you need to allocate a kprobe structure*/
-static struct kprobe kp;
-
-/*kprobe pre_handler: called just before the probed instruction is executed*/
-int handler_pre(struct kprobe *p, struct pt_regs *regs)
-{
-       printk("pre_handler: p->addr=0x%p, eip=%lx, eflags=0x%lx\n",
-               p->addr, regs->eip, regs->eflags);
-       dump_stack();
-       return 0;
-}
-
-/*kprobe post_handler: called after the probed instruction is executed*/
-void handler_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
-{
-       printk("post_handler: p->addr=0x%p, eflags=0x%lx\n",
-               p->addr, regs->eflags);
-}
-
-/* fault_handler: this is called if an exception is generated for any
- * instruction within the pre- or post-handler, or when Kprobes
- * single-steps the probed instruction.
- */
-int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
-{
-       printk("fault_handler: p->addr=0x%p, trap #%dn",
-               p->addr, trapnr);
-       /* Return 0 because we don't handle the fault. */
-       return 0;
-}
-
-static int __init kprobe_init(void)
-{
-       int ret;
-       kp.pre_handler = handler_pre;
-       kp.post_handler = handler_post;
-       kp.fault_handler = handler_fault;
-       kp.symbol_name = "do_fork";
-
-       ret = register_kprobe(&kp);
-       if (ret < 0) {
-               printk("register_kprobe failed, returned %d\n", ret);
-               return ret;
-       }
-       printk("kprobe registered\n");
-       return 0;
-}
-
-static void __exit kprobe_exit(void)
-{
-       unregister_kprobe(&kp);
-       printk("kprobe unregistered\n");
-}
-
-module_init(kprobe_init)
-module_exit(kprobe_exit)
-MODULE_LICENSE("GPL");
------ cut here -----
-
-You can build the kernel module, kprobe-example.ko, using the following
-Makefile:
------ cut here -----
-obj-m := kprobe-example.o
-KDIR := /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-default:
-       $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules
-clean:
-       rm -f *.mod.c *.ko *.o
------ cut here -----
-
-$ make
-$ su -
-...
-# insmod kprobe-example.ko
-
-You will see the trace data in /var/log/messages and on the console
-whenever do_fork() is invoked to create a new process.
+See samples/kprobes/kprobe_example.c
 
 9. Jprobes Example
 
-Here's a sample kernel module showing the use of jprobes to dump
-the arguments of do_fork().
------ cut here -----
-/*jprobe-example.c */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/uio.h>
-#include <linux/kprobes.h>
-
-/*
- * Jumper probe for do_fork.
- * Mirror principle enables access to arguments of the probed routine
- * from the probe handler.
- */
-
-/* Proxy routine having the same arguments as actual do_fork() routine */
-long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
-             struct pt_regs *regs, unsigned long stack_size,
-             int __user * parent_tidptr, int __user * child_tidptr)
-{
-       printk("jprobe: clone_flags=0x%lx, stack_size=0x%lx, regs=0x%p\n",
-              clone_flags, stack_size, regs);
-       /* Always end with a call to jprobe_return(). */
-       jprobe_return();
-       /*NOTREACHED*/
-       return 0;
-}
-
-static struct jprobe my_jprobe = {
-       .entry = jdo_fork
-};
-
-static int __init jprobe_init(void)
-{
-       int ret;
-       my_jprobe.kp.symbol_name = "do_fork";
-
-       if ((ret = register_jprobe(&my_jprobe)) <0) {
-               printk("register_jprobe failed, returned %d\n", ret);
-               return -1;
-       }
-       printk("Planted jprobe at %p, handler addr %p\n",
-              my_jprobe.kp.addr, my_jprobe.entry);
-       return 0;
-}
-
-static void __exit jprobe_exit(void)
-{
-       unregister_jprobe(&my_jprobe);
-       printk("jprobe unregistered\n");
-}
-
-module_init(jprobe_init)
-module_exit(jprobe_exit)
-MODULE_LICENSE("GPL");
------ cut here -----
-
-Build and insert the kernel module as shown in the above kprobe
-example.  You will see the trace data in /var/log/messages and on
-the console whenever do_fork() is invoked to create a new process.
-(Some messages may be suppressed if syslogd is configured to
-eliminate duplicate messages.)
+See samples/kprobes/jprobe_example.c
 
 10. Kretprobes Example
 
-Here's a sample kernel module showing the use of return probes to
-report failed calls to sys_open().
------ cut here -----
-/*kretprobe-example.c*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/ktime.h>
-
-/* per-instance private data */
-struct my_data {
-       ktime_t entry_stamp;
-};
-
-static const char *probed_func = "sys_open";
-
-/* Timestamp function entry. */
-static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
-       struct my_data *data;
-
-       if(!current->mm)
-               return 1; /* skip kernel threads */
-
-       data = (struct my_data *)ri->data;
-       data->entry_stamp = ktime_get();
-       return 0;
-}
-
-/* If the probed function failed, log the return value and duration.
- * Duration may turn out to be zero consistently, depending upon the
- * granularity of time accounting on the platform. */
-static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
-       int retval = regs_return_value(regs);
-       struct my_data *data = (struct my_data *)ri->data;
-       s64 delta;
-       ktime_t now;
-
-       if (retval < 0) {
-               now = ktime_get();
-               delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
-               printk("%s: return val = %d (duration = %lld ns)\n",
-                      probed_func, retval, delta);
-       }
-       return 0;
-}
-
-static struct kretprobe my_kretprobe = {
-       .handler = return_handler,
-       .entry_handler = entry_handler,
-       .data_size = sizeof(struct my_data),
-       .maxactive = 20, /* probe up to 20 instances concurrently */
-};
-
-static int __init kretprobe_init(void)
-{
-       int ret;
-       my_kretprobe.kp.symbol_name = (char *)probed_func;
-
-       if ((ret = register_kretprobe(&my_kretprobe)) < 0) {
-               printk("register_kretprobe failed, returned %d\n", ret);
-               return -1;
-       }
-       printk("Kretprobe active on %s\n", my_kretprobe.kp.symbol_name);
-       return 0;
-}
-
-static void __exit kretprobe_exit(void)
-{
-       unregister_kretprobe(&my_kretprobe);
-       printk("kretprobe unregistered\n");
-       /* nmissed > 0 suggests that maxactive was set too low. */
-       printk("Missed probing %d instances of %s\n",
-              my_kretprobe.nmissed, probed_func);
-}
-
-module_init(kretprobe_init)
-module_exit(kretprobe_exit)
-MODULE_LICENSE("GPL");
------ cut here -----
-
-Build and insert the kernel module as shown in the above kprobe
-example.  You will see the trace data in /var/log/messages and on the
-console whenever sys_open() returns a negative value.  (Some messages
-may be suppressed if syslogd is configured to eliminate duplicate
-messages.)
+See samples/kprobes/kretprobe_example.c
 
 For additional information on Kprobes, refer to the following URLs:
 http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe
index 72b20c63959651ce7be7dad2f2e854c983f660e0..bb7bd27d468215826dbd616713f0d130f30be420 100644 (file)
--- a/Documentation/pci.txt
+++ b/Documentation/pci.txt
@@ -123,7 +123,8 @@ initialization with a pointer to a structure describing the driver
 
 
 The ID table is an array of struct pci_device_id entries ending with an
-all-zero entry.  Each entry consists of:
+all-zero entry; use of the macro DECLARE_PCI_DEVICE_TABLE is the preferred
+method of declaring the table.  Each entry consists of:
 
        vendor,device   Vendor and device ID to match (or PCI_ANY_ID)
 
@@ -191,7 +192,8 @@ Tips on when/where to use the above attributes:
 
        o Do not mark the struct pci_driver.
 
-       o The ID table array should be marked __devinitdata.
+       o The ID table array should be marked __devinitconst; this is done
+         automatically if the table is declared with DECLARE_PCI_DEVICE_TABLE().
 
        o The probe() and remove() functions should be marked __devinit
          and __devexit respectively.  All initialization functions
index e4f2aca709a9ee371e666481ef4d37ceba0478c5..8d6a2c11b9534aac27800913554047fe872e7593 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -767,14 +767,14 @@ S:        Maintained
 
 BLACKFIN ARCHITECTURE
 P:     Bryan Wu
-M:     bryan.wu@analog.com
+M:     cooloney@kernel.org
 L:     uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
 W:     http://blackfin.uclinux.org
 S:     Supported
 
 BLACKFIN EMAC DRIVER
 P:     Bryan Wu
-M:     bryan.wu@analog.com
+M:     cooloney@kernel.org
 L:     uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
 W:     http://blackfin.uclinux.org
 S:     Supported
@@ -982,6 +982,12 @@ M: mchan@broadcom.com
 L:     netdev@vger.kernel.org
 S:     Supported
 
+BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
+P:     Eliezer Tamir
+M:     eliezert@broadcom.com
+L:     netdev@vger.kernel.org
+S:     Supported
+
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
 P:     Michael Chan
 M:     mchan@broadcom.com
@@ -1132,6 +1138,12 @@ L:       accessrunner-general@lists.sourceforge.net
 W:     http://accessrunner.sourceforge.net/
 S:     Maintained
 
+CONTROL GROUPS (CGROUPS)
+P:     Paul Menage
+M:     menage@google.com
+L:     containers@lists.linux-foundation.org
+S:     Maintained
+
 CORETEMP HARDWARE MONITORING DRIVER
 P:     Rudolf Marek
 M:     r.marek@assembler.cz
@@ -1583,6 +1595,13 @@ L:       linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://linux-fbdev.sourceforge.net/
 S:     Maintained
 
+FREESCALE DMA DRIVER
+P;     Zhang Wei
+M:     wei.zhang@freescale.com
+L:     linuxppc-embedded@ozlabs.org
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 FREESCALE SOC FS_ENET DRIVER
 P:     Pantelis Antoniou
 M:     pantelis.antoniou@gmail.com
@@ -2620,6 +2639,17 @@ L:       linux-kernel@vger.kernel.org
 W:     http://www.linux-mm.org
 S:     Maintained
 
+MEMORY RESOURCE CONTROLLER
+P:     Balbir Singh
+M:     balbir@linux.vnet.ibm.com
+P:     Pavel Emelyanov
+M:     xemul@openvz.org
+P:     KAMEZAWA Hiroyuki
+M:     kamezawa.hiroyu@jp.fujitsu.com
+L:     linux-mm@kvack.org
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 MEI MN10300/AM33 PORT
 P:     David Howells
 M:     dhowells@redhat.com
@@ -2744,6 +2774,8 @@ S:        Maintained
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 P:     Faisal Latif
 M:     flatif@neteffect.com
+P:     Nishi Gupta
+M:     ngupta@neteffect.com
 P:     Glenn Streiff
 M:     gstreiff@neteffect.com
 L:     general@lists.openfabrics.org
@@ -3884,10 +3916,13 @@ M:      trivial@kernel.org
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
-TULIP NETWORK DRIVER
-L:     tulip-users@lists.sourceforge.net
-W:     http://sourceforge.net/projects/tulip/
-S:     Orphan
+TULIP NETWORK DRIVERS
+P:     Grant Grundler
+M:     grundler@parisc-linux.org
+P:     Kyle McMartin
+M:     kyle@parisc-linux.org
+L:     netdev@vger.kernel.org
+S:     Maintained
 
 TUN/TAP driver
 P:     Maxim Krasnyansky
index e9201edfae6fa3ac17064b154a8161977f0b1a5f..a613e2a80a619073bb67818dbfc6dd92481ea456 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 25
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Funky Weasel is Jiggy wit it
 
 # *DOCUMENTATION*
index 3d72dc3fc8f52f5d822cc6c256e21fb21e2b4f61..694c9af520bbed3e9a36a6c16aa693f541b192b8 100644 (file)
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -27,5 +27,12 @@ config KPROBES
          for kernel debugging, non-intrusive instrumentation and testing.
          If in doubt, say "N".
 
+config KRETPROBES
+       def_bool y
+       depends on KPROBES && HAVE_KRETPROBES
+
 config HAVE_KPROBES
        def_bool n
+
+config HAVE_KRETPROBES
+       def_bool n
index 26d3789dfdd0420bc4e7ce496995138b1decda14..be6fa105cd34214d11c8b5bbf11dbc305f7ddae3 100644 (file)
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -31,7 +31,6 @@
 #endif
 
 #define DEBUG_NODIRECT 0
-#define DEBUG_FORCEDAC 0
 
 #define ISA_DMA_MASK           0x00ffffff
 
@@ -126,39 +125,67 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
        return iommu_arena_new_node(0, hose, base, window_size, align);
 }
 
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+                                  unsigned long shift,
+                                  unsigned long boundary_size)
+{
+       shift = (shift + index) & (boundary_size - 1);
+       return shift + nr > boundary_size;
+}
+
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+                      long n, long mask)
 {
        unsigned long *ptes;
        long i, p, nent;
+       int pass = 0;
+       unsigned long base;
+       unsigned long boundary_size;
+
+       BUG_ON(arena->dma_base & ~PAGE_MASK);
+       base = arena->dma_base >> PAGE_SHIFT;
+       if (dev)
+               boundary_size = ALIGN(dma_get_max_seg_size(dev) + 1, PAGE_SIZE)
+                       >> PAGE_SHIFT;
+       else
+               boundary_size = ALIGN(1UL << 32, PAGE_SIZE) >> PAGE_SHIFT;
+
+       BUG_ON(!is_power_of_2(boundary_size));
 
        /* Search forward for the first mask-aligned sequence of N free ptes */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
-       p = (arena->next_entry + mask) & ~mask;
+       p = ALIGN(arena->next_entry, mask + 1);
        i = 0;
+
+again:
        while (i < n && p+i < nent) {
+               if (!i && is_span_boundary(p, n, base, boundary_size)) {
+                       p = ALIGN(p + 1, mask + 1);
+                       goto again;
+               }
+
                if (ptes[p+i])
-                       p = (p + i + 1 + mask) & ~mask, i = 0;
+                       p = ALIGN(p + i + 1, mask + 1), i = 0;
                else
                        i = i + 1;
        }
 
        if (i < n) {
-                /* Reached the end.  Flush the TLB and restart the
-                   search from the beginning.  */
-               alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
-
-               p = 0, i = 0;
-               while (i < n && p+i < nent) {
-                       if (ptes[p+i])
-                               p = (p + i + 1 + mask) & ~mask, i = 0;
-                       else
-                               i = i + 1;
-               }
-
-               if (i < n)
+               if (pass < 1) {
+                       /*
+                        * Reached the end.  Flush the TLB and restart
+                        * the search from the beginning.
+                       */
+                       alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+                       pass++;
+                       p = 0;
+                       i = 0;
+                       goto again;
+               } else
                        return -1;
        }
 
@@ -168,7 +195,8 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+                 unsigned int align)
 {
        unsigned long flags;
        unsigned long *ptes;
@@ -179,7 +207,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
        /* Search for N empty ptes */
        ptes = arena->ptes;
        mask = max(align, arena->align_entry) - 1;
-       p = iommu_arena_find_pages(arena, n, mask);
+       p = iommu_arena_find_pages(dev, arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
@@ -229,6 +257,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
        unsigned long paddr;
        dma_addr_t ret;
        unsigned int align = 0;
+       struct device *dev = pdev ? &pdev->dev : NULL;
 
        paddr = __pa(cpu_addr);
 
@@ -276,7 +305,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
        /* Force allocation to 64KB boundary for ISA bridges. */
        if (pdev && pdev == isa_bridge)
                align = 8;
-       dma_ofs = iommu_arena_alloc(arena, npages, align);
+       dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
        if (dma_ofs < 0) {
                printk(KERN_WARNING "pci_map_single failed: "
                       "could not allocate dma page tables\n");
@@ -563,7 +592,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
        paddr &= ~PAGE_MASK;
        npages = calc_npages(paddr + size);
-       dma_ofs = iommu_arena_alloc(arena, npages, 0);
+       dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
        if (dma_ofs < 0) {
                /* If we attempted a direct map above but failed, die.  */
                if (leader->dma_address == 0)
@@ -830,7 +859,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
        /* Search for N empty ptes.  */
        ptes = arena->ptes;
-       p = iommu_arena_find_pages(arena, pg_count, align_mask);
+       p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
index eeda0cd52ca0716e353a3df781adaa45600feb2b..f8a7d222f995e4654ead5ab79417edbebd3f0f1e 100644 (file)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
        select SYS_SUPPORTS_APM_EMULATION
        select HAVE_OPROFILE
        select HAVE_KPROBES if (!XIP_KERNEL)
+       select HAVE_KRETPROBES if (HAVE_KPROBES)
        help
          The ARM series is a line of low-power-consumption RISC chip designs
          licensed by ARM Ltd and targeted at embedded applications and
@@ -941,7 +942,8 @@ config KEXEC
 
 config ATAGS_PROC
        bool "Export atags in procfs"
-       default n
+       depends on KEXEC
+       default y
        help
          Should the atags used to boot the kernel be exported in an "atags"
          file in procfs. Useful with kexec.
index 939a3867f77c8696caa0a62a9b20bfea8f6038ec..4b21479332ae40c073c1f184b1980a502aca4c40 100644 (file)
--- a/arch/arm/mach-pxa/cpu-pxa.c
+++ b/arch/arm/mach-pxa/cpu-pxa.c
@@ -43,7 +43,7 @@
 
 #ifdef DEBUG
 static unsigned int freq_debug;
-MODULE_PARM(freq_debug, "i");
+module_param(freq_debug, uint, 0);
 MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
 #else
 #define freq_debug  0
index 7cd9ef8deb02f28c9d81ab62cdac9603d4170929..35f25fdaeba3925303ed0dc1dacd4ed9f105c71a 100644 (file)
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -129,28 +129,20 @@ static void clk_pxa3xx_cken_enable(struct clk *clk)
 {
        unsigned long mask = 1ul << (clk->cken & 0x1f);
 
-       local_irq_disable();
-
        if (clk->cken < 32)
                CKENA |= mask;
        else
                CKENB |= mask;
-
-       local_irq_enable();
 }
 
 static void clk_pxa3xx_cken_disable(struct clk *clk)
 {
        unsigned long mask = 1ul << (clk->cken & 0x1f);
 
-       local_irq_disable();
-
        if (clk->cken < 32)
                CKENA &= ~mask;
        else
                CKENB &= ~mask;
-
-       local_irq_enable();
 }
 
 static const struct clkops clk_pxa3xx_cken_ops = {
index 7731d50dd86cfe00e2144ded09ca1caa1805d690..afd2cbfca0d91c993d94a8fd6400d73ff5807cf4 100644 (file)
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -58,7 +58,7 @@ static struct platform_device smc91x_device = {
        .resource       = smc91x_resources,
 };
 
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULES)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static void zylonite_backlight_power(int on)
 {
        gpio_set_value(gpio_backlight, on);
index 2728b0e7d2bbd9a165e3f031454b9b5f1f7fc8aa..3f6dc40b835321fd469a23ce8ffa49089377e74b 100644 (file)
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -120,6 +120,8 @@ full_search:
  */
 int valid_phys_addr_range(unsigned long addr, size_t size)
 {
+       if (addr < PHYS_OFFSET)
+               return 0;
        if (addr + size > __pa(high_memory))
                return 0;
 
index 500c9610ab3085094ec48d012610b4f557136ec1..e0f19ab91163e589562d7b070f29d673fc9ef33f 100644 (file)
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -75,7 +75,7 @@ no_pgd:
 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
 {
        pmd_t *pmd;
-       struct page *pte;
+       pgtable_t pte;
 
        if (!pgd)
                return;
@@ -90,10 +90,8 @@ void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
                goto free;
        }
 
-       pte = pmd_page(*pmd);
+       pte = pmd_pgtable(*pmd);
        pmd_clear(pmd);
-       dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
-       pte_lock_deinit(pte);
        pte_free(mm, pte);
        pmd_free(mm, pmd);
 free:
index 5a77030e07a091a8cf22bfa1b1d52867f025dc50..e765a8652b3ee58480847302923170ed668150d3 100644 (file)
--- a/arch/avr32/boards/atstk1000/atstk1004.c
+++ b/arch/avr32/boards/atstk1000/atstk1004.c
@@ -129,7 +129,7 @@ static int __init atstk1004_init(void)
 #ifdef CONFIG_BOARD_ATSTK100X_SPI1
        at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
 #endif
-#ifndef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
+#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
        at32_add_device_mci(0);
 #endif
        at32_add_device_lcdc(0, &atstk1000_lcdc_data,
index eaaa69bbdc38d5546b395311f29896a6a2c357db..7f4af0b1e111448f4941fbb7d3ae94589f13a961 100644 (file)
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -11,6 +11,7 @@
 #include <linux/fs.h>
 #include <linux/ptrace.h>
 #include <linux/reboot.h>
+#include <linux/tick.h>
 #include <linux/uaccess.h>
 #include <linux/unistd.h>
 
@@ -30,8 +31,10 @@ void cpu_idle(void)
 {
        /* endless idle loop with no priority at all */
        while (1) {
+               tick_nohz_stop_sched_tick();
                while (!need_resched())
                        cpu_idle_sleep();
+               tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
@@ -345,6 +348,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        p->thread.cpu_context.ksp = (unsigned long)childregs;
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 
+       clear_tsk_thread_flag(p, TIF_DEBUG);
        if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
                ocd_enable(p);
 
index 6560cb18b4e3403e498cb9076ea7025b61cbb2d0..ce4e4296b95467f3e0b715a9ec953398917be8f7 100644 (file)
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -189,6 +189,8 @@ no_context:
 
        page = sysreg_read(PTBR);
        printk(KERN_ALERT "ptbr = %08lx", page);
+       if (address >= TASK_SIZE)
+               page = (unsigned long)swapper_pg_dir;
        if (page) {
                page = ((unsigned long *)page)[address >> 22];
                printk(" pgd = %08lx", page);
index fe254f886a6e15c8702f49d9d08b9c90fefc1a17..75eba2ca788149fb610f3b91ff63aa2286e013bf 100644 (file)
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -98,8 +98,11 @@ drivers-$(CONFIG_OPROFILE) += arch/$(ARCH)/oprofile/
 #      them changed.  We use .mach to indicate when they were updated
 #      last, otherwise make uses the target directory mtime.
 
+       show_mach_symlink = :
+ quiet_show_mach_symlink = echo '  SYMLINK include/asm-$(ARCH)/mach-$(MACHINE) -> include/asm-$(ARCH)/mach'
+silent_show_mach_symlink = :
 include/asm-blackfin/.mach: $(wildcard include/config/arch/*.h) include/config/auto.conf
-       @echo '  SYMLINK include/asm-$(ARCH)/mach-$(MACHINE) -> include/asm-$(ARCH)/mach'
+       @$($(quiet)show_mach_symlink)
 ifneq ($(KBUILD_SRC),)
        $(Q)mkdir -p include/asm-$(ARCH)
        $(Q)ln -fsn $(srctree)/include/asm-$(ARCH)/mach-$(MACHINE) include/asm-$(ARCH)/mach
index d59ee1530bd400938ca73324fe4b9efcd6ee9907..ae320dcfedefe9b064bdeddfdf45ae052d340c27 100644 (file)
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -1,7 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22.14
-# Thu Nov 29 17:32:47 2007
+# Linux kernel version: 2.6.22.16
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -116,7 +115,10 @@ CONFIG_PREEMPT_VOLUNTARY=y
 # Processor and Board Settings
 #
 # CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
 # CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
 CONFIG_BF527=y
 # CONFIG_BF531 is not set
 # CONFIG_BF532 is not set
@@ -306,6 +308,7 @@ CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
 CONFIG_L1_MAX_PIECE=16
+# CONFIG_MPU is not set
 
 #
 # Asynchonous Memory Configuration
@@ -354,6 +357,7 @@ CONFIG_BINFMT_ZFLAT=y
 # Power management options
 #
 # CONFIG_PM is not set
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # Networking
@@ -496,7 +500,6 @@ CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_CFI_INTELEXT is not set
 # CONFIG_MTD_CFI_AMDSTD is not set
 # CONFIG_MTD_CFI_STAA is not set
-CONFIG_MTD_MW320D=m
 CONFIG_MTD_RAM=y
 CONFIG_MTD_ROM=m
 # CONFIG_MTD_ABSENT is not set
@@ -506,9 +509,6 @@ CONFIG_MTD_ROM=m
 #
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 # CONFIG_MTD_PHYSMAP is not set
-CONFIG_MTD_BF5xx=m
-CONFIG_BFIN_FLASH_SIZE=0x400000
-CONFIG_EBIU_FLASH_BASE=0x20000000
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -684,7 +684,6 @@ CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_UINPUT is not set
-# CONFIG_BF53X_PFBUTTONS is not set
 # CONFIG_TWI_KEYPAD is not set
 
 #
@@ -702,12 +701,12 @@ CONFIG_INPUT_MISC=y
 # CONFIG_BF5xx_PPIFCD is not set
 # CONFIG_BFIN_SIMPLE_TIMER is not set
 # CONFIG_BF5xx_PPI is not set
+CONFIG_BFIN_OTP=y
+# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
 # CONFIG_BFIN_SPORT is not set
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_TWI_LCD is not set
 # CONFIG_AD5304 is not set
-# CONFIG_BF5xx_TEA5764 is not set
-# CONFIG_BF5xx_FBDMA is not set
 # CONFIG_VT is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
@@ -772,7 +771,6 @@ CONFIG_I2C_CHARDEV=m
 #
 # I2C Hardware Bus support
 #
-# CONFIG_I2C_BLACKFIN_GPIO is not set
 CONFIG_I2C_BLACKFIN_TWI=m
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
 # CONFIG_I2C_GPIO is not set
index 811711f59a25664ecb3a83af9825d02f8b55b49d..9621caa60b5fc36a5ff0a26d06294f40cb71ae26 100644 (file)
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -322,10 +322,9 @@ CONFIG_PM=y
 # CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 # CONFIG_PM_SYSFS_DEPRECATED is not set
-CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
-# CONFIG_PM_WAKEUP_GPIO_API is not set
-CONFIG_PM_WAKEUP_SIC_IWR=0x80
 
 #
 # CPU Frequency scaling
@@ -697,7 +696,6 @@ CONFIG_SERIAL_BFIN_DMA=y
 # CONFIG_SERIAL_BFIN_PIO is not set
 CONFIG_SERIAL_BFIN_UART0=y
 # CONFIG_BFIN_UART0_CTSRTS is not set
-# CONFIG_SERIAL_BFIN_UART1 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_BFIN_SPORT is not set
index 198f4123af4b4239f2171487d567ca7c2caa2fba..b51e76ce7f4f9812c4e6676e60890842f3300c18 100644 (file)
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -323,10 +323,9 @@ CONFIG_PM=y
 # CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 # CONFIG_PM_SYSFS_DEPRECATED is not set
-CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
-# CONFIG_PM_WAKEUP_GPIO_API is not set
-CONFIG_PM_WAKEUP_SIC_IWR=0x80
 
 #
 # CPU Frequency scaling
@@ -714,7 +713,6 @@ CONFIG_SERIAL_BFIN_DMA=y
 # CONFIG_SERIAL_BFIN_PIO is not set
 CONFIG_SERIAL_BFIN_UART0=y
 # CONFIG_BFIN_UART0_CTSRTS is not set
-# CONFIG_SERIAL_BFIN_UART1 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_BFIN_SPORT is not set
index b37ccc681e7a7b904aa49693e270d8d80d4765e3..d45fa535dad7d4c8494cd279dff63cd51c6bb54d 100644 (file)
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -330,10 +330,9 @@ CONFIG_PM=y
 # CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 # CONFIG_PM_SYSFS_DEPRECATED is not set
-CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
-# CONFIG_PM_WAKEUP_GPIO_API is not set
-CONFIG_PM_WAKEUP_SIC_IWR=0x8
 
 #
 # CPU Frequency scaling
@@ -1013,6 +1012,7 @@ CONFIG_SND_BFIN_AD73311_SE=4
 CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=m
 CONFIG_SND_BF5XX_SOC=m
+CONFIG_SND_MMAP_SUPPORT=y
 CONFIG_SND_BF5XX_SOC_AC97=m
 # CONFIG_SND_BF5XX_SOC_WM8750 is not set
 # CONFIG_SND_BF5XX_SOC_WM8731 is not set
index fd702161ef59f930f69eceab71421d615e2de5ed..c9707f7665ad8ca021bbfe9819a1dec06af40471 100644 (file)
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -396,6 +396,7 @@ CONFIG_BINFMT_ZFLAT=y
 # Power management options
 #
 # CONFIG_PM is not set
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # CPU Frequency scaling
@@ -1075,6 +1076,7 @@ CONFIG_SND_VERBOSE_PROCFS=y
 CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=y
 CONFIG_SND_BF5XX_SOC=y
+CONFIG_SND_MMAP_SUPPORT=y
 CONFIG_SND_BF5XX_SOC_AC97=y
 CONFIG_SND_BF5XX_SOC_BF548_EZKIT=y
 # CONFIG_SND_BF5XX_SOC_WM8750 is not set
index 8546994939fb08c9bfacf7da7c13d81d601475a2..4d8a633313091b95189547d1be6d50d9ea1cce59 100644 (file)
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -367,6 +367,7 @@ CONFIG_BINFMT_ZFLAT=y
 # Power management options
 #
 # CONFIG_PM is not set
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # Networking
index 5453bc3664fc40eb47474f8a2297c6ae7e0835ac..8fd5d22cec34d124dfedb031bcad84a4149a4b33 100644 (file)
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -105,13 +105,14 @@ int request_dma(unsigned int channel, char *device_id)
        mutex_unlock(&(dma_ch[channel].dmalock));
 
 #ifdef CONFIG_BF54x
-       if (channel >= CH_UART2_RX && channel <= CH_UART3_TX &&
-               strncmp(device_id, "BFIN_UART", 9) == 0)
-               dma_ch[channel].regs->peripheral_map |=
-                       (channel - CH_UART2_RX + 0xC);
-       else
-               dma_ch[channel].regs->peripheral_map |=
-                       (channel - CH_UART2_RX + 0x6);
+       if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
+               if (strncmp(device_id, "BFIN_UART", 9) == 0)
+                       dma_ch[channel].regs->peripheral_map |=
+                               (channel - CH_UART2_RX + 0xC);
+               else
+                       dma_ch[channel].regs->peripheral_map |=
+                               (channel - CH_UART2_RX + 0x6);
+       }
 #endif
 
        dma_ch[channel].device_id = device_id;
index 5cf4bdb1df3bffdd09bea1bd295033345fddcfc9..1904d8b53328fad9440b33a4cfdb391e7d2f0473 100644 (file)
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -1,9 +1,9 @@
 /*
- * bfin_gptimers.c - derived from bf53x_timers.c
- *  Driver for General Purpose Timer functions on the Blackfin processor
+ * gptimers.c - Blackfin General Purpose Timer core API
  *
- *  Copyright (C) 2005 John DeHority
- *  Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de)
+ * Copyright (c) 2005-2008 Analog Devices Inc.
+ * Copyright (C) 2005 John DeHority
+ * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de)
  *
  * Licensed under the GPLv2.
  */
index 8229b1090eb9622a427561d675cf59f1df153d85..2255c289a714db37938d0b96345de10335dbc7ae 100644 (file)
@@ -32,6 +32,7 @@
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 u16 _bfin_swrst;
+EXPORT_SYMBOL(_bfin_swrst);
 
 unsigned long memory_start, memory_end, physical_mem_end;
 unsigned long reserved_mem_dcache_on;
@@ -514,6 +515,7 @@ static __init void  memory_setup(void)
        printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
 
        printk(KERN_INFO "Memory map:\n"
+               KERN_INFO "  fixedcode = 0x%p-0x%p\n"
                KERN_INFO "  text      = 0x%p-0x%p\n"
                KERN_INFO "  rodata    = 0x%p-0x%p\n"
                KERN_INFO "  bss       = 0x%p-0x%p\n"
@@ -527,7 +529,8 @@ static __init void  memory_setup(void)
 #if DMA_UNCACHED_REGION > 0
                KERN_INFO "  DMA Zone  = 0x%p-0x%p\n"
 #endif
-               , _stext, _etext,
+               , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
+               _stext, _etext,
                __start_rodata, __end_rodata,
                __bss_start, __bss_stop,
                _sdata, _edata,
index aed832540b3b1e158fe440b566d7aedd50f34c37..cb01a9de26802aad098eef036a434cdaf8d6b0d7 100644 (file)
@@ -147,44 +147,64 @@ SECTIONS
 
        __l1_lma_start = .;
 
+#if L1_CODE_LENGTH
+# define LDS_L1_CODE *(.l1.text)
+#else
+# define LDS_L1_CODE
+#endif
        .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
        {
                . = ALIGN(4);
                __stext_l1 = .;
-               *(.l1.text)
-
+               LDS_L1_CODE
                . = ALIGN(4);
                __etext_l1 = .;
        }
 
+#if L1_DATA_A_LENGTH
+# define LDS_L1_A_DATA  *(.l1.data)
+# define LDS_L1_A_BSS   *(.l1.bss)
+# define LDS_L1_A_CACHE *(.data_l1.cacheline_aligned)
+#else
+# define LDS_L1_A_DATA
+# define LDS_L1_A_BSS
+# define LDS_L1_A_CACHE
+#endif
        .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
        {
                . = ALIGN(4);
                __sdata_l1 = .;
-               *(.l1.data)
+               LDS_L1_A_DATA
                __edata_l1 = .;
 
                . = ALIGN(4);
                __sbss_l1 = .;
-               *(.l1.bss)
+               LDS_L1_A_BSS
 
                . = ALIGN(32);
-               *(.data_l1.cacheline_aligned)
+               LDS_L1_A_CACHE
 
                . = ALIGN(4);
                __ebss_l1 = .;
        }
 
+#if L1_DATA_B_LENGTH
+# define LDS_L1_B_DATA  *(.l1.data.B)
+# define LDS_L1_B_BSS   *(.l1.bss.B)
+#else
+# define LDS_L1_B_DATA
+# define LDS_L1_B_BSS
+#endif
        .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
        {
                . = ALIGN(4);
                __sdata_b_l1 = .;
-               *(.l1.data.B)
+               LDS_L1_B_DATA
                __edata_b_l1 = .;
 
                . = ALIGN(4);
                __sbss_b_l1 = .;
-               *(.l1.bss.B)
+               LDS_L1_B_BSS
 
                . = ALIGN(4);
                __ebss_b_l1 = .;
index 337515fba612e43a3b8f23aaf1822a1196f911fa..cf4bc0d8335521737422e2d3c3796278e4962fee 100644 (file)
@@ -180,8 +180,8 @@ static struct mtd_partition partition_info[] = {
        },
        {
                .name = "File System",
-               .offset = 4 * SIZE_1M,
-               .size = (256 - 4) * SIZE_1M,
+               .offset = MTDPART_OFS_APPEND,
+               .size = MTDPART_SIZ_FULL,
        },
 };
 
@@ -422,11 +422,11 @@ static struct mtd_partition bfin_spi_flash_partitions[] = {
        }, {
                .name = "kernel",
                .size = 0xe0000,
-               .offset = 0x20000
+               .offset = MTDPART_OFS_APPEND,
        }, {
                .name = "file system",
-               .size = 0x700000,
-               .offset = 0x00100000,
+               .size = MTDPART_SIZ_FULL,
+               .offset = MTDPART_OFS_APPEND,
        }
 };
 
@@ -484,13 +484,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-static struct bfin5xx_spi_chip ad5304_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-#endif
-
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
 static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
        .enable_dma = 0,
@@ -611,17 +604,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .mode = SPI_MODE_3,
        },
 #endif
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-       {
-               .modalias = "ad5304_spi",
-               .max_speed_hz = 1250000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 2,
-               .platform_data = NULL,
-               .controller_data = &ad5304_chip_info,
-               .mode = SPI_MODE_2,
-       },
-#endif
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
        {
                .modalias               = "ad7877",
@@ -818,6 +800,19 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+       .start = 0,
+       .end   = MAX_BLACKFIN_GPIOS - 1,
+       .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+       .name = "simple-gpio",
+       .id = -1,
+       .num_resources = 1,
+       .resource = &bfin_gpios_resources,
+};
+
 static struct platform_device *stamp_devices[] __initdata = {
 #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
        &bf5xx_nand_device,
@@ -895,6 +890,8 @@ static struct platform_device *stamp_devices[] __initdata = {
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
        &bfin_device_gpiokeys,
 #endif
+
+       &bfin_gpios_device,
 };
 
 static int __init stamp_init(void)
@@ -921,13 +918,18 @@ void native_machine_restart(char *cmd)
                bfin_gpio_reset_spi0_ssel1();
 }
 
-/*
- * Currently the MAC address is saved in Flash by U-Boot
- */
-#define FLASH_MAC      0x203f0000
 void bfin_get_ether_addr(char *addr)
 {
-       *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
-       *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
+       /* the MAC is stored in OTP memory page 0xDF */
+       u32 ret;
+       u64 otp_mac;
+       u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A;
+
+       ret = otp_read(0xDF, 0x00, &otp_mac);
+       if (!(ret & 0x1)) {
+               char *otp_mac_p = (char *)&otp_mac;
+               for (ret = 0; ret < 6; ++ret)
+                       addr[ret] = otp_mac_p[5 - ret];
+       }
 }
 EXPORT_SYMBOL(bfin_get_ether_addr);
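
The MAC address handling above changes from reading a fixed flash location to calling the Blackfin on-chip ROM's OTP read routine (the function pointer at 0xEF00001A) for OTP page 0xDF; when bit 0 of the return value is clear, the six address bytes are copied out in reverse order, since the 64-bit OTP word stores them back to front. A hedged usage sketch -- the caller and its printk() formatting below are ours; only the bfin_get_ether_addr() prototype and its 6-byte buffer convention come from the code above:

#include <linux/kernel.h>

void bfin_get_ether_addr(char *addr);	/* exported by the board code above */

static void example_show_board_mac(void)
{
	char addr[6];

	bfin_get_ether_addr(addr);
	printk(KERN_INFO "board MAC from OTP: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0] & 0xff, addr[1] & 0xff, addr[2] & 0xff,
	       addr[3] & 0xff, addr[4] & 0xff, addr[5] & 0xff);
}
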
index 2b09aa39f565274725c770b53ab3abe60513b425..241b5a20a36a78003e1128ce79f9af98e7ae89cd 100644 (file)
@@ -99,11 +99,11 @@ static struct mtd_partition bfin_spi_flash_partitions[] = {
        }, {
                .name = "kernel",
                .size = 0xe0000,
-               .offset = 0x20000
+               .offset = MTDPART_OFS_APPEND,
        }, {
                .name = "file system",
-               .size = 0x700000,
-               .offset = 0x00100000,
+               .size = MTDPART_SIZ_FULL,
+               .offset = MTDPART_OFS_APPEND,
        }
 };
 
@@ -298,6 +298,19 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+       .start = 0,
+       .end   = MAX_BLACKFIN_GPIOS - 1,
+       .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+       .name = "simple-gpio",
+       .id = -1,
+       .num_resources = 1,
+       .resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
 #include <linux/i2c-gpio.h>
 
@@ -350,6 +363,8 @@ static struct platform_device *ezkit_devices[] __initdata = {
 #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
        &i2c_gpio_device,
 #endif
+
+       &bfin_gpios_device,
 };
 
 static int __init ezkit_init(void)
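
Several board files in this commit gain the same block: a platform_device named "simple-gpio" whose single IORESOURCE_IRQ resource covers GPIO numbers 0 through MAX_BLACKFIN_GPIOS - 1, appended to the board's __initdata device table so it is registered along with the other devices (typically via platform_add_devices() in the board init function). A sketch of how a matching driver could pick that range back up -- only the "simple-gpio" name and the resource type come from the hunks; everything inside probe() is an assumption:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int simple_gpio_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (!res)
		return -ENODEV;

	/* res->start .. res->end is the GPIO number range the board exposes */
	return 0;
}

static struct platform_driver simple_gpio_driver = {
	.probe	= simple_gpio_probe,
	.driver	= {
		.name = "simple-gpio",
	},
};
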
index a645f6fd091b36a8cd1f6896ef9fa0d0e7d871e5..b2ac4816ae62d585633de456c74e5a07dedee751 100644 (file)
@@ -112,7 +112,7 @@ static struct platform_device net2272_bfin_device = {
 static struct mtd_partition stamp_partitions[] = {
        {
                .name   = "Bootloader",
-               .size   = 0x20000,
+               .size   = 0x40000,
                .offset = 0,
        }, {
                .name   = "Kernel",
@@ -160,17 +160,17 @@ static struct platform_device stamp_flash_device = {
 static struct mtd_partition bfin_spi_flash_partitions[] = {
        {
                .name = "bootloader",
-               .size = 0x00020000,
+               .size = 0x00040000,
                .offset = 0,
                .mask_flags = MTD_CAP_ROM
        }, {
                .name = "kernel",
                .size = 0xe0000,
-               .offset = 0x20000
+               .offset = MTDPART_OFS_APPEND,
        }, {
                .name = "file system",
-               .size = 0x700000,
-               .offset = 0x00100000,
+               .size = MTDPART_SIZ_FULL,
+               .offset = MTDPART_OFS_APPEND,
        }
 };
 
@@ -212,13 +212,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-static struct bfin5xx_spi_chip ad5304_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-#endif
-
 #if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
 static struct bfin5xx_spi_chip spi_mmc_chip_info = {
        .enable_dma = 1,
@@ -308,17 +301,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-       {
-               .modalias = "ad5304_spi",
-               .max_speed_hz = 1000000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 2,
-               .platform_data = NULL,
-               .controller_data = &ad5304_chip_info,
-               .mode = SPI_MODE_2,
-       },
-#endif
 #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
        {
                .modalias = "spidev",
@@ -457,6 +439,19 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+       .start = 0,
+       .end   = MAX_BLACKFIN_GPIOS - 1,
+       .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+       .name = "simple-gpio",
+       .id = -1,
+       .num_resources = 1,
+       .resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
 #include <linux/i2c-gpio.h>
 
@@ -518,6 +513,8 @@ static struct platform_device *stamp_devices[] __initdata = {
 #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
        &i2c_gpio_device,
 #endif
+
+       &bfin_gpios_device,
        &stamp_flash_device,
 };
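
Across these board files the flash and SPI-flash partition tables drop their hard-coded offsets: the bootloader partition grows from 0x20000 to 0x40000 bytes, and the partitions after it switch to MTDPART_OFS_APPEND / MTDPART_SIZ_FULL, so each one starts right after the previous entry and the last one takes whatever space remains, instead of needing manual re-sizing whenever the bootloader grows. A condensed sketch of the resulting pattern (values from the hunks, array name ours):

#include <linux/mtd/partitions.h>

static struct mtd_partition example_parts[] = {
	{
		.name	= "bootloader",
		.size	= 0x40000,		/* only this size is spelled out */
		.offset	= 0,
	}, {
		.name	= "kernel",
		.size	= 0xe0000,
		.offset	= MTDPART_OFS_APPEND,	/* directly after the bootloader */
	}, {
		.name	= "file system",
		.size	= MTDPART_SIZ_FULL,	/* everything that remains */
		.offset	= MTDPART_OFS_APPEND,
	}
};
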
 
index 8a3397db1d212ec35dc10b08d71bc0d1026c494a..c95395ba7bfa97c09b948fe9475709ef3da1c317 100644 (file)
@@ -371,13 +371,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-static struct bfin5xx_spi_chip ad5304_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-#endif
-
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
 static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
        .enable_dma = 0,
@@ -483,17 +476,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .mode = SPI_MODE_3,
        },
 #endif
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-       {
-               .modalias = "ad5304_spi",
-               .max_speed_hz = 1250000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 2,
-               .platform_data = NULL,
-               .controller_data = &ad5304_chip_info,
-               .mode = SPI_MODE_2,
-       },
-#endif
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
        {
                .modalias               = "ad7877",
index 9e2277e0d25cde2c628abc69e3a85665beee33d6..ea83148993da1f4dd8266d0d04e2e2744b984b56 100644 (file)
@@ -128,6 +128,19 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+       .start = 0,
+       .end   = MAX_BLACKFIN_GPIOS - 1,
+       .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+       .name = "simple-gpio",
+       .id = -1,
+       .num_resources = 1,
+       .resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
 static struct resource bfin_pcmcia_cf_resources[] = {
        {
@@ -343,7 +356,7 @@ static struct platform_device net2272_bfin_device = {
 static struct mtd_partition stamp_partitions[] = {
        {
                .name       = "Bootloader",
-               .size       = 0x20000,
+               .size       = 0x40000,
                .offset     = 0,
        }, {
                .name       = "Kernel",
@@ -351,7 +364,7 @@ static struct mtd_partition stamp_partitions[] = {
                .offset     = MTDPART_OFS_APPEND,
        }, {
                .name       = "RootFS",
-               .size       = 0x400000 - 0x20000 - 0xE0000 - 0x10000,
+               .size       = 0x400000 - 0x40000 - 0xE0000 - 0x10000,
                .offset     = MTDPART_OFS_APPEND,
        }, {
                .name       = "MAC Address",
@@ -391,17 +404,17 @@ static struct platform_device stamp_flash_device = {
 static struct mtd_partition bfin_spi_flash_partitions[] = {
        {
                .name = "bootloader",
-               .size = 0x00020000,
+               .size = 0x00040000,
                .offset = 0,
                .mask_flags = MTD_CAP_ROM
        }, {
                .name = "kernel",
                .size = 0xe0000,
-               .offset = 0x20000
+               .offset = MTDPART_OFS_APPEND,
        }, {
                .name = "file system",
-               .size = 0x700000,
-               .offset = 0x00100000,
+               .size = MTDPART_SIZ_FULL,
+               .offset = MTDPART_OFS_APPEND,
        }
 };
 
@@ -459,13 +472,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-static struct bfin5xx_spi_chip ad5304_chip_info = {
-       .enable_dma = 0,
-       .bits_per_word = 16,
-};
-#endif
-
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
 static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
        .enable_dma = 0,
@@ -578,17 +584,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
                .mode = SPI_MODE_3,
        },
 #endif
-#if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE)
-       {
-               .modalias = "ad5304_spi",
-               .max_speed_hz = 1250000,     /* max spi clock (SCK) speed in HZ */
-               .bus_num = 0,
-               .chip_select = 2,
-               .platform_data = NULL,
-               .controller_data = &ad5304_chip_info,
-               .mode = SPI_MODE_2,
-       },
-#endif
 #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
        {
                .modalias               = "ad7877",
@@ -821,6 +816,8 @@ static struct platform_device *stamp_devices[] __initdata = {
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
        &bfin_device_gpiokeys,
 #endif
+
+       &bfin_gpios_device,
        &stamp_flash_device,
 };
 
index 916e963e83ba4eed13b2d36460fc084df260f523..a0950c1fd80027a0917336be24b35d202dbbdcb4 100644 (file)
@@ -285,8 +285,8 @@ static struct mtd_partition partition_info[] = {
        },
        {
                .name = "File System",
-               .offset = 4 * SIZE_1M,
-               .size = (256 - 4) * SIZE_1M,
+               .offset = MTDPART_OFS_APPEND,
+               .size = MTDPART_SIZ_FULL,
        },
 };
 
@@ -333,7 +333,7 @@ static struct platform_device bf54x_sdh_device = {
 static struct mtd_partition ezkit_partitions[] = {
        {
                .name       = "Bootloader",
-               .size       = 0x20000,
+               .size       = 0x40000,
                .offset     = 0,
        }, {
                .name       = "Kernel",
@@ -381,8 +381,8 @@ static struct mtd_partition bfin_spi_flash_partitions[] = {
                .mask_flags = MTD_CAP_ROM
        }, {
                .name = "linux kernel",
-               .size = 0x1c0000,
-               .offset = 0x40000
+               .size = MTDPART_SIZ_FULL,
+               .offset = MTDPART_OFS_APPEND,
        }
 };
 
@@ -594,6 +594,19 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+       .start = 0,
+       .end   = MAX_BLACKFIN_GPIOS - 1,
+       .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+       .name = "simple-gpio",
+       .id = -1,
+       .num_resources = 1,
+       .resource = &bfin_gpios_resources,
+};
+
 static struct platform_device *ezkit_devices[] __initdata = {
 #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
        &rtc_device,
@@ -646,6 +659,8 @@ static struct platform_device *ezkit_devices[] __initdata = {
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
        &bfin_device_gpiokeys,
 #endif
+
+       &bfin_gpios_device,
        &ezkit_flash_device,
 };
 
index 374803a8d2e87bf55ecb57ea8e30ff175dd8b65f..f5479298bb7913fd55baee1538ed310d090f8cec 100644 (file)
@@ -27,6 +27,8 @@
  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#include <linux/module.h>
+
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
index 74fe258421a5546c88eb231f8265c6e5aba22fa9..46222a75321a5d7f8fa21e103b4ce9ae666967cd 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/blackfin.h>
 #include <asm/trace.h>
 #if CONFIG_BFIN_KERNEL_CLOCK
 
 #define INITIAL_STACK   0xFFB01000
 
-.text
+__INIT
 
 ENTRY(__start)
-ENTRY(__stext)
        /* R0: argument of command line string, passed from uboot, save it */
        R7 = R0;
        /* Enable Cycle Counter and Nesting Of Interrupts */
@@ -213,6 +213,7 @@ ENTRY(__stext)
 
 .LWAIT_HERE:
        jump .LWAIT_HERE;
+ENDPROC(__start)
 
 ENTRY(_real_start)
        [ -- sp ] = reti;
@@ -285,6 +286,9 @@ ENTRY(_real_start)
        call _start_kernel;
 .L_exit:
        jump.s  .L_exit;
+ENDPROC(_real_start)
+
+__FINIT
 
 .section .l1.text
 #if CONFIG_BFIN_KERNEL_CLOCK
@@ -450,6 +454,7 @@ ENTRY(_start_dma_code)
        SSYNC;
 
        RTS;
+ENDPROC(_start_dma_code)
 #endif /* CONFIG_BFIN_KERNEL_CLOCK */
 
 .data
index 43c1b0982819bb6557b6d380cf09e4d21ade30a9..d357f648d963295f630b5db160607804a3bb9ba5 100644 (file)
@@ -223,7 +223,7 @@ static struct platform_device bfin_uart_device = {
 static struct mtd_partition ezkit_partitions[] = {
        {
                .name       = "Bootloader",
-               .size       = 0x20000,
+               .size       = 0x40000,
                .offset     = 0,
        }, {
                .name       = "Kernel",
@@ -389,6 +389,19 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+       .start = 0,
+       .end   = MAX_BLACKFIN_GPIOS - 1,
+       .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+       .name = "simple-gpio",
+       .id = -1,
+       .num_resources = 1,
+       .resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
 #include <linux/i2c-gpio.h>
 
@@ -446,6 +459,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
        &isp1362_hcd_device,
 #endif
 
+       &bfin_gpios_device,
        &ezkit_flash_device,
 };
 
index b80ddd8b232decc3203490c79ae04071d7a8cae4..9d45aa3265b19a399e403045c1cda8ad3cac5130 100644 (file)
 #include <asm/blackfin.h>
 #include <asm/mach/irq.h>
 
-.text
-
-ENTRY(_unmask_wdog_wakeup_evt)
-       [--SP] = ( R7:0, P5:0 );
-#if defined(CONFIG_BF561)
-       P0.H = hi(SICA_IWR1);
-       P0.L = lo(SICA_IWR1);
-#elif defined(CONFIG_BF54x) || defined(CONFIG_BF52x)
-       P0.h = HI(SIC_IWR0);
-       P0.l = LO(SIC_IWR0);
-#else
-       P0.h = HI(SIC_IWR);
-       P0.l = LO(SIC_IWR);
-#endif
-       R7 = [P0];
-#if defined(CONFIG_BF561)
-       BITSET(R7, 27);
-#else
-       BITSET(R7,(IRQ_WATCH - IVG7));
-#endif
-       [P0] = R7;
-       SSYNC;
-
-       ( R7:0, P5:0 ) = [SP++];
-       RTS;
-
-.LWRITE_TO_STAT:
-       /* When watch dog timer is enabled, a write to STAT will load the
-        * contents of CNT to STAT
-        */
-       R7 = 0x0000(z);
-#if defined(CONFIG_BF561)
-       P0.h = HI(WDOGA_STAT);
-       P0.l = LO(WDOGA_STAT);
-#else
-       P0.h = HI(WDOG_STAT);
-       P0.l = LO(WDOG_STAT);
-#endif
-       [P0] = R7;
-       SSYNC;
-       JUMP .LSKIP_WRITE_TO_STAT;
-
-ENTRY(_program_wdog_timer)
-       [--SP] = ( R7:0, P5:0 );
-#if defined(CONFIG_BF561)
-       P0.h = HI(WDOGA_CNT);
-       P0.l = LO(WDOGA_CNT);
-#else
-       P0.h = HI(WDOG_CNT);
-       P0.l = LO(WDOG_CNT);
-#endif
-       [P0] = R0;
-       SSYNC;
-
-#if defined(CONFIG_BF561)
-       P0.h = HI(WDOGA_CTL);
-       P0.l = LO(WDOGA_CTL);
-#else
-       P0.h = HI(WDOG_CTL);
-       P0.l = LO(WDOG_CTL);
-#endif
-       R7 = W[P0](Z);
-       CC = BITTST(R7,1);
-       if !CC JUMP .LWRITE_TO_STAT;
-       CC = BITTST(R7,2);
-       if !CC JUMP .LWRITE_TO_STAT;
-
-.LSKIP_WRITE_TO_STAT:
-#if defined(CONFIG_BF561)
-       P0.h = HI(WDOGA_CTL);
-       P0.l = LO(WDOGA_CTL);
-#else
-       P0.h = HI(WDOG_CTL);
-       P0.l = LO(WDOG_CTL);
-#endif
-       R7 = W[P0](Z);
-       BITCLR(R7,1);   /* Enable GP event */
-       BITSET(R7,2);
-       W[P0] = R7.L;
-       SSYNC;
-       NOP;
-
-       R7 = W[P0](Z);
-       BITCLR(R7,4);   /* Enable the wdog counter */
-       W[P0] = R7.L;
-       SSYNC;
-
-       ( R7:0, P5:0 ) = [SP++];
-       RTS;
-
-ENTRY(_clear_wdog_wakeup_evt)
-       [--SP] = ( R7:0, P5:0 );
-
-#if defined(CONFIG_BF561)
-       P0.h = HI(WDOGA_CTL);
-       P0.l = LO(WDOGA_CTL);
-#else
-       P0.h = HI(WDOG_CTL);
-       P0.l = LO(WDOG_CTL);
-#endif
-       R7 = 0x0AD6(Z);
-       W[P0] = R7.L;
-       SSYNC;
-
-       R7 = W[P0](Z);
-       BITSET(R7,15);
-       W[P0] = R7.L;
-       SSYNC;
-
-       R7 = W[P0](Z);
-       BITSET(R7,1);
-       BITSET(R7,2);
-       W[P0] = R7.L;
-       SSYNC;
-
-       ( R7:0, P5:0 ) = [SP++];
-       RTS;
-
-ENTRY(_disable_wdog_timer)
-       [--SP] = ( R7:0, P5:0 );
-#if defined(CONFIG_BF561)
-       P0.h = HI(WDOGA_CTL);
-       P0.l = LO(WDOGA_CTL);
-#else
-       P0.h = HI(WDOG_CTL);
-       P0.l = LO(WDOG_CTL);
-#endif
-       R7 = 0xAD6(Z);
-       W[P0] = R7.L;
-       SSYNC;
-       ( R7:0, P5:0 ) = [SP++];
-       RTS;
-
-#if !defined(CONFIG_BF561)
 
 .section .l1.text
 
@@ -459,10 +325,12 @@ ENTRY(_set_sic_iwr)
        RTS;
 
 ENTRY(_set_rtc_istat)
+#ifndef CONFIG_BF561
        P0.H = hi(RTC_ISTAT);
        P0.L = lo(RTC_ISTAT);
        w[P0] = R0.L;
        SSYNC;
+#endif
        RTS;
 
 ENTRY(_test_pll_locked)
@@ -473,4 +341,3 @@ ENTRY(_test_pll_locked)
        CC = BITTST(R0,5);
        IF !CC JUMP 1b;
        RTS;
-#endif
index 880595afe98da61b2ade7323701b17de6b4070b6..225ef14af75eade0ef629575cc5ab646db063796 100644 (file)
@@ -74,7 +74,7 @@ unsigned long bfin_sic_iwr[3];        /* Up to 3 SIC_IWRx registers */
 #endif
 
 struct ivgx {
-       /* irq number for request_irq, available in mach-bf533/irq.h */
+       /* irq number for request_irq, available in mach-bf5xx/irq.h */
        unsigned int irqno;
        /* corresponding bit in the SIC_ISR register */
        unsigned int isrflag;
@@ -86,7 +86,6 @@ struct ivg_slice {
        struct ivgx *istop;
 } ivg7_13[IVG13 - IVG7 + 1];
 
-static void search_IAR(void);
 
 /*
  * Search SIC_IAR and fill tables with the irqvalues
@@ -120,10 +119,10 @@ static void __init search_IAR(void)
 }
 
 /*
- * This is for BF533 internal IRQs
+ * This is for core internal IRQs
  */
 
-static void ack_noop(unsigned int irq)
+static void bfin_ack_noop(unsigned int irq)
 {
        /* Dummy function.  */
 }
@@ -156,11 +155,11 @@ static void bfin_internal_mask_irq(unsigned int irq)
 {
 #ifdef CONFIG_BF53x
        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
-                            ~(1 << (irq - (IRQ_CORETMR + 1))));
+                            ~(1 << SIC_SYSIRQ(irq)));
 #else
        unsigned mask_bank, mask_bit;
-       mask_bank = (irq - (IRQ_CORETMR + 1)) / 32;
-       mask_bit = (irq - (IRQ_CORETMR + 1)) % 32;
+       mask_bank = SIC_SYSIRQ(irq) / 32;
+       mask_bit = SIC_SYSIRQ(irq) % 32;
        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
                             ~(1 << mask_bit));
 #endif
@@ -171,11 +170,11 @@ static void bfin_internal_unmask_irq(unsigned int irq)
 {
 #ifdef CONFIG_BF53x
        bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
-                            (1 << (irq - (IRQ_CORETMR + 1))));
+                            (1 << SIC_SYSIRQ(irq)));
 #else
        unsigned mask_bank, mask_bit;
-       mask_bank = (irq - (IRQ_CORETMR + 1)) / 32;
-       mask_bit = (irq - (IRQ_CORETMR + 1)) % 32;
+       mask_bank = SIC_SYSIRQ(irq) / 32;
+       mask_bit = SIC_SYSIRQ(irq) % 32;
        bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
                             (1 << mask_bit));
 #endif
@@ -187,8 +186,8 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 {
        unsigned bank, bit;
        unsigned long flags;
-       bank = (irq - (IRQ_CORETMR + 1)) / 32;
-       bit = (irq - (IRQ_CORETMR + 1)) % 32;
+       bank = SIC_SYSIRQ(irq) / 32;
+       bit = SIC_SYSIRQ(irq) % 32;
 
        local_irq_save(flags);
 
@@ -204,15 +203,18 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 #endif
 
 static struct irq_chip bfin_core_irqchip = {
-       .ack = ack_noop,
+       .ack = bfin_ack_noop,
        .mask = bfin_core_mask_irq,
        .unmask = bfin_core_unmask_irq,
 };
 
 static struct irq_chip bfin_internal_irqchip = {
-       .ack = ack_noop,
+       .ack = bfin_ack_noop,
        .mask = bfin_internal_mask_irq,
        .unmask = bfin_internal_unmask_irq,
+       .mask_ack = bfin_internal_mask_irq,
+       .disable = bfin_internal_mask_irq,
+       .enable = bfin_internal_unmask_irq,
 #ifdef CONFIG_PM
        .set_wake = bfin_internal_set_wake,
 #endif
@@ -221,38 +223,23 @@ static struct irq_chip bfin_internal_irqchip = {
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 static int error_int_mask;
 
-static void bfin_generic_error_ack_irq(unsigned int irq)
-{
-
-}
-
 static void bfin_generic_error_mask_irq(unsigned int irq)
 {
        error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
 
-       if (!error_int_mask) {
-               local_irq_disable();
-               bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
-                                    ~(1 << (IRQ_GENERIC_ERROR -
-                                       (IRQ_CORETMR + 1))));
-               SSYNC();
-               local_irq_enable();
-       }
+       if (!error_int_mask)
+               bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
 }
 
 static void bfin_generic_error_unmask_irq(unsigned int irq)
 {
-       local_irq_disable();
-       bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 1 <<
-                            (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1)));
-       SSYNC();
-       local_irq_enable();
-
+       bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
        error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
 }
 
 static struct irq_chip bfin_generic_error_irqchip = {
-       .ack = bfin_generic_error_ack_irq,
+       .ack = bfin_ack_noop,
+       .mask_ack = bfin_generic_error_mask_irq,
        .mask = bfin_generic_error_mask_irq,
        .unmask = bfin_generic_error_unmask_irq,
 };
@@ -608,7 +595,7 @@ static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
        (struct pin_int_t *)PINT3_MASK_SET,
 };
 
-unsigned short get_irq_base(u8 bank, u8 bmap)
+inline unsigned short get_irq_base(u8 bank, u8 bmap)
 {
 
        u16 irq_base;
@@ -969,17 +956,12 @@ int __init init_arch_irq(void)
 #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
        bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
        bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
-       bfin_write_SIC_IWR0(IWR_ENABLE_ALL);
-       bfin_write_SIC_IWR1(IWR_ENABLE_ALL);
 # ifdef CONFIG_BF54x
        bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
-       bfin_write_SIC_IWR2(IWR_ENABLE_ALL);
 # endif
 #else
        bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
-       bfin_write_SIC_IWR(IWR_ENABLE_ALL);
 #endif
-       SSYNC();
 
        local_irq_disable();
 
@@ -1001,90 +983,53 @@ int __init init_arch_irq(void)
                        set_irq_chip(irq, &bfin_core_irqchip);
                else
                        set_irq_chip(irq, &bfin_internal_irqchip);
-#ifdef BF537_GENERIC_ERROR_INT_DEMUX
-               if (irq != IRQ_GENERIC_ERROR) {
-#endif
 
-                       switch (irq) {
+               switch (irq) {
 #if defined(CONFIG_BF53x)
-                       case IRQ_PROG_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
+               case IRQ_PROG_INTA:
 # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
-                       case IRQ_MAC_RX:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
+               case IRQ_MAC_RX:
 # endif
 #elif defined(CONFIG_BF54x)
-                       case IRQ_PINT0:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PINT1:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PINT2:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PINT3:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
+               case IRQ_PINT0:
+               case IRQ_PINT1:
+               case IRQ_PINT2:
+               case IRQ_PINT3:
 #elif defined(CONFIG_BF52x)
-                       case IRQ_PORTF_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PORTG_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PORTH_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
+               case IRQ_PORTF_INTA:
+               case IRQ_PORTG_INTA:
+               case IRQ_PORTH_INTA:
 #elif defined(CONFIG_BF561)
-                       case IRQ_PROG0_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PROG1_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
-                       case IRQ_PROG2_INTA:
-                               set_irq_chained_handler(irq,
-                                                       bfin_demux_gpio_irq);
-                               break;
+               case IRQ_PROG0_INTA:
+               case IRQ_PROG1_INTA:
+               case IRQ_PROG2_INTA:
 #endif
-                       default:
-                               set_irq_handler(irq, handle_simple_irq);
-                               break;
-                       }
-
+                       set_irq_chained_handler(irq,
+                                               bfin_demux_gpio_irq);
+                       break;
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
-               } else {
+               case IRQ_GENERIC_ERROR:
                        set_irq_handler(irq, bfin_demux_error_irq);
-               }
+
+                       break;
 #endif
+               default:
+                       set_irq_handler(irq, handle_simple_irq);
+                       break;
+               }
        }
+
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
-       for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) {
-               set_irq_chip(irq, &bfin_generic_error_irqchip);
-               set_irq_handler(irq, handle_level_irq);
-       }
+       for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
+               set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
+                                        handle_level_irq);
 #endif
 
-       for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++) {
+       /* if configured as edge, then will be changed to do_edge_IRQ */
+       for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
+               set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
+                                        handle_level_irq);
 
-               set_irq_chip(irq, &bfin_gpio_irqchip);
-               /* if configured as edge, then will be changed to do_edge_IRQ */
-               set_irq_handler(irq, handle_level_irq);
-       }
 
        bfin_write_IMASK(0);
        CSYNC();
@@ -1106,6 +1051,16 @@ int __init init_arch_irq(void)
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
            IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
 
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+       bfin_write_SIC_IWR0(IWR_ENABLE_ALL);
+       bfin_write_SIC_IWR1(IWR_ENABLE_ALL);
+# ifdef CONFIG_BF54x
+       bfin_write_SIC_IWR2(IWR_ENABLE_ALL);
+# endif
+#else
+       bfin_write_SIC_IWR(IWR_ENABLE_ALL);
+#endif
+
        return 0;
 }
 
@@ -1122,7 +1077,6 @@ void do_irq(int vec, struct pt_regs *fp)
 #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
                unsigned long sic_status[3];
 
-               SSYNC();
                sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
                sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
 #ifdef CONFIG_BF54x
@@ -1138,7 +1092,7 @@ void do_irq(int vec, struct pt_regs *fp)
                }
 #else
                unsigned long sic_status;
-               SSYNC();
+
                sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
 
                for (;; ivg++) {
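
Two themes run through the ints-priority.c changes above: the open-coded `irq - (IRQ_CORETMR + 1)` translation from a Linux irq number to a SIC bit index is replaced everywhere by SIC_SYSIRQ(irq), and the wake-up registers (SIC_IWR*) are now programmed at the end of init_arch_irq() rather than before the irq_chip setup, with the generic-error chip reusing bfin_internal_mask_irq()/bfin_internal_unmask_irq() instead of poking SIC_IMASK directly. The macro itself is not part of this diff; given the one-for-one substitution it presumably reduces to something like:

/* Assumed definition -- the real one lives in the mach-specific irq headers,
 * which this diff does not show. */
#define SIC_SYSIRQ(irq)	((irq) - (IRQ_CORETMR + 1))
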
index 1f516c55bde676e14f3ede42e5031cd2ff3a5d40..ec3141fefd20da1554e08d745a8a34f175a04b72 100644 (file)
@@ -181,7 +181,7 @@ void __init mem_init(void)
        }
 }
 
-static __init void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
 {
        unsigned long addr;
        /* next to check that the page we free is not a partial page */
@@ -203,7 +203,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-void __init free_initmem(void)
+void __init_refok free_initmem(void)
 {
 #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
        free_init_pages("unused kernel memory",
index 9310a7b476e95cf6353180de4e88bc8ba4f9f592..525483f0ddf89f3ec0fe0d14136710f86bbd9d6c 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/swap.h>
 #include <linux/sched.h>
 #include <linux/init.h>
-#include <linux/vmstat.h>
+#include <linux/mm.h>
 #include <asm/arch/svinto.h>
 #include <asm/types.h>
 #include <asm/signal.h>
index 7161a2bef4fe341e694780106ced3cccac5421c3..c7bd6ebdc93c0b18aa6d2e9efe213f02d01ad98c 100644 (file)
@@ -1,55 +1,59 @@
-/*#************************************************************************#*/
-/*#-------------------------------------------------------------------------*/
-/*#                                                                         */
-/*# FUNCTION NAME: memcpy()                                                 */
-/*#                                                                         */
-/*# PARAMETERS:  void* dst;   Destination address.                          */
-/*#              void* src;   Source address.                               */
-/*#              int   len;   Number of bytes to copy.                      */
-/*#                                                                         */
-/*# RETURNS:     dst.                                                       */
-/*#                                                                         */
-/*# DESCRIPTION: Copies len bytes of memory from src to dst.  No guarantees */
-/*#              about copying of overlapping memory areas. This routine is */
-/*#              very sensitive to compiler changes in register allocation. */
-/*#              Should really be rewritten to avoid this problem.          */
-/*#                                                                         */
-/*#-------------------------------------------------------------------------*/
-/*#                                                                         */
-/*# HISTORY                                                                 */
-/*#                                                                         */
-/*# DATE      NAME            CHANGES                                       */
-/*# ----      ----            -------                                       */
-/*# 941007    Kenny R         Creation                                      */
-/*# 941011    Kenny R         Lots of optimizations and inlining.           */
-/*# 941129    Ulf A           Adapted for use in libc.                      */
-/*# 950216    HP              N==0 forgotten if non-aligned src/dst.        */
-/*#                           Added some optimizations.                     */
-/*# 001025    HP              Make src and dst char *.  Align dst to       */
-/*#                          dword, not just word-if-both-src-and-dst-     */
-/*#                          are-misaligned.                               */
-/*#                                                                         */
-/*#-------------------------------------------------------------------------*/
-
-#include <linux/types.h>
-
-void *memcpy(void *pdst,
-             const void *psrc,
-             size_t pn)
+/* A memcpy for CRIS.
+   Copyright (C) 1994-2005 Axis Communications.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Neither the name of Axis Communications nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
+   COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+   HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+   STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+   IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.  */
+
+/* FIXME: This file should really only be used for reference, as the
+   result is somewhat depending on gcc generating what we expect rather
+   than what we describe.  An assembly file should be used instead.  */
+
+#include <stddef.h>
+
+/* Break even between movem and move16 is really at 38.7 * 2, but
+   modulo 44, so up to the next multiple of 44, we use ordinary code.  */
+#define MEMCPY_BY_BLOCK_THRESHOLD (44 * 2)
+
+/* No name ambiguities in this file.  */
+__asm__ (".syntax no_register_prefix");
+
+void *
+memcpy(void *pdst, const void *psrc, size_t pn)
 {
-  /* Ok.  Now we want the parameters put in special registers.
+  /* Now we want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
-      As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
+     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
 
-     If gcc was alright, it really would need no temporaries, and no
-     stack space to save stuff on. */
+     If gcc was allright, it really would need no temporaries, and no
+     stack space to save stuff on.  */
 
   register void *return_dst __asm__ ("r10") = pdst;
-  register char *dst __asm__ ("r13") = pdst;
-  register const char *src __asm__ ("r11") = psrc;
+  register unsigned char *dst __asm__ ("r13") = pdst;
+  register unsigned const char *src __asm__ ("r11") = psrc;
   register int n __asm__ ("r12") = pn;
-  
+
   /* When src is aligned but not dst, this makes a few extra needless
      cycles.  I believe it would take as many to check that the
      re-alignment was unnecessary.  */
@@ -59,167 +63,174 @@ void *memcpy(void *pdst,
       && n >= 3)
   {
     if ((unsigned long) dst & 1)
-    {
-      n--;
-      *(char*)dst = *(char*)src;
-      src++;
-      dst++;
-    }
+      {
+       n--;
+       *dst = *src;
+       src++;
+       dst++;
+      }
 
     if ((unsigned long) dst & 2)
-    {
-      n -= 2;
-      *(short*)dst = *(short*)src;
-      src += 2;
-      dst += 2;
-    }
+      {
+       n -= 2;
+       *(short *) dst = *(short *) src;
+       src += 2;
+       dst += 2;
+      }
   }
 
-  /* Decide which copying method to use. */
-  if (n >= 44*2)                /* Break even between movem and
-                                   move16 is at 38.7*2, but modulo 44. */
-  {
-    /* For large copies we use 'movem' */
-
-  /* It is not optimal to tell the compiler about clobbering any
-     registers; that will move the saving/restoring of those registers
-     to the function prologue/epilogue, and make non-movem sizes
-     suboptimal.
-
-      This method is not foolproof; it assumes that the "asm reg"
-     declarations at the beginning of the function really are used
-     here (beware: they may be moved to temporary registers).
-      This way, we do not have to save/move the registers around into
-     temporaries; we can safely use them straight away.
-
-      If you want to check that the allocation was right; then
-      check the equalities in the first comment.  It should say
-      "r13=r13, r11=r11, r12=r12" */
-    __asm__ volatile ("\n\
-       ;; Check that the following is true (same register names on     \n\
-       ;; both sides of equal sign, as in r8=r8):                      \n\
-       ;; %0=r13, %1=r11, %2=r12                                       \n\
-       ;;                                                              \n\
-       ;; Save the registers we'll use in the movem process            \n\
-       ;; on the stack.                                                \n\
-       subq    11*4,$sp                                                \n\
-       movem   $r10,[$sp]                                              \n\
+  /* Decide which copying method to use.  */
+  if (n >= MEMCPY_BY_BLOCK_THRESHOLD)
+    {
+      /* It is not optimal to tell the compiler about clobbering any
+        registers; that will move the saving/restoring of those registers
+        to the function prologue/epilogue, and make non-movem sizes
+        suboptimal.  */
+      __asm__ volatile
+       ("\
+        ;; GCC does promise correct register allocations, but let's    \n\
+        ;; make sure it keeps its promises.                            \n\
+        .ifnc %0-%1-%2,$r13-$r11-$r12                                  \n\
+        .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\"       \n\
+        .endif                                                         \n\
+                                                                       \n\
+        ;; Save the registers we'll use in the movem process           \n\
+        ;; on the stack.                                               \n\
+        subq   11*4,sp                                                 \n\
+        movem  r10,[sp]                                                \n\
                                                                        \n\
-       ;; Now we've got this:                                          \n\
-       ;; r11 - src                                                    \n\
-       ;; r13 - dst                                                    \n\
-       ;; r12 - n                                                      \n\
+        ;; Now we've got this:                                         \n\
+        ;; r11 - src                                                   \n\
+        ;; r13 - dst                                                   \n\
+        ;; r12 - n                                                     \n\
                                                                        \n\
-       ;; Update n for the first loop                                  \n\
-       subq    44,$r12                                                 \n\
+        ;; Update n for the first loop.                                \n\
+        subq    44,r12                                                 \n\
 0:                                                                     \n\
-       movem   [$r11+],$r10                                            \n\
-       subq    44,$r12                                                 \n\
-       bge     0b                                                      \n\
-       movem   $r10,[$r13+]                                            \n\
+"
+#ifdef __arch_common_v10_v32
+        /* Cater to branch offset difference between v32 and v10.  We
+           assume the branch below has an 8-bit offset.  */
+"       setf\n"
+#endif
+"       movem  [r11+],r10                                              \n\
+        subq   44,r12                                                  \n\
+        bge     0b                                                     \n\
+        movem  r10,[r13+]                                              \n\
                                                                        \n\
-       addq    44,$r12 ;; compensate for last loop underflowing n      \n\
+        ;; Compensate for last loop underflowing n.                    \n\
+        addq   44,r12                                                  \n\
                                                                        \n\
-       ;; Restore registers from stack                                 \n\
-       movem   [$sp+],$r10"
+        ;; Restore registers from stack.                               \n\
+        movem [sp+],r10"
 
-     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n) 
-     /* Inputs */ : "0" (dst), "1" (src), "2" (n));
-    
-  }
+        /* Outputs.  */
+        : "=r" (dst), "=r" (src), "=r" (n)
 
-  /* Either we directly starts copying, using dword copying
-     in a loop, or we copy as much as possible with 'movem' 
-     and then the last block (<44 bytes) is copied here.
-     This will work since 'movem' will have updated src,dst,n. */
+        /* Inputs.  */
+        : "0" (dst), "1" (src), "2" (n));
+    }
 
-  while ( n >= 16 )
-  {
-    *((long*)dst)++ = *((long*)src)++;
-    *((long*)dst)++ = *((long*)src)++;
-    *((long*)dst)++ = *((long*)src)++;
-    *((long*)dst)++ = *((long*)src)++;
-    n -= 16;
-  }
+  while (n >= 16)
+    {
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+
+      n -= 16;
+    }
 
-  /* A switch() is definitely the fastest although it takes a LOT of code.
-   * Particularly if you inline code this.
-   */
   switch (n)
-  {
+    {
     case 0:
       break;
+
     case 1:
-      *(char*)dst = *(char*)src;
+      *dst = *src;
       break;
+
     case 2:
-      *(short*)dst = *(short*)src;
+      *(short *) dst = *(short *) src;
       break;
+
     case 3:
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
+
     case 4:
-      *((long*)dst)++ = *((long*)src)++;
+      *(long *) dst = *(long *) src;
       break;
+
     case 5:
-      *((long*)dst)++ = *((long*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *dst = *src;
       break;
+
     case 6:
-      *((long*)dst)++ = *((long*)src)++;
-      *(short*)dst = *(short*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src;
       break;
+
     case 7:
-      *((long*)dst)++ = *((long*)src)++;
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
+
     case 8:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src;
       break;
+
     case 9:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *dst = *src;
       break;
+
     case 10:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(short*)dst = *(short*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src;
       break;
+
     case 11:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
+
     case 12:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src;
       break;
+
     case 13:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *dst = *src;
       break;
+
     case 14:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(short*)dst = *(short*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src;
       break;
+
     case 15:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
-  }
+    }
 
-  return return_dst; /* destination pointer. */
-} /* memcpy() */
+  return return_dst;
+}
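
The rewritten CRIS memcpy() keeps the original structure -- optional byte/word pre-alignment of dst, a movem block loop for large copies, a 16-bytes-per-iteration dword loop, and a switch over the 0-15 byte tail -- but drops the `*((long *)dst)++` cast-as-lvalue GCC extension in favour of plain pointer arithmetic, and gives the block-copy cutoff a name. The arithmetic behind that cutoff, restated from the comments and asm above (MOVEM_BLOCK_BYTES is our name; the numbers come from the file):

/* movem transfers r0..r10 in one instruction: 11 registers * 4 bytes = 44
 * bytes per loop iteration (hence the "subq 11*4,sp" save and the subq 44
 * updates of n).  The measured break-even against move16 is about
 * 38.7 * 2 = 77.4 bytes, so the threshold rounds that up to the next
 * multiple of 44. */
#define MOVEM_BLOCK_BYTES	  (11 * 4)			/* 44 */
#define MEMCPY_BY_BLOCK_THRESHOLD (MOVEM_BLOCK_BYTES * 2)	/* 88 */
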
index b8e6c0430e5b8832b6c9a5314c4f21a371405e57..b0a608da7bd13d5eff6f2e925153dbd7c47eadb0 100644 (file)
@@ -193,7 +193,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
    inaccessible.  */
 
 unsigned long
-__copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn)
+__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
index 6740b2cebae5169d05a09d69ad5f99851394d8bc..c7bd6ebdc93c0b18aa6d2e9efe213f02d01ad98c 100644 (file)
@@ -1,55 +1,59 @@
-/*#************************************************************************#*/
-/*#-------------------------------------------------------------------------*/
-/*#                                                                         */
-/*# FUNCTION NAME: memcpy()                                                 */
-/*#                                                                         */
-/*# PARAMETERS:  void* dst;   Destination address.                          */
-/*#              void* src;   Source address.                               */
-/*#              int   len;   Number of bytes to copy.                      */
-/*#                                                                         */
-/*# RETURNS:     dst.                                                       */
-/*#                                                                         */
-/*# DESCRIPTION: Copies len bytes of memory from src to dst.  No guarantees */
-/*#              about copying of overlapping memory areas. This routine is */
-/*#              very sensitive to compiler changes in register allocation. */
-/*#              Should really be rewritten to avoid this problem.          */
-/*#                                                                         */
-/*#-------------------------------------------------------------------------*/
-/*#                                                                         */
-/*# HISTORY                                                                 */
-/*#                                                                         */
-/*# DATE      NAME            CHANGES                                       */
-/*# ----      ----            -------                                       */
-/*# 941007    Kenny R         Creation                                      */
-/*# 941011    Kenny R         Lots of optimizations and inlining.           */
-/*# 941129    Ulf A           Adapted for use in libc.                      */
-/*# 950216    HP              N==0 forgotten if non-aligned src/dst.        */
-/*#                           Added some optimizations.                     */
-/*# 001025    HP              Make src and dst char *.  Align dst to       */
-/*#                          dword, not just word-if-both-src-and-dst-     */
-/*#                          are-misaligned.                               */
-/*#                                                                         */
-/*#-------------------------------------------------------------------------*/
-
-#include <linux/types.h>
-
-void *memcpy(void *pdst,
-             const void *psrc,
-             size_t pn)
+/* A memcpy for CRIS.
+   Copyright (C) 1994-2005 Axis Communications.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Neither the name of Axis Communications nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
+   COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+   HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+   STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+   IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.  */
+
+/* FIXME: This file should really only be used for reference, as the
+   result is somewhat dependent on gcc generating what we expect rather
+   than what we describe.  An assembly file should be used instead.  */
+
+#include <stddef.h>
+
+/* Break even between movem and move16 is really at 38.7 * 2, but
+   modulo 44, so up to the next multiple of 44, we use ordinary code.  */
+#define MEMCPY_BY_BLOCK_THRESHOLD (44 * 2)
+
+/* No name ambiguities in this file.  */
+__asm__ (".syntax no_register_prefix");
+
+void *
+memcpy(void *pdst, const void *psrc, size_t pn)
 {
-  /* Ok.  Now we want the parameters put in special registers.
+  /* Now we want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
-      As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
+     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
 
-     If gcc was alright, it really would need no temporaries, and no
-     stack space to save stuff on. */
+     If gcc was alright, it really would need no temporaries, and no
+     stack space to save stuff on.  */
 
   register void *return_dst __asm__ ("r10") = pdst;
-  register char *dst __asm__ ("r13") = pdst;
-  register const char *src __asm__ ("r11") = psrc;
+  register unsigned char *dst __asm__ ("r13") = pdst;
+  register unsigned const char *src __asm__ ("r11") = psrc;
   register int n __asm__ ("r12") = pn;
 
-
   /* When src is aligned but not dst, this makes a few extra needless
      cycles.  I believe it would take as many to check that the
      re-alignment was unnecessary.  */
@@ -59,161 +63,174 @@ void *memcpy(void *pdst,
       && n >= 3)
   {
     if ((unsigned long) dst & 1)
-    {
-      n--;
-      *(char*)dst = *(char*)src;
-      src++;
-      dst++;
-    }
+      {
+       n--;
+       *dst = *src;
+       src++;
+       dst++;
+      }
 
     if ((unsigned long) dst & 2)
-    {
-      n -= 2;
-      *(short*)dst = *(short*)src;
-      src += 2;
-      dst += 2;
-    }
+      {
+       n -= 2;
+       *(short *) dst = *(short *) src;
+       src += 2;
+       dst += 2;
+      }
   }
 
-  /* Decide which copying method to use.  Movem is dirt cheap, so the
-     overheap is low enough to always use the minimum block size as the
-     threshold.  */
-  if (n >= 44)
-  {
-    /* For large copies we use 'movem' */
-
-  /* It is not optimal to tell the compiler about clobbering any
-     registers; that will move the saving/restoring of those registers
-     to the function prologue/epilogue, and make non-movem sizes
-     suboptimal.  */
-    __asm__ volatile ("                                                        \n\
-        ;; Check that the register asm declaration got right.          \n\
-        ;; The GCC manual explicitly says TRT will happen.             \n\
-       .ifnc %0-%1-%2,$r13-$r11-$r12                                   \n\
-       .err                                                            \n\
-       .endif                                                          \n\
-                                                                       \n\
-       ;; Save the registers we'll use in the movem process            \n\
+  /* Decide which copying method to use.  */
+  if (n >= MEMCPY_BY_BLOCK_THRESHOLD)
+    {
+      /* It is not optimal to tell the compiler about clobbering any
+        registers; that will move the saving/restoring of those registers
+        to the function prologue/epilogue, and make non-movem sizes
+        suboptimal.  */
+      __asm__ volatile
+       ("\
+        ;; GCC does promise correct register allocations, but let's    \n\
+        ;; make sure it keeps its promises.                            \n\
+        .ifnc %0-%1-%2,$r13-$r11-$r12                                  \n\
+        .error \"GCC reg alloc bug: %0-%1-%2 != $r13-$r11-$r12\"       \n\
+        .endif                                                         \n\
                                                                        \n\
-       ;; on the stack.                                                \n\
-       subq    11*4,$sp                                                \n\
-       movem   $r10,[$sp]                                              \n\
+        ;; Save the registers we'll use in the movem process           \n\
+        ;; on the stack.                                               \n\
+        subq   11*4,sp                                                 \n\
+        movem  r10,[sp]                                                \n\
                                                                        \n\
-        ;; Now we've got this:                                         \n\
-       ;; r11 - src                                                    \n\
-       ;; r13 - dst                                                    \n\
-       ;; r12 - n                                                      \n\
+        ;; Now we've got this:                                         \n\
+        ;; r11 - src                                                   \n\
+        ;; r13 - dst                                                   \n\
+        ;; r12 - n                                                     \n\
                                                                        \n\
-        ;; Update n for the first loop                                 \n\
-        subq    44,$r12                                                        \n\
+        ;; Update n for the first loop.                                \n\
+        subq    44,r12                                                 \n\
 0:                                                                     \n\
-       movem   [$r11+],$r10                                            \n\
-        subq   44,$r12                                                 \n\
-        bge     0b                                                     \n\
-       movem   $r10,[$r13+]                                            \n\
+"
+#ifdef __arch_common_v10_v32
+        /* Cater to branch offset difference between v32 and v10.  We
+           assume the branch below has an 8-bit offset.  */
+"       setf\n"
+#endif
+"       movem  [r11+],r10                                              \n\
+        subq   44,r12                                                  \n\
+        bge     0b                                                     \n\
+        movem  r10,[r13+]                                              \n\
                                                                        \n\
-        addq   44,$r12  ;; compensate for last loop underflowing n     \n\
+        ;; Compensate for last loop underflowing n.                    \n\
+        addq   44,r12                                                  \n\
                                                                        \n\
-       ;; Restore registers from stack                                 \n\
-        movem [$sp+],$r10"
+        ;; Restore registers from stack.                               \n\
+        movem [sp+],r10"
 
-     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n)
-     /* Inputs */ : "0" (dst), "1" (src), "2" (n));
+        /* Outputs.  */
+        : "=r" (dst), "=r" (src), "=r" (n)
 
-  }
+        /* Inputs.  */
+        : "0" (dst), "1" (src), "2" (n));
+    }
 
-  /* Either we directly starts copying, using dword copying
-     in a loop, or we copy as much as possible with 'movem'
-     and then the last block (<44 bytes) is copied here.
-     This will work since 'movem' will have updated src,dst,n. */
+  while (n >= 16)
+    {
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
 
-  while ( n >= 16 )
-  {
-    *((long*)dst)++ = *((long*)src)++;
-    *((long*)dst)++ = *((long*)src)++;
-    *((long*)dst)++ = *((long*)src)++;
-    *((long*)dst)++ = *((long*)src)++;
-    n -= 16;
-  }
+      n -= 16;
+    }
 
-  /* A switch() is definitely the fastest although it takes a LOT of code.
-   * Particularly if you inline code this.
-   */
   switch (n)
-  {
+    {
     case 0:
       break;
+
     case 1:
-      *(char*)dst = *(char*)src;
+      *dst = *src;
       break;
+
     case 2:
-      *(short*)dst = *(short*)src;
+      *(short *) dst = *(short *) src;
       break;
+
     case 3:
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
+
     case 4:
-      *((long*)dst)++ = *((long*)src)++;
+      *(long *) dst = *(long *) src;
       break;
+
     case 5:
-      *((long*)dst)++ = *((long*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *dst = *src;
       break;
+
     case 6:
-      *((long*)dst)++ = *((long*)src)++;
-      *(short*)dst = *(short*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src;
       break;
+
     case 7:
-      *((long*)dst)++ = *((long*)src)++;
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
+
     case 8:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src;
       break;
+
     case 9:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *dst = *src;
       break;
+
     case 10:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(short*)dst = *(short*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src;
       break;
+
     case 11:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
+
     case 12:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src;
       break;
+
     case 13:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *dst = *src;
       break;
+
     case 14:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *(short*)dst = *(short*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src;
       break;
+
     case 15:
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((long*)dst)++ = *((long*)src)++;
-      *((short*)dst)++ = *((short*)src)++;
-      *(char*)dst = *(char*)src;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(long *) dst = *(long *) src; dst += 4; src += 4;
+      *(short *) dst = *(short *) src; dst += 2; src += 2;
+      *dst = *src;
       break;
-  }
+    }
 
-  return return_dst; /* destination pointer. */
-} /* memcpy() */
+  return return_dst;
+}
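
The rewritten routine keeps a three-stage shape: align the destination, bulk-copy with movem once the length reaches MEMCPY_BY_BLOCK_THRESHOLD, fall back to four dword moves per pass, and finish the 0..15 byte tail with the switch. As a reading aid only, here is a portable C sketch of that dispatch (byte copies stand in for the CRIS movem/dword moves, so this is not the actual kernel routine):

#include <stddef.h>

static void *memcpy_sketch(void *pdst, const void *psrc, size_t n)
{
	unsigned char *dst = pdst;
	const unsigned char *src = psrc;
	size_t i;

	/* Align the destination first, as the CRIS code does. */
	while (((unsigned long) dst & 3) && n) {
		*dst++ = *src++;
		n--;
	}

	/* The real routine switches to movem when n >= MEMCPY_BY_BLOCK_THRESHOLD
	   (44 * 2) and otherwise copies four longs per pass; plain byte copies
	   keep this sketch portable. */
	while (n >= 16) {
		for (i = 0; i < 16; i++)
			dst[i] = src[i];
		dst += 16;
		src += 16;
		n -= 16;
	}

	/* The 0..15 byte tail, handled above by the unrolled switch. */
	while (n--)
		*dst++ = *src++;

	return pdst;
}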
index 04d0cf35a2761b531c1350c58ecf36e081961fb7..0b5b70d5f58a45ca2554daf64ccb4625c1ff725f 100644 (file)
@@ -161,7 +161,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
    inaccessible.  */
 
 unsigned long
-__copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn)
+__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
index dff9edfc7465e9954bc5b16749eaa0068e88671a..8fa3faf5ef1bb0e91c32b54c6c5b7d197c920b79 100644 (file)
@@ -18,6 +18,7 @@ config IA64
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_KPROBES
+       select HAVE_KRETPROBES
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
@@ -155,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB
 
 config IA64_SGI_SN2
        bool "SGI-SN2"
+       select NUMA
+       select ACPI_NUMA
        help
          Selecting this option will optimize the kernel for use on sn2 based
          systems, but the resulting kernel binary will not run on other
index b916ccfdef843f158f1814cc74d278d389dcec70..f1645c4f70393c5294ecc9148300c8368178d5f5 100644 (file)
@@ -11,6 +11,8 @@
 # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
 #
 
+KBUILD_DEFCONFIG := generic_defconfig
+
 NM := $(CROSS_COMPILE)nm -B
 READELF := $(CROSS_COMPILE)readelf
 
index 85e82f32e480c4707de98d4fcdb73e801f2ebc95..256a7faeda0787fc7ba6df9f48d0899dc0add99a 100644 (file)
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 
        /* This is the X/Open sanctioned signal stack switching.  */
        if (ka->sa.sa_flags & SA_ONSTACK) {
-               if (!on_sig_stack(esp))
+               int onstack = sas_ss_flags(esp);
+
+               if (onstack == 0)
                        esp = current->sas_ss_sp + current->sas_ss_size;
+               else if (onstack == SS_ONSTACK) {
+                       /*
+                        * If we are on the alternate signal stack and would
+                        * overflow it, don't. Return an always-bogus address
+                        * instead so we will die with SIGSEGV.
+                        */
+                       if (!likely(on_sig_stack(esp - frame_size)))
+                               return (void __user *) -1L;
+               }
        }
        /* Legacy stack switching not supported */
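
The hunk above replaces the plain on_sig_stack() test with sas_ss_flags(): when the task is not yet on the alternate stack it still switches over, but when it is already on that stack and the new frame would run past its end, get_sigframe() now returns an always-bogus address so the task dies with SIGSEGV instead of scribbling below the stack. A self-contained sketch of that decision, with stand-ins for the kernel's sas_ss_flags()/on_sig_stack() helpers (the struct and helpers below are illustrative, not kernel definitions), could read:

#include <stddef.h>

#define SS_ONSTACK 1
#define SS_DISABLE 2

struct alt_stack {
	unsigned long sp;	/* base of the alternate signal stack */
	size_t size;		/* 0 means the alternate stack is disabled */
};

/* Stand-in for sas_ss_flags(): where does sp sit relative to the stack? */
static int alt_flags(const struct alt_stack *ss, unsigned long sp)
{
	if (ss->size == 0)
		return SS_DISABLE;
	return (sp - ss->sp < ss->size) ? SS_ONSTACK : 0;
}

/* Returns the stack pointer to build the frame on, or 0 to force SIGSEGV. */
static unsigned long pick_frame_sp(const struct alt_stack *ss,
				   unsigned long sp, size_t frame_size)
{
	int flags = alt_flags(ss, sp);

	if (flags == 0)				/* not on it yet: switch over */
		return ss->sp + ss->size;
	if (flags == SS_ONSTACK &&		/* already on it: refuse to overflow */
	    alt_flags(ss, sp - frame_size) != SS_ONSTACK)
		return 0;
	return sp;				/* SS_DISABLE, or enough room left */
}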
 
index 398e2fd1cd2519ef2d6a2186308cd992073b0ec5..7b3292282dea90f2f7d69bb8991bd7de927212e6 100644 (file)
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
        if (cpus_empty(mask))
                return;
 
-       if (reassign_irq_vector(irq, first_cpu(mask)))
+       if (irq_prepare_move(irq, first_cpu(mask)))
                return;
 
        dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
        struct iosapic_rte_info *rte;
        int do_unmask_irq = 0;
 
+       irq_complete_move(irq);
        if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
                do_unmask_irq = 1;
                mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
        irq_desc_t *idesc = irq_desc + irq;
 
+       irq_complete_move(irq);
        move_native_irq(irq);
        /*
         * Once we have recorded IRQ_PENDING already, we can mask the
index 0b52f19ed04615b8f637e2ee496fa8c488b5546c..2b8cf6e85af4efa9e51a4f3b2b669daaa0bcd1de 100644 (file)
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR   IA64_DEF_FIRST_DEVICE_VECTOR
+
 static enum vector_domain_type {
        VECTOR_DOMAIN_NONE,
        VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
        return CPU_MASK_ALL;
 }
 
+static int __irq_prepare_move(int irq, int cpu)
+{
+       struct irq_cfg *cfg = &irq_cfg[irq];
+       int vector;
+       cpumask_t domain;
+
+       if (cfg->move_in_progress || cfg->move_cleanup_count)
+               return -EBUSY;
+       if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+               return -EINVAL;
+       if (cpu_isset(cpu, cfg->domain))
+               return 0;
+       domain = vector_allocation_domain(cpu);
+       vector = find_unassigned_vector(domain);
+       if (vector < 0)
+               return -ENOSPC;
+       cfg->move_in_progress = 1;
+       cfg->old_domain = cfg->domain;
+       cfg->vector = IRQ_VECTOR_UNASSIGNED;
+       cfg->domain = CPU_MASK_NONE;
+       BUG_ON(__bind_irq_vector(irq, vector, domain));
+       return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&vector_lock, flags);
+       ret = __irq_prepare_move(irq, cpu);
+       spin_unlock_irqrestore(&vector_lock, flags);
+       return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+       struct irq_cfg *cfg = &irq_cfg[irq];
+       cpumask_t cleanup_mask;
+       int i;
+
+       if (likely(!cfg->move_in_progress))
+               return;
+
+       if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+               return;
+
+       cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+       cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+       for_each_cpu_mask(i, cleanup_mask)
+               platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+       cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+       int me = smp_processor_id();
+       ia64_vector vector;
+       unsigned long flags;
+
+       for (vector = IA64_FIRST_DEVICE_VECTOR;
+            vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+               int irq;
+               struct irq_desc *desc;
+               struct irq_cfg *cfg;
+               irq = __get_cpu_var(vector_irq)[vector];
+               if (irq < 0)
+                       continue;
+
+               desc = irq_desc + irq;
+               cfg = irq_cfg + irq;
+               spin_lock(&desc->lock);
+               if (!cfg->move_cleanup_count)
+                       goto unlock;
+
+               if (!cpu_isset(me, cfg->old_domain))
+                       goto unlock;
+
+               spin_lock_irqsave(&vector_lock, flags);
+               __get_cpu_var(vector_irq)[vector] = -1;
+               cpu_clear(me, vector_table[vector]);
+               spin_unlock_irqrestore(&vector_lock, flags);
+               cfg->move_cleanup_count--;
+       unlock:
+               spin_unlock(&desc->lock);
+       }
+       return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+       .handler =      smp_irq_move_cleanup_interrupt,
+       .flags =        IRQF_DISABLED,
+       .name =         "irq_move"
+};
+
 static int __init parse_vector_domain(char *arg)
 {
        if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
        spin_unlock_irqrestore(&vector_lock, flags);
 }
 
-static int __reassign_irq_vector(int irq, int cpu)
-{
-       struct irq_cfg *cfg = &irq_cfg[irq];
-       int vector;
-       cpumask_t domain;
-
-       if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
-               return -EINVAL;
-       if (cpu_isset(cpu, cfg->domain))
-               return 0;
-       domain = vector_allocation_domain(cpu);
-       vector = find_unassigned_vector(domain);
-       if (vector < 0)
-               return -ENOSPC;
-       __clear_irq_vector(irq);
-       BUG_ON(__bind_irq_vector(irq, vector, domain));
-       return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&vector_lock, flags);
-       ret = __reassign_irq_vector(irq, cpu);
-       spin_unlock_irqrestore(&vector_lock, flags);
-       return ret;
-}
-
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
@@ -578,6 +645,13 @@ init_IRQ (void)
        register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
        register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
        register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+       if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+               BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+               IA64_FIRST_DEVICE_VECTOR++;
+               register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+       }
+#endif
 #endif
 #ifdef CONFIG_PERFMON
        pfm_init_percpu();
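
Together with the iosapic and MSI hunks elsewhere in this commit, the new helpers split vector migration into two phases: irq_prepare_move() reserves a vector in the target CPU's domain while the interrupt keeps running on the old one, and irq_complete_move(), called from the ack/EOI path, sends the cleanup IPI once delivery has been seen on the new CPU so smp_irq_move_cleanup_interrupt() can retire the old vector. A schematic caller, modelled on those hunks (this is an outline, not compilable on its own, and program_redirection() is a hypothetical placeholder for the chip-specific register write):

/* set_affinity side: try to reserve a vector on the new CPU first. */
static void example_set_affinity(unsigned int irq, cpumask_t mask)
{
	int cpu = first_cpu(mask);

	if (irq_prepare_move(irq, cpu))
		return;			/* move already pending, or no vector free */

	program_redirection(irq, cpu_physical_id(cpu));
}

/* ack/EOI side: only now is it safe to tear down the old vector. */
static void example_ack_irq(unsigned int irq)
{
	irq_complete_move(irq);		/* fires the cleanup IPI to the old domain */
	move_native_irq(irq);
	/* hardware EOI follows */
}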
index b618487cdc858b836cee4adb044579bb5cb716f4..615c3d2b634892b2c80cf7945f1c343bbfa60162 100644 (file)
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        return 1;
 }
 
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
index e86d0295979465a56c257298028d5fc9798fb249..60c6ef67ebb215267c79eae647984893d0684f66 100644 (file)
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
        if (!cpu_online(cpu))
                return;
 
-       if (reassign_irq_vector(irq, cpu))
+       if (irq_prepare_move(irq, cpu))
                return;
 
        read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
 
 static void ia64_ack_msi_irq(unsigned int irq)
 {
+       irq_complete_move(irq);
        move_native_irq(irq);
        ia64_eoi();
 }
index f44fe8412162c55b73b80663d8bb3e8d4d3d50a9..a3022dc48ef8432a437256948d5a25080b1cf24d 100644 (file)
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
                sal_revision = SAL_VERSION_CODE(2, 8);
                sal_version = SAL_VERSION_CODE(0, 0);
        }
+
+       if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+               /*
+                * SGI Altix has hard-coded version 2.9 in their prom
+                * but they actually implement 3.2, so let's fix it here.
+                */
+               sal_revision = SAL_VERSION_CODE(3, 2);
 }
 
 static void __init
index 309da3567bc851966e8a7ad27e01eccac2e26ff2..5740296c35afa3d597e8fd041db4385ff628f268 100644 (file)
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
 
        new_sp = scr->pt.r12;
        tramp_addr = (unsigned long) __kernel_sigtramp;
-       if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
-               new_sp = current->sas_ss_sp + current->sas_ss_size;
-               /*
-                * We need to check for the register stack being on the signal stack
-                * separately, because it's switched separately (memory stack is switched
-                * in the kernel, register stack is switched in the signal trampoline).
-                */
-               if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
-                       new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+       if (ka->sa.sa_flags & SA_ONSTACK) {
+               int onstack = sas_ss_flags(new_sp);
+
+               if (onstack == 0) {
+                       new_sp = current->sas_ss_sp + current->sas_ss_size;
+                       /*
+                        * We need to check for the register stack being on the
+                        * signal stack separately, because it's switched
+                        * separately (memory stack is switched in the kernel,
+                        * register stack is switched in the signal trampoline).
+                        */
+                       if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+                               new_rbs = ALIGN(current->sas_ss_sp,
+                                               sizeof(long));
+               } else if (onstack == SS_ONSTACK) {
+                       unsigned long check_sp;
+
+                       /*
+                        * If we are on the alternate signal stack and would
+                        * overflow it, don't. Return an always-bogus address
+                        * instead so we will die with SIGSEGV.
+                        */
+                       check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+                       if (!likely(on_sig_stack(check_sp)))
+                               return force_sigsegv_info(sig, (void __user *)
+                                                         check_sp);
+               }
        }
        frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
 
index 6dfa3b3c0e2a737d0b128e33cc1e6965e1aa8d00..18a9c5f4b00d4745dc8dfadb6c33c113ca705b37 100644 (file)
@@ -742,7 +742,9 @@ sys_call_table:
        .long sys_epoll_pwait           /* 315 */
        .long sys_utimensat
        .long sys_signalfd
-       .long sys_ni_syscall
+       .long sys_timerfd_create
        .long sys_eventfd
        .long sys_fallocate             /* 320 */
+       .long sys_timerfd_settime
+       .long sys_timerfd_gettime
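
The three new entries wire the timerfd family into the m68knommu syscall table in place of the old sys_ni_syscall stub. For reference, a minimal user-space consumer of that ABI (portable to any architecture that provides the syscalls, not specific to this patch) looks like:

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
		printf("timer expired %llu time(s)\n",
		       (unsigned long long) expirations);
	close(fd);
	return 0;
}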
 
index 648113075f9722cf0be3c657c7fc28554d533028..670b0a99cfa096424b131e2005a970cfc311b72c 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23
-# Thu Oct 18 13:17:38 2007
+# Linux kernel version: 2.6.25-rc3
+# Mon Feb 25 15:03:00 2008
 #
 CONFIG_M68K=y
 # CONFIG_MMU is not set
@@ -15,8 +15,10 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
 CONFIG_TIME_LOW_RES=y
 CONFIG_NO_IOPORT=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
@@ -31,12 +33,14 @@ CONFIG_LOCALVERSION_AUTO=y
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
 # CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
@@ -48,15 +52,22 @@ CONFIG_SYSCTL_SYSCALL=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
 # CONFIG_EVENTFD is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+# CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_KPROBES is not set
+CONFIG_SLABINFO=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
@@ -83,6 +94,8 @@ CONFIG_IOSCHED_NOOP=y
 # CONFIG_DEFAULT_CFQ is not set
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_RCU is not set
 
 #
 # Processor type and features
@@ -121,6 +134,7 @@ CONFIG_M5272C3=y
 # CONFIG_MOD5272 is not set
 CONFIG_FREESCALE=y
 CONFIG_4KSTACKS=y
+CONFIG_HZ=100
 
 #
 # RAM configuration
@@ -147,6 +161,7 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
@@ -158,10 +173,6 @@ CONFIG_VIRT_TO_BUS=y
 # CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-
 #
 # Executable file formats
 #
@@ -205,6 +216,7 @@ CONFIG_IP_FIB_HASH=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
@@ -229,10 +241,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
 # CONFIG_NET_SCHED is not set
 
 #
@@ -240,6 +248,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
@@ -283,6 +292,7 @@ CONFIG_MTD_BLOCK=y
 # CONFIG_INFTL is not set
 # CONFIG_RFD_FTL is not set
 # CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
 
 #
 # RAM/ROM/Flash chip drivers
@@ -339,10 +349,11 @@ CONFIG_BLK_DEV=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
 # CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -360,9 +371,15 @@ CONFIG_NETDEVICES=y
 # CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
 # CONFIG_TUN is not set
+# CONFIG_VETH is not set
 # CONFIG_PHYLIB is not set
 CONFIG_NET_ETHERNET=y
 # CONFIG_MII is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
 CONFIG_FEC=y
 # CONFIG_FEC2 is not set
 # CONFIG_NETDEV_1000 is not set
@@ -377,7 +394,7 @@ CONFIG_FEC=y
 CONFIG_PPP=y
 # CONFIG_PPP_MULTILINK is not set
 # CONFIG_PPP_FILTER is not set
-# CONFIG_PPP_ASYNC is not set
+CONFIG_PPP_ASYNC=y
 # CONFIG_PPP_SYNC_TTY is not set
 # CONFIG_PPP_DEFLATE is not set
 # CONFIG_PPP_BSDCOMP is not set
@@ -386,7 +403,6 @@ CONFIG_PPP=y
 # CONFIG_PPPOL2TP is not set
 # CONFIG_SLIP is not set
 CONFIG_SLHC=y
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -418,12 +434,16 @@ CONFIG_SLHC=y
 #
 # Non-8250 serial port support
 #
-CONFIG_SERIAL_COLDFIRE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_COLDFIRE is not set
+CONFIG_SERIAL_MCF=y
+CONFIG_SERIAL_MCF_BAUDRATE=19200
+CONFIG_SERIAL_MCF_CONSOLE=y
 # CONFIG_UNIX98_PTYS is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
 # CONFIG_HW_RANDOM is not set
 # CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
@@ -439,6 +459,14 @@ CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
@@ -450,20 +478,20 @@ CONFIG_LEGACY_PTY_COUNT=256
 #
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
-CONFIG_DAB=y
+# CONFIG_DAB is not set
 
 #
 # Graphics support
 #
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-# CONFIG_VGASTATE is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_FB is not set
 
 #
 # Sound
@@ -471,22 +499,10 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
 # CONFIG_SOUND is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
 # CONFIG_RTC_CLASS is not set
 
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
 #
 # Userspace I/O
 #
@@ -505,11 +521,9 @@ CONFIG_EXT2_FS=y
 # CONFIG_XFS_FS is not set
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-CONFIG_ROMFS_FS=y
+# CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY is not set
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -535,7 +549,6 @@ CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 # CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
 # CONFIG_CONFIGFS_FS is not set
 
 #
@@ -551,42 +564,27 @@ CONFIG_RAMFS=y
 # CONFIG_JFFS2_FS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=y
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 
 #
 # Partition Types
 #
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
 # CONFIG_NLS is not set
-
-#
-# Distributed Lock Manager
-#
 # CONFIG_DLM is not set
 
 #
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
@@ -594,6 +592,7 @@ CONFIG_MSDOS_PARTITION=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
 # CONFIG_FULLDEBUG is not set
 # CONFIG_HIGHPROFILE is not set
 # CONFIG_BOOTPARAM is not set
@@ -605,6 +604,7 @@ CONFIG_MSDOS_PARTITION=y
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_CRYPTO is not set
 
 #
index 1b02b88200680c8ebbec377d4c67b56622bc1595..fca2e49917a37179bb0d99646d011edf52dc96e2 100644 (file)
@@ -336,9 +336,11 @@ ENTRY(sys_call_table)
        .long sys_epoll_pwait           /* 315 */
        .long sys_utimensat
        .long sys_signalfd
-       .long sys_ni_syscall
+       .long sys_timerfd_create
        .long sys_eventfd
        .long sys_fallocate             /* 320 */
+       .long sys_timerfd_settime
+       .long sys_timerfd_gettime
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index 9159fd05c9ac7638e64af74f8f9502936142d721..6bafefa546e5cd810531ff654fd1d8192a3749b1 100644 (file)
@@ -67,16 +67,6 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 /***************************************************************************/
 
-static irqreturn_t hw_tick(int irq, void *dummy)
-{
-       /* Reset Timer1 */
-       TSTAT &= 0;
-
-       return arch_timer_interrupt(irq, dummy);
-}
-
-/***************************************************************************/
-
 static struct irqaction m68328_timer_irq = {
        .name    = "timer",
        .flags   = IRQF_DISABLED | IRQF_TIMER,
index 5b8d8382b7629e93ccdb07a7d86b96049f587e38..1189d8d6170d252013ddcf112903073297308da7 100644 (file)
@@ -90,6 +90,7 @@ config PPC
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_KPROBES
+       select HAVE_KRETPROBES
 
 config EARLY_PRINTK
        bool
index 900c7ff2b7e985d4aa679fccb1fa230a480cbf29..b5c30f766c401fa0828e61fd10d0c0c8b48a6e80 100644 (file)
@@ -17,6 +17,7 @@
 #include "44x.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
index c5f37ce172ea92654a7509c72680de4bf55c26ef..56564ba37f62d1d7a7aba46ff94362627c8f8f69 100644 (file)
@@ -17,6 +17,7 @@
 #include "44x.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
index c021167f938122078f3c4081f626c8482099df07..5434d70b56605670e041834d326ab9cb72a561f5 100644 (file)
@@ -22,6 +22,7 @@
 #include "44x.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
index f66455a45ab1cfcbf17f5bfbd391f0fb3e37cd37..b55b80467eed9b23e70d7347e134745723116af1 100644 (file)
@@ -21,7 +21,9 @@
 #include "dcr.h"
 #include "4xx.h"
 
+#define TARGET_4xx
 #define TARGET_44x
+#define TARGET_440GX
 #include "ppcboot.h"
 
 static bd_t bd;
index bdedebe1bc1434180527101c5085f386d9b12dc4..3db93e85e9eaa65cc954bfdf0384d3e273b761f0 100644 (file)
@@ -11,6 +11,7 @@
 #include "4xx.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
index 5dd3d15f0febabfaf212ad2b4fd5ad8433e53171..ae68fefc01b6a054b62eaaaffde5930e983e72dc 100644 (file)
                        #interrupt-cells = <1>;
                        #size-cells = <2>;
                        #address-cells = <3>;
-                       compatible = "ibm,plb-pciex-405exr", "ibm,plb-pciex";
+                       compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
                        primary;
                        port = <0>; /* port number */
                        reg = <a0000000 20000000        /* Config space access */
index bc32ac7250ec5448b4aacc09aec1d78b911e4be0..fc86e5a3afc47ebd16be1b2fc7115721d6d2f79e 100644 (file)
@@ -38,8 +38,8 @@
                        timebase-frequency = <0>; /* Filled in by zImage */
                        i-cache-line-size = <20>;
                        d-cache-line-size = <20>;
-                       i-cache-size = <20000>;
-                       d-cache-size = <20000>;
+                       i-cache-size = <8000>;
+                       d-cache-size = <8000>;
                        dcr-controller;
                        dcr-access-method = "native";
                };
                };
 
                POB0: opb {
-                       compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
+                       compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       ranges = <00000000 4 e0000000 20000000>;
-                       clock-frequency = <0>; /* Filled in by zImage */
+                       ranges = <00000000 4 e0000000 20000000>;
+                       clock-frequency = <0>; /* Filled in by zImage */
 
                        EBC0: ebc {
                                compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc";
                        };
 
                        UART0: serial@10000200 {
-                               device_type = "serial";
-                               compatible = "ns16550";
-                               reg = <10000200 8>;
+                               device_type = "serial";
+                               compatible = "ns16550";
+                               reg = <10000200 8>;
                                virtual-reg = <a0000200>;
-                               clock-frequency = <0>; /* Filled in by zImage */
-                               current-speed = <1c200>;
-                               interrupt-parent = <&UIC0>;
-                               interrupts = <0 4>;
-                       };
+                               clock-frequency = <0>; /* Filled in by zImage */
+                               current-speed = <1c200>;
+                               interrupt-parent = <&UIC0>;
+                               interrupts = <0 4>;
+                       };
 
                        UART1: serial@10000300 {
-                               device_type = "serial";
-                               compatible = "ns16550";
-                               reg = <10000300 8>;
+                               device_type = "serial";
+                               compatible = "ns16550";
+                               reg = <10000300 8>;
                                virtual-reg = <a0000300>;
-                               clock-frequency = <0>;
-                               current-speed = <0>;
-                               interrupt-parent = <&UIC0>;
-                               interrupts = <1 4>;
-                       };
+                               clock-frequency = <0>;
+                               current-speed = <0>;
+                               interrupt-parent = <&UIC0>;
+                               interrupts = <1 4>;
+                       };
 
 
                        UART2: serial@10000600 {
-                               device_type = "serial";
-                               compatible = "ns16550";
-                               reg = <10000600 8>;
+                               device_type = "serial";
+                               compatible = "ns16550";
+                               reg = <10000600 8>;
                                virtual-reg = <a0000600>;
-                               clock-frequency = <0>;
-                               current-speed = <0>;
-                               interrupt-parent = <&UIC1>;
-                               interrupts = <5 4>;
-                       };
+                               clock-frequency = <0>;
+                               current-speed = <0>;
+                               interrupt-parent = <&UIC1>;
+                               interrupts = <5 4>;
+                       };
 
                        IIC0: i2c@10000400 {
                                compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
index 13929771bee7c10e3fe70effebcfd3f670868ec8..9eed1f68fcab3495981b72f40375e26ed9be4d7d 100644 (file)
@@ -1151,7 +1151,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
                for (i = 0; i < num_counters; ++i) {
                        if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
                            && ctr[i].enabled) {
-                               oprofile_add_pc(pc, is_kernel, i);
+                               oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                cbe_write_ctr(cpu, i, reset_value[i]);
                        }
                }
index 9aa4425d80b20a4d1fe77eb36ca2eded77095bb0..4d5fd1dbd4007a5235b5b170367e451cc4f40b67 100644 (file)
@@ -199,6 +199,7 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
 
        return 0;
 }
+EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
 
 /**
  * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
index edab631a8dcb1bd92ef035a2fa620820112afbee..20ea0e118f246b21819b907d49c25e8c4d70e510 100644 (file)
 
 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT       28
-#define IO_PAGENO_BITS         (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+#define IO_PAGENO_BITS(shift)  (IO_SEGMENT_SHIFT - (shift))
 
 /* The high bit needs to be set on every DMA address */
 #define SPIDER_DMA_OFFSET      0x80000000ul
@@ -123,7 +123,6 @@ struct iommu_window {
        struct cbe_iommu *iommu;
        unsigned long offset;
        unsigned long size;
-       unsigned long pte_offset;
        unsigned int ioid;
        struct iommu_table table;
 };
@@ -200,7 +199,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
                (window->ioid & IOPTE_IOID_Mask);
 #endif
 
-       io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+       io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
        for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
                io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
@@ -232,7 +231,7 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
                | (window->ioid & IOPTE_IOID_Mask);
 #endif
 
-       io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+       io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
        for (i = 0; i < npages; i++)
                io_pte[i] = pte;
@@ -307,76 +306,84 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
        return -ENODEV;
 }
 
-static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
                                unsigned long dbase, unsigned long dsize,
                                unsigned long fbase, unsigned long fsize)
 {
        struct page *page;
-       int i;
-       unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
-                     n_pte_pages, base;
-
-       base = dbase;
-       if (fsize != 0)
-               base = min(fbase, dbase);
+       unsigned long segments, stab_size;
 
        segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-       pages_per_segment = 1ull << IO_PAGENO_BITS;
 
-       pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
-                       __FUNCTION__, iommu->nid, segments, pages_per_segment);
+       pr_debug("%s: iommu[%d]: segments: %lu\n",
+                       __FUNCTION__, iommu->nid, segments);
 
        /* set up the segment table */
        stab_size = segments * sizeof(unsigned long);
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
        BUG_ON(!page);
        iommu->stab = page_address(page);
-       clear_page(iommu->stab);
+       memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+               unsigned long base, unsigned long size, unsigned long gap_base,
+               unsigned long gap_size, unsigned long page_shift)
+{
+       struct page *page;
+       int i;
+       unsigned long reg, segments, pages_per_segment, ptab_size,
+                     n_pte_pages, start_seg, *ptab;
+
+       start_seg = base >> IO_SEGMENT_SHIFT;
+       segments  = size >> IO_SEGMENT_SHIFT;
+       pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+       /* PTEs for each segment must start on a 4K boundary */
+       pages_per_segment = max(pages_per_segment,
+                               (1 << 12) / sizeof(unsigned long));
 
-       /* ... and the page tables. Since these are contiguous, we can treat
-        * the page tables as one array of ptes, like pSeries does.
-        */
        ptab_size = segments * pages_per_segment * sizeof(unsigned long);
        pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
                        iommu->nid, ptab_size, get_order(ptab_size));
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
        BUG_ON(!page);
 
-       iommu->ptab = page_address(page);
-       memset(iommu->ptab, 0, ptab_size);
+       ptab = page_address(page);
+       memset(ptab, 0, ptab_size);
 
-       /* allocate a bogus page for the end of each mapping */
-       page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
-       BUG_ON(!page);
-       iommu->pad_page = page_address(page);
-       clear_page(iommu->pad_page);
-
-       /* number of pages needed for a page table */
-       n_pte_pages = (pages_per_segment *
-                      sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+       /* number of 4K pages needed for a page table */
+       n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
 
        pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-                       __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+                       __FUNCTION__, iommu->nid, iommu->stab, ptab,
                        n_pte_pages);
 
        /* initialise the STEs */
        reg = IOSTE_V | ((n_pte_pages - 1) << 5);
 
-       if (IOMMU_PAGE_SIZE == 0x1000)
-               reg |= IOSTE_PS_4K;
-       else if (IOMMU_PAGE_SIZE == 0x10000)
-               reg |= IOSTE_PS_64K;
-       else {
-               extern void __unknown_page_size_error(void);
-               __unknown_page_size_error();
+       switch (page_shift) {
+       case 12: reg |= IOSTE_PS_4K;  break;
+       case 16: reg |= IOSTE_PS_64K; break;
+       case 20: reg |= IOSTE_PS_1M;  break;
+       case 24: reg |= IOSTE_PS_16M; break;
+       default: BUG();
        }
 
+       gap_base = gap_base >> IO_SEGMENT_SHIFT;
+       gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
        pr_debug("Setting up IOMMU stab:\n");
-       for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
-               iommu->stab[i] = reg |
-                       (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+       for (i = start_seg; i < (start_seg + segments); i++) {
+               if (i >= gap_base && i < (gap_base + gap_size)) {
+                       pr_debug("\toverlap at %d, skipping\n", i);
+                       continue;
+               }
+               iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
+                                       (i - start_seg));
                pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
        }
+
+       return ptab;
 }
 
 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
@@ -423,7 +430,9 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
        unsigned long base, unsigned long size)
 {
-       cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
+       cell_iommu_setup_stab(iommu, base, size, 0, 0);
+       iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+                                           IOMMU_PAGE_SHIFT);
        cell_iommu_enable_hardware(iommu);
 }
 
@@ -464,6 +473,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
                        unsigned long pte_offset)
 {
        struct iommu_window *window;
+       struct page *page;
        u32 ioid;
 
        ioid = cell_iommu_get_ioid(np);
@@ -475,13 +485,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
        window->size = size;
        window->ioid = ioid;
        window->iommu = iommu;
-       window->pte_offset = pte_offset;
 
        window->table.it_blocksize = 16;
        window->table.it_base = (unsigned long)iommu->ptab;
        window->table.it_index = iommu->nid;
-       window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
-               window->pte_offset;
+       window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
        window->table.it_size = size >> IOMMU_PAGE_SHIFT;
 
        iommu_init_table(&window->table, iommu->nid);
@@ -504,6 +512,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
         * This code also assumes that we have a window that starts at 0,
         * which is the case on all spider based blades.
         */
+       page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+       BUG_ON(!page);
+       iommu->pad_page = page_address(page);
+       clear_page(iommu->pad_page);
+
        __set_bit(0, window->table.it_map);
        tce_build_cell(&window->table, window->table.it_offset, 1,
                       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
@@ -549,7 +562,7 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
        archdata->dma_data = &window->table;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev);
+static void cell_dma_dev_setup_fixed(struct device *dev);
 
 static void cell_dma_dev_setup(struct device *dev)
 {
@@ -557,7 +570,7 @@ static void cell_dma_dev_setup(struct device *dev)
 
        /* Order is important here, these are not mutually exclusive */
        if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
-               cell_dma_dev_setup_static(dev);
+               cell_dma_dev_setup_fixed(dev);
        else if (get_pci_dma_ops() == &dma_iommu_ops)
                cell_dma_dev_setup_iommu(dev);
        else if (get_pci_dma_ops() == &dma_direct_ops)
@@ -858,7 +871,7 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
        return 0;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev)
+static void cell_dma_dev_setup_fixed(struct device *dev)
 {
        struct dev_archdata *archdata = &dev->archdata;
        u64 addr;
@@ -869,35 +882,45 @@ static void cell_dma_dev_setup_static(struct device *dev)
        dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
 }
 
+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+                          unsigned long base_pte)
+{
+       unsigned long segment, offset;
+
+       segment = addr >> IO_SEGMENT_SHIFT;
+       offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+       ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+       pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+                 addr, ptab, segment, offset);
+
+       ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+}
+
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
        struct device_node *np, unsigned long dbase, unsigned long dsize,
        unsigned long fbase, unsigned long fsize)
 {
-       unsigned long base_pte, uaddr, *io_pte;
-       int i;
+       unsigned long base_pte, uaddr, ioaddr, *ptab;
 
-       dma_iommu_fixed_base = fbase;
+       ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
 
-       /* convert from bytes into page table indices */
-       dbase = dbase >> IOMMU_PAGE_SHIFT;
-       dsize = dsize >> IOMMU_PAGE_SHIFT;
-       fbase = fbase >> IOMMU_PAGE_SHIFT;
-       fsize = fsize >> IOMMU_PAGE_SHIFT;
+       dma_iommu_fixed_base = fbase;
 
        pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-       io_pte = iommu->ptab;
        base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
                    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
 
-       uaddr = 0;
-       for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+       for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
                /* Don't touch the dynamic region */
-               if (i >= dbase && i < (dbase + dsize)) {
-                       pr_debug("iommu: static/dynamic overlap, skipping\n");
+               ioaddr = uaddr + fbase;
+               if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
+                       pr_debug("iommu: fixed/dynamic overlap, skipping\n");
                        continue;
                }
-               io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+               insert_16M_pte(uaddr, ptab, base_pte);
        }
 
        mb();
@@ -995,7 +1018,9 @@ static int __init cell_iommu_fixed_mapping_init(void)
                        "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
                         dbase + dsize, fbase, fbase + fsize);
 
-               cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
+               cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
+               iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
+                                                   IOMMU_PAGE_SHIFT);
                cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
                                             fbase, fsize);
                cell_iommu_enable_hardware(iommu);
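
The sizing arithmetic introduced above is easy to check by hand: with IO_SEGMENT_SHIFT = 28, a page shift of 24 (the 16M pages used for the fixed mapping) gives only 1 << IO_PAGENO_BITS(24) = 16 PTEs per 256M segment, so cell_iommu_alloc_ptab() rounds that up to the 512 eight-byte entries that fill one 4K page, and n_pte_pages works out to 1. A small host-side sketch that just reproduces the sizing (assuming 8-byte PTEs, as the 64-bit cell kernel uses) is:

#include <stdio.h>

#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))
#define PTE_BYTES		8	/* sizeof(unsigned long) on the cell kernel */

int main(void)
{
	unsigned long page_shift = 24;			/* 16M IOMMU pages */
	unsigned long per_seg = 1ul << IO_PAGENO_BITS(page_shift);

	/* PTEs for each segment must start on a 4K boundary, so round up. */
	if (per_seg < (1 << 12) / PTE_BYTES)
		per_seg = (1 << 12) / PTE_BYTES;

	printf("PTEs per segment: %lu, 4K pages per page table: %lu\n",
	       per_seg, (per_seg * PTE_BYTES) >> 12);
	return 0;
}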
index a7f609b3b876d61b1f128bf3902f8262761d0927..dda34650cb07d32afff1c86dadd856e0eb48c394 100644 (file)
@@ -149,6 +149,11 @@ static void __init cell_init_irq(void)
        mpic_init_IRQ();
 }
 
+static void __init cell_set_dabrx(void)
+{
+       mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
+}
+
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
@@ -158,6 +163,8 @@ static void __init cell_setup_arch(void)
 
        cbe_regs_init();
 
+       cell_set_dabrx();
+
 #ifdef CONFIG_CBE_RAS
        cbe_ras_init();
 #endif
index 87eb07f94c5f111e345cdebe47640bb1d4a6a1ea..712001f6b7dad366cd1b2ab78107811cf6f485fb 100644 (file)
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
        struct spu_priv2 __iomem *priv2 = spu->priv2;
+       unsigned long flags;
 
+       spin_lock_irqsave(&spu->register_lock, flags);
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
+       spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
 
@@ -148,7 +151,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
                        __func__, slbe, slb->vsid, slb->esid);
 
        out_be64(&priv2->slb_index_W, slbe);
+       /* set invalid before writing vsid */
+       out_be64(&priv2->slb_esid_RW, 0);
+       /* now it's safe to write the vsid */
        out_be64(&priv2->slb_vsid_RW, slb->vsid);
+       /* setting the new esid makes the entry valid again */
        out_be64(&priv2->slb_esid_RW, slb->esid);
 }
 
@@ -290,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                nr_slbs++;
        }
 
+       spin_lock_irq(&spu->register_lock);
        /* Add the set of SLBs */
        for (i = 0; i < nr_slbs; i++)
                spu_load_slb(spu, i, &slbs[i]);
+       spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
 
@@ -337,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
-       spin_unlock(&spu->register_lock);
-       pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-                       dar, dsisr);
 
        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                __spu_trap_data_seg(spu, dar);
 
+       spin_unlock(&spu->register_lock);
+       pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+                       dar, dsisr);
+
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);
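
The spu_base.c hunks above share one theme: MMIO writes to the SLB registers now happen under register_lock, and an entry is invalidated (esid written as 0) before its vsid is rewritten, so it only becomes valid again once the new esid lands. A minimal user-space sketch of that update ordering follows; the struct and field names are hypothetical stand-ins for the priv2 register block, not the kernel's types.

#include <stdint.h>
#include <pthread.h>

struct slb_regs {                        /* stand-in for the priv2 MMIO block */
        volatile uint64_t index_W;
        volatile uint64_t vsid_RW;
        volatile uint64_t esid_RW;
};

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;

static void load_slb_entry(struct slb_regs *r, uint64_t idx,
                           uint64_t vsid, uint64_t esid)
{
        pthread_mutex_lock(&register_lock);
        r->index_W = idx;                /* select the entry to rewrite      */
        r->esid_RW = 0;                  /* invalidate before touching vsid  */
        r->vsid_RW = vsid;               /* safe: the entry is invalid here  */
        r->esid_RW = esid;               /* new esid makes it valid again    */
        pthread_mutex_unlock(&register_lock);
}
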
 
index 133995ed5cc78c104745534915ec3331f6601ed6..cf6c2c89211d2dc2fe308e50404370d3ae9af418 100644 (file)
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)
 
        /*
         * This is basically an open-coded spu_acquire_saved, except that
-        * we don't acquire the state mutex interruptible.
+        * we don't acquire the state mutex interruptible, and we don't
+        * want this context to be rescheduled on release.
         */
        mutex_lock(&ctx->state_mutex);
-       if (ctx->state != SPU_STATE_SAVED) {
-               set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+       if (ctx->state != SPU_STATE_SAVED)
                spu_deactivate(ctx);
-       }
 
        mm = ctx->owner;
        ctx->owner = NULL;
index c66c3756970d53d52ae56df931e38219a9e79ade..f7a7e8635fb6f98d11ab8aa5bf5cf3d2d6f49bf8 100644 (file)
@@ -366,6 +366,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
        if (offset >= ps_size)
                return NOPFN_SIGBUS;
 
+       /*
+        * Because we release the mmap_sem, the context may be destroyed while
+        * we're in spu_wait. Grab an extra reference so it isn't destroyed
+        * in the meantime.
+        */
+       get_spu_context(ctx);
+
        /*
         * We have to wait for context to be loaded before we have
         * pages to hand out to the user, but we don't want to wait
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
         * hanged.
         */
        if (spu_acquire(ctx))
-               return NOPFN_REFAULT;
+               goto refault;
 
        if (ctx->state == SPU_STATE_SAVED) {
                up_read(&current->mm->mmap_sem);
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 
        if (!ret)
                spu_release(ctx);
+
+refault:
+       put_spu_context(ctx);
        return NOPFN_REFAULT;
 }
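
The spufs_ps_nopfn() change above pins the context with an extra reference before a wait that can sleep with mmap_sem dropped, and drops that reference on every exit path, including the refault path. A standalone sketch of the same pattern; get_ctx/put_ctx and acquire_ctx/release_ctx are hypothetical stand-ins for the spu_context helpers.

#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
        atomic_int refcount;
};

static void get_ctx(struct ctx *c)
{
        atomic_fetch_add(&c->refcount, 1);
}

static void put_ctx(struct ctx *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
                free(c);                  /* last reference gone */
}

/* stand-ins for spu_acquire()/spu_release(); the real calls may block */
static int acquire_ctx(struct ctx *c)  { (void)c; return 0; }
static void release_ctx(struct ctx *c) { (void)c; }

static int fault_handler(struct ctx *c)
{
        int ret = 0;

        get_ctx(c);                       /* keep c alive while we may sleep */
        if (acquire_ctx(c)) {
                ret = -1;                 /* interrupted: refault later ...   */
                goto out;                 /* ... but still drop the reference */
        }
        /* hand the page out here */
        release_ctx(c);
out:
        put_ctx(c);
        return ret;
}
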
 
index 3a5972117de7cdcd04504ae46d97e8b79376e0bc..5d5f680cd0b8ced5789556e444a0b8b955cc23c8 100644 (file)
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
        spu_switch_notify(spu, ctx);
        ctx->state = SPU_STATE_RUNNABLE;
 
-       spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+       spuctx_switch_state(ctx, SPU_UTIL_USER);
 }
 
 /*
index 01974f7776e184d798d33cb2728dc5c610cad368..79aa773f3c992abff92f38f51608bdcc5497c0a1 100644 (file)
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
                ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
 
        return snprintf(tbuf, n,
-               "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+               "[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
                (unsigned long) tv.tv_sec,
                (unsigned long) tv.tv_nsec,
-               t->owner_tid,
-               t->name,
                t->curr_tid,
+               t->name,
+               t->owner_tid,
                t->number);
 }
 
@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
        { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
        { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
        { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+       { "spufs_stop_callback__enter", "%p %p", spu_context_event },
 };
 
 static int __init sputrace_init(void)
index 6f5886c7b1f9a8e5f16c1220df0a53c9c60cf241..e9dc7a55d1b9466d5f5e5597371a79789b46ed30 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
         *     Write INT_MASK_class1 with value of 0.
         *     Save INT_Mask_class2 in CSA.
         *     Write INT_MASK_class2 with value of 0.
+        *     Synchronize all three interrupts to be sure
+        *     we no longer execute a handler on another CPU.
         */
        spin_lock_irq(&spu->register_lock);
        if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
        spu_int_mask_set(spu, 2, 0ul);
        eieio();
        spin_unlock_irq(&spu->register_lock);
+       synchronize_irq(spu->irqs[0]);
+       synchronize_irq(spu->irqs[1]);
+       synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
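
disable_interrupts() above now follows the mask-then-synchronize shape: the interrupt sources are masked under the register lock, and synchronize_irq() is then called for each line so no handler is still running on another CPU when the context save code proceeds. A kernel-style sketch of that shape only; struct my_dev and mask_irq_source() are assumed names, not part of the commit.

/* Sketch under assumptions: mask at the device, then wait out handlers. */
static void quiesce_device_irqs(struct my_dev *dev)
{
        int i;

        spin_lock_irq(&dev->register_lock);
        for (i = 0; i < 3; i++)
                mask_irq_source(dev, i);        /* assumed helper */
        spin_unlock_irq(&dev->register_lock);

        for (i = 0; i < 3; i++)
                synchronize_irq(dev->irqs[i]);  /* wait for running handlers */
}
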
index b2e292df13ca25792b283bc7655bce2c9486e72f..ac82ac35b9918a98bff6ec5d4fcc8918e5796689 100644 (file)
@@ -21,9 +21,6 @@
 #ifndef _CELLEB_BEAT_H
 #define _CELLEB_BEAT_H
 
-#define DABRX_KERNEL           (1UL<<1)
-#define DABRX_USER             (1UL<<0)
-
 int64_t beat_get_term_char(uint64_t,uint64_t*,uint64_t*,uint64_t*);
 int64_t beat_put_term_char(uint64_t,uint64_t,uint64_t,uint64_t);
 int64_t beat_repository_encode(int, const char *, uint64_t[4]);
index b21444b681b667b526ddbebcca337e24d6b48581..9892827b6176ca5f75f250807ef5e88903f7d1a7 100644 (file)
@@ -61,6 +61,7 @@ config S390
        def_bool y
        select HAVE_OPROFILE
        select HAVE_KPROBES
+       select HAVE_KRETPROBES
 
 source "init/Kconfig"
 
index b3400b5ad5c605cface4aedcd88cd565a25b8909..783cfbbf87cad992ae388047dbdd8f60c43ebbca 100644 (file)
@@ -330,6 +330,7 @@ config CPU_SUBTYPE_SH5_101
 
 config CPU_SUBTYPE_SH5_103
        bool "Support SH5-103 processor"
+       select CPU_SH5
 
 endchoice
 
index 5c3359756a926fe01e1107a1af4bfc53239b25e4..71ff3d6f26e2924462de6132ca9242685c9ddacf 100644 (file)
@@ -90,7 +90,7 @@ static irqreturn_t dma_tei(int irq, void *dev_id)
 
 static int sh_dmac_request_dma(struct dma_channel *chan)
 {
-       if (unlikely(!chan->flags & DMA_TEI_CAPABLE))
+       if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
                return 0;
 
        return request_irq(get_dmte_irq(chan->chan), dma_tei,
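
The dma-sh.c one-liner above fixes an operator-precedence bug: `!` binds tighter than `&`, so `!chan->flags & DMA_TEI_CAPABLE` is almost always 0 regardless of the flag. A standalone demonstration:

#include <assert.h>

#define DMA_TEI_CAPABLE 0x4

int main(void)
{
        unsigned int flags = DMA_TEI_CAPABLE | 0x1;

        /* buggy form: !flags is 0, so the AND is always 0 */
        assert((!flags & DMA_TEI_CAPABLE) == 0);

        /* fixed form: true only when the capability bit is clear */
        assert(!(flags & DMA_TEI_CAPABLE) == 0);
        assert(!(0x1u  & DMA_TEI_CAPABLE) == 1);

        return 0;
}
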
index b76a14f12ce24dfba85431fc551cb292bee7f5ed..ab77b0e0fa0ebea7b0cef65f4fbda8f549b2483c 100644 (file)
@@ -93,7 +93,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
        }
 
        hd->base = ioremap_nocache(res->start, res->end - res->start + 1);
-       if (!unlikely(hd->base)) {
+       if (unlikely(!hd->base)) {
                dev_err(&pdev->dev, "ioremap failed\n");
 
                if (!pdev->dev.platform_data)
index 0dac87b19624f5ac7dbfd15362dbe884c865b1bc..e1284fc693611a266c5b4307f569bf81fa1ff0d7 100644 (file)
@@ -83,9 +83,9 @@ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        switch (size) {
-               case 1: *val = ctrl_inb(GAPSPCI_BBA_CONFIG+where); break;
-               case 2: *val = ctrl_inw(GAPSPCI_BBA_CONFIG+where); break;
-               case 4: *val = ctrl_inl(GAPSPCI_BBA_CONFIG+where); break;
+               case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break;
+               case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break;
+               case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break;
        }       
 
         return PCIBIOS_SUCCESSFUL;
@@ -97,9 +97,9 @@ static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        switch (size) {
-               case 1: ctrl_outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break;
-               case 2: ctrl_outw((u16)val, GAPSPCI_BBA_CONFIG+where); break;
-               case 4: ctrl_outl((u32)val, GAPSPCI_BBA_CONFIG+where); break;
+               case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break;
+               case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break;
+               case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break;
        }
 
         return PCIBIOS_SUCCESSFUL;
@@ -127,36 +127,36 @@ int __init gapspci_init(void)
         */
 
        for (i=0; i<16; i++)
-               idbuf[i] = ctrl_inb(GAPSPCI_REGS+i);
+               idbuf[i] = inb(GAPSPCI_REGS+i);
 
        if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16))
                return -ENODEV;
 
-       ctrl_outl(0x5a14a501, GAPSPCI_REGS+0x18);
+       outl(0x5a14a501, GAPSPCI_REGS+0x18);
 
        for (i=0; i<1000000; i++)
                ;
 
-       if (ctrl_inl(GAPSPCI_REGS+0x18) != 1)
+       if (inl(GAPSPCI_REGS+0x18) != 1)
                return -EINVAL;
 
-       ctrl_outl(0x01000000, GAPSPCI_REGS+0x20);
-       ctrl_outl(0x01000000, GAPSPCI_REGS+0x24);
+       outl(0x01000000, GAPSPCI_REGS+0x20);
+       outl(0x01000000, GAPSPCI_REGS+0x24);
 
-       ctrl_outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28);
-       ctrl_outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c);
+       outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28);
+       outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c);
 
-       ctrl_outl(1, GAPSPCI_REGS+0x14);
-       ctrl_outl(1, GAPSPCI_REGS+0x34);
+       outl(1, GAPSPCI_REGS+0x14);
+       outl(1, GAPSPCI_REGS+0x34);
 
        /* Setting Broadband Adapter */
-       ctrl_outw(0xf900, GAPSPCI_BBA_CONFIG+0x06);
-       ctrl_outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30);
-       ctrl_outb(0x00, GAPSPCI_BBA_CONFIG+0x3c);
-       ctrl_outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d);
-       ctrl_outw(0x0006, GAPSPCI_BBA_CONFIG+0x04);
-       ctrl_outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10);
-       ctrl_outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14);
+       outw(0xf900, GAPSPCI_BBA_CONFIG+0x06);
+       outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30);
+       outb(0x00, GAPSPCI_BBA_CONFIG+0x3c);
+       outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d);
+       outw(0x0006, GAPSPCI_BBA_CONFIG+0x04);
+       outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10);
+       outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14);
 
        return 0;
 }
index b230eb278cef73b7f36ce48efb849b3b0f7ca90d..cc530f4d84d679a1418571319cf3b98be02d18fd 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 enum {
        UNUSED = 0,
index 3feb95a4fcbccae462c1fda63548a33709f7bdb5..fb781329848af930b22290d452fe99a8fdc7a169 100644 (file)
@@ -21,8 +21,8 @@
 #include <asm/freq.h>
 #include <asm/io.h>
 
-const static int pll1rate[]={8,12,16,0};
-const static int pfc_divisors[]={1,2,3,4,6,8,12};
+static const int pll1rate[]={8,12,16,0};
+static const int pfc_divisors[]={1,2,3,4,6,8,12};
 #define ifc_divisors pfc_divisors
 
 #if (CONFIG_SH_CLK_MD == 0)
index db6ef5cecde14b12993f5bbce9b657ba58fa86e1..e98dc4450352c6d1a474d2edfce07c03a723c53b 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 enum {
        UNUSED = 0,
index a564425b905f6572ee710624504dc9283fd9b56b..e6d4ec445dd885d693a39e9b3c4d273ace48db6c 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 enum {
        UNUSED = 0,
index fcc80bb7bee7f9de3192d6309554e1b9642848e5..10f2a760c5ee07ae6c2d71503070bea664733602 100644 (file)
@@ -94,9 +94,9 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
                boot_cpu_data.dcache.way_incr   = (1 << 13);
                boot_cpu_data.dcache.entry_mask = 0x1ff0;
                boot_cpu_data.dcache.sets       = 512;
-               ctrl_outl(CCR_CACHE_32KB, CCR3);
+               ctrl_outl(CCR_CACHE_32KB, CCR3_REG);
 #else
-               ctrl_outl(CCR_CACHE_16KB, CCR3);
+               ctrl_outl(CCR_CACHE_16KB, CCR3_REG);
 #endif
 #endif
        }
index dd0a20a685f716309803fcf652d47ebe4a62b7ba..f581534cb732c08163b274dc452745e957d924d5 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 #include <asm/rtc.h>
 
 enum {
index 969804bb523bab00964e6303de157e26f06b0096..d3733b13ea5298194020ee43114ce3bafe8420c4 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/irq.h>
 #include <linux/platform_device.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 enum {
        UNUSED = 0,
@@ -123,15 +123,15 @@ static struct resource rtc_resources[] = {
                .flags  = IORESOURCE_IO,
        },
        [1] =   {
-               .start  = 20,
+               .start  = 21,
                .flags  = IORESOURCE_IRQ,
        },
        [2] =   {
-               .start  = 21,
+               .start  = 22,
                .flags  = IORESOURCE_IRQ,
        },
        [3] =   {
-               .start  = 22,
+               .start  = 20,
                .flags  = IORESOURCE_IRQ,
        },
 };
index 0cc0e2bf135dd744676f3b58cf20a900c94644f1..7406c9ad92597c758cab7cecdf3102e48e72f078 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 #include <asm/rtc.h>
 
 enum {
index 3855ea4c21c8acd30ed3f3c0bbf3a4ad598f9d67..8028082527c55a82d835fc32f800a4262dc1bf06 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/io.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 #include <asm/rtc.h>
 
 #define INTC_ICR1      0xA4140010UL
index dab193293f2034c37f36090c1672540da299db0e..7371abf64f8082a902fefc675aec6579ec73b1ac 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
        {
index ae3603aca615017e2296cf094a0a822ec291006a..ec884039b914cb5d424a544da7c41bd67e8a655b 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/io.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct resource rtc_resources[] = {
        [0] = {
index 85f81579b97e54e7494e556c65c86443f3d57e8a..254c5c55ab9170b846d9a9f5b0ac52db0f31e5fa 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 enum {
        UNUSED = 0,
index c0a3f079dfdcda8565c1d3e83394d9b7930c70ac..6d4f50cd4aaf62ece55cce91bdab88c134f6e165 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
        {
index 967e8b69a2f814a1506d23ce72612d1091c295e3..f26b5cdad0d1fd0349665b30ea2c93ecef918ca1 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
        {
index 73c778d40d13f289f33b96f92945b9be49d1af6e..b98b4bc93ec9df41bfd5b61ada70a65b3bd8ba99 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
+#include <linux/serial_sci.h>
 #include <linux/mm.h>
 #include <asm/mmzone.h>
-#include <asm/sci.h>
 
 static struct resource usbf_resources[] = {
        [0] = {
index eabd5386812d089efa4084903f9244a21cdc688a..07c988dc9de6fa7bf07e7f315f294cc5e90453c0 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/io.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct resource rtc_resources[] = {
        [0] = {
index 32f4f59a837b41ca36329488597177f6aa1573ef..b9cec48b18088dc0b0239f23b120626d10c5abe3 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
        {
index 293004b526ff98eefa9c0a1f61936e3d551a3396..18dbbe23fea1b172a22532b9cd93005d2eb52562 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/io.h>
-#include <asm/sci.h>
+#include <linux/serial_sci.h>
 
 static struct resource rtc_resources[] = {
        [0] = {
index 74b60e96cdf43bf30d7b7624226116459a0bbbe8..621e7329ec63747b766301e72859a324dee8290e 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
+#include <linux/serial_sci.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <asm/mmzone.h>
-#include <asm/sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
        {
index 4dc958b6b31468a35b11d10536bb005e0d846c2f..bd35f32534b98e11a3e231a16940931915995b43 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
+#include <linux/serial_sci.h>
 #include <linux/io.h>
 #include <asm/mmzone.h>
-#include <asm/sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
        {
index e795f282dece9f29d6914bc639ffb0c73da5d9f9..bf1b15d3f6f58be2265f1d6e13a0bbf22c9c0862 100644 (file)
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.62 2000/12/15 00:41:17 davem Exp $
+#
 # Makefile for the linux kernel.
 #
 
@@ -12,7 +12,8 @@ obj-y    := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
            sys_sparc.o sunos_asm.o systbls.o \
            time.o windows.o cpu.o devices.o sclow.o \
            tadpole.o tick14.o ptrace.o sys_solaris.o \
-           unaligned.o muldiv.o semaphore.o prom.o of_device.o devres.o
+           unaligned.o una_asm.o muldiv.o semaphore.o \
+           prom.o of_device.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
 
index 259a559d4cea120fb2beb13ad808ef1f152056f8..e7a0edfc1a32532823d315a78deb6f1585a35502 100644 (file)
@@ -32,7 +32,7 @@ struct cpu_fp_info {
 /* In order to get the fpu type correct, you need to take the IDPROM's
  * machine type value into consideration too.  I will fix this.
  */
-struct cpu_fp_info linux_sparc_fpu[] = {
+static struct cpu_fp_info linux_sparc_fpu[] = {
   { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
   { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"},
   { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
@@ -76,7 +76,7 @@ struct cpu_fp_info linux_sparc_fpu[] = {
 
 #define NSPARCFPU  ARRAY_SIZE(linux_sparc_fpu)
 
-struct cpu_iu_info linux_sparc_chips[] = {
+static struct cpu_iu_info linux_sparc_chips[] = {
   /* Sun4/100, 4/200, SLC */
   { 0, 0, "Fujitsu  MB86900/1A or LSI L64831 SparcKIT-40"},
   /* borned STP1012PGA */
index d850785b20808716b5d843af7877712db1b9dc7d..96344ff2bbe165aff131a8d9f982286f8804e136 100644 (file)
@@ -101,7 +101,7 @@ void __init fill_ebus_child(struct device_node *dp,
                        prom_printf("UGH: property for %s was %d, need < %d\n",
                                    dev->prom_node->name, len,
                                    dev->parent->num_addrs);
-                       panic(__FUNCTION__);
+                       panic(__func__);
                }
 
                /* XXX resource */
@@ -162,7 +162,7 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
                prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
                            dev->prom_node->name, len,
                            (int)sizeof(struct linux_prom_registers));
-               panic(__FUNCTION__);
+               panic(__func__);
        }
        dev->num_addrs = len / sizeof(struct linux_prom_registers);
 
@@ -324,7 +324,7 @@ void __init ebus_init(void)
                regs = of_get_property(dp, "reg", &len);
                if (!regs) {
                        prom_printf("%s: can't find reg property\n",
-                                   __FUNCTION__);
+                                   __func__);
                        prom_halt();
                }
                nreg = len / sizeof(struct linux_prom_pci_registers);
index 313d1620ae8ec65b3ce43c1ee8b180ec7abdeb68..59e9344e7a0da2f5f0ccc8f4586e3ef89a85debe 100644 (file)
@@ -3,6 +3,9 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
 
 #include <asm/auxio.h>
 
index 0bd69d0b5cd7f75ee293d06ff4ab3c81a0ec48dd..70c0dd22491d2a5e4d3e4aaf1ebdc72ec7ca855f 100644 (file)
@@ -139,8 +139,6 @@ void cpu_idle(void)
 
 #endif
 
-extern char reboot_command [];
-
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
 void machine_halt(void)
 {
diff --git a/arch/sparc/kernel/una_asm.S b/arch/sparc/kernel/una_asm.S
new file mode 100644 (file)
index 0000000..8cc0345
--- /dev/null
@@ -0,0 +1,153 @@
+/* una_asm.S: Kernel unaligned trap assembler helpers.
+ *
+ * Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/errno.h>
+
+       .text
+
+retl_efault:
+       retl
+        mov    -EFAULT, %o0
+
+       /* int __do_int_store(unsigned long *dst_addr, int size,
+        *                    unsigned long *src_val)
+        *
+        * %o0 = dest_addr
+        * %o1 = size
+        * %o2 = src_val
+        *
+        * Return '0' on success, -EFAULT on failure.
+        */
+       .globl  __do_int_store
+__do_int_store:
+       ld      [%o2], %g1
+       cmp     %o1, 2
+       be      2f
+        cmp    %o1, 4
+       be      1f
+        srl    %g1, 24, %g2
+       srl     %g1, 16, %g7
+4:     stb     %g2, [%o0]
+       srl     %g1, 8, %g2
+5:     stb     %g7, [%o0 + 1]
+       ld      [%o2 + 4], %g7
+6:     stb     %g2, [%o0 + 2]
+       srl     %g7, 24, %g2
+7:     stb     %g1, [%o0 + 3]
+       srl     %g7, 16, %g1
+8:     stb     %g2, [%o0 + 4]
+       srl     %g7, 8, %g2
+9:     stb     %g1, [%o0 + 5]
+10:    stb     %g2, [%o0 + 6]
+       b       0f
+11:     stb    %g7, [%o0 + 7]
+1:     srl     %g1, 16, %g7
+12:    stb     %g2, [%o0]
+       srl     %g1, 8, %g2
+13:    stb     %g7, [%o0 + 1]
+14:    stb     %g2, [%o0 + 2]
+       b       0f
+15:     stb    %g1, [%o0 + 3]
+2:     srl     %g1, 8, %g2
+16:    stb     %g2, [%o0]
+17:    stb     %g1, [%o0 + 1]
+0:     retl
+        mov    0, %o0
+
+       .section __ex_table,#alloc
+       .word   4b, retl_efault
+       .word   5b, retl_efault
+       .word   6b, retl_efault
+       .word   7b, retl_efault
+       .word   8b, retl_efault
+       .word   9b, retl_efault
+       .word   10b, retl_efault
+       .word   11b, retl_efault
+       .word   12b, retl_efault
+       .word   13b, retl_efault
+       .word   14b, retl_efault
+       .word   15b, retl_efault
+       .word   16b, retl_efault
+       .word   17b, retl_efault
+       .previous
+
+       /* int do_int_load(unsigned long *dest_reg, int size,
+        *                 unsigned long *saddr, int is_signed)
+        *
+        * %o0 = dest_reg
+        * %o1 = size
+        * %o2 = saddr
+        * %o3 = is_signed
+        *
+        * Return '0' on success, -EFAULT on failure.
+        */
+       .globl  do_int_load
+do_int_load:
+       cmp     %o1, 8
+       be      9f
+        cmp    %o1, 4
+       be      6f
+4:      ldub   [%o2], %g1
+5:     ldub    [%o2 + 1], %g2
+       sll     %g1, 8, %g1
+       tst     %o3
+       be      3f
+        or     %g1, %g2, %g1
+       sll     %g1, 16, %g1
+       sra     %g1, 16, %g1
+3:     b       0f
+        st     %g1, [%o0]
+6:     ldub    [%o2 + 1], %g2
+       sll     %g1, 24, %g1
+7:     ldub    [%o2 + 2], %g7
+       sll     %g2, 16, %g2
+8:     ldub    [%o2 + 3], %g3
+       sll     %g7, 8, %g7
+       or      %g3, %g2, %g3
+       or      %g7, %g3, %g7
+       or      %g1, %g7, %g1
+       b       0f
+        st     %g1, [%o0]
+9:     ldub    [%o2], %g1
+10:    ldub    [%o2 + 1], %g2
+       sll     %g1, 24, %g1
+11:    ldub    [%o2 + 2], %g7
+       sll     %g2, 16, %g2
+12:    ldub    [%o2 + 3], %g3
+       sll     %g7, 8, %g7
+       or      %g1, %g2, %g1
+       or      %g7, %g3, %g7
+       or      %g1, %g7, %g7
+13:    ldub    [%o2 + 4], %g1
+       st      %g7, [%o0]
+14:    ldub    [%o2 + 5], %g2
+       sll     %g1, 24, %g1
+15:    ldub    [%o2 + 6], %g7
+       sll     %g2, 16, %g2
+16:    ldub    [%o2 + 7], %g3
+       sll     %g7, 8, %g7
+       or      %g1, %g2, %g1
+       or      %g7, %g3, %g7
+       or      %g1, %g7, %g7
+       st      %g7, [%o0 + 4]
+0:     retl
+        mov    0, %o0
+
+       .section __ex_table,#alloc
+       .word   4b, retl_efault
+       .word   5b, retl_efault
+       .word   6b, retl_efault
+       .word   7b, retl_efault
+       .word   8b, retl_efault
+       .word   9b, retl_efault
+       .word   10b, retl_efault
+       .word   11b, retl_efault
+       .word   12b, retl_efault
+       .word   13b, retl_efault
+       .word   14b, retl_efault
+       .word   15b, retl_efault
+       .word   16b, retl_efault
+       .previous
index a6330fbc9dd935a805aa7b9f05f12dbfe605579a..33857be16661a53e7b2c1741eb51ab02a5fee020 100644 (file)
@@ -175,157 +175,31 @@ static void unaligned_panic(char *str)
        panic(str);
 }
 
-#define do_integer_load(dest_reg, size, saddr, is_signed, errh) ({             \
-__asm__ __volatile__ (                                                         \
-       "cmp    %1, 8\n\t"                                                      \
-       "be     9f\n\t"                                                         \
-       " cmp   %1, 4\n\t"                                                      \
-       "be     6f\n"                                                           \
-"4:\t" " ldub  [%2], %%l1\n"                                                   \
-"5:\t" "ldub   [%2 + 1], %%l2\n\t"                                             \
-       "sll    %%l1, 8, %%l1\n\t"                                              \
-       "tst    %3\n\t"                                                         \
-       "be     3f\n\t"                                                         \
-       " add   %%l1, %%l2, %%l1\n\t"                                           \
-       "sll    %%l1, 16, %%l1\n\t"                                             \
-       "sra    %%l1, 16, %%l1\n"                                               \
-"3:\t" "b      0f\n\t"                                                         \
-       " st    %%l1, [%0]\n"                                                   \
-"6:\t" "ldub   [%2 + 1], %%l2\n\t"                                             \
-       "sll    %%l1, 24, %%l1\n"                                               \
-"7:\t" "ldub   [%2 + 2], %%g7\n\t"                                             \
-       "sll    %%l2, 16, %%l2\n"                                               \
-"8:\t" "ldub   [%2 + 3], %%g1\n\t"                                             \
-       "sll    %%g7, 8, %%g7\n\t"                                              \
-       "or     %%l1, %%l2, %%l1\n\t"                                           \
-       "or     %%g7, %%g1, %%g7\n\t"                                           \
-       "or     %%l1, %%g7, %%l1\n\t"                                           \
-       "b      0f\n\t"                                                         \
-       " st    %%l1, [%0]\n"                                                   \
-"9:\t" "ldub   [%2], %%l1\n"                                                   \
-"10:\t"        "ldub   [%2 + 1], %%l2\n\t"                                             \
-       "sll    %%l1, 24, %%l1\n"                                               \
-"11:\t"        "ldub   [%2 + 2], %%g7\n\t"                                             \
-       "sll    %%l2, 16, %%l2\n"                                               \
-"12:\t"        "ldub   [%2 + 3], %%g1\n\t"                                             \
-       "sll    %%g7, 8, %%g7\n\t"                                              \
-       "or     %%l1, %%l2, %%l1\n\t"                                           \
-       "or     %%g7, %%g1, %%g7\n\t"                                           \
-       "or     %%l1, %%g7, %%g7\n"                                             \
-"13:\t"        "ldub   [%2 + 4], %%l1\n\t"                                             \
-       "st     %%g7, [%0]\n"                                                   \
-"14:\t"        "ldub   [%2 + 5], %%l2\n\t"                                             \
-       "sll    %%l1, 24, %%l1\n"                                               \
-"15:\t"        "ldub   [%2 + 6], %%g7\n\t"                                             \
-       "sll    %%l2, 16, %%l2\n"                                               \
-"16:\t"        "ldub   [%2 + 7], %%g1\n\t"                                             \
-       "sll    %%g7, 8, %%g7\n\t"                                              \
-       "or     %%l1, %%l2, %%l1\n\t"                                           \
-       "or     %%g7, %%g1, %%g7\n\t"                                           \
-       "or     %%l1, %%g7, %%g7\n\t"                                           \
-       "st     %%g7, [%0 + 4]\n"                                               \
-"0:\n\n\t"                                                                     \
-       ".section __ex_table,#alloc\n\t"                                        \
-       ".word  4b, " #errh "\n\t"                                              \
-       ".word  5b, " #errh "\n\t"                                              \
-       ".word  6b, " #errh "\n\t"                                              \
-       ".word  7b, " #errh "\n\t"                                              \
-       ".word  8b, " #errh "\n\t"                                              \
-       ".word  9b, " #errh "\n\t"                                              \
-       ".word  10b, " #errh "\n\t"                                             \
-       ".word  11b, " #errh "\n\t"                                             \
-       ".word  12b, " #errh "\n\t"                                             \
-       ".word  13b, " #errh "\n\t"                                             \
-       ".word  14b, " #errh "\n\t"                                             \
-       ".word  15b, " #errh "\n\t"                                             \
-       ".word  16b, " #errh "\n\n\t"                                           \
-       ".previous\n\t"                                                         \
-       : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed)            \
-       : "l1", "l2", "g7", "g1", "cc");                                        \
-})
-       
-#define store_common(dst_addr, size, src_val, errh) ({                         \
-__asm__ __volatile__ (                                                         \
-       "ld     [%2], %%l1\n"                                                   \
-       "cmp    %1, 2\n\t"                                                      \
-       "be     2f\n\t"                                                         \
-       " cmp   %1, 4\n\t"                                                      \
-       "be     1f\n\t"                                                         \
-       " srl   %%l1, 24, %%l2\n\t"                                             \
-       "srl    %%l1, 16, %%g7\n"                                               \
-"4:\t" "stb    %%l2, [%0]\n\t"                                                 \
-       "srl    %%l1, 8, %%l2\n"                                                \
-"5:\t" "stb    %%g7, [%0 + 1]\n\t"                                             \
-       "ld     [%2 + 4], %%g7\n"                                               \
-"6:\t" "stb    %%l2, [%0 + 2]\n\t"                                             \
-       "srl    %%g7, 24, %%l2\n"                                               \
-"7:\t" "stb    %%l1, [%0 + 3]\n\t"                                             \
-       "srl    %%g7, 16, %%l1\n"                                               \
-"8:\t" "stb    %%l2, [%0 + 4]\n\t"                                             \
-       "srl    %%g7, 8, %%l2\n"                                                \
-"9:\t" "stb    %%l1, [%0 + 5]\n"                                               \
-"10:\t"        "stb    %%l2, [%0 + 6]\n\t"                                             \
-       "b      0f\n"                                                           \
-"11:\t"        " stb   %%g7, [%0 + 7]\n"                                               \
-"1:\t" "srl    %%l1, 16, %%g7\n"                                               \
-"12:\t"        "stb    %%l2, [%0]\n\t"                                                 \
-       "srl    %%l1, 8, %%l2\n"                                                \
-"13:\t"        "stb    %%g7, [%0 + 1]\n"                                               \
-"14:\t"        "stb    %%l2, [%0 + 2]\n\t"                                             \
-       "b      0f\n"                                                           \
-"15:\t"        " stb   %%l1, [%0 + 3]\n"                                               \
-"2:\t" "srl    %%l1, 8, %%l2\n"                                                \
-"16:\t"        "stb    %%l2, [%0]\n"                                                   \
-"17:\t"        "stb    %%l1, [%0 + 1]\n"                                               \
-"0:\n\n\t"                                                                     \
-       ".section __ex_table,#alloc\n\t"                                        \
-       ".word  4b, " #errh "\n\t"                                              \
-       ".word  5b, " #errh "\n\t"                                              \
-       ".word  6b, " #errh "\n\t"                                              \
-       ".word  7b, " #errh "\n\t"                                              \
-       ".word  8b, " #errh "\n\t"                                              \
-       ".word  9b, " #errh "\n\t"                                              \
-       ".word  10b, " #errh "\n\t"                                             \
-       ".word  11b, " #errh "\n\t"                                             \
-       ".word  12b, " #errh "\n\t"                                             \
-       ".word  13b, " #errh "\n\t"                                             \
-       ".word  14b, " #errh "\n\t"                                             \
-       ".word  15b, " #errh "\n\t"                                             \
-       ".word  16b, " #errh "\n\t"                                             \
-       ".word  17b, " #errh "\n\n\t"                                           \
-       ".previous\n\t"                                                         \
-       : : "r" (dst_addr), "r" (size), "r" (src_val)                           \
-       : "l1", "l2", "g7", "g1", "cc");                                        \
-})
-
-#define do_integer_store(reg_num, size, dst_addr, regs, errh) ({               \
-       unsigned long *src_val;                                                 \
-       static unsigned long zero[2] = { 0, };                                  \
-                                                                               \
-       if (reg_num) src_val = fetch_reg_addr(reg_num, regs);                   \
-       else {                                                                  \
-               src_val = &zero[0];                                             \
-               if (size == 8)                                                  \
-                       zero[1] = fetch_reg(1, regs);                           \
-       }                                                                       \
-       store_common(dst_addr, size, src_val, errh);                            \
-})
+/* una_asm.S */
+extern int do_int_load(unsigned long *dest_reg, int size,
+                      unsigned long *saddr, int is_signed);
+extern int __do_int_store(unsigned long *dst_addr, int size,
+                         unsigned long *src_val);
+
+static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+                       struct pt_regs *regs)
+{
+       unsigned long zero[2] = { 0, 0 };
+       unsigned long *src_val;
+
+       if (reg_num)
+               src_val = fetch_reg_addr(reg_num, regs);
+       else {
+               src_val = &zero[0];
+               if (size == 8)
+                       zero[1] = fetch_reg(1, regs);
+       }
+       return __do_int_store(dst_addr, size, src_val);
+}
 
 extern void smp_capture(void);
 extern void smp_release(void);
 
-#define do_atomic(srcdest_reg, mem, errh) ({                                   \
-       unsigned long flags, tmp;                                               \
-                                                                               \
-       smp_capture();                                                          \
-       local_irq_save(flags);                                                  \
-       tmp = *srcdest_reg;                                                     \
-       do_integer_load(srcdest_reg, 4, mem, 0, errh);                          \
-       store_common(mem, 4, &tmp, errh);                                       \
-       local_irq_restore(flags);                                               \
-       smp_release();                                                          \
-})
-
 static inline void advance(struct pt_regs *regs)
 {
        regs->pc   = regs->npc;
@@ -342,9 +216,7 @@ static inline int ok_for_kernel(unsigned int insn)
        return !floating_point_load_or_store_p(insn);
 }
 
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
-
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
        unsigned long g2 = regs->u_regs [UREG_G2];
        unsigned long fixup = search_extables_range(regs->pc, &g2);
@@ -379,48 +251,34 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
                printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
                       regs->pc);
                unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
-
-               __asm__ __volatile__ ("\n"
-"kernel_unaligned_trap_fault:\n\t"
-               "mov    %0, %%o0\n\t"
-               "call   kernel_mna_trap_fault\n\t"
-               " mov   %1, %%o1\n\t"
-               :
-               : "r" (regs), "r" (insn)
-               : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
-                 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
        } else {
                unsigned long addr = compute_effective_address(regs, insn);
+               int err;
 
 #ifdef DEBUG_MNA
                printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
                       regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
 #endif
-               switch(dir) {
+               switch (dir) {
                case load:
-                       do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
-                                       size, (unsigned long *) addr,
-                                       decode_signedness(insn),
-                                       kernel_unaligned_trap_fault);
+                       err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
+                                                        regs),
+                                         size, (unsigned long *) addr,
+                                         decode_signedness(insn));
                        break;
 
                case store:
-                       do_integer_store(((insn>>25)&0x1f), size,
-                                        (unsigned long *) addr, regs,
-                                        kernel_unaligned_trap_fault);
+                       err = do_int_store(((insn>>25)&0x1f), size,
+                                          (unsigned long *) addr, regs);
                        break;
-#if 0 /* unsupported */
-               case both:
-                       do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
-                                 (unsigned long *) addr,
-                                 kernel_unaligned_trap_fault);
-                       break;
-#endif
                default:
                        panic("Impossible kernel unaligned trap.");
                        /* Not reached... */
                }
-               advance(regs);
+               if (err)
+                       kernel_mna_trap_fault(regs, insn);
+               else
+                       advance(regs);
        }
 }
 
@@ -459,9 +317,7 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
        return 0;
 }
 
-void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault");
-
-void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
        siginfo_t info;
 
@@ -485,7 +341,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
        if(!ok_for_user(regs, insn, dir)) {
                goto kill_user;
        } else {
-               int size = decode_access_size(insn);
+               int err, size = decode_access_size(insn);
                unsigned long addr;
 
                if(floating_point_load_or_store_p(insn)) {
@@ -496,48 +352,34 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
                addr = compute_effective_address(regs, insn);
                switch(dir) {
                case load:
-                       do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
-                                       size, (unsigned long *) addr,
-                                       decode_signedness(insn),
-                                       user_unaligned_trap_fault);
+                       err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
+                                                        regs),
+                                         size, (unsigned long *) addr,
+                                         decode_signedness(insn));
                        break;
 
                case store:
-                       do_integer_store(((insn>>25)&0x1f), size,
-                                        (unsigned long *) addr, regs,
-                                        user_unaligned_trap_fault);
+                       err = do_int_store(((insn>>25)&0x1f), size,
+                                          (unsigned long *) addr, regs);
                        break;
 
                case both:
-#if 0 /* unsupported */
-                       do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
-                                 (unsigned long *) addr,
-                                 user_unaligned_trap_fault);
-#else
                        /*
                         * This was supported in 2.4. However, we question
                         * the value of SWAP instruction across word boundaries.
                         */
                        printk("Unaligned SWAP unsupported.\n");
-                       goto kill_user;
-#endif
+                       err = -EFAULT;
                        break;
 
                default:
                        unaligned_panic("Impossible user unaligned trap.");
-
-                       __asm__ __volatile__ ("\n"
-"user_unaligned_trap_fault:\n\t"
-                       "mov    %0, %%o0\n\t"
-                       "call   user_mna_trap_fault\n\t"
-                       " mov   %1, %%o1\n\t"
-                       :
-                       : "r" (regs), "r" (insn)
-                       : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
-                         "g1", "g2", "g3", "g4", "g5", "g7", "cc");
                        goto out;
                }
-               advance(regs);
+               if (err)
+                       goto kill_user;
+               else
+                       advance(regs);
                goto out;
        }
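
The unaligned.c rewrite above replaces asm macros that branched to an error label with C-callable helpers returning 0 or -EFAULT; the caller then decides whether to advance the PC or take the trap-fault path. A standalone sketch of that control flow with stand-in helpers (the real ones rely on __ex_table fixups in una_asm.S):

#include <errno.h>
#include <stddef.h>

/* stand-in for do_int_load()/__do_int_store() */
static int int_load_stub(unsigned long *dst, const unsigned long *src)
{
        if (src == NULL)
                return -EFAULT;   /* plays the role of the exception fixup */
        *dst = *src;
        return 0;
}

static void handle_unaligned(unsigned long *dst, const unsigned long *src,
                             void (*trap_fault)(void), void (*advance)(void))
{
        if (int_load_stub(dst, src))
                trap_fault();   /* was: branch to kernel_unaligned_trap_fault */
        else
                advance();      /* only step past the insn when it worked */
}
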
 
index 3af378ddb6ae8008db5619074414b489165370a6..463d1be32c98614cea3fc98cb31154895a5858eb 100644 (file)
@@ -10,6 +10,7 @@ config SPARC
        default y
        select HAVE_OPROFILE
        select HAVE_KPROBES
+       select HAVE_KRETPROBES
 
 config SPARC64
        bool
index e43db73f2b911151b1e0194b5a7595c8945474b9..dd5d28e3d79899df97342266e79c1372a9c79707 100644 (file)
@@ -30,7 +30,7 @@ struct cpu_fp_info {
   char* fp_name;
 };
 
-struct cpu_fp_info linux_sparc_fpu[] = {
+static struct cpu_fp_info linux_sparc_fpu[] = {
   { 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
   { 0x22, 0x10, 0, "UltraSparc I integrated FPU"},
   { 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
@@ -46,7 +46,7 @@ struct cpu_fp_info linux_sparc_fpu[] = {
 
 #define NSPARCFPU  ARRAY_SIZE(linux_sparc_fpu)
 
-struct cpu_iu_info linux_sparc_chips[] = {
+static struct cpu_iu_info linux_sparc_chips[] = {
   { 0x17, 0x10, "TI UltraSparc I   (SpitFire)"},
   { 0x22, 0x10, "TI UltraSparc I   (SpitFire)"},
   { 0x17, 0x11, "TI UltraSparc II  (BlackBird)"},
index eeb5a2fc788d2615e47660ae7c6962ca1777126a..bd76482077be66aa3e9e1d6e22d3523a34e4597a 100644 (file)
@@ -525,10 +525,10 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
        }
 }
 
-static int dr_cpu_configure(struct ds_info *dp,
-                           struct ds_cap_state *cp,
-                           u64 req_num,
-                           cpumask_t *mask)
+static int __cpuinit dr_cpu_configure(struct ds_info *dp,
+                                     struct ds_cap_state *cp,
+                                     u64 req_num,
+                                     cpumask_t *mask)
 {
        struct ds_data *resp;
        int resp_len, ncpus, cpu;
@@ -623,9 +623,9 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
        return 0;
 }
 
-static void dr_cpu_data(struct ds_info *dp,
-                       struct ds_cap_state *cp,
-                       void *buf, int len)
+static void __cpuinit dr_cpu_data(struct ds_info *dp,
+                                 struct ds_cap_state *cp,
+                                 void *buf, int len)
 {
        struct ds_data *data = buf;
        struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
index 856659bb13116ed7444c263ec1a9f8087c660ec7..9100835895691e1c8f93f6ac38b8bfcf42db7132 100644 (file)
@@ -758,7 +758,7 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
        get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
 }
 
-void __devinit mdesc_fill_in_cpu_data(cpumask_t mask)
+void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
 {
        struct mdesc_handle *hp = mdesc_grab();
        u64 mp;
index 2aafce7dfc0ecf31b42ba94f422032208553220e..e116e38b160ec3cd9a9b8a5aaecfef2ff2d659db 100644 (file)
@@ -114,8 +114,6 @@ void cpu_idle(void)
        }
 }
 
-extern char reboot_command [];
-
 void machine_halt(void)
 {
        sstate_halt();
index e2027f27c0fe73f2f43546aa838577e86fc6e3ba..2650d0d33ac25cbc656baa97adec4c29e1a65a93 100644 (file)
@@ -244,16 +244,8 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;
 
-               if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
-                       if (insn & 0x2000)
-                               asi = (regs->tstate >> 24);
-                       else
-                               asi = (insn >> 5);
-               }
-       
-               /* Look in asi.h: All _S asis have LS bit set */
-               if ((asi & 0x1) &&
-                   (entry = search_exception_tables(regs->tpc))) {
+               entry = search_exception_tables(regs->tpc);
+               if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
@@ -294,7 +286,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                unsigned long tpc = regs->tpc;
 
                /* Sanity check the PC. */
-               if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
+               if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
index 9e6bca266d88338df73baf4dd1079a5c17672b69..b5c30416fdac221e9bc31c4d720865769c988e85 100644 (file)
@@ -1010,7 +1010,8 @@ static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
 static int pall_ents __initdata;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+static unsigned long __ref kernel_map_range(unsigned long pstart,
+                                           unsigned long pend, pgprot_t prot)
 {
        unsigned long vstart = PAGE_OFFSET + pstart;
        unsigned long vend = PAGE_OFFSET + pend;
index 5faf59a9de399e33eb6a07592b768b5904658bca..50e58232cf2b014c128ee9148761c121f947e11b 100644 (file)
@@ -28,7 +28,7 @@ extern unsigned sunos_sys_table[];
 #define SUNOS(x) ((long)sunos_sys_table[x])
 
 #ifdef DEBUG_SOLARIS
-#define SOLD(s) printk("%s,%d,%s(): %s\n",__FILE__,__LINE__,__FUNCTION__,(s))
+#define SOLD(s) printk("%s,%d,%s(): %s\n",__FILE__,__LINE__,__func__,(s))
 #define SOLDD(s) printk("solaris: "); printk s
 #else
 #define SOLD(s)
index f53123c02c2b6b76f9b687bd1f675753d1342633..15234fcd191a209041a6dd6712cdfae9c7fd3000 100644 (file)
@@ -81,7 +81,7 @@ void mykfree(void *p)
 #define MKCTL_MAGIC    0xDEADBABEBADC0DEDL
 #define PUT_MAGIC(a,m) do{(*(u64*)(a))=(m);}while(0)
 #define SCHECK_MAGIC(a,m)      do{if((*(u64*)(a))!=(m))printk("%s,%u,%s(): magic %08x at %p corrupted!\n",\
-                               __FILE__,__LINE__,__FUNCTION__,(m),(a));}while(0)
+                               __FILE__,__LINE__,__func__,(m),(a));}while(0)
 #define BUF_OFFSET     sizeof(u64)
 #define MKCTL_TRAILER  sizeof(u64)
 
index fc50d2f959d12f44bc6c41171f3cc030d6ad38fa..e8cb9ff183e9fe1da41020bc39ffe22b43ef56c7 100644 (file)
@@ -128,8 +128,6 @@ void *get_current(void)
        return current;
 }
 
-extern void schedule_tail(struct task_struct *prev);
-
 /*
  * This is called magically, by its address being stuffed in a jmp_buf
  * and being longjmp-d to.
index 4a88cf7695b418927c61ce760259db85bbd68cfe..f41c9538ca303f2fdb5b231cc9c9592b72024e23 100644 (file)
@@ -21,7 +21,8 @@ config X86
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_KPROBES
-       select HAVE_KVM
+       select HAVE_KRETPROBES
+       select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 
 
 config GENERIC_LOCKBREAK
index e09a6b73a1aab5c3fc6a353e22af4936b047416f..9304bfba7d450fae5dc61ffa627a49fe1fdfeae6 100644 (file)
@@ -377,6 +377,19 @@ config X86_OOSTORE
        def_bool y
        depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
 
+#
+# P6_NOPs are a relatively minor optimization that require a family >=
+# 6 processor, except that it is broken on certain VIA chips.
+# Furthermore, AMD chips prefer a totally different sequence of NOPs
+# (which work on all CPUs).  As a result, disallow these if we're
+# compiling X86_GENERIC but not X86_64 (these NOPs do work on all
+# x86-64 capable chips); the list of processors in the right-hand clause
+# are the cores that benefit from this optimization.
+#
+config X86_P6_NOP
+       def_bool y
+       depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4)
+
 config X86_TSC
        def_bool y
        depends on ((MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
@@ -390,6 +403,7 @@ config X86_CMOV
 config X86_MINIMUM_CPU_FAMILY
        int
        default "64" if X86_64
+       default "6" if X86_32 && X86_P6_NOP
        default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
        default "3"
 
index 378353956b5dfc86469b66419b9680e1770a7410..e77d89f9e8aa23c13751bee268718e8ed7ee54a7 100644 (file)
@@ -37,6 +37,12 @@ static int detect_memory_e820(void)
                      "=m" (*desc)
                    : "D" (desc), "d" (SMAP), "a" (0xe820));
 
+               /* BIOSes which terminate the chain with CF = 1 as opposed
+                  to %ebx = 0 don't always report the SMAP signature on
+                  the final, failing, probe. */
+               if (err)
+                       break;
+
                /* Some BIOSes stop returning SMAP in the middle of
                   the search loop.  We don't know exactly how the BIOS
                   screwed up the map at that point, we might have a
@@ -47,9 +53,6 @@ static int detect_memory_e820(void)
                        break;
                }
 
-               if (err)
-                       break;
-
                count++;
                desc++;
        } while (next && count < E820MAX);
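
The boot/memory.c hunk above moves the error check ahead of the SMAP signature test, because a BIOS that terminates the chain with CF=1 may not report the signature on that final, failing probe. A simplified standalone sketch of the resulting loop shape; e820_probe() and its result fields are hypothetical stand-ins for the INT 15h, AX=E820h call.

#include <stdint.h>

#define SMAP     0x534d4150u
#define E820MAX  128

struct probe_result {
        int err;                /* stand-in for the carry flag */
        uint32_t signature;     /* should come back as "SMAP"  */
        uint32_t next;          /* continuation value (%ebx)   */
};

static int scan_e820(struct probe_result (*e820_probe)(uint32_t cont))
{
        uint32_t next = 0;
        int count = 0;

        do {
                struct probe_result r = e820_probe(next);

                if (r.err)
                        break;          /* CF=1 terminator: check this first */
                if (r.signature != SMAP)
                        break;          /* BIOS stopped returning SMAP       */
                count++;
                next = r.next;
        } while (next && count < E820MAX);

        return count;
}
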
index a33d53017997df4849660cccbab4f45a223a2bd3..8ea040124f7dc930de83225dbe45d9c642e7f196 100644 (file)
@@ -128,13 +128,11 @@ void foo(void)
        OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
 #endif
 
-#ifdef CONFIG_LGUEST_GUEST
+#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
        BLANK();
        OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
        OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
-#endif
 
-#ifdef CONFIG_LGUEST
        BLANK();
        OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
        OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
index f86a3c4a2669909be340d195a6ee93201c53cdf9..a38aafaefc230b4bfc49c34c98fc2c443fff8318 100644 (file)
@@ -504,7 +504,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
        /* Clear all flags overriden by options */
        for (i = 0; i < NCAPINTS; i++)
-               c->x86_capability[i] ^= cleared_cpu_caps[i];
+               c->x86_capability[i] &= ~cleared_cpu_caps[i];
 
        /* Init Machine Check Exception if available. */
        mcheck_init(c);
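
The one-character fix above deserves a concrete illustration: XOR toggles bits, so clearing with ^= would actually turn a feature on whenever its bit in cleared_cpu_caps was set but the CPU never reported it, while &= ~ always clears. A minimal demonstration with made-up bit values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t caps    = 0x0000000f;  /* features reported by the CPU     */
        uint32_t cleared = 0x00000014;  /* bits 2 and 4; bit 4 was never set */

        uint32_t with_xor = caps ^ cleared;     /* old code */
        uint32_t with_and = caps & ~cleared;    /* fixed code */

        printf("xor   : %#010x\n", with_xor);   /* 0x0000001b: bit 4 turned ON */
        printf("andnot: %#010x\n", with_and);   /* 0x0000000b: bit 2 cleared   */
        return 0;
    }
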
index b6e136f23d3d3219094bc9fdadaeaba048f01b96..be83336fddba9c6c3629bbe1748aa42480a6e707 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
+#include <asm/kvm_para.h>
 #include "mtrr.h"
 
 u32 num_var_ranges = 0;
@@ -649,6 +650,7 @@ static __init int amd_special_default_mtrr(void)
 
 /**
  * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
+ * @end_pfn: ending page frame number
  *
  * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
  * memory configurations.  This routine checks that the highest MTRR matches
@@ -688,8 +690,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
        /* kvm/qemu doesn't have mtrr set right, don't trim them all */
        if (!highest_pfn) {
-               printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
-               WARN_ON(1);
+               if (!kvm_para_available()) {
+                       printk(KERN_WARNING
+                               "WARNING: strange, CPU MTRRs all blank?\n");
+                       WARN_ON(1);
+               }
                return 0;
        }
 
index 200fb3f9ebfbda2ec232deec234ded94128f4cfb..e8b422c1c51267419275e5db474fbd2b5e237ea3 100644 (file)
@@ -76,13 +76,6 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
        /* All Transmeta CPUs have a constant TSC */
        set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
        
-       /* If we can run i686 user-space code, call us an i686 */
-#define USER686 ((1 << X86_FEATURE_TSC)|\
-                (1 << X86_FEATURE_CX8)|\
-                (1 << X86_FEATURE_CMOV))
-        if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686)
-               c->x86 = 6;
-
 #ifdef CONFIG_SYSCTL
        /* randomize_va_space slows us down enormously;
           it probably triggers retranslation of x86->native bytecode */
index 2ad9a1bc6a73fafd6ea8e34b9117877eb2e736dd..c20c9e7e08dd2a644860dabc206ae1fb843ea173 100644 (file)
@@ -453,6 +453,7 @@ ENTRY(stub_execve)
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
+       movq %rsp, %rcx
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
@@ -1036,15 +1037,16 @@ ENDPROC(child_rip)
  *     rdi: name, rsi: argv, rdx: envp
  *
  * We want to fallback into:
- *     extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
+ *     extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
  *
  * do_sys_execve asm fallback arguments:
- *     rdi: name, rsi: argv, rdx: envp, fake frame on the stack
+ *     rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
  */
 ENTRY(kernel_execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL        
+       movq %rsp,%rcx
        call sys_execve
        movq %rax, RAX(%rsp)    
        RESTORE_REST
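
Both stubs now copy %rsp into %rcx before calling sys_execve because, per the prototype change shown in the comment, the register frame is passed as struct pt_regs *regs rather than by value, so it has to arrive as an ordinary fourth argument; in the x86-64 SysV calling convention the first four integer or pointer arguments travel in %rdi, %rsi, %rdx and %rcx. A hypothetical C model of the callee side, just to make the register-to-parameter mapping visible:

    #include <stdio.h>

    struct pt_regs_model { unsigned long sp; };  /* stand-in for struct pt_regs */

    /* args arrive in:           %rdi          %rsi         %rdx            %rcx */
    static long model_sys_execve(const char *name, char **argv, char **envp,
                                 struct pt_regs_model *regs)
    {
        /* the callee reaches the saved register frame through 'regs',
         * which the asm stub built by copying %rsp into %rcx */
        printf("execve(%s), frame at %p\n", name, (void *)regs);
        (void)argv; (void)envp;
        return 0;
    }

    int main(void)
    {
        struct pt_regs_model frame = { .sp = 0 };
        return (int)model_sys_execve("/bin/true", NULL, NULL, &frame);
    }
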
index 25eb98540a41e067f7593753a51b8430fd8930df..fd8ca53943a8e0f01b86bb0157d5027de0bcf5bd 100644 (file)
@@ -606,7 +606,7 @@ ENTRY(_stext)
 .section ".bss.page_aligned","wa"
        .align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-ENTRY(swapper_pg_pmd)
+swapper_pg_pmd:
        .fill 1024*KPMDS,4,0
 #else
 ENTRY(swapper_pg_dir)
index eb415043a9297d742f3df5d9a1de10fa35d0b69b..a007454133a33743b60fce4ebd3c90e8c32ee85e 100644 (file)
@@ -379,18 +379,24 @@ NEXT_PAGE(level2_ident_pgt)
        /* Since I easily can, map the first 1G.
         * Don't set NX because code runs from these pages.
         */
-       PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
+       PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
 
 NEXT_PAGE(level2_kernel_pgt)
-       /* 40MB kernel mapping. The kernel code cannot be bigger than that.
-          When you change this change KERNEL_TEXT_SIZE in page.h too. */
-       /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
-       PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
-       /* Module mapping starts here */
-       .fill   (PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0
+       /*
+        * 128 MB kernel mapping. We spend a full page on this pagetable
+        * anyway.
+        *
+        * The kernel code+data+bss must not be bigger than that.
+        *
+        * (NOTE: at +128MB starts the module area, see MODULES_VADDR.
+        *  If you want to increase this then increase MODULES_VADDR
+        *  too.)
+        */
+       PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
+               KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_spare_pgt)
-       .fill   512,8,0
+       .fill   512, 8, 0
 
 #undef PMDS
 #undef NEXT_PAGE
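
The updated level2_kernel_pgt comment really comes down to a small piece of arithmetic: with the usual x86-64 2 MB PMD mappings, a 128 MB KERNEL_IMAGE_SIZE takes 64 PMD entries, and since one pagetable page holds 512 entries the full page is spent on it anyway. A quick check of those numbers (constants hard-coded here for illustration):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long PMD_SIZE          = 2UL << 20;    /* 2 MB       */
        const unsigned long KERNEL_IMAGE_SIZE = 128UL << 20;  /* 128 MB     */
        const unsigned long PTRS_PER_PMD      = 512;          /* per page   */

        unsigned long used = KERNEL_IMAGE_SIZE / PMD_SIZE;

        printf("PMD entries used : %lu\n", used);                 /* 64      */
        printf("PMD entries spare: %lu\n", PTRS_PER_PMD - used);  /* 448     */
        printf("max mappable     : %lu MB\n",
               (PTRS_PER_PMD * PMD_SIZE) >> 20);                  /* 1024 MB */
        return 0;
    }
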
index 429d084e014d4b11a829bbf01f53fdc860e276e2..235fd6c77504c9baedcfddd1610b143910e97e21 100644 (file)
@@ -368,8 +368,8 @@ static int hpet_clocksource_register(void)
        return 0;
 }
 
-/*
- * Try to setup the HPET timer
+/**
+ * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
  */
 int __init hpet_enable(void)
 {
index 763dfc407232b0133750f3f61898a120f0685ebc..60fe8015756961fa44851f7ad5d701337e4c9a87 100644 (file)
@@ -132,7 +132,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
        if (!cpu_has_fxsr)
                return -ENODEV;
 
-       unlazy_fpu(target);
+       init_fpu(target);
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.i387.fxsave, 0, -1);
@@ -147,7 +147,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
        if (!cpu_has_fxsr)
                return -ENODEV;
 
-       unlazy_fpu(target);
+       init_fpu(target);
        set_stopped_child_used_math(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -307,7 +307,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
        if (!HAVE_HWFP)
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-       unlazy_fpu(target);
+       init_fpu(target);
 
        if (!cpu_has_fxsr)
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -332,7 +332,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
        if (!HAVE_HWFP)
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-       unlazy_fpu(target);
+       init_fpu(target);
        set_stopped_child_used_math(target);
 
        if (!cpu_has_fxsr)
index 5b3ce7934363af3a5a3db53c471b889bcd4b834e..3d01e47777db6106a2387016f39a6c70bd744ece 100644 (file)
@@ -15,6 +15,7 @@ static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_UNUSED_SYMBOL(init_mm); /* will be removed in 2.6.26 */
 
 /*
  * Initial thread structure.
index a7d50a547dc2a8a1be9f2af87a52226d95c6438c..be3c7a299f02541cb8f1e03797f737230dcca0ce 100644 (file)
@@ -603,11 +603,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 #endif
 
+#ifdef X86_BTS
        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+#endif
 
 
        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
index b0cc8f0136d8096e9e4cdad316787f89d4f55a8c..3baf9b9f4c87e9f941223b7ca0e862f52f3a818c 100644 (file)
@@ -604,11 +604,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
 
+#ifdef X86_BTS
        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+#endif
 }
 
 /*
@@ -730,16 +732,16 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
  */
 asmlinkage
 long sys_execve(char __user *name, char __user * __user *argv,
-               char __user * __user *envp, struct pt_regs regs)
+               char __user * __user *envp, struct pt_regs *regs)
 {
        long error;
        char * filename;
 
        filename = getname(name);
        error = PTR_ERR(filename);
-       if (IS_ERR(filename)) 
+       if (IS_ERR(filename))
                return error;
-       error = do_execve(filename, argv, envp, &regs); 
+       error = do_execve(filename, argv, envp, regs);
        putname(filename);
        return error;
 }
index d862e396b0994e0878a481efd6c9d81edfc90cbd..f41fdc98efb14388bf6a4eb8b7f4f8f7efebcc7f 100644 (file)
@@ -544,6 +544,8 @@ static int ptrace_set_debugreg(struct task_struct *child,
        return 0;
 }
 
+#ifdef X86_BTS
+
 static int ptrace_bts_get_size(struct task_struct *child)
 {
        if (!child->thread.ds_area_msr)
@@ -826,6 +828,7 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
 
        ptrace_bts_write_record(tsk, &rec);
 }
+#endif /* X86_BTS */
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -839,7 +842,9 @@ void ptrace_disable(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 #endif
        if (child->thread.ds_area_msr) {
+#ifdef X86_BTS
                ptrace_bts_realloc(child, 0, 0);
+#endif
                child->thread.debugctlmsr &= ~ds_debugctl_mask();
                if (!child->thread.debugctlmsr)
                        clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
@@ -961,6 +966,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 #endif
 
+       /*
+        * These bits need more cooking - not enabled yet:
+        */
+#ifdef X86_BTS
        case PTRACE_BTS_CONFIG:
                ret = ptrace_bts_config
                        (child, data, (struct ptrace_bts_config __user *)addr);
@@ -988,6 +997,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = ptrace_bts_drain
                        (child, data, (struct bts_struct __user *) addr);
                break;
+#endif
 
        default:
                ret = ptrace_request(child, request, addr, data);
@@ -1226,12 +1236,14 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
        case PTRACE_SETOPTIONS:
        case PTRACE_SET_THREAD_AREA:
        case PTRACE_GET_THREAD_AREA:
+#ifdef X86_BTS
        case PTRACE_BTS_CONFIG:
        case PTRACE_BTS_STATUS:
        case PTRACE_BTS_SIZE:
        case PTRACE_BTS_GET:
        case PTRACE_BTS_CLEAR:
        case PTRACE_BTS_DRAIN:
+#endif
                return sys_ptrace(request, pid, addr, data);
 
        default:
index 6fd804f0782145be3b45c223590c5c287a9bdb57..7637dc91c79bebac16365bbcff3b480c408584ef 100644 (file)
@@ -1021,7 +1021,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
        /* Clear all flags overriden by options */
        for (i = 0; i < NCAPINTS; i++)
-               c->x86_capability[i] ^= cleared_cpu_caps[i];
+               c->x86_capability[i] &= ~cleared_cpu_caps[i];
 
 #ifdef CONFIG_X86_MCE
        mcheck_init(c);
index d53bd6fcb42877106dea1c2e8500c31f2614f15a..0880f2c388a901fd318ac39856e4105b98e4d09d 100644 (file)
@@ -554,10 +554,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
        int timeout;
        unsigned long start_rip;
        struct create_idle c_idle = {
-               .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                .cpu = cpu,
                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
+       INIT_WORK(&c_idle.work, do_fork_idle);
 
        /* allocate memory for gdts of secondary cpus. Hotplug is considered */
        if (!cpu_gdt_descr[cpu].address &&
index 02f0f61f5b1131a511851546393ef06665787507..c28c342c162f0a90d3ad6e50ba0fb1d5ecd132a7 100644 (file)
@@ -25,6 +25,8 @@ static int save_stack_stack(void *data, char *name)
 static void save_stack_address(void *data, unsigned long addr, int reliable)
 {
        struct stack_trace *trace = data;
+       if (!reliable)
+               return;
        if (trace->skip > 0) {
                trace->skip--;
                return;
@@ -37,6 +39,8 @@ static void
 save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
        struct stack_trace *trace = (struct stack_trace *)data;
+       if (!reliable)
+               return;
        if (in_sched_functions(addr))
                return;
        if (trace->skip > 0) {
index 6dfd4e76661a18d11643eef1d8bafe64783749da..022bcaa3b42ed0c9e270a9734ad36c77bd3092f0 100644 (file)
@@ -91,7 +91,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
 
 asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
 {
-       return do_set_thread_area(current, -1, u_info, 1);
+       int ret = do_set_thread_area(current, -1, u_info, 1);
+       prevent_tail_call(ret);
+       return ret;
 }
 
 
@@ -139,7 +141,9 @@ int do_get_thread_area(struct task_struct *p, int idx,
 
 asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 {
-       return do_get_thread_area(current, -1, u_info);
+       int ret = do_get_thread_area(current, -1, u_info);
+       prevent_tail_call(ret);
+       return ret;
 }
 
 int regset_tls_active(struct task_struct *target,
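
prevent_tail_call() is presumably there to stop the compiler from turning these returns into sibling (tail) calls: with asmlinkage on 32-bit x86 the syscall arguments live in stack slots that overlap the saved register frame, and a tail call could let the callee's own argument setup clobber them before the exit path restores registers to user space. The macro below is a hypothetical equivalent, an empty asm that consumes and reproduces the return value so the call can no longer sit in tail position:

    #include <stdio.h>

    /* Hypothetical equivalent of the kernel's prevent_tail_call(): forces
     * the compiler to keep an ordinary call followed by a use of the
     * return value, instead of a tail-call jmp into the helper. */
    #define prevent_tail_call(ret) __asm__("" : "=r" (ret) : "0" (ret))

    static long helper(long x)
    {
        return x + 1;
    }

    long without_guard(long x)
    {
        return helper(x);           /* may compile to a tail call (jmp) */
    }

    long with_guard(long x)
    {
        long ret = helper(x);
        prevent_tail_call(ret);     /* keeps a real call + return       */
        return ret;
    }

    int main(void)
    {
        printf("%ld %ld\n", without_guard(1), with_guard(1));
        return 0;
    }

Comparing the assembly generated at -O2 should show a jmp into helper() for without_guard() and a proper call for with_guard().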
index 43517e324be83e73096fe28a39ec5275c9a378e0..f14cfd9d1f94c1a634833da25e86eea9cb43b864 100644 (file)
@@ -28,7 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-                               "cannot disable TSC.\n");
+                               "cannot disable TSC completely.\n");
+       mark_tsc_unstable("user disabled TSC");
        return 1;
 }
 #else
index 3f82427745802f0e30ff19ff5ed5aee7e50d7fa2..edff4c9854854429fda22ce2d35f7f08fa64c593 100644 (file)
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
 #define __syscall_clobber "r11","cx","memory"
-#define __pa_vsymbol(x)                        \
-       ({unsigned long v;              \
-       extern char __vsyscall_0;       \
-         asm("" : "=r" (v) : "0" (x)); \
-         ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
 
 /*
  * vsyscall_gtod_data contains data that is :
@@ -102,7 +97,7 @@ static __always_inline void do_get_tz(struct timezone * tz)
 static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 {
        int ret;
-       asm volatile("vsysc2: syscall"
+       asm volatile("syscall"
                : "=a" (ret)
                : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
                : __syscall_clobber );
@@ -112,7 +107,7 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 static __always_inline long time_syscall(long *t)
 {
        long secs;
-       asm volatile("vsysc1: syscall"
+       asm volatile("syscall"
                : "=a" (secs)
                : "0" (__NR_time),"D" (t) : __syscall_clobber);
        return secs;
@@ -228,42 +223,11 @@ long __vsyscall(3) venosys_1(void)
 
 #ifdef CONFIG_SYSCTL
 
-#define SYSCALL 0x050f
-#define NOP2    0x9090
-
-/*
- * NOP out syscall in vsyscall page when not needed.
- */
-static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-                        void __user *buffer, size_t *lenp, loff_t *ppos)
+static int
+vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
+                      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       extern u16 vsysc1, vsysc2;
-       u16 __iomem *map1;
-       u16 __iomem *map2;
-       int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-       if (!write)
-               return ret;
-       /* gcc has some trouble with __va(__pa()), so just do it this
-          way. */
-       map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
-       if (!map1)
-               return -ENOMEM;
-       map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
-       if (!map2) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       if (!vsyscall_gtod_data.sysctl_enabled) {
-               writew(SYSCALL, map1);
-               writew(SYSCALL, map2);
-       } else {
-               writew(NOP2, map1);
-               writew(NOP2, map2);
-       }
-       iounmap(map2);
-out:
-       iounmap(map1);
-       return ret;
+       return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
 }
 
 static ctl_table kernel_table2[] = {
@@ -279,7 +243,6 @@ static ctl_table kernel_root_table2[] = {
          .child = kernel_table2 },
        {}
 };
-
 #endif
 
 /* Assume __initcall executes before all user space. Hopefully kmod
index 2cbee9479ce423850a99df39290e3efbbe51ae48..68a6b1511934760e97117080166f78b9a7d5ab72 100644 (file)
@@ -647,6 +647,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
        apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
                    APIC_BUS_CYCLE_NS * apic->timer.divide_count;
        atomic_set(&apic->timer.pending, 0);
+
+       if (!apic->timer.period)
+               return;
+
        hrtimer_start(&apic->timer.dev,
                      ktime_add_ns(now, apic->timer.period),
                      HRTIMER_MODE_ABS);
index 8efdcdbebb0356483816de769856fd57c20944ea..d8172aabc660de7503c43b697fea470d81f7eb9a 100644 (file)
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             unsigned level,
                                             int metaphysical,
                                             unsigned access,
-                                            u64 *parent_pte,
-                                            bool *new_page)
+                                            u64 *parent_pte)
 {
        union kvm_mmu_page_role role;
        unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
-       if (new_page)
-               *new_page = 1;
        return sp;
 }
 
@@ -876,11 +873,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
+       struct page *page;
+
        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        if (gpa == UNMAPPED_GVA)
                return NULL;
-       return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+       down_read(&current->mm->mmap_sem);
+       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       up_read(&current->mm->mmap_sem);
+
+       return page;
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
@@ -999,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, ACC_ALL, &table[index],
-                                                    NULL);
+                                                    1, ACC_ALL, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                kvm_release_page_clean(page);
@@ -1020,15 +1023,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
        struct page *page;
 
+       down_read(&vcpu->kvm->slots_lock);
+
        down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, gfn);
+       up_read(&current->mm->mmap_sem);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
        r = __nonpaging_map(vcpu, v, write, gfn, page);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        return r;
 }
@@ -1090,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
@@ -1111,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                     ACC_ALL, NULL, NULL);
+                                     ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
@@ -1172,7 +1178,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-       pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+       pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
        mmu_free_roots(vcpu);
 }
 
@@ -1362,6 +1368,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        gfn_t gfn;
        int r;
        u64 gpte = 0;
+       struct page *page;
 
        if (bytes != 4 && bytes != 8)
                return;
@@ -1389,6 +1396,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        if (!is_present_pte(gpte))
                return;
        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+
+       down_read(&current->mm->mmap_sem);
+       page = gfn_to_page(vcpu->kvm, gfn);
+       up_read(&current->mm->mmap_sem);
+
        vcpu->arch.update_pte.gfn = gfn;
        vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
 }
@@ -1496,9 +1508,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        gpa_t gpa;
        int r;
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
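
The locking changes in these KVM MMU hunks all follow one pattern: guest-physical translations and memslot walks are now serialized by kvm->slots_lock (a per-VM rwsem), while current->mm->mmap_sem is taken only around the short gfn_to_page() lookup and dropped again before mmu_lock is acquired. A small pthread sketch of that ordering; purely illustrative, not kernel code (link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Model of the lock order the patch establishes:
     *   slots_lock (outer, read-held across the fault path)
     *   mmap_sem   (inner, held only around the page lookup)
     *   mmu_lock   (taken after mmap_sem has been dropped)
     */
    static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_rwlock_t mmap_sem   = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t  mmu_lock   = PTHREAD_MUTEX_INITIALIZER;

    static void *gfn_to_page_model(unsigned long gfn)
    {
        /* stand-in for gfn_to_page(): needs mmap_sem for get_user_pages() */
        return (void *)(gfn << 12);
    }

    static int nonpaging_map_model(unsigned long gfn)
    {
        void *page;

        pthread_rwlock_rdlock(&slots_lock);   /* memslots stay stable      */

        pthread_rwlock_rdlock(&mmap_sem);     /* only for the page lookup  */
        page = gfn_to_page_model(gfn);
        pthread_rwlock_unlock(&mmap_sem);     /* dropped before mmu_lock   */

        pthread_mutex_lock(&mmu_lock);
        printf("mapping gfn %#lx -> page %p\n", gfn, page);
        pthread_mutex_unlock(&mmu_lock);

        pthread_rwlock_unlock(&slots_lock);
        return 0;
    }

    int main(void)
    {
        return nonpaging_map_model(0x1234);
    }
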
index 03ba8608fe0f43816c91fc2bdc77b1c22725d7bb..ecc0856268c47c8c7a5c5773ac0d239d37dbc58e 100644 (file)
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
        pt_element_t *table;
        struct page *page;
 
+       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(kvm, table_gfn);
+       up_read(&current->mm->mmap_sem);
+
        table = kmap_atomic(page, KM_USER0);
 
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
@@ -140,7 +143,7 @@ walk:
        }
 #endif
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-              (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+              (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
 
        pt_access = ACC_ALL;
 
@@ -297,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;
-               bool new_page = 0;
 
                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                if (level == PT_PAGE_TABLE_LEVEL)
@@ -319,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               metaphysical, access,
-                                              shadow_ent, &new_page);
-               if (new_page && !metaphysical) {
+                                              shadow_ent);
+               if (!metaphysical) {
                        int r;
                        pt_element_t curr_pte;
                        r = kvm_read_guest_atomic(vcpu->kvm,
@@ -378,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
        if (r)
                return r;
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        /*
         * Look up the shadow pte for the faulting address.
         */
@@ -392,11 +394,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                pgprintk("%s: guest page fault\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-               up_read(&current->mm->mmap_sem);
+               up_read(&vcpu->kvm->slots_lock);
                return 0;
        }
 
+       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, walker.gfn);
+       up_read(&current->mm->mmap_sem);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
@@ -413,14 +417,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         */
        if (shadow_pte && is_io_pte(*shadow_pte)) {
                spin_unlock(&vcpu->kvm->mmu_lock);
-               up_read(&current->mm->mmap_sem);
+               up_read(&vcpu->kvm->slots_lock);
                return 1;
        }
 
        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
        spin_unlock(&vcpu->kvm->mmu_lock);
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        return write_pt;
 }
index de755cb1431dcef84617b04e29eacb5a06fc59d0..1a582f1090e895aaa19634aa21d02ccf6c1584f0 100644 (file)
@@ -792,6 +792,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        vcpu->arch.cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+       if (!vcpu->fpu_active) {
+               svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+               cr0 |= X86_CR0_TS;
+       }
        svm->vmcb->save.cr0 = cr0;
 }
 
@@ -1096,6 +1100,24 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
        case MSR_IA32_SYSENTER_ESP:
                *data = svm->vmcb->save.sysenter_esp;
                break;
+       /* Nobody will change the following 5 values in the VMCB so
+          we can safely return them on rdmsr. They will always be 0
+          until LBRV is implemented. */
+       case MSR_IA32_DEBUGCTLMSR:
+               *data = svm->vmcb->save.dbgctl;
+               break;
+       case MSR_IA32_LASTBRANCHFROMIP:
+               *data = svm->vmcb->save.br_from;
+               break;
+       case MSR_IA32_LASTBRANCHTOIP:
+               *data = svm->vmcb->save.br_to;
+               break;
+       case MSR_IA32_LASTINTFROMIP:
+               *data = svm->vmcb->save.last_excp_from;
+               break;
+       case MSR_IA32_LASTINTTOIP:
+               *data = svm->vmcb->save.last_excp_to;
+               break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
        }
@@ -1156,6 +1178,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
        case MSR_IA32_SYSENTER_ESP:
                svm->vmcb->save.sysenter_esp = data;
                break;
+       case MSR_IA32_DEBUGCTLMSR:
+               pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
+                               __FUNCTION__, data);
+               break;
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
index ad36447e696e6c80bbf70ce53e0b88a7e2c2bbe8..94ea724638fda63b87ec248c2c3b40783ad93e08 100644 (file)
@@ -638,6 +638,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 {
        int save_nmsrs;
 
+       vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
@@ -1477,7 +1478,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
-       down_write(&current->mm->mmap_sem);
+       down_write(&kvm->slots_lock);
        if (kvm->arch.apic_access_page)
                goto out;
        kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -1487,9 +1488,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
        r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
        if (r)
                goto out;
+
+       down_read(&current->mm->mmap_sem);
        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+       up_read(&current->mm->mmap_sem);
 out:
-       up_write(&current->mm->mmap_sem);
+       up_write(&kvm->slots_lock);
        return r;
 }
 
@@ -1602,9 +1606,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
-               if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
-                       return -ENOMEM;
 
        return 0;
 }
@@ -2534,6 +2535,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        put_cpu();
        if (err)
                goto free_vmcs;
+       if (vm_need_virtualize_apic_accesses(kvm))
+               if (alloc_apic_access_page(kvm) != 0)
+                       goto free_vmcs;
 
        return &vmx->vcpu;
 
index cf530814868957a9de8058849538405ca493dcc5..6b01552bd1f1cb91c64af4fdf9f3124e734d4d19 100644 (file)
@@ -46,6 +46,9 @@
 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
+static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                   struct kvm_cpuid_entry2 __user *entries);
+
 struct kvm_x86_ops *kvm_x86_ops;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -181,7 +184,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
@@ -198,7 +201,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        return ret;
 }
@@ -212,13 +215,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        return changed;
 }
@@ -356,7 +359,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                 */
        }
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
@@ -372,7 +375,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -484,6 +487,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                        __FUNCTION__, data);
                break;
+       case MSR_IA32_MCG_CTL:
+               pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
+                       __FUNCTION__, data);
+               break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case 0x200 ... 0x2ff: /* MTRRs */
@@ -526,6 +533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
+       case MSR_IA32_MCG_CTL:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
@@ -727,6 +735,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
                r = 0;
                break;
        }
+       case KVM_GET_SUPPORTED_CPUID: {
+               struct kvm_cpuid2 __user *cpuid_arg = argp;
+               struct kvm_cpuid2 cpuid;
+
+               r = -EFAULT;
+               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+                       goto out;
+               r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
+                       cpuid_arg->entries);
+               if (r)
+                       goto out;
+
+               r = -EFAULT;
+               if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+                       goto out;
+               r = 0;
+               break;
+       }
        default:
                r = -EINVAL;
        }
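
With KVM_GET_SUPPORTED_CPUID handled by kvm_arch_dev_ioctl(), user space issues it on the /dev/kvm file descriptor itself, before any VM exists. A minimal caller might look like the sketch below; error handling is kept to a minimum, and if the kernel reports the entry buffer is too small (typically -E2BIG) a real caller would retry with a larger nent.

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int nent = 64;                          /* guess; retry on E2BIG */
        int fd = open("/dev/kvm", O_RDWR);
        if (fd < 0) { perror("/dev/kvm"); return 1; }

        struct kvm_cpuid2 *cpuid =
            calloc(1, sizeof(struct kvm_cpuid2) +
                      nent * sizeof(struct kvm_cpuid_entry2));
        if (!cpuid) return 1;
        cpuid->nent = nent;

        /* After this change the ioctl goes to the /dev/kvm fd, not a VM fd. */
        if (ioctl(fd, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
            perror("KVM_GET_SUPPORTED_CPUID");
            return 1;
        }

        for (unsigned int i = 0; i < cpuid->nent; i++)
            printf("fn %#010x idx %u: eax=%#010x ebx=%#010x\n",
                   cpuid->entries[i].function, cpuid->entries[i].index,
                   cpuid->entries[i].eax, cpuid->entries[i].ebx);

        free(cpuid);
        close(fd);
        return 0;
    }
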
@@ -974,8 +1000,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        put_cpu();
 }
 
-static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
-                                   struct kvm_cpuid2 *cpuid,
+static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries)
 {
        struct kvm_cpuid_entry2 *cpuid_entries;
@@ -1207,12 +1232,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
 
-       down_write(&current->mm->mmap_sem);
+       down_write(&kvm->slots_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-       up_write(&current->mm->mmap_sem);
+       up_write(&kvm->slots_lock);
        return 0;
 }
 
@@ -1261,7 +1286,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
            < alias->target_phys_addr)
                goto out;
 
-       down_write(&current->mm->mmap_sem);
+       down_write(&kvm->slots_lock);
 
        p = &kvm->arch.aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1275,7 +1300,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 
        kvm_mmu_zap_all(kvm);
 
-       up_write(&current->mm->mmap_sem);
+       up_write(&kvm->slots_lock);
 
        return 0;
 
@@ -1351,7 +1376,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
-       down_write(&current->mm->mmap_sem);
+       down_write(&kvm->slots_lock);
 
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
@@ -1367,7 +1392,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        }
        r = 0;
 out:
-       up_write(&current->mm->mmap_sem);
+       up_write(&kvm->slots_lock);
        return r;
 }
 
@@ -1487,24 +1512,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_SUPPORTED_CPUID: {
-               struct kvm_cpuid2 __user *cpuid_arg = argp;
-               struct kvm_cpuid2 cpuid;
-
-               r = -EFAULT;
-               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
-                       goto out;
-               r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
-                       cpuid_arg->entries);
-               if (r)
-                       goto out;
-
-               r = -EFAULT;
-               if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
-                       goto out;
-               r = 0;
-               break;
-       }
        default:
                ;
        }
@@ -1563,7 +1570,7 @@ int emulator_read_std(unsigned long addr,
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        while (bytes) {
                gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
@@ -1585,7 +1592,7 @@ int emulator_read_std(unsigned long addr,
                addr += tocopy;
        }
 out:
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
        return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1604,9 +1611,9 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1644,14 +1651,14 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
        int ret;
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
        if (ret < 0) {
-               up_read(&current->mm->mmap_sem);
+               up_read(&vcpu->kvm->slots_lock);
                return 0;
        }
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
        return 1;
 }
 
@@ -1663,9 +1670,9 @@ static int emulator_write_emulated_onepage(unsigned long addr,
        struct kvm_io_device *mmio_dev;
        gpa_t                 gpa;
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
 
        if (gpa == UNMAPPED_GVA) {
                kvm_inject_page_fault(vcpu, addr, 2);
@@ -1742,7 +1749,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                char *kaddr;
                u64 val;
 
-               down_read(&current->mm->mmap_sem);
+               down_read(&vcpu->kvm->slots_lock);
                gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
                if (gpa == UNMAPPED_GVA ||
@@ -1753,13 +1760,17 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                        goto emul_write;
 
                val = *(u64 *)new;
+
+               down_read(&current->mm->mmap_sem);
                page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+               up_read(&current->mm->mmap_sem);
+
                kaddr = kmap_atomic(page, KM_USER0);
                set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
                kunmap_atomic(kaddr, KM_USER0);
                kvm_release_page_dirty(page);
        emul_write:
-               up_read(&current->mm->mmap_sem);
+               up_read(&vcpu->kvm->slots_lock);
        }
 #endif
 
@@ -2152,10 +2163,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        for (i = 0; i < nr_pages; ++i) {
-               down_read(&current->mm->mmap_sem);
+               down_read(&vcpu->kvm->slots_lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                vcpu->arch.pio.guest_pages[i] = page;
-               up_read(&current->mm->mmap_sem);
+               up_read(&vcpu->kvm->slots_lock);
                if (!page) {
                        kvm_inject_gp(vcpu, 0);
                        free_pio_guest_pages(vcpu);
@@ -2478,8 +2489,9 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 
        down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       vcpu->arch.apic->vapic_page = page;
        up_read(&current->mm->mmap_sem);
+
+       vcpu->arch.apic->vapic_page = page;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -2861,8 +2873,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 
        mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
-       vcpu->arch.cr0 = sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+       vcpu->arch.cr0 = sregs->cr0;
 
        mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
@@ -2952,9 +2964,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
        gpa_t gpa;
 
        vcpu_load(vcpu);
-       down_read(&current->mm->mmap_sem);
+       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
-       up_read(&current->mm->mmap_sem);
+       up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
@@ -3227,11 +3239,13 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
+                       down_write(&current->mm->mmap_sem);
                        memslot->userspace_addr = do_mmap(NULL, 0,
                                                     npages * PAGE_SIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED | MAP_ANONYMOUS,
                                                     0);
+                       up_write(&current->mm->mmap_sem);
 
                        if (IS_ERR((void *)memslot->userspace_addr))
                                return PTR_ERR((void *)memslot->userspace_addr);
@@ -3239,8 +3253,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                        if (!old.user_alloc && old.rmap) {
                                int ret;
 
+                               down_write(&current->mm->mmap_sem);
                                ret = do_munmap(current->mm, old.userspace_addr,
                                                old.npages * PAGE_SIZE);
+                               up_write(&current->mm->mmap_sem);
                                if (ret < 0)
                                        printk(KERN_WARNING
                                       "kvm_vm_ioctl_set_memory_region: "
index 5afdde4895dcefe823e0e500df57d977a7ffa350..cccb38a59653bff74271593004f995a457645b54 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/lguest_launcher.h>
 #include <linux/virtio_console.h>
 #include <linux/pm.h>
+#include <asm/lguest.h>
 #include <asm/paravirt.h>
 #include <asm/param.h>
 #include <asm/page.h>
  * behaving in simplified but equivalent ways.  In particular, the Guest is the
  * same kernel as the Host (or at least, built from the same source code). :*/
 
-/* Declarations for definitions in lguest_guest.S */
-extern char lguest_noirq_start[], lguest_noirq_end[];
-extern const char lgstart_cli[], lgend_cli[];
-extern const char lgstart_sti[], lgend_sti[];
-extern const char lgstart_popf[], lgend_popf[];
-extern const char lgstart_pushf[], lgend_pushf[];
-extern const char lgstart_iret[], lgend_iret[];
-extern void lguest_iret(void);
-
 struct lguest_data lguest_data = {
        .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
        .noirq_start = (u32)lguest_noirq_start,
@@ -489,7 +481,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
        *pmdp = pmdval;
        lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
-                  (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
+                  (__pa(pmdp)&(PAGE_SIZE-1)), 0);
 }
 
 /* There are a couple of legacy places where the kernel sets a PTE, but we
index bb652f5a93fb9c6e73e1fa09dd97617d9c41c574..a02a14f0f324f9a3392810a46d587dfcda23a157 100644 (file)
@@ -172,8 +172,9 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 }
 
 /*
- * The head.S code sets up the kernel high mapping from:
- * __START_KERNEL_map to __START_KERNEL_map + KERNEL_TEXT_SIZE
+ * The head.S code sets up the kernel high mapping:
+ *
+ *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
  *
  * phys_addr holds the negative offset to the kernel, which is added
  * to the compile time generated pmds. This results in invalid pmds up
@@ -515,14 +516,6 @@ void __init mem_init(void)
 
        /* clear_bss() already clear the empty_zero_page */
 
-       /* temporary debugging - double check it's true: */
-       {
-               int i;
-
-               for (i = 0; i < 1024; i++)
-                       WARN_ON_ONCE(empty_zero_page[i]);
-       }
-
        reservedpages = 0;
 
        /* this will put all low memory onto the freelists */
index 882328efc3dbab30920cfa041892f1e877be9a50..ac3c959e271d2e6e4f16875f0c20b6f792bb8c88 100644 (file)
@@ -162,7 +162,7 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
-               remove_vm_area((void *)(vaddr & PAGE_MASK));
+               free_vm_area(area);
                return NULL;
        }
 
index 59898fb0a4aa1edc115858e10de10fc9004c820e..8ccfee10f5b52258526a2942a238b135f3d75743 100644 (file)
@@ -622,13 +622,17 @@ void __init init_cpu_to_node(void)
        int i;
 
        for (i = 0; i < NR_CPUS; i++) {
+               int node;
                u16 apicid = x86_cpu_to_apicid_init[i];
 
                if (apicid == BAD_APICID)
                        continue;
-               if (apicid_to_node[apicid] == NUMA_NO_NODE)
+               node = apicid_to_node[apicid];
+               if (node == NUMA_NO_NODE)
                        continue;
-               numa_set_node(i, apicid_to_node[apicid]);
+               if (!node_online(node))
+                       continue;
+               numa_set_node(i, node);
        }
 }
 
index 464d8fc21ce69b67d8d8bf057f5a57ef6a84c677..14e48b5a94ba193c1cb4acc667482e05905d373e 100644 (file)
@@ -44,6 +44,12 @@ static inline unsigned long highmap_end_pfn(void)
 
 #endif
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+# define debug_pagealloc 1
+#else
+# define debug_pagealloc 0
+#endif
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -355,45 +361,48 @@ out_unlock:
 
 static LIST_HEAD(page_pool);
 static unsigned long pool_size, pool_pages, pool_low;
-static unsigned long pool_used, pool_failed, pool_refill;
+static unsigned long pool_used, pool_failed;
 
-static void cpa_fill_pool(void)
+static void cpa_fill_pool(struct page **ret)
 {
-       struct page *p;
        gfp_t gfp = GFP_KERNEL;
+       unsigned long flags;
+       struct page *p;
 
-       /* Do not allocate from interrupt context */
-       if (in_irq() || irqs_disabled())
-               return;
        /*
-        * Check unlocked. I does not matter when we have one more
-        * page in the pool. The bit lock avoids recursive pool
-        * allocations:
+        * Avoid recursion (on debug-pagealloc) and also signal
+        * our priority to get to these pagetables:
         */
-       if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
+       if (current->flags & PF_MEMALLOC)
                return;
+       current->flags |= PF_MEMALLOC;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
-        * We could do:
-        * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-        * but this fails on !PREEMPT kernels
+        * Allocate atomically from atomic contexts:
         */
-       gfp =  GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-#endif
+       if (in_atomic() || irqs_disabled() || debug_pagealloc)
+               gfp =  GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 
-       while (pool_pages < pool_size) {
+       while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
-               spin_lock_irq(&pgd_lock);
+               /*
+                * If the call site needs a page right now, provide it:
+                */
+               if (ret && !*ret) {
+                       *ret = p;
+                       continue;
+               }
+               spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
-               spin_unlock_irq(&pgd_lock);
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
-       clear_bit_unlock(0, &pool_refill);
+
+       current->flags &= ~PF_MEMALLOC;
 }
 
 #define SHIFT_MB               (20 - PAGE_SHIFT)
@@ -414,11 +423,15 @@ void __init cpa_init(void)
         * GiB. Shift MiB to Gib and multiply the result by
         * POOL_PAGES_PER_GB:
         */
-       gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
-       pool_size = POOL_PAGES_PER_GB * gb;
+       if (debug_pagealloc) {
+               gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
+               pool_size = POOL_PAGES_PER_GB * gb;
+       } else {
+               pool_size = 1;
+       }
        pool_low = pool_size;
 
-       cpa_fill_pool();
+       cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
@@ -440,16 +453,20 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
-               return -ENOMEM;
+               base = NULL;
+               cpa_fill_pool(&base);
+               if (!base)
+                       return -ENOMEM;
+               spin_lock_irqsave(&pgd_lock, flags);
+       } else {
+               base = list_first_entry(&page_pool, struct page, lru);
+               list_del(&base->lru);
+               pool_pages--;
+
+               if (pool_pages < pool_low)
+                       pool_low = pool_pages;
        }
 
-       base = list_first_entry(&page_pool, struct page, lru);
-       list_del(&base->lru);
-       pool_pages--;
-
-       if (pool_pages < pool_low)
-               pool_low = pool_pages;
-
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
@@ -734,7 +751,8 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
                cpa_flush_all(cache);
 
 out:
-       cpa_fill_pool();
+       cpa_fill_pool(NULL);
+
        return ret;
 }
 
@@ -897,7 +915,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
-       cpa_fill_pool();
+       cpa_fill_pool(NULL);
 }
 
 #ifdef CONFIG_HIBERNATION
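
The reworked cpa_fill_pool() replaces the old bit-lock with two ideas: PF_MEMALLOC on current acts as a recursion guard (under DEBUG_PAGEALLOC, refilling the pool can itself trigger page-attribute changes), and a caller that is out of pages can pass a pointer and receive one allocation directly instead of failing with -ENOMEM. The userspace sketch below models just those two ideas; the __thread flag and malloc() are stand-ins for PF_MEMALLOC and alloc_pages(), and the pool size is made up.

    #include <stdio.h>
    #include <stdlib.h>

    #define POOL_SIZE 4

    static void *page_pool[POOL_SIZE];
    static int pool_pages;
    static __thread int refilling;      /* stand-in for PF_MEMALLOC */

    static void fill_pool(void **ret)
    {
        if (refilling)                  /* avoid recursing into ourselves */
            return;
        refilling = 1;

        while (pool_pages < POOL_SIZE || (ret && !*ret)) {
            void *p = malloc(4096);     /* stand-in for alloc_pages() */
            if (!p)
                break;
            if (ret && !*ret) {         /* caller needs a page right now */
                *ret = p;
                continue;
            }
            page_pool[pool_pages++] = p;
        }

        refilling = 0;
    }

    int main(void)
    {
        void *page = NULL;

        fill_pool(NULL);                /* pre-fill, as cpa_init() does  */
        printf("pool after init: %d pages\n", pool_pages);

        fill_pool(&page);               /* split_large_page() fallback   */
        printf("direct page: %p (pool still %d)\n", page, pool_pages);
        return 0;
    }
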
index f385a4b4a484e36d3262ca154b96dcfd596b7510..0a8f4742ef519f2aa2b43d7318fed524eede671d 100644 (file)
@@ -50,7 +50,9 @@ obj-$(VDSO64-y)                       += vdso-syms.lds
 sed-vdsosym := -e 's/^00*/0/' \
        -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
 quiet_cmd_vdsosym = VDSOSYM $@
-      cmd_vdsosym = $(NM) $< | sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
+define cmd_vdsosym
+       $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
+endef
 
 $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
        $(call if_changed,vdsosym)
index 49e5358f481a2fdcb5e7eec2c2d4087ec256786a..8b9ee27805fdd10be4cb19b082131745d1c4a670 100644 (file)
@@ -153,6 +153,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
        if (*ax == 1)
                maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
                            (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
+                           (1 << X86_FEATURE_SEP)  |  /* disable SEP */
                            (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
        asm(XEN_EMULATE_PREFIX "cpuid"
index 3bad4773a2f3c36ff0eb8694014a010cfd903fb8..2341492bf7a056743097e06bfabd0b8c01f7436f 100644 (file)
@@ -38,7 +38,8 @@ char * __init xen_memory_setup(void)
        unsigned long max_pfn = xen_start_info->nr_pages;
 
        e820.nr_map = 0;
-       add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
+       add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+       add_memory_region(HIGH_MEMORY, PFN_PHYS(max_pfn)-HIGH_MEMORY, E820_RAM);
 
        return "Xen";
 }
index 6901eedeffce83bd3db09b3d0b802da6486994b4..55c5f1fc4f1fcd90c1f119a9bc0f58b46f446e83 100644 (file)
@@ -259,8 +259,11 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
 {
-       if (err)
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       }
 
        complete(bio->bi_private);
 }
@@ -309,7 +312,9 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
                *error_sector = bio->bi_sector;
 
        ret = 0;
-       if (!bio_flagged(bio, BIO_UPTODATE))
+       if (bio_flagged(bio, BIO_EOPNOTSUPP))
+               ret = -EOPNOTSUPP;
+       else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;
 
        bio_put(bio);
index 775c8516abf5fe5b083416e3be5f07e969ec6f93..2a438a93f7233d8bc2033956f23e3f436f0e1f44 100644 (file)
@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq)
        rq->nr_hw_segments = 0;
        rq->ioprio = 0;
        rq->special = NULL;
-       rq->raw_data_len = 0;
        rq->buffer = NULL;
        rq->tag = -1;
        rq->errors = 0;
@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq)
        rq->cmd_len = 0;
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->data_len = 0;
+       rq->extra_len = 0;
        rq->sense_len = 0;
        rq->data = NULL;
        rq->sense = NULL;
@@ -424,7 +424,6 @@ void blk_put_queue(struct request_queue *q)
 {
        kobject_put(&q->kobj);
 }
-EXPORT_SYMBOL(blk_put_queue);
 
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -592,7 +591,6 @@ int blk_get_queue(struct request_queue *q)
 
        return 1;
 }
-EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
@@ -1768,6 +1766,7 @@ static inline void __end_request(struct request *rq, int uptodate,
 
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
  **/
 unsigned int blk_rq_bytes(struct request *rq)
 {
@@ -1780,6 +1779,7 @@ EXPORT_SYMBOL_GPL(blk_rq_bytes);
 
 /**
  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
  **/
 unsigned int blk_rq_cur_bytes(struct request *rq)
 {
@@ -2016,7 +2016,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
        rq->buffer = bio_data(bio);
-       rq->raw_data_len = bio->bi_size;
        rq->data_len = bio->bi_size;
 
        rq->bio = rq->biotail = bio;
index 09f7fd0bcb73000d4899f2b96dc088d852d41aab..c07d9c8317f4c055d3821302c22df20cf2627195 100644 (file)
@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
 
-               rq->raw_data_len += bio->bi_size;
                rq->data_len += bio->bi_size;
        }
        return 0;
@@ -44,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
 {
        unsigned long uaddr;
+       unsigned int alignment;
        struct bio *bio, *orig_bio;
        int reading, ret;
 
@@ -54,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
-       if (!(uaddr & queue_dma_alignment(q)) &&
-           !(len & queue_dma_alignment(q)))
+       alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+       if (!(uaddr & alignment) && !(len & alignment))
                bio = bio_map_user(q, NULL, uaddr, len, reading);
        else
                bio = bio_copy_user(q, uaddr, len, reading);
@@ -142,20 +142,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
        /*
         * __blk_rq_map_user() copies the buffers if starting address
-        * or length isn't aligned.  As the copied buffer is always
-        * page aligned, we know that there's enough room for padding.
-        * Extend the last bio and update rq->data_len accordingly.
+        * or length isn't aligned to dma_pad_mask.  As the copied
+        * buffer is always page aligned, we know that there's enough
+        * room for padding.  Extend the last bio and update
+        * rq->data_len accordingly.
         *
         * On unmap, bio_uncopy_user() will use unmodified
         * bio_map_data pointed to by bio->bi_private.
         */
-       if (len & queue_dma_alignment(q)) {
-               unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
-               struct bio *bio = rq->biotail;
+       if (len & q->dma_pad_mask) {
+               unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+               struct bio *tail = rq->biotail;
 
-               bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
-               bio->bi_size += pad_len;
-               rq->data_len += pad_len;
+               tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
+               tail->bi_size += pad_len;
+
+               rq->extra_len += pad_len;
        }
 
        rq->buffer = rq->data = NULL;
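
The pad length computed above, (q->dma_pad_mask & ~len) + 1, rounds a
misaligned transfer length up to the next (dma_pad_mask + 1) boundary;
the branch only runs when len & q->dma_pad_mask is non-zero. A
standalone illustration of the arithmetic (ordinary userspace C, values
chosen only as an example):

#include <stdio.h>

int main(void)
{
        unsigned int dma_pad_mask = 3;    /* pad to 4-byte multiples */
        unsigned int len = 510;           /* misaligned transfer length */
        unsigned int pad_len = (dma_pad_mask & ~len) + 1;

        /* prints "pad_len=2 padded=512" */
        printf("pad_len=%u padded=%u\n", pad_len, len + pad_len);
        return 0;
}
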
@@ -215,7 +217,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        rq->buffer = rq->data = NULL;
        return 0;
 }
-EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
index 7506c4fe0264bd6038d24108f5438fea42478508..0f58616bcd7f183514c43eb913f0e0c23c855215 100644 (file)
@@ -231,7 +231,7 @@ new_segment:
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
-               rq->data_len += q->dma_drain_size;
+               rq->extra_len += q->dma_drain_size;
        }
 
        if (sg)
index 9a8ffdd0ce3d4a570deac27111b5559ccf6b1714..1344a0ea5cc6c00a89ef9536cbbfda7183d2b28b 100644 (file)
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
        /* Assume anything <= 4GB can be handled by IOMMU.
           Actually some IOMMUs can handle everything, but I don't
           know of a way to test this here. */
-       if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+       if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
 #else
@@ -293,8 +293,24 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
  *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+       q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
  * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:       physically contiguous buffer
@@ -316,7 +332,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-extern int blk_queue_dma_drain(struct request_queue *q,
+int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
 {
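
blk_queue_dma_pad() above, like blk_queue_dma_drain() below it, is
meant to be called once while the queue is being configured, before any
requests are mapped. A hedged sketch of how a low-level driver might
wire up both knobs (EXAMPLE_MAX_DRAIN and example_drain_needed() are
placeholders, not part of this patch; the ata_scsi_dev_config() hunk
further down is the real in-tree user):

static int example_slave_configure(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        void *drain;

        /* pad misaligned direct-I/O transfers out to 4 bytes */
        blk_queue_dma_pad(q, 4 - 1);

        /* drain buffer for devices that transfer more than requested */
        drain = kmalloc(EXAMPLE_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
        if (!drain)
                return -ENOMEM;
        blk_queue_dma_drain(q, example_drain_needed, drain,
                            EXAMPLE_MAX_DRAIN);
        return 0;
}
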
index a8c37d4bbb32064f8a7f4dcd39734ccb403c2fae..4780a46ce2346898953085a6f4079c7734dcbaf7 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 
+#include "blk.h"
+
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q:  The request queue for the device
index ec898dd0c65c466ca2854ee987d7f178a83458d5..ec9120fb789a67c560df9c12e38dc8e0cc46a008 100644 (file)
@@ -32,6 +32,8 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+int blk_dev_init(void);
+
 /*
  * Return the threshold (number of used requests) at which the queue is
  * considered to be congested.  It include a little hysteresis to keep the
index 7f3c09549e4be7364be012d834450215bcd7096f..8917c5174dc2646c5ad8d4d67eb67529a98783f5 100644 (file)
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
        }
 
        if (rq->next_rq) {
-               hdr->dout_resid = rq->raw_data_len;
-               hdr->din_resid = rq->next_rq->raw_data_len;
+               hdr->dout_resid = rq->data_len;
+               hdr->din_resid = rq->next_rq->data_len;
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        } else if (rq_data_dir(rq) == READ)
-               hdr->din_resid = rq->raw_data_len;
+               hdr->din_resid = rq->data_len;
        else
-               hdr->dout_resid = rq->raw_data_len;
+               hdr->dout_resid = rq->data_len;
 
        /*
         * If the request generated a negative error number, return it
index 53f2238e69c8494997ad8660a9f9fbdb298ab33a..c44527d16c52d66058ca434e8c6e5af05b8874c6 100644 (file)
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
 
+#include "blk.h"
+
 static DEFINE_MUTEX(block_class_lock);
 #ifndef CONFIG_SYSFS_DEPRECATED
 struct kobject *block_depr;
 #endif
 
+static struct device_type disk_type;
+
 /*
  * Can be deleted altogether. Later.
  *
@@ -346,8 +350,6 @@ const struct seq_operations partitions_op = {
 #endif
 
 
-extern int blk_dev_init(void);
-
 static struct kobject *base_probe(dev_t devt, int *part, void *data)
 {
        if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
@@ -502,7 +504,7 @@ struct class block_class = {
        .name           = "block",
 };
 
-struct device_type disk_type = {
+static struct device_type disk_type = {
        .name           = "disk",
        .groups         = disk_attr_groups,
        .release        = disk_release,
@@ -632,12 +634,14 @@ static void media_change_notify_thread(struct work_struct *work)
        put_device(gd->driverfs_dev);
 }
 
+#if 0
 void genhd_media_change_notify(struct gendisk *disk)
 {
        get_device(disk->driverfs_dev);
        schedule_work(&disk->async_notify);
 }
 EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+#endif  /*  0  */
 
 dev_t blk_lookup_devt(const char *name)
 {
index e993cac4911dbc9f3d3c0249f3912285ea61e581..a2c3a936ebf98e1481346e76a7ef268b803c76ea 100644 (file)
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
        hdr->info = 0;
        if (hdr->masked_status || hdr->host_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
-       hdr->resid = rq->raw_data_len;
+       hdr->resid = rq->data_len;
        hdr->sb_len_wr = 0;
 
        if (rq->sense_len && hdr->sbp) {
@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->data = NULL;
-       rq->raw_data_len = 0;
        rq->data_len = 0;
+       rq->extra_len = 0;
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = cmd;
index 28a5fbc6aa1a456d335852914aa24ea48880c74b..93d80a1c36f994d4a98fcdba821acd142fe6d07b 100644 (file)
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = {
 };
 
 struct kbdiacruc accent_table[MAX_DIACR] = {
-       {'`', 'A', '\300'},     {'`', 'a', '\340'},
-       {'\'', 'A', '\301'},    {'\'', 'a', '\341'},
-       {'^', 'A', '\302'},     {'^', 'a', '\342'},
-       {'~', 'A', '\303'},     {'~', 'a', '\343'},
-       {'"', 'A', '\304'},     {'"', 'a', '\344'},
-       {'O', 'A', '\305'},     {'o', 'a', '\345'},
-       {'0', 'A', '\305'},     {'0', 'a', '\345'},
-       {'A', 'A', '\305'},     {'a', 'a', '\345'},
-       {'A', 'E', '\306'},     {'a', 'e', '\346'},
-       {',', 'C', '\307'},     {',', 'c', '\347'},
-       {'`', 'E', '\310'},     {'`', 'e', '\350'},
-       {'\'', 'E', '\311'},    {'\'', 'e', '\351'},
-       {'^', 'E', '\312'},     {'^', 'e', '\352'},
-       {'"', 'E', '\313'},     {'"', 'e', '\353'},
-       {'`', 'I', '\314'},     {'`', 'i', '\354'},
-       {'\'', 'I', '\315'},    {'\'', 'i', '\355'},
-       {'^', 'I', '\316'},     {'^', 'i', '\356'},
-       {'"', 'I', '\317'},     {'"', 'i', '\357'},
-       {'-', 'D', '\320'},     {'-', 'd', '\360'},
-       {'~', 'N', '\321'},     {'~', 'n', '\361'},
-       {'`', 'O', '\322'},     {'`', 'o', '\362'},
-       {'\'', 'O', '\323'},    {'\'', 'o', '\363'},
-       {'^', 'O', '\324'},     {'^', 'o', '\364'},
-       {'~', 'O', '\325'},     {'~', 'o', '\365'},
-       {'"', 'O', '\326'},     {'"', 'o', '\366'},
-       {'/', 'O', '\330'},     {'/', 'o', '\370'},
-       {'`', 'U', '\331'},     {'`', 'u', '\371'},
-       {'\'', 'U', '\332'},    {'\'', 'u', '\372'},
-       {'^', 'U', '\333'},     {'^', 'u', '\373'},
-       {'"', 'U', '\334'},     {'"', 'u', '\374'},
-       {'\'', 'Y', '\335'},    {'\'', 'y', '\375'},
-       {'T', 'H', '\336'},     {'t', 'h', '\376'},
-       {'s', 's', '\337'},     {'"', 'y', '\377'},
-       {'s', 'z', '\337'},     {'i', 'j', '\377'},
+       {'`', 'A', 0300},       {'`', 'a', 0340},
+       {'\'', 'A', 0301},      {'\'', 'a', 0341},
+       {'^', 'A', 0302},       {'^', 'a', 0342},
+       {'~', 'A', 0303},       {'~', 'a', 0343},
+       {'"', 'A', 0304},       {'"', 'a', 0344},
+       {'O', 'A', 0305},       {'o', 'a', 0345},
+       {'0', 'A', 0305},       {'0', 'a', 0345},
+       {'A', 'A', 0305},       {'a', 'a', 0345},
+       {'A', 'E', 0306},       {'a', 'e', 0346},
+       {',', 'C', 0307},       {',', 'c', 0347},
+       {'`', 'E', 0310},       {'`', 'e', 0350},
+       {'\'', 'E', 0311},      {'\'', 'e', 0351},
+       {'^', 'E', 0312},       {'^', 'e', 0352},
+       {'"', 'E', 0313},       {'"', 'e', 0353},
+       {'`', 'I', 0314},       {'`', 'i', 0354},
+       {'\'', 'I', 0315},      {'\'', 'i', 0355},
+       {'^', 'I', 0316},       {'^', 'i', 0356},
+       {'"', 'I', 0317},       {'"', 'i', 0357},
+       {'-', 'D', 0320},       {'-', 'd', 0360},
+       {'~', 'N', 0321},       {'~', 'n', 0361},
+       {'`', 'O', 0322},       {'`', 'o', 0362},
+       {'\'', 'O', 0323},      {'\'', 'o', 0363},
+       {'^', 'O', 0324},       {'^', 'o', 0364},
+       {'~', 'O', 0325},       {'~', 'o', 0365},
+       {'"', 'O', 0326},       {'"', 'o', 0366},
+       {'/', 'O', 0330},       {'/', 'o', 0370},
+       {'`', 'U', 0331},       {'`', 'u', 0371},
+       {'\'', 'U', 0332},      {'\'', 'u', 0372},
+       {'^', 'U', 0333},       {'^', 'u', 0373},
+       {'"', 'U', 0334},       {'"', 'u', 0374},
+       {'\'', 'Y', 0335},      {'\'', 'y', 0375},
+       {'T', 'H', 0336},       {'t', 'h', 0376},
+       {'s', 's', 0337},       {'"', 'y', 0377},
+       {'s', 'z', 0337},       {'i', 'j', 0377},
 };
 
 unsigned int accent_table_size = 68;
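
The table entries switch from octal character constants to plain octal
integers: struct kbdiacruc stores the composed result in an unsigned
int, and on targets where char is signed '\300' evaluates to -64 and
sign-extends, while 0300 is always 192. A standalone demonstration
(ordinary userspace C; the signed-char behaviour shown is typical of
x86 but not universal):

#include <stdio.h>

int main(void)
{
        unsigned int from_char_const = '\300'; /* -64 -> 0xffffffc0 here */
        unsigned int from_octal_int  = 0300;   /* always 192 (0xc0) */

        printf("0x%x vs 0x%x\n", from_char_const, from_octal_int);
        return 0;
}
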
index fbc24358ada030bf3cc329682ee617374229dd3a..4fbcce758b04deb227729ebd6b445009cb37777a 100644 (file)
@@ -113,7 +113,7 @@ int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
 
-int atapi_dmadir = 0;
+static int atapi_dmadir = 0;
 module_param(atapi_dmadir, int, 0444);
 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
 
@@ -6567,6 +6567,8 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
        ata_lpm_enable(host);
 
        rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+       if (rc == 0)
+               host->dev->power.power_state = mesg;
        return rc;
 }
 
@@ -6585,6 +6587,7 @@ void ata_host_resume(struct ata_host *host)
 {
        ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
                            ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
+       host->dev->power.power_state = PMSG_ON;
 
        /* reenable link pm */
        ata_lpm_disable(host);
index 0562b0a49f3b2acc474c3145ee0f48911eefe349..8f0e8f2bc628a283c0000b5cab3e43287aa4bb58 100644 (file)
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
                struct request_queue *q = sdev->request_queue;
                void *buf;
 
-               /* set the min alignment */
+               /* set the min alignment and padding */
                blk_queue_update_dma_alignment(sdev->request_queue,
                                               ATA_DMA_PAD_SZ - 1);
+               blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
 
                /* configure draining */
                buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
        u8 *rbuf;
        unsigned int buflen, rc;
        struct scsi_cmnd *cmd = args->cmd;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        buflen = ata_scsi_rbuf_get(cmd, &rbuf);
        memset(rbuf, 0, buflen);
        rc = actor(args, rbuf, buflen);
        ata_scsi_rbuf_put(cmd, rbuf);
 
+       local_irq_restore(flags);
+
        if (rc == 0)
                cmd->result = SAM_STAT_GOOD;
        args->done(cmd);
@@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
                if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
                        u8 *buf = NULL;
                        unsigned int buflen;
+                       unsigned long flags;
+
+                       local_irq_save(flags);
 
                        buflen = ata_scsi_rbuf_get(cmd, &buf);
 
@@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
                        }
 
                        ata_scsi_rbuf_put(cmd, buf);
+
+                       local_irq_restore(flags);
                }
 
                cmd->result = SAM_STAT_GOOD;
@@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
        }
 
        qc->tf.command = ATA_CMD_PACKET;
-       qc->nbytes = scsi_bufflen(scmd);
+       qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
 
        /* check whether ATAPI DMA is safe */
        if (!using_pio && ata_check_atapi_dma(qc))
@@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
         * want to set it properly, and for DMA where it is
         * effectively meaningless.
         */
-       nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);
+       nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024);
 
        /* Most ATAPI devices which honor transfer chunk size don't
         * behave according to the spec when odd chunk size which
@@ -2865,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
         * TODO: find out if we need to do more here to
         *       cover scatter/gather case.
         */
-       qc->nbytes = scsi_bufflen(scmd);
+       qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
 
        /* request result TF and be quiet about device error */
        qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
index 6036dedfe377579b494a85cff095a793d2b3331d..aa884f71a12a4733d9de5dab53a42650abdbfd84 100644 (file)
@@ -56,7 +56,6 @@ enum {
 extern unsigned int ata_print_id;
 extern struct workqueue_struct *ata_aux_wq;
 extern int atapi_enabled;
-extern int atapi_dmadir;
 extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
index 69f651e0bc98dc9106917c9c46c90d6451da419a..840d1c4a7850e831651f897758a8005516799515 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi.h>
 #include <linux/libata.h>
 
 #ifdef CONFIG_PPC_OF
@@ -59,6 +61,7 @@ enum {
        /* ap->flags bits */
        K2_FLAG_SATA_8_PORTS            = (1 << 24),
        K2_FLAG_NO_ATAPI_DMA            = (1 << 25),
+       K2_FLAG_BAR_POS_3                       = (1 << 26),
 
        /* Taskfile registers offsets */
        K2_SATA_TF_CMD_OFFSET           = 0x00,
@@ -88,8 +91,10 @@ enum {
        /* Port stride */
        K2_SATA_PORT_OFFSET             = 0x100,
 
-       board_svw4                      = 0,
-       board_svw8                      = 1,
+       chip_svw4                       = 0,
+       chip_svw8                       = 1,
+       chip_svw42                      = 2,    /* bar 3 */
+       chip_svw43                      = 3,    /* bar 5 */
 };
 
 static u8 k2_stat_check_status(struct ata_port *ap);
@@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap);
 
 static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
 {
+       u8 cmnd = qc->scsicmd->cmnd[0];
+
        if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
                return -1;      /* ATAPI DMA not supported */
+       else {
+               switch (cmnd) {
+               case READ_10:
+               case READ_12:
+               case READ_16:
+               case WRITE_10:
+               case WRITE_12:
+               case WRITE_16:
+                       return 0;
+
+               default:
+                       return -1;
+               }
 
-       return 0;
+       }
 }
 
 static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
@@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = {
 };
 
 static const struct ata_port_info k2_port_info[] = {
-       /* board_svw4 */
+       /* chip_svw4 */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
@@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &k2_sata_ops,
        },
-       /* board_svw8 */
+       /* chip_svw8 */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
@@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &k2_sata_ops,
        },
+       /* chip_svw42 */
+       {
+               .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+                                 ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
+               .pio_mask       = 0x1f,
+               .mwdma_mask     = 0x07,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &k2_sata_ops,
+       },
+       /* chip_svw43 */
+       {
+               .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+                                 ATA_FLAG_MMIO,
+               .pio_mask       = 0x1f,
+               .mwdma_mask     = 0x07,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &k2_sata_ops,
+       },
 };
 
 static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
@@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
                { &k2_port_info[ent->driver_data], NULL };
        struct ata_host *host;
        void __iomem *mmio_base;
-       int n_ports, i, rc;
+       int n_ports, i, rc, bar_pos;
 
        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
        if (!host)
                return -ENOMEM;
 
+       bar_pos = 5;
+       if (ppi[0]->flags & K2_FLAG_BAR_POS_3)
+               bar_pos = 3;
        /*
         * If this driver happens to only be useful on Apple's K2, then
         * we should check that here as it has a normal Serverworks ID
@@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
         * Check if we have resources mapped at all (second function may
         * have been disabled by firmware)
         */
-       if (pci_resource_len(pdev, 5) == 0)
+       if (pci_resource_len(pdev, bar_pos) == 0) {
+               /* In IDE mode we need to pin the device to ensure that
+                       pcim_release does not clear the busmaster bit in config
+                       space, clearing causes busmaster DMA to fail on
+                       ports 3 & 4 */
+               pcim_pin_device(pdev);
                return -ENODEV;
+       }
 
        /* Request and iomap PCI regions */
-       rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+       rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);
-       mmio_base = host->iomap[5];
+       mmio_base = host->iomap[bar_pos];
 
        /* different controllers have different number of ports - currently 4 or 8 */
        /* All ports are on the same function. Multi-function device is no
@@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
  * controller
  * */
 static const struct pci_device_id k2_sata_pci_tbl[] = {
-       { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 },
-       { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 },
-       { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 },
-       { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 },
-       { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 },
+       { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 },
+       { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 },
+       { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 },
+       { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 },
+       { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 },
+       { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 },
+       { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 },
 
        { }
 };
index 9c0070b5bd3e75d828a44d405dca4695b70e6ef6..7de543d1d0b4337328f95c502915d99b3a4fa871 100644 (file)
@@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev,
 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 {
        /* see if we live in a "glue" directory */
-       if (!dev->class || glue_dir->kset != &dev->class->class_dirs)
+       if (!glue_dir || !dev->class ||
+           glue_dir->kset != &dev->class->class_dirs)
                return;
 
        kobject_put(glue_dir);
@@ -770,17 +771,10 @@ int device_add(struct device *dev)
        struct class_interface *class_intf;
        int error;
 
-       error = pm_sleep_lock();
-       if (error) {
-               dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__);
-               dump_stack();
-               return error;
-       }
-
        dev = get_device(dev);
        if (!dev || !strlen(dev->bus_id)) {
                error = -EINVAL;
-               goto Error;
+               goto Done;
        }
 
        pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
@@ -843,11 +837,9 @@ int device_add(struct device *dev)
        }
  Done:
        put_device(dev);
-       pm_sleep_unlock();
        return error;
  BusError:
        device_pm_remove(dev);
-       dpm_sysfs_remove(dev);
  PMError:
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
index ee9d1c8db0d6215c407a18d4a015e3e0ebb56c53..d887d5cb5bef74496f323a84d94b691ab24a2550 100644 (file)
@@ -48,7 +48,6 @@
  */
 
 LIST_HEAD(dpm_active);
-static LIST_HEAD(dpm_locked);
 static LIST_HEAD(dpm_off);
 static LIST_HEAD(dpm_off_irq);
 static LIST_HEAD(dpm_destroy);
@@ -81,28 +80,6 @@ void device_pm_add(struct device *dev)
  */
 void device_pm_remove(struct device *dev)
 {
-       /*
-        * If this function is called during a suspend, it will be blocked,
-        * because we're holding the device's semaphore at that time, which may
-        * lead to a deadlock.  In that case we want to print a warning.
-        * However, it may also be called by unregister_dropped_devices() with
-        * the device's semaphore released, in which case the warning should
-        * not be printed.
-        */
-       if (down_trylock(&dev->sem)) {
-               if (down_read_trylock(&pm_sleep_rwsem)) {
-                       /* No suspend in progress, wait on dev->sem */
-                       down(&dev->sem);
-                       up_read(&pm_sleep_rwsem);
-               } else {
-                       /* Suspend in progress, we may deadlock */
-                       dev_warn(dev, "Suspicious %s during suspend\n",
-                               __FUNCTION__);
-                       dump_stack();
-                       /* The user has been warned ... */
-                       down(&dev->sem);
-               }
-       }
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
@@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev)
        dpm_sysfs_remove(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
-       up(&dev->sem);
 }
 
 /**
@@ -230,6 +206,8 @@ static int resume_device(struct device *dev)
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       down(&dev->sem);
+
        if (dev->bus && dev->bus->resume) {
                dev_dbg(dev,"resuming\n");
                error = dev->bus->resume(dev);
@@ -245,6 +223,8 @@ static int resume_device(struct device *dev)
                error = dev->class->resume(dev);
        }
 
+       up(&dev->sem);
+
        TRACE_RESUME(error);
        return error;
 }
@@ -266,7 +246,7 @@ static void dpm_resume(void)
                struct list_head *entry = dpm_off.next;
                struct device *dev = to_device(entry);
 
-               list_move_tail(entry, &dpm_locked);
+               list_move_tail(entry, &dpm_active);
                mutex_unlock(&dpm_list_mtx);
                resume_device(dev);
                mutex_lock(&dpm_list_mtx);
@@ -274,25 +254,6 @@ static void dpm_resume(void)
        mutex_unlock(&dpm_list_mtx);
 }
 
-/**
- *     unlock_all_devices - Release each device's semaphore
- *
- *     Go through the dpm_off list.  Put each device on the dpm_active
- *     list and unlock it.
- */
-static void unlock_all_devices(void)
-{
-       mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_locked)) {
-               struct list_head *entry = dpm_locked.prev;
-               struct device *dev = to_device(entry);
-
-               list_move(entry, &dpm_active);
-               up(&dev->sem);
-       }
-       mutex_unlock(&dpm_list_mtx);
-}
-
 /**
  *     unregister_dropped_devices - Unregister devices scheduled for removal
  *
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void)
                struct list_head *entry = dpm_destroy.next;
                struct device *dev = to_device(entry);
 
-               up(&dev->sem);
                mutex_unlock(&dpm_list_mtx);
                /* This also removes the device from the list */
                device_unregister(dev);
@@ -324,7 +284,6 @@ void device_resume(void)
 {
        might_sleep();
        dpm_resume();
-       unlock_all_devices();
        unregister_dropped_devices();
        up_write(&pm_sleep_rwsem);
 }
@@ -388,18 +347,15 @@ int device_power_down(pm_message_t state)
                struct list_head *entry = dpm_off.prev;
                struct device *dev = to_device(entry);
 
-               list_del_init(&dev->power.entry);
                error = suspend_device_late(dev, state);
                if (error) {
                        printk(KERN_ERR "Could not power down device %s: "
                                        "error %d\n",
                                        kobject_name(&dev->kobj), error);
-                       if (list_empty(&dev->power.entry))
-                               list_add(&dev->power.entry, &dpm_off);
                        break;
                }
-               if (list_empty(&dev->power.entry))
-                       list_add(&dev->power.entry, &dpm_off_irq);
+               if (!list_empty(&dev->power.entry))
+                       list_move(&dev->power.entry, &dpm_off_irq);
        }
 
        if (!error)
@@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state)
 {
        int error = 0;
 
+       down(&dev->sem);
+
        if (dev->power.power_state.event) {
                dev_dbg(dev, "PM: suspend %d-->%d\n",
                        dev->power.power_state.event, state.event);
@@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state)
                error = dev->bus->suspend(dev, state);
                suspend_report_result(dev->bus->suspend, error);
        }
+
+       up(&dev->sem);
+
        return error;
 }
 
@@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state)
        int error = 0;
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_locked)) {
-               struct list_head *entry = dpm_locked.prev;
+       while (!list_empty(&dpm_active)) {
+               struct list_head *entry = dpm_active.prev;
                struct device *dev = to_device(entry);
 
-               list_del_init(&dev->power.entry);
                mutex_unlock(&dpm_list_mtx);
                error = suspend_device(dev, state);
+               mutex_lock(&dpm_list_mtx);
                if (error) {
                        printk(KERN_ERR "Could not suspend device %s: "
                                        "error %d%s\n",
@@ -476,50 +437,16 @@ static int dpm_suspend(pm_message_t state)
                                        (error == -EAGAIN ?
                                        " (please convert to suspend_late)" :
                                        ""));
-                       mutex_lock(&dpm_list_mtx);
-                       if (list_empty(&dev->power.entry))
-                               list_add(&dev->power.entry, &dpm_locked);
                        break;
                }
-               mutex_lock(&dpm_list_mtx);
-               if (list_empty(&dev->power.entry))
-                       list_add(&dev->power.entry, &dpm_off);
+               if (!list_empty(&dev->power.entry))
+                       list_move(&dev->power.entry, &dpm_off);
        }
        mutex_unlock(&dpm_list_mtx);
 
        return error;
 }
 
-/**
- *     lock_all_devices - Acquire every device's semaphore
- *
- *     Go through the dpm_active list. Carefully lock each device's
- *     semaphore and put it in on the dpm_locked list.
- */
-static void lock_all_devices(void)
-{
-       mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_active)) {
-               struct list_head *entry = dpm_active.next;
-               struct device *dev = to_device(entry);
-
-               /* Required locking order is dev->sem first,
-                * then dpm_list_mutex.  Hence this awkward code.
-                */
-               get_device(dev);
-               mutex_unlock(&dpm_list_mtx);
-               down(&dev->sem);
-               mutex_lock(&dpm_list_mtx);
-
-               if (list_empty(entry))
-                       up(&dev->sem);          /* Device was removed */
-               else
-                       list_move_tail(entry, &dpm_locked);
-               put_device(dev);
-       }
-       mutex_unlock(&dpm_list_mtx);
-}
-
 /**
  *     device_suspend - Save state and stop all devices in system.
  *     @state: new power management state
@@ -533,7 +460,6 @@ int device_suspend(pm_message_t state)
 
        might_sleep();
        down_write(&pm_sleep_rwsem);
-       lock_all_devices();
        error = dpm_suspend(state);
        if (error)
                device_resume();
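
Taken together, the power/main.c hunks drop the lock_all_devices() and
unlock_all_devices() passes along with the dpm_locked list: each
device's semaphore is now held only around that device's own callback,
as the suspend_device() and resume_device() changes above show. A
sketch of the resulting per-device pattern (example_suspend_one() is
illustrative, not a function added by the patch):

static int example_suspend_one(struct device *dev, pm_message_t state)
{
        int error = 0;

        down(&dev->sem);        /* serialize against this device only */
        if (dev->bus && dev->bus->suspend) {
                error = dev->bus->suspend(dev, state);
                suspend_report_result(dev->bus->suspend, error);
        }
        up(&dev->sem);

        return error;
}
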
index f25e7c6b2d27301990eb6bc133015b5889a754e9..40bca48abc12dcfa18de9e92c8e4327fb1bf6845 100644 (file)
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont,
 }
 
 /**
- * transport_setup_device - declare a new dev for transport class association
- *                         but don't make it visible yet.
- *
+ * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
  * @dev: the generic device representing the entity being added
  *
  * Usually, dev represents some component in the HBA system (either
index 9715be3f2487c161493f231ee2ef739806cdddc4..55bd35c0f082344a1d33822d081fcd7f9a104663 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/blkpg.h>
 #include <linux/timer.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
@@ -131,7 +132,6 @@ static struct board_type products[] = {
 /*define how many times we will try a command because of bus resets */
 #define MAX_CMD_RETRIES 3
 
-#define READ_AHEAD      1024
 #define MAX_CTLR       32
 
 /* Originally cciss driver only supports 8 major numbers */
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
 static void fail_all_cmds(unsigned long ctlr);
 
 #ifdef CONFIG_PROC_FS
-static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
-                              int length, int *eof, void *data);
 static void cciss_procinit(int i);
 #else
 static void cciss_procinit(int i)
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
  */
 #define ENG_GIG 1000000000
 #define ENG_GIG_FACTOR (ENG_GIG/512)
+#define ENGAGE_SCSI    "engage scsi"
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
        "UNKNOWN"
 };
 
 static struct proc_dir_entry *proc_cciss;
 
-static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
-                              int length, int *eof, void *data)
+static void cciss_seq_show_header(struct seq_file *seq)
 {
-       off_t pos = 0;
-       off_t len = 0;
-       int size, i, ctlr;
-       ctlr_info_t *h = (ctlr_info_t *) data;
-       drive_info_struct *drv;
-       unsigned long flags;
-       sector_t vol_sz, vol_sz_frac;
+       ctlr_info_t *h = seq->private;
+
+       seq_printf(seq, "%s: HP %s Controller\n"
+               "Board ID: 0x%08lx\n"
+               "Firmware Version: %c%c%c%c\n"
+               "IRQ: %d\n"
+               "Logical drives: %d\n"
+               "Current Q depth: %d\n"
+               "Current # commands on controller: %d\n"
+               "Max Q depth since init: %d\n"
+               "Max # commands on controller since init: %d\n"
+               "Max SG entries since init: %d\n",
+               h->devname,
+               h->product_name,
+               (unsigned long)h->board_id,
+               h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
+               h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+               h->num_luns,
+               h->Qdepth, h->commands_outstanding,
+               h->maxQsinceinit, h->max_outstanding, h->maxSG);
 
-       ctlr = h->ctlr;
+#ifdef CONFIG_CISS_SCSI_TAPE
+       cciss_seq_tape_report(seq, h->ctlr);
+#endif /* CONFIG_CISS_SCSI_TAPE */
+}
+
+static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       ctlr_info_t *h = seq->private;
+       unsigned ctlr = h->ctlr;
+       unsigned long flags;
 
        /* prevent displaying bogus info during configuration
         * or deconfiguration of a logical volume
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
        spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
        if (h->busy_configuring) {
                spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-               return -EBUSY;
+               return ERR_PTR(-EBUSY);
        }
        h->busy_configuring = 1;
        spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 
-       size = sprintf(buffer, "%s: HP %s Controller\n"
-                      "Board ID: 0x%08lx\n"
-                      "Firmware Version: %c%c%c%c\n"
-                      "IRQ: %d\n"
-                      "Logical drives: %d\n"
-                      "Max sectors: %d\n"
-                      "Current Q depth: %d\n"
-                      "Current # commands on controller: %d\n"
-                      "Max Q depth since init: %d\n"
-                      "Max # commands on controller since init: %d\n"
-                      "Max SG entries since init: %d\n\n",
-                      h->devname,
-                      h->product_name,
-                      (unsigned long)h->board_id,
-                      h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-                      h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
-                      h->num_luns,
-                      h->cciss_max_sectors,
-                      h->Qdepth, h->commands_outstanding,
-                      h->maxQsinceinit, h->max_outstanding, h->maxSG);
-
-       pos += size;
-       len += size;
-       cciss_proc_tape_report(ctlr, buffer, &pos, &len);
-       for (i = 0; i <= h->highest_lun; i++) {
-
-               drv = &h->drv[i];
-               if (drv->heads == 0)
-                       continue;
+       if (*pos == 0)
+               cciss_seq_show_header(seq);
 
-               vol_sz = drv->nr_blocks;
-               vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
-               vol_sz_frac *= 100;
-               sector_div(vol_sz_frac, ENG_GIG_FACTOR);
+       return pos;
+}
+
+static int cciss_seq_show(struct seq_file *seq, void *v)
+{
+       sector_t vol_sz, vol_sz_frac;
+       ctlr_info_t *h = seq->private;
+       unsigned ctlr = h->ctlr;
+       loff_t *pos = v;
+       drive_info_struct *drv = &h->drv[*pos];
+
+       if (*pos > h->highest_lun)
+               return 0;
+
+       if (drv->heads == 0)
+               return 0;
+
+       vol_sz = drv->nr_blocks;
+       vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
+       vol_sz_frac *= 100;
+       sector_div(vol_sz_frac, ENG_GIG_FACTOR);
+
+       if (drv->raid_level > 5)
+               drv->raid_level = RAID_UNKNOWN;
+       seq_printf(seq, "cciss/c%dd%d:"
+                       "\t%4u.%02uGB\tRAID %s\n",
+                       ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
+                       raid_label[drv->raid_level]);
+       return 0;
+}
+
+static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ctlr_info_t *h = seq->private;
+
+       if (*pos > h->highest_lun)
+               return NULL;
+       *pos += 1;
+
+       return pos;
+}
+
+static void cciss_seq_stop(struct seq_file *seq, void *v)
+{
+       ctlr_info_t *h = seq->private;
+
+       /* Only reset h->busy_configuring if we succeeded in setting
+        * it during cciss_seq_start. */
+       if (v == ERR_PTR(-EBUSY))
+               return;
 
-               if (drv->raid_level > 5)
-                       drv->raid_level = RAID_UNKNOWN;
-               size = sprintf(buffer + len, "cciss/c%dd%d:"
-                              "\t%4u.%02uGB\tRAID %s\n",
-                              ctlr, i, (int)vol_sz, (int)vol_sz_frac,
-                              raid_label[drv->raid_level]);
-               pos += size;
-               len += size;
-       }
-
-       *eof = 1;
-       *start = buffer + offset;
-       len -= offset;
-       if (len > length)
-               len = length;
        h->busy_configuring = 0;
-       return len;
 }
 
-static int
-cciss_proc_write(struct file *file, const char __user *buffer,
-                unsigned long count, void *data)
+static struct seq_operations cciss_seq_ops = {
+       .start = cciss_seq_start,
+       .show  = cciss_seq_show,
+       .next  = cciss_seq_next,
+       .stop  = cciss_seq_stop,
+};
+
+static int cciss_seq_open(struct inode *inode, struct file *file)
 {
-       unsigned char cmd[80];
-       int len;
-#ifdef CONFIG_CISS_SCSI_TAPE
-       ctlr_info_t *h = (ctlr_info_t *) data;
-       int rc;
+       int ret = seq_open(file, &cciss_seq_ops);
+       struct seq_file *seq = file->private_data;
+
+       if (!ret)
+               seq->private = PDE(inode)->data;
+
+       return ret;
+}
+
+static ssize_t
+cciss_proc_write(struct file *file, const char __user *buf,
+                size_t length, loff_t *ppos)
+{
+       int err;
+       char *buffer;
+
+#ifndef CONFIG_CISS_SCSI_TAPE
+       return -EINVAL;
 #endif
 
-       if (count > sizeof(cmd) - 1)
+       if (!buf || length > PAGE_SIZE - 1)
                return -EINVAL;
-       if (copy_from_user(cmd, buffer, count))
-               return -EFAULT;
-       cmd[count] = '\0';
-       len = strlen(cmd);      // above 3 lines ensure safety
-       if (len && cmd[len - 1] == '\n')
-               cmd[--len] = '\0';
-#      ifdef CONFIG_CISS_SCSI_TAPE
-       if (strcmp("engage scsi", cmd) == 0) {
+
+       buffer = (char *)__get_free_page(GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       err = -EFAULT;
+       if (copy_from_user(buffer, buf, length))
+               goto out;
+       buffer[length] = '\0';
+
+#ifdef CONFIG_CISS_SCSI_TAPE
+       if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
+               struct seq_file *seq = file->private_data;
+               ctlr_info_t *h = seq->private;
+               int rc;
+
                rc = cciss_engage_scsi(h->ctlr);
                if (rc != 0)
-                       return -rc;
-               return count;
-       }
+                       err = -rc;
+               else
+                       err = length;
+       } else
+#endif /* CONFIG_CISS_SCSI_TAPE */
+               err = -EINVAL;
        /* might be nice to have "disengage" too, but it's not
           safely possible. (only 1 module use count, lock issues.) */
-#      endif
-       return -EINVAL;
+
+out:
+       free_page((unsigned long)buffer);
+       return err;
 }
 
-/*
- * Get us a file in /proc/cciss that says something about each controller.
- * Create /proc/cciss if it doesn't exist yet.
- */
+static struct file_operations cciss_proc_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cciss_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+       .write   = cciss_proc_write,
+};
+
 static void __devinit cciss_procinit(int i)
 {
        struct proc_dir_entry *pde;
 
-       if (proc_cciss == NULL) {
+       if (proc_cciss == NULL)
                proc_cciss = proc_mkdir("cciss", proc_root_driver);
-               if (!proc_cciss)
-                       return;
-       }
+       if (!proc_cciss)
+               return;
+       pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+                                       S_IROTH, proc_cciss,
+                                       &cciss_proc_fops);
+       if (!pde)
+               return;
 
-       pde = create_proc_read_entry(hba[i]->devname,
-                                    S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
-                                    proc_cciss, cciss_proc_get_info, hba[i]);
-       pde->write_proc = cciss_proc_write;
+       pde->data = hba[i];
 }
 #endif                         /* CONFIG_PROC_FS */
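
The /proc/cciss read side above moves from the one-shot
cciss_proc_get_info() callback to the seq_file iterator: start()
acquires state and returns a cursor, show() emits one record, next()
advances the cursor, and stop() undoes whatever start() did. A hedged
miniature of that contract, with hypothetical example_* names standing
in for the cciss versions:

#include <linux/seq_file.h>

#define EXAMPLE_NR_RECORDS 4            /* hypothetical record count */

static void *example_start(struct seq_file *m, loff_t *pos)
{
        return (*pos < EXAMPLE_NR_RECORDS) ? pos : NULL;
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return (*pos < EXAMPLE_NR_RECORDS) ? pos : NULL;
}

static void example_stop(struct seq_file *m, void *v)
{
        /* release whatever example_start() acquired */
}

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "record %lld\n", (long long)*(loff_t *)v);
        return 0;
}

static struct seq_operations example_seq_ops = {
        .start = example_start,
        .next  = example_next,
        .stop  = example_stop,
        .show  = example_show,
};
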
 
@@ -1341,7 +1401,6 @@ geo_inq:
                disk->private_data = &h->drv[drv_index];
 
                /* Set up queue information */
-               disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
                blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
 
                /* This is a hardware imposed limit. */
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
                }
                drv->queue = q;
 
-               q->backing_dev_info.ra_pages = READ_AHEAD;
                blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
 
                /* This is a hardware imposed limit. */
index 55178e9973a094fe08fd23bf040c237e58695a02..45ac09300eb338c50f372bd51f2532e8bdec8d1b 100644 (file)
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr)
 }
 
 static void
-cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
+cciss_seq_tape_report(struct seq_file *seq, int ctlr)
 {
        unsigned long flags;
-       int size;
-
-       *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
 
        CPQ_TAPE_LOCK(ctlr, flags);
-       size = sprintf(buffer + *len, 
+       seq_printf(seq,
                "Sequential access devices: %d\n\n",
                        ccissscsi[ctlr].ndevices);
        CPQ_TAPE_UNLOCK(ctlr, flags);
-       *pos += size; *len += size;
 }
 
+
 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 
  * complaining.  Doing a host- or bus-reset can't do anything good here. 
  * Despite what it might say in scsi_error.c, there may well be commands
@@ -1498,6 +1495,5 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
 #define cciss_scsi_setup(cntl_num)
 #define cciss_unregister_scsi(ctlr)
 #define cciss_register_scsi(ctlr)
-#define cciss_proc_tape_report(ctlr, buffer, pos, len)
 
 #endif /* CONFIG_CISS_SCSI_TAPE */
index 674cd66dcabaae261ca0e97c5ab61a1f272b2db5..18feb1c7c33b1b03ec0d394767e252b466000876 100644 (file)
@@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd)
 /*
  * speed is given as the normal factor, e.g. 4 for 4x
  */
-static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
+static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
+                               unsigned write_speed, unsigned read_speed)
 {
        struct packet_command cgc;
        struct request_sense sense;
@@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
        return pkt_generic_packet(pd, &cgc);
 }
 
-static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
+static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
+                                               long *last_written)
 {
        disc_information di;
        track_information ti;
@@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
 /*
  * write mode select package based on pd->settings
  */
-static int pkt_set_write_settings(struct pktcdvd_device *pd)
+static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
 {
        struct packet_command cgc;
        struct request_sense sense;
@@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
        return 1;
 }
 
-static int pkt_probe_settings(struct pktcdvd_device *pd)
+static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
 {
        struct packet_command cgc;
        unsigned char buf[12];
@@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
 /*
  * enable/disable write caching on drive
  */
-static int pkt_write_caching(struct pktcdvd_device *pd, int set)
+static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
+                                               int set)
 {
        struct packet_command cgc;
        struct request_sense sense;
@@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
 /*
  * Returns drive maximum write speed
  */
-static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
+static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
+                                               unsigned *write_speed)
 {
        struct packet_command cgc;
        struct request_sense sense;
@@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = {
 /*
  * reads the maximum media speed from ATIP
  */
-static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
+static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
+                                               unsigned *speed)
 {
        struct packet_command cgc;
        struct request_sense sense;
@@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
        }
 }
 
-static int pkt_perform_opc(struct pktcdvd_device *pd)
+static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
 {
        struct packet_command cgc;
        struct request_sense sense;
index db259e60289b51ca84b46f56f67a741b0c473c16..12f5baea439bbf8a85874188b4e2dbeb4d79459a 100644 (file)
@@ -1152,8 +1152,8 @@ clean_up_and_return:
 /* This code is similar to that in open_for_data. The routine is called
    whenever an audio play operation is requested.
 */
-int check_for_audio_disc(struct cdrom_device_info * cdi,
-                        struct cdrom_device_ops * cdo)
+static int check_for_audio_disc(struct cdrom_device_info * cdi,
+                               struct cdrom_device_ops * cdo)
 {
         int ret;
        tracktype tracks;
index 0aa419a617674dfe20bb6d8dfe09aa706ad5cf18..d2208dfe3f678443bf7e8547e89ff0c9445b03c4 100644 (file)
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = {
 };
 
 struct kbdiacruc accent_table[MAX_DIACR] = {
-       {'`', 'A', '\300'},     {'`', 'a', '\340'},
-       {'\'', 'A', '\301'},    {'\'', 'a', '\341'},
-       {'^', 'A', '\302'},     {'^', 'a', '\342'},
-       {'~', 'A', '\303'},     {'~', 'a', '\343'},
-       {'"', 'A', '\304'},     {'"', 'a', '\344'},
-       {'O', 'A', '\305'},     {'o', 'a', '\345'},
-       {'0', 'A', '\305'},     {'0', 'a', '\345'},
-       {'A', 'A', '\305'},     {'a', 'a', '\345'},
-       {'A', 'E', '\306'},     {'a', 'e', '\346'},
-       {',', 'C', '\307'},     {',', 'c', '\347'},
-       {'`', 'E', '\310'},     {'`', 'e', '\350'},
-       {'\'', 'E', '\311'},    {'\'', 'e', '\351'},
-       {'^', 'E', '\312'},     {'^', 'e', '\352'},
-       {'"', 'E', '\313'},     {'"', 'e', '\353'},
-       {'`', 'I', '\314'},     {'`', 'i', '\354'},
-       {'\'', 'I', '\315'},    {'\'', 'i', '\355'},
-       {'^', 'I', '\316'},     {'^', 'i', '\356'},
-       {'"', 'I', '\317'},     {'"', 'i', '\357'},
-       {'-', 'D', '\320'},     {'-', 'd', '\360'},
-       {'~', 'N', '\321'},     {'~', 'n', '\361'},
-       {'`', 'O', '\322'},     {'`', 'o', '\362'},
-       {'\'', 'O', '\323'},    {'\'', 'o', '\363'},
-       {'^', 'O', '\324'},     {'^', 'o', '\364'},
-       {'~', 'O', '\325'},     {'~', 'o', '\365'},
-       {'"', 'O', '\326'},     {'"', 'o', '\366'},
-       {'/', 'O', '\330'},     {'/', 'o', '\370'},
-       {'`', 'U', '\331'},     {'`', 'u', '\371'},
-       {'\'', 'U', '\332'},    {'\'', 'u', '\372'},
-       {'^', 'U', '\333'},     {'^', 'u', '\373'},
-       {'"', 'U', '\334'},     {'"', 'u', '\374'},
-       {'\'', 'Y', '\335'},    {'\'', 'y', '\375'},
-       {'T', 'H', '\336'},     {'t', 'h', '\376'},
-       {'s', 's', '\337'},     {'"', 'y', '\377'},
-       {'s', 'z', '\337'},     {'i', 'j', '\377'},
+       {'`', 'A', 0300},       {'`', 'a', 0340},
+       {'\'', 'A', 0301},      {'\'', 'a', 0341},
+       {'^', 'A', 0302},       {'^', 'a', 0342},
+       {'~', 'A', 0303},       {'~', 'a', 0343},
+       {'"', 'A', 0304},       {'"', 'a', 0344},
+       {'O', 'A', 0305},       {'o', 'a', 0345},
+       {'0', 'A', 0305},       {'0', 'a', 0345},
+       {'A', 'A', 0305},       {'a', 'a', 0345},
+       {'A', 'E', 0306},       {'a', 'e', 0346},
+       {',', 'C', 0307},       {',', 'c', 0347},
+       {'`', 'E', 0310},       {'`', 'e', 0350},
+       {'\'', 'E', 0311},      {'\'', 'e', 0351},
+       {'^', 'E', 0312},       {'^', 'e', 0352},
+       {'"', 'E', 0313},       {'"', 'e', 0353},
+       {'`', 'I', 0314},       {'`', 'i', 0354},
+       {'\'', 'I', 0315},      {'\'', 'i', 0355},
+       {'^', 'I', 0316},       {'^', 'i', 0356},
+       {'"', 'I', 0317},       {'"', 'i', 0357},
+       {'-', 'D', 0320},       {'-', 'd', 0360},
+       {'~', 'N', 0321},       {'~', 'n', 0361},
+       {'`', 'O', 0322},       {'`', 'o', 0362},
+       {'\'', 'O', 0323},      {'\'', 'o', 0363},
+       {'^', 'O', 0324},       {'^', 'o', 0364},
+       {'~', 'O', 0325},       {'~', 'o', 0365},
+       {'"', 'O', 0326},       {'"', 'o', 0366},
+       {'/', 'O', 0330},       {'/', 'o', 0370},
+       {'`', 'U', 0331},       {'`', 'u', 0371},
+       {'\'', 'U', 0332},      {'\'', 'u', 0372},
+       {'^', 'U', 0333},       {'^', 'u', 0373},
+       {'"', 'U', 0334},       {'"', 'u', 0374},
+       {'\'', 'Y', 0335},      {'\'', 'y', 0375},
+       {'T', 'H', 0336},       {'t', 'h', 0376},
+       {'s', 's', 0337},       {'"', 'y', 0377},
+       {'s', 'z', 0337},       {'i', 'j', 0377},
 };
 
 unsigned int accent_table_size = 68;
index 85d596a3c18c35b7248b4ee98fcaba65c60abcf0..eba2883b630ede817677c54a7cfe3e94f1582be1 100644 (file)
@@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev,
        msleep(10);
 
        portcount = inw(base + 0x2);
-       if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 &&
+       if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 &&
                                portcount != 8 && portcount != 16)) {
                dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n",
                        card + 1);
index ff35230058d36e7fd235b73b8884f2bb799f497c..d793e68b3e0d913030402978e46a05eedd8abf40 100644 (file)
@@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network,
        for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
                struct ipw_tty *tty = network->associated_ttys[channel_idx][i];
 
+               if (!tty)
+                       continue;
+
                /*
                 * If it's associated with a tty (other than the RAS channel
                 * when we're online), then send the data to that tty.  The RAS
                 * channel's data is handled above - it always goes through
                 * ppp_generic.
                 */
-               if (tty && channel_idx == IPW_CHANNEL_RAS
+               if (channel_idx == IPW_CHANNEL_RAS
                                && (network->ras_control_lines &
                                        IPW_CONTROL_LINE_DCD) != 0
                                && ipwireless_tty_is_modem(tty)) {
index 78b151c4d20f94b0bccecc1acb3540e14b77cdc9..5c3142b6f1fcdf102a92b79dec3b9d000b4d4a3b 100644 (file)
@@ -110,8 +110,8 @@ static int rtc_has_irq = 1;
 #define hpet_set_rtc_irq_bit(arg)              0
 #define hpet_rtc_timer_init()                  do { } while (0)
 #define hpet_rtc_dropped_irq()                 0
-#define hpet_register_irq_handler(h)           0
-#define hpet_unregister_irq_handler(h)         0
+#define hpet_register_irq_handler(h)           ({ 0; })
+#define hpet_unregister_irq_handler(h)         ({ 0; })
 #ifdef RTC_IRQ
 static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
 {
index c0e08c7bca2f4ef713c966eeed532164d33282e6..5ff83df67b447d18054bf45359c183d18fdb9e5a 100644 (file)
@@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty)
        sx_out(bp, CD186x_CAR, port_No(port));
        spin_unlock_irqrestore(&bp->lock, flags);
        if (I_IXOFF(tty)) {
-               spin_unlock_irqrestore(&bp->lock, flags);
                sx_wait_CCR(bp);
                spin_lock_irqsave(&bp->lock, flags);
                sx_out(bp, CD186x_CCR, CCR_SSCH2);
index 367be917506117b08d2407b7f74beb55432701d4..9b58b894f823764ac9c0e3d3620b4a2080404fba 100644 (file)
@@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
        if (is_switch) {
                set_leds();
                compute_shiftstate();
+               notify_update(vc);
        }
 }
 
index dfea2bde162b8c43349681e01580834cfe084983..f577daedb630d1babb74c8e7c9e9e3d5458d052c 100644 (file)
@@ -73,8 +73,8 @@
 #define XHI_BUFFER_START 0
 
 /**
- * buffer_icap_get_status: Get the contents of the status register.
- * @parameter base_address: is the base address of the device
+ * buffer_icap_get_status - Get the contents of the status register.
+ * @base_address: is the base address of the device
  *
  * The status register contains the ICAP status and the done bit.
  *
@@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address)
 }
 
 /**
- * buffer_icap_get_bram: Reads data from the storage buffer bram.
- * @parameter base_address: contains the base address of the component.
- * @parameter offset: The word offset from which the data should be read.
+ * buffer_icap_get_bram - Reads data from the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset from which the data should be read.
  *
  * A bram is used as a configuration memory cache.  One frame of data can
  * be stored in this "storage buffer".
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_busy: Return true if the icap device is busy
- * @parameter base_address: is the base address of the device
+ * buffer_icap_busy - Return true if the icap device is busy
+ * @base_address: is the base address of the device
  *
  * This queries the low order bit of the status register, which
  * indicates whether the current configuration or readback operation
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address)
 }
 
 /**
- * buffer_icap_busy: Return true if the icap device is not busy
- * @parameter base_address: is the base address of the device
+ * buffer_icap_done - Return true if the icap device is not busy
+ * @base_address: is the base address of the device
  *
  * This queries the low order bit of the status register, which
  * indicates whether the current configuration or readback operation
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address)
 }
 
 /**
- * buffer_icap_set_size: Set the size register.
- * @parameter base_address: is the base address of the device
- * @parameter data: The size in bytes.
+ * buffer_icap_set_size - Set the size register.
+ * @base_address: is the base address of the device
+ * @data: The size in bytes.
  *
  * The size register holds the number of 8 bit bytes to transfer between
  * bram and the icap (or icap to bram).
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_mSetoffsetReg: Set the bram offset register.
- * @parameter base_address: contains the base address of the device.
- * @parameter data: is the value to be written to the data register.
+ * buffer_icap_set_offset - Set the bram offset register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
  *
  * The bram offset register holds the starting bram address to transfer
  * data from during configuration or write data to during readback.
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register.
- * @parameter base_address: contains the base address of the device.
- * @parameter data: is the value to be written to the data register.
+ * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
  *
  * The RNC register determines the direction of the data transfer.  It
  * controls whether a configuration or readback take place.  Writing to
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_set_bram: Write data to the storage buffer bram.
- * @parameter base_address: contains the base address of the component.
- * @parameter offset: The word offset at which the data should be written.
- * @parameter data: The value to be written to the bram offset.
+ * buffer_icap_set_bram - Write data to the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset at which the data should be written.
+ * @data: The value to be written to the bram offset.
  *
  * A bram is used as a configuration memory cache.  One frame of data can
  * be stored in this "storage buffer".
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter offset: The storage buffer start address.
- * @parameter count: The number of words (32 bit) to read from the
+ * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
  *           device (ICAP).
  **/
 static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
 };
 
 /**
- * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter offset: The storage buffer start address.
- * @parameter count: The number of words (32 bit) to read from the
+ * buffer_icap_device_write - Transfer bytes from the storage buffer to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to write to the
  *           device (ICAP).
  **/
 static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
 };
 
 /**
- * buffer_icap_reset: Reset the logic of the icap device.
- * @parameter drvdata: a pointer to the drvdata.
+ * buffer_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
  *
  * Writing to the status register resets the ICAP logic in an internal
  * version of the core.  For the version of the core published in EDK,
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * buffer_icap_set_configuration: Load a partial bitstream from system memory.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: Kernel address of the partial bitstream.
- * @parameter size: the size of the partial bitstream in 32 bit words.
+ * buffer_icap_set_configuration - Load a partial bitstream from system memory.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Kernel address of the partial bitstream.
+ * @size: the size of the partial bitstream in 32 bit words.
  **/
 int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
                             u32 size)
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 };
 
 /**
- * buffer_icap_get_configuration: Read configuration data from the device.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: Address of the data representing the partial bitstream
- * @parameter size: the size of the partial bitstream in 32 bit words.
+ * buffer_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
  **/
 int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
                             u32 size)
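
The buffer_icap.c hunks above are documentation-only: kernel-doc expects the first line of a comment block to read "name - description" and each argument to be introduced as "@arg:", rather than the "name:" and "@parameter arg:" spellings the driver used. The canonical shape, shown on a hypothetical helper rather than one of the driver's functions:

        /**
         * example_helper - one-line summary of what the helper does
         * @base_address: base address of the device
         * @count: number of 32-bit words to transfer
         *
         * Longer description, wrapped as ordinary comment text, goes here
         * after a blank comment line.
         */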
index 0988314694a6a59ce45aaca43986e203c3e20d1b..6f45dbd47125636a44af08d8b2dab33a70e3bf4c 100644 (file)
@@ -94,9 +94,9 @@
 
 
 /**
- * fifo_icap_fifo_write: Write data to the write FIFO.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: the 32-bit value to be written to the FIFO.
+ * fifo_icap_fifo_write - Write data to the write FIFO.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the 32-bit value to be written to the FIFO.
  *
  * This function will silently fail if the fifo is full.
  **/
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * fifo_icap_fifo_read: Read data from the Read FIFO.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_fifo_read - Read data from the Read FIFO.
+ * @drvdata: a pointer to the drvdata.
  *
  * This function will silently fail if the fifo is empty.
  **/
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_set_read_size: Set the the size register.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: the size of the following read transaction, in words.
+ * fifo_icap_set_read_size - Set the size register.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the size of the following read transaction, in words.
  **/
 static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
                u32 data)
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * fifo_icap_start_config: Initiate a configuration (write) to the device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_start_config - Initiate a configuration (write) to the device.
+ * @drvdata: a pointer to the drvdata.
  **/
 static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
 {
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_start_readback: Initiate a readback from the device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_start_readback - Initiate a readback from the device.
+ * @drvdata: a pointer to the drvdata.
  **/
 static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
 {
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_busy: Return true if the ICAP is still processing a transaction.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
+ * @drvdata: a pointer to the drvdata.
  **/
 static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
 {
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_write_fifo_vacancy: Query the write fifo available space.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
+ * @drvdata: a pointer to the drvdata.
  *
  * Return the number of words that can be safely pushed into the write fifo.
  **/
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy(
 }
 
 /**
- * fifo_icap_read_fifo_occupancy: Query the read fifo available data.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
+ * @drvdata: a pointer to the drvdata.
  *
  * Return the number of words that can be safely read from the read fifo.
  **/
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy(
 }
 
 /**
- * fifo_icap_set_configuration: Send configuration data to the ICAP.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter frame_buffer: a pointer to the data to be written to the
+ * fifo_icap_set_configuration - Send configuration data to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: a pointer to the data to be written to the
  *             ICAP device.
- * @parameter num_words: the number of words (32 bit) to write to the ICAP
+ * @num_words: the number of words (32 bit) to write to the ICAP
  *             device.
 
  * This function writes the given user data to the Write FIFO in
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * fifo_icap_get_configuration: Read configuration data from the device.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: Address of the data representing the partial bitstream
- * @parameter size: the size of the partial bitstream in 32 bit words.
+ * fifo_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
  *
  * This function reads the specified number of words from the ICAP device in
  * the polled mode.
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * buffer_icap_reset: Reset the logic of the icap device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
  *
  * This function forces the software reset of the complete HWICAP device.
  * All the registers will return to the default value and the FIFO is also
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_flush_fifo: This function flushes the FIFOs in the device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_flush_fifo - This function flushes the FIFOs in the device.
+ * @drvdata: a pointer to the drvdata.
  */
 void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
 {
index 24f6aef0fd3ceb605a0e4ba6cbf4def597360084..2284fa2a5a5726c52c873e4f13caff36cfab5f3b 100644 (file)
@@ -84,7 +84,7 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/proc_fs.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include <linux/sysctl.h>
 #include <linux/version.h>
 #include <linux/fs.h>
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO);
 
 /* An array, which is set to true when the device is registered. */
 static bool probed_devices[HWICAP_DEVICES];
+static struct mutex icap_sem;
 
 static struct class *icap_class;
 
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = {
 };
 
 /**
- * hwicap_command_desync: Send a DESYNC command to the ICAP port.
- * @parameter drvdata: a pointer to the drvdata.
+ * hwicap_command_desync - Send a DESYNC command to the ICAP port.
+ * @drvdata: a pointer to the drvdata.
  *
  * This command desynchronizes the ICAP. After this command, a
  * bitstream containing a NULL packet, followed by a SYNCH packet is
  * required before the ICAP will recognize commands.
  */
-int hwicap_command_desync(struct hwicap_drvdata *drvdata)
+static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
 {
        u32 buffer[4];
        u32 index = 0;
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * hwicap_command_capture: Send a CAPTURE command to the ICAP port.
- * @parameter drvdata: a pointer to the drvdata.
- *
- * This command captures all of the flip flop states so they will be
- * available during readback.  One can use this command instead of
- * enabling the CAPTURE block in the design.
- */
-int hwicap_command_capture(struct hwicap_drvdata *drvdata)
-{
-       u32 buffer[7];
-       u32 index = 0;
-
-       /*
-        * Create the data to be written to the ICAP.
-        */
-       buffer[index++] = XHI_DUMMY_PACKET;
-       buffer[index++] = XHI_SYNC_PACKET;
-       buffer[index++] = XHI_NOOP_PACKET;
-       buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
-       buffer[index++] = XHI_CMD_GCAPTURE;
-       buffer[index++] = XHI_DUMMY_PACKET;
-       buffer[index++] = XHI_DUMMY_PACKET;
-
-       /*
-        * Write the data to the FIFO and intiate the transfer of data
-        * present in the FIFO to the ICAP device.
-        */
-       return drvdata->config->set_configuration(drvdata,
-                       &buffer[0], index);
-
-}
-
-/**
- * hwicap_get_configuration_register: Query a configuration register.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter reg: a constant which represents the configuration
+ * hwicap_get_configuration_register - Query a configuration register.
+ * @drvdata: a pointer to the drvdata.
+ * @reg: a constant which represents the configuration
  *             register value to be returned.
  *             Examples:  XHI_IDCODE, XHI_FLR.
- * @parameter RegData: returns the value of the register.
+ * @reg_data: returns the value of the register.
  *
  * Sends a query packet to the ICAP and then receives the response.
  * The icap is left in Synched state.
  */
-int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
-               u32 reg, u32 *RegData)
+static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+               u32 reg, u32 *reg_data)
 {
        int status;
        u32 buffer[6];
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
        /*
         * Read the configuration register
         */
-       status = drvdata->config->get_configuration(drvdata, RegData, 1);
+       status = drvdata->config->get_configuration(drvdata, reg_data, 1);
        if (status)
                return status;
 
        return 0;
 }
 
-int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
 {
        int status;
        u32 idcode;
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
 }
 
 static ssize_t
-hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
        struct hwicap_drvdata *drvdata = file->private_data;
        ssize_t bytes_to_read = 0;
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
        u32 bytes_remaining;
        int status;
 
-       if (down_interruptible(&drvdata->sem))
-               return -ERESTARTSYS;
+       status = mutex_lock_interruptible(&drvdata->sem);
+       if (status)
+               return status;
 
        if (drvdata->read_buffer_in_use) {
                /* If there are leftover bytes in the buffer, just */
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
                        goto error;
                }
                drvdata->read_buffer_in_use -= bytes_to_read;
-               memcpy(drvdata->read_buffer + bytes_to_read,
-                               drvdata->read_buffer, 4 - bytes_to_read);
+               memmove(drvdata->read_buffer,
+                      drvdata->read_buffer + bytes_to_read,
+                      4 - bytes_to_read);
        } else {
                /* Get new data from the ICAP, and return what was requested. */
                kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
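
Within hwicap_read(), the hunk above replaces memcpy() with memmove() and reverses the copy direction: the leftover bytes are now shifted down to the start of read_buffer, and because source and destination overlap, memmove() is the correct call. A minimal userspace sketch of an overlapping shift:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char buf[4] = { 'A', 'B', 'C', 'D' };

                /* Shift the last two bytes to the front; the regions overlap. */
                memmove(buf, buf + 2, 2);
                printf("%c%c\n", buf[0], buf[1]);   /* prints CD */
                return 0;
        }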
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
                        status = -EFAULT;
                        goto error;
                }
-               memcpy(kbuf, drvdata->read_buffer, bytes_remaining);
+               memcpy(drvdata->read_buffer,
+                      kbuf,
+                      bytes_remaining);
                drvdata->read_buffer_in_use = bytes_remaining;
                free_page((unsigned long)kbuf);
        }
        status = bytes_to_read;
  error:
-       up(&drvdata->sem);
+       mutex_unlock(&drvdata->sem);
        return status;
 }
 
 static ssize_t
-hwicap_write(struct file *file, const char *buf,
+hwicap_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
 {
        struct hwicap_drvdata *drvdata = file->private_data;
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf,
        ssize_t len;
        ssize_t status;
 
-       if (down_interruptible(&drvdata->sem))
-               return -ERESTARTSYS;
+       status = mutex_lock_interruptible(&drvdata->sem);
+       if (status)
+               return status;
 
        left += drvdata->write_buffer_in_use;
 
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf,
                        memcpy(kbuf, drvdata->write_buffer,
                                        drvdata->write_buffer_in_use);
                        if (copy_from_user(
-                           (((char *)kbuf) + (drvdata->write_buffer_in_use)),
+                           (((char *)kbuf) + drvdata->write_buffer_in_use),
                            buf + written,
                            len - (drvdata->write_buffer_in_use))) {
                                free_page((unsigned long)kbuf);
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf,
        free_page((unsigned long)kbuf);
        status = written;
  error:
-       up(&drvdata->sem);
+       mutex_unlock(&drvdata->sem);
        return status;
 }
 
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file)
 
        drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
 
-       if (down_interruptible(&drvdata->sem))
-               return -ERESTARTSYS;
+       status = mutex_lock_interruptible(&drvdata->sem);
+       if (status)
+               return status;
 
        if (drvdata->is_open) {
                status = -EBUSY;
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file)
        drvdata->is_open = 1;
 
  error:
-       up(&drvdata->sem);
+       mutex_unlock(&drvdata->sem);
        return status;
 }
 
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
        int i;
        int status = 0;
 
-       if (down_interruptible(&drvdata->sem))
-               return -ERESTARTSYS;
+       mutex_lock(&drvdata->sem);
 
        if (drvdata->write_buffer_in_use) {
                /* Flush write buffer. */
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
 
  error:
        drvdata->is_open = 0;
-       up(&drvdata->sem);
+       mutex_unlock(&drvdata->sem);
        return status;
 }
 
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id,
 
        dev_info(dev, "Xilinx icap port driver\n");
 
+       mutex_lock(&icap_sem);
+
        if (id < 0) {
                for (id = 0; id < HWICAP_DEVICES; id++)
                        if (!probed_devices[id])
                                break;
        }
        if (id < 0 || id >= HWICAP_DEVICES) {
+               mutex_unlock(&icap_sem);
                dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
                return -EINVAL;
        }
        if (probed_devices[id]) {
+               mutex_unlock(&icap_sem);
                dev_err(dev, "cannot assign to %s%i; it is already in use\n",
                        DRIVER_NAME, id);
                return -EBUSY;
        }
 
        probed_devices[id] = 1;
+       mutex_unlock(&icap_sem);
 
        devt = MKDEV(xhwicap_major, xhwicap_minor + id);
 
-       drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+       drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
        if (!drvdata) {
                dev_err(dev, "Couldn't allocate device private record\n");
-               return -ENOMEM;
+               retval = -ENOMEM;
+               goto failed0;
        }
-       memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata));
        dev_set_drvdata(dev, (void *)drvdata);
 
        if (!regs_res) {
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
        drvdata->config = config;
        drvdata->config_regs = config_regs;
 
-       init_MUTEX(&drvdata->sem);
+       mutex_init(&drvdata->sem);
        drvdata->is_open = 0;
 
        dev_info(dev, "ioremap %lx to %p with size %x\n",
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
                goto failed3;
        }
        /*  devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */
-       class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME);
+       device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id);
        return 0;               /* success */
 
  failed3:
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id,
  failed1:
        kfree(drvdata);
 
+ failed0:
+       mutex_lock(&icap_sem);
+       probed_devices[id] = 0;
+       mutex_unlock(&icap_sem);
+
        return retval;
 }
 
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev)
        if (!drvdata)
                return 0;
 
-       class_device_destroy(icap_class, drvdata->devt);
+       device_destroy(icap_class, drvdata->devt);
        cdev_del(&drvdata->cdev);
        iounmap(drvdata->base_address);
        release_mem_region(drvdata->mem_start, drvdata->mem_size);
        kfree(drvdata);
        dev_set_drvdata(dev, NULL);
-       probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
 
+       mutex_lock(&icap_sem);
+       probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
+       mutex_unlock(&icap_sem);
        return 0;               /* success */
 }
 
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = {
 };
 
 /* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __devinit hwicap_of_register(void)
+static inline int __init hwicap_of_register(void)
 {
        pr_debug("hwicap: calling of_register_platform_driver()\n");
        return of_register_platform_driver(&hwicap_of_driver);
 }
 
-static inline void __devexit hwicap_of_unregister(void)
+static inline void __exit hwicap_of_unregister(void)
 {
        of_unregister_platform_driver(&hwicap_of_driver);
 }
 #else /* CONFIG_OF */
 /* CONFIG_OF not enabled; do nothing helpers */
-static inline int __devinit hwicap_of_register(void) { return 0; }
-static inline void __devexit hwicap_of_unregister(void) { }
+static inline int __init hwicap_of_register(void) { return 0; }
+static inline void __exit hwicap_of_unregister(void) { }
 #endif /* CONFIG_OF */
 
-static int __devinit hwicap_module_init(void)
+static int __init hwicap_module_init(void)
 {
        dev_t devt;
        int retval;
 
        icap_class = class_create(THIS_MODULE, "xilinx_config");
+       mutex_init(&icap_sem);
 
        if (xhwicap_major) {
                devt = MKDEV(xhwicap_major, xhwicap_minor);
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void)
        return retval;
 }
 
-static void __devexit hwicap_module_cleanup(void)
+static void __exit hwicap_module_cleanup(void)
 {
        dev_t devt = MKDEV(xhwicap_major, xhwicap_minor);
 
index ae771cac16298b1c675e3f9de902880898e75a78..405fee7e189bd72da5ff8d08514ad141a03771f3 100644 (file)
@@ -48,9 +48,9 @@ struct hwicap_drvdata {
        u8 write_buffer[4];
        u32 read_buffer_in_use;   /* Always in [0,3] */
        u8 read_buffer[4];
-       u32 mem_start;            /* phys. address of the control registers */
-       u32 mem_end;              /* phys. address of the control registers */
-       u32 mem_size;
+       resource_size_t mem_start;/* phys. address of the control registers */
+       resource_size_t mem_end;  /* phys. address of the control registers */
+       resource_size_t mem_size;
        void __iomem *base_address;/* virt. address of the control registers */
 
        struct device *dev;
@@ -61,7 +61,7 @@ struct hwicap_drvdata {
        const struct config_registers *config_regs;
        void *private_data;
        bool is_open;
-       struct semaphore sem;
+       struct mutex sem;
 };
 
 struct hwicap_driver_config {
@@ -164,29 +164,29 @@ struct config_registers {
 #define XHI_DISABLED_AUTO_CRC       0x0000DEFCUL
 
 /**
- * hwicap_type_1_read: Generates a Type 1 read packet header.
- * @parameter: Register is the address of the register to be read back.
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @reg: is the address of the register to be read back.
  *
  * Generates a Type 1 read packet header, which is used to indirectly
  * read registers in the configuration logic.  This packet must then
  * be sent through the icap device, and a return packet received with
  * the information.
  **/
-static inline u32 hwicap_type_1_read(u32 Register)
+static inline u32 hwicap_type_1_read(u32 reg)
 {
        return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
-               (Register << XHI_REGISTER_SHIFT) |
+               (reg << XHI_REGISTER_SHIFT) |
                (XHI_OP_READ << XHI_OP_SHIFT);
 }
 
 /**
- * hwicap_type_1_write: Generates a Type 1 write packet header
- * @parameter: Register is the address of the register to be read back.
+ * hwicap_type_1_write - Generates a Type 1 write packet header
+ * @reg: is the address of the register to be read back.
  **/
-static inline u32 hwicap_type_1_write(u32 Register)
+static inline u32 hwicap_type_1_write(u32 reg)
 {
        return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
-               (Register << XHI_REGISTER_SHIFT) |
+               (reg << XHI_REGISTER_SHIFT) |
                (XHI_OP_WRITE << XHI_OP_SHIFT);
 }
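
The hwicap.h hunk above only renames the "Register" parameter, but the helpers it touches build a Type 1 ICAP packet header by packing the packet type, register address and opcode into a single 32-bit word. A plain C illustration using made-up shift values (they are not the driver's real XHI_* constants, which are defined elsewhere in the header):

        #include <stdint.h>
        #include <stdio.h>

        /* Illustrative values only; not the driver's XHI_* constants. */
        #define TYPE_1          1u
        #define TYPE_SHIFT      29
        #define OP_READ         1u
        #define OP_SHIFT        27
        #define REGISTER_SHIFT  13

        static uint32_t type_1_read(uint32_t reg)
        {
                return (TYPE_1 << TYPE_SHIFT) |
                       (reg << REGISTER_SHIFT) |
                       (OP_READ << OP_SHIFT);
        }

        int main(void)
        {
                printf("0x%08x\n", type_1_read(0x0c));  /* header for register 0x0c */
                return 0;
        }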
 
index fea2d3ed9cbdacd57661f5a1a5227a1bde2ffeb2..85e2ba7fcfbab1b4533079da8bd812f22203952d 100644 (file)
@@ -47,7 +47,7 @@ static LIST_HEAD(notify_list);
 
 static struct cn_dev cdev;
 
-int cn_already_initialized = 0;
+static int cn_already_initialized;
 
 /*
  * msg->seq and msg->ack are used to determine message genealogy.
index a703deffb7954cd3a1fd4831adaa48403b1b06ba..27340a7b19dddb8129f8ca85e8c24cc90075b6c3 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig DMADEVICES
        bool "DMA Engine support"
-       depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+       depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
        depends on !HIGHMEM64G
        help
          DMA engines can do asynchronous data transfers without
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA
        help
          Enable support for the Intel(R) IOP Series RAID engines.
 
+config FSL_DMA
+       bool "Freescale MPC85xx/MPC83xx DMA support"
+       depends on PPC
+       select DMA_ENGINE
+       ---help---
+         Enable support for the Freescale DMA engine. Currently it supports
+         the MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
+         The MPC8349 and MPC8360 are also supported.
+
+config FSL_DMA_SELFTEST
+       bool "Enable the self test for each DMA channel"
+       depends on FSL_DMA
+       default y
+       ---help---
+         Enable the self test for each DMA channel. A self test will be
+         performed after the channel is probed to ensure the DMA works correctly.
+
 config DMA_ENGINE
        bool
 
index b152cd84e123653b8ecb2f4fe58ea45f3459ce4b..c8036d94590277d24f994a82c61fccbe51a89f07 100644 (file)
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
 ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
new file mode 100644 (file)
index 0000000..cc9a681
--- /dev/null
@@ -0,0 +1,1067 @@
+/*
+ * Freescale MPC85xx, MPC83xx DMA Engine support
+ *
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author:
+ *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
+ *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
+ *
+ * Description:
+ *   DMA engine driver for the Freescale MPC8540 DMA controller, which
+ *   also fits the MPC8560, MPC8555, MPC8548, MPC8641, etc.
+ *   Support for the MPC8349 DMA controller is also added.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/of_platform.h>
+
+#include "fsldma.h"
+
+static void dma_init(struct fsl_dma_chan *fsl_chan)
+{
+       /* Reset the channel */
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+
+       switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+       case FSL_DMA_IP_85XX:
+               /* Set the channel to the following modes:
+                * EIE - Error interrupt enable
+                * EOSIE - End of segments interrupt enable (basic mode)
+                * EOLNIE - End of links interrupt enable
+                */
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+                               | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
+               break;
+       case FSL_DMA_IP_83XX:
+               /* Set the channel to the following modes:
+                * EOTIE - End-of-transfer interrupt enable
+                */
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
+                               32);
+               break;
+       }
+
+}
+
+static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+{
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+}
+
+static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+{
+       return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+}
+
+static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+                               struct fsl_dma_ld_hw *hw, u32 count)
+{
+       hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+}
+
+static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+                               struct fsl_dma_ld_hw *hw, dma_addr_t src)
+{
+       u64 snoop_bits;
+
+       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+               ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+       hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+}
+
+static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
+                               struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+{
+       u64 snoop_bits;
+
+       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+               ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+       hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+}
+
+static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+                               struct fsl_dma_ld_hw *hw, dma_addr_t next)
+{
+       u64 snoop_bits;
+
+       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+               ? FSL_DMA_SNEN : 0;
+       hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
+}
+
+static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+{
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+{
+       return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+{
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+}
+
+static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+{
+       return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+}
+
+static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+{
+       u32 sr = get_sr(fsl_chan);
+       return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
+}
+
+static void dma_start(struct fsl_dma_chan *fsl_chan)
+{
+       u32 mr_set = 0;
+
+       if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
+               mr_set |= FSL_DMA_MR_EMP_EN;
+       } else
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+                               & ~FSL_DMA_MR_EMP_EN, 32);
+
+       if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
+               mr_set |= FSL_DMA_MR_EMS_EN;
+       else
+               mr_set |= FSL_DMA_MR_CS;
+
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+                       | mr_set, 32);
+}
+
+static void dma_halt(struct fsl_dma_chan *fsl_chan)
+{
+       int i = 0;
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+               DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
+               32);
+       DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+               DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
+               | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+
+       while (!dma_is_idle(fsl_chan) && (i++ < 100))
+               udelay(10);
+       if (i >= 100 && !dma_is_idle(fsl_chan))
+               dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+}
+
+static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+                       struct fsl_desc_sw *desc)
+{
+       desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
+               DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
+               64);
+}
+
+static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
+               struct fsl_desc_sw *new_desc)
+{
+       struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
+
+       if (list_empty(&fsl_chan->ld_queue))
+               return;
+
+       /* Link to the new descriptor physical address and
+        * Enable End-of-segment interrupt for
+        * the last link descriptor.
+        * (the previous node's next link descriptor)
+        *
+        * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set.
+        */
+       queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
+                       new_desc->async_tx.phys | FSL_DMA_EOSIE |
+                       (((fsl_chan->feature & FSL_DMA_IP_MASK)
+                               == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
+}
+
+/**
+ * fsl_chan_set_src_loop_size - Set source address hold transfer size
+ * @fsl_chan : Freescale DMA channel
+ * @size     : Address loop size, 0 for disable loop
+ *
+ * Sets the source address hold (loop) transfer size. While the DMA
+ * transfers data from the source address (SA), if the loop size is 4,
+ * the DMA will read data from SA, SA + 1, SA + 2, SA + 3, then loop
+ * back to SA, SA + 1 ... and so on.
+ */
+static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+{
+       switch (size) {
+       case 0:
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
+                       (~FSL_DMA_MR_SAHE), 32);
+               break;
+       case 1:
+       case 2:
+       case 4:
+       case 8:
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
+                       FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
+                       32);
+               break;
+       }
+}
+
+/**
+ * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
+ * @fsl_chan : Freescale DMA channel
+ * @size     : Address loop size, 0 for disable loop
+ *
+ * Sets the destination address hold (loop) transfer size. While the DMA
+ * transfers data to the destination address (TA), if the loop size is 4,
+ * the DMA will write data to TA, TA + 1, TA + 2, TA + 3, then loop
+ * back to TA, TA + 1 ... and so on.
+ */
+static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+{
+       switch (size) {
+       case 0:
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
+                       (~FSL_DMA_MR_DAHE), 32);
+               break;
+       case 1:
+       case 2:
+       case 4:
+       case 8:
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
+                       FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
+                       32);
+               break;
+       }
+}
+
+/**
+ * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * @fsl_chan : Freescale DMA channel
+ * @size     : Pause control size, 0 for disable external pause control.
+ *             The maximum is 1024.
+ *
+ * The Freescale DMA channel can be controlled by the external
+ * signal DREQ#. The pause control size is how many bytes are allowed
+ * to transfer before pausing the channel, after which a new assertion
+ * of DREQ# resumes channel operation.
+ */
+static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
+{
+       if (size > 1024)
+               return;
+
+       if (size) {
+               DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+                       DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+                               | ((__ilog2(size) << 24) & 0x0f000000),
+                       32);
+               fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+       } else
+               fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+}
+
+/**
+ * fsl_chan_toggle_ext_start - Toggle channel external start status
+ * @fsl_chan : Freescale DMA channel
+ * @enable   : 0 is disabled, 1 is enabled.
+ *
+ * If the external start is enabled, the channel can be started by an
+ * external DMA start pin, so dma_start() does not start the
+ * transfer immediately. The DMA channel will wait until the
+ * control pin is asserted.
+ */
+static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+{
+       if (enable)
+               fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+       else
+               fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+}
+
+static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
+       struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+       unsigned long flags;
+       dma_cookie_t cookie;
+
+       /* cookie increment and adding to ld_queue must be atomic */
+       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+       cookie = fsl_chan->common.cookie;
+       cookie++;
+       if (cookie < 0)
+               cookie = 1;
+       desc->async_tx.cookie = cookie;
+       fsl_chan->common.cookie = desc->async_tx.cookie;
+
+       append_ld_queue(fsl_chan, desc);
+       list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+
+       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+       return cookie;
+}
+
+/**
+ * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
+ * @fsl_chan : Freescale DMA channel
+ *
+ * Return - The descriptor allocated. NULL for failed.
+ */
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
+                                       struct fsl_dma_chan *fsl_chan)
+{
+       dma_addr_t pdesc;
+       struct fsl_desc_sw *desc_sw;
+
+       desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
+       if (desc_sw) {
+               memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
+               dma_async_tx_descriptor_init(&desc_sw->async_tx,
+                                               &fsl_chan->common);
+               desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
+               INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+               desc_sw->async_tx.phys = pdesc;
+       }
+
+       return desc_sw;
+}
+
+
+/**
+ * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
+ * @fsl_chan : Freescale DMA channel
+ *
+ * This function will create a dma pool for descriptor allocation.
+ *
+ * Return - The number of descriptors allocated.
+ */
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+       LIST_HEAD(tmp_list);
+
+       /* The descriptor must be aligned to 32 bytes
+        * to meet the FSL DMA specification requirement.
+        */
+       fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+                       fsl_chan->dev, sizeof(struct fsl_desc_sw),
+                       32, 0);
+       if (!fsl_chan->desc_pool) {
+               dev_err(fsl_chan->dev, "No memory for channel %d "
+                       "descriptor dma pool.\n", fsl_chan->id);
+               return 0;
+       }
+
+       return 1;
+}
+
+/**
+ * fsl_dma_free_chan_resources - Free all resources of the channel.
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+       struct fsl_desc_sw *desc, *_desc;
+       unsigned long flags;
+
+       dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
+       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+       list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+#ifdef FSL_DMA_LD_DEBUG
+               dev_dbg(fsl_chan->dev,
+                               "LD %p will be released.\n", desc);
+#endif
+               list_del(&desc->node);
+               /* free link descriptor */
+               dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+       }
+       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       dma_pool_destroy(fsl_chan->desc_pool);
+}
+
+static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
+       struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+       size_t len, unsigned long flags)
+{
+       struct fsl_dma_chan *fsl_chan;
+       struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+       size_t copy;
+       LIST_HEAD(link_chain);
+
+       if (!chan)
+               return NULL;
+
+       if (!len)
+               return NULL;
+
+       fsl_chan = to_fsl_chan(chan);
+
+       do {
+
+               /* Allocate the link descriptor from DMA pool */
+               new = fsl_dma_alloc_descriptor(fsl_chan);
+               if (!new) {
+                       dev_err(fsl_chan->dev,
+                                       "No free memory for link descriptor\n");
+                       return NULL;
+               }
+#ifdef FSL_DMA_LD_DEBUG
+               dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+#endif
+
+               copy = min(len, FSL_DMA_BCR_MAX_CNT);
+
+               set_desc_cnt(fsl_chan, &new->hw, copy);
+               set_desc_src(fsl_chan, &new->hw, dma_src);
+               set_desc_dest(fsl_chan, &new->hw, dma_dest);
+
+               if (!first)
+                       first = new;
+               else
+                       set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+
+               new->async_tx.cookie = 0;
+               new->async_tx.ack = 1;
+
+               prev = new;
+               len -= copy;
+               dma_src += copy;
+               dma_dest += copy;
+
+               /* Insert the link descriptor to the LD ring */
+               list_add_tail(&new->node, &first->async_tx.tx_list);
+       } while (len);
+
+       new->async_tx.ack = 0; /* client is in control of this ack */
+       new->async_tx.cookie = -EBUSY;
+
+       /* Set End-of-link to the last link descriptor of new list*/
+       set_ld_eol(fsl_chan, new);
+
+       return first ? &first->async_tx : NULL;
+}
+
+/**
+ * fsl_dma_update_completed_cookie - Update the completed cookie.
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+{
+       struct fsl_desc_sw *cur_desc, *desc;
+       dma_addr_t ld_phy;
+
+       ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+
+       if (ld_phy) {
+               cur_desc = NULL;
+               list_for_each_entry(desc, &fsl_chan->ld_queue, node)
+                       if (desc->async_tx.phys == ld_phy) {
+                               cur_desc = desc;
+                               break;
+                       }
+
+               if (cur_desc && cur_desc->async_tx.cookie) {
+                       if (dma_is_idle(fsl_chan))
+                               fsl_chan->completed_cookie =
+                                       cur_desc->async_tx.cookie;
+                       else
+                               fsl_chan->completed_cookie =
+                                       cur_desc->async_tx.cookie - 1;
+               }
+       }
+}
+
+/**
+ * fsl_chan_ld_cleanup - Clean up link descriptors
+ * @fsl_chan : Freescale DMA channel
+ *
+ * This function cleans up the ld_queue of the DMA channel.
+ * If 'in_intr' is set, the function will move the link descriptor to
+ * the recycle list. Otherwise, free it directly.
+ */
+static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+{
+       struct fsl_desc_sw *desc, *_desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+       fsl_dma_update_completed_cookie(fsl_chan);
+       dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
+                       fsl_chan->completed_cookie);
+       list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+               dma_async_tx_callback callback;
+               void *callback_param;
+
+               if (dma_async_is_complete(desc->async_tx.cookie,
+                           fsl_chan->completed_cookie, fsl_chan->common.cookie)
+                               == DMA_IN_PROGRESS)
+                       break;
+
+               callback = desc->async_tx.callback;
+               callback_param = desc->async_tx.callback_param;
+
+               /* Remove from ld_queue list */
+               list_del(&desc->node);
+
+               dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
+                               desc);
+               dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+
+               /* Run the link descriptor callback function */
+               if (callback) {
+                       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+                       dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
+                                       desc);
+                       callback(callback_param);
+                       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+}
+
+/**
+ * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+{
+       struct list_head *ld_node;
+       dma_addr_t next_dest_addr;
+       unsigned long flags;
+
+       if (!dma_is_idle(fsl_chan))
+               return;
+
+       dma_halt(fsl_chan);
+
+       /* If there are link descriptors not yet transferred
+        * in the queue, we need to start them.
+        */
+       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+       /* Find the first untransferred descriptor */
+       for (ld_node = fsl_chan->ld_queue.next;
+               (ld_node != &fsl_chan->ld_queue)
+                       && (dma_async_is_complete(
+                               to_fsl_desc(ld_node)->async_tx.cookie,
+                               fsl_chan->completed_cookie,
+                               fsl_chan->common.cookie) == DMA_SUCCESS);
+               ld_node = ld_node->next);
+
+       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+       if (ld_node != &fsl_chan->ld_queue) {
+               /* Get the ld start address from ld_queue */
+               next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
+               dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%016llx\n",
+                               (u64)next_dest_addr);
+               set_cdar(fsl_chan, next_dest_addr);
+               dma_start(fsl_chan);
+       } else {
+               set_cdar(fsl_chan, 0);
+               set_ndar(fsl_chan, 0);
+       }
+}
+
+/**
+ * fsl_dma_memcpy_issue_pending - Issue the DMA start command
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+       struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+
+#ifdef FSL_DMA_LD_DEBUG
+       struct fsl_desc_sw *ld;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+       if (list_empty(&fsl_chan->ld_queue)) {
+               spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+               return;
+       }
+
+       dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
+       list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
+               int i;
+               dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
+                               fsl_chan->id, ld->async_tx.phys);
+               for (i = 0; i < 8; i++)
+                       dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
+                                       i, *(((u32 *)&ld->hw) + i));
+       }
+       dev_dbg(fsl_chan->dev, "----------------\n");
+       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+#endif
+
+       fsl_chan_xfer_ld_queue(fsl_chan);
+}
+
+static void fsl_dma_dependency_added(struct dma_chan *chan)
+{
+       struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+
+       fsl_chan_ld_cleanup(fsl_chan);
+}
+
+/**
+ * fsl_dma_is_complete - Determine the DMA status
+ * @fsl_chan : Freescale DMA channel
+ */
+static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
+                                       dma_cookie_t cookie,
+                                       dma_cookie_t *done,
+                                       dma_cookie_t *used)
+{
+       struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+       dma_cookie_t last_used;
+       dma_cookie_t last_complete;
+
+       fsl_chan_ld_cleanup(fsl_chan);
+
+       last_used = chan->cookie;
+       last_complete = fsl_chan->completed_cookie;
+
+       if (done)
+               *done = last_complete;
+
+       if (used)
+               *used = last_used;
+
+       return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
+{
+       struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+       dma_addr_t stat;
+
+       stat = get_sr(fsl_chan);
+       dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
+                                               fsl_chan->id, stat);
+       set_sr(fsl_chan, stat);         /* Clear the event register */
+
+       stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
+       if (!stat)
+               return IRQ_NONE;
+
+       if (stat & FSL_DMA_SR_TE)
+               dev_err(fsl_chan->dev, "Transfer Error!\n");
+
+       /* If the link descriptor segment transfer finishes,
+        * we will recycle the used descriptor.
+        */
+       if (stat & FSL_DMA_SR_EOSI) {
+               dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
+               dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
+                               "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
+                               (u64)get_ndar(fsl_chan));
+               stat &= ~FSL_DMA_SR_EOSI;
+       }
+
+       /* If the current transfer is the end-of-transfer,
+        * we should clear the Channel Start bit to
+        * prepare for the next transfer.
+        */
+       if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
+               dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
+               stat &= ~FSL_DMA_SR_EOLNI;
+               fsl_chan_xfer_ld_queue(fsl_chan);
+       }
+
+       if (stat)
+               dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
+                                       stat);
+
+       dev_dbg(fsl_chan->dev, "event: Exit\n");
+       tasklet_schedule(&fsl_chan->tasklet);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
+{
+       struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
+       u32 gsr;
+       int ch_nr;
+
+       gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
+                       : in_le32(fdev->reg_base);
+       ch_nr = (32 - ffs(gsr)) / 8;
+
+       return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
+                       fdev->chan[ch_nr]) : IRQ_NONE;
+}
+
+static void dma_do_tasklet(unsigned long data)
+{
+       struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+       fsl_chan_ld_cleanup(fsl_chan);
+}
+
+static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
+{
+       if (fsl_chan)
+               dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
+}
+
+static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
+{
+       struct dma_chan *chan;
+       int err = 0;
+       dma_addr_t dma_dest, dma_src;
+       dma_cookie_t cookie;
+       u8 *src, *dest;
+       int i;
+       size_t test_size;
+       struct dma_async_tx_descriptor *tx1, *tx2, *tx3;
+
+       test_size = 4096;
+
+       src = kmalloc(test_size * 2, GFP_KERNEL);
+       if (!src) {
+               dev_err(fsl_chan->dev,
+                               "selftest: Cannot alloc memory for test!\n");
+               err = -ENOMEM;
+               goto out;
+       }
+
+       dest = src + test_size;
+
+       for (i = 0; i < test_size; i++)
+               src[i] = (u8) i;
+
+       chan = &fsl_chan->common;
+
+       if (fsl_dma_alloc_chan_resources(chan) < 1) {
+               dev_err(fsl_chan->dev,
+                               "selftest: Cannot alloc resources for DMA\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+       /* TX 1 */
+       dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
+                                DMA_TO_DEVICE);
+       dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
+                                 DMA_FROM_DEVICE);
+       tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
+       async_tx_ack(tx1);
+
+       cookie = fsl_dma_tx_submit(tx1);
+       fsl_dma_memcpy_issue_pending(chan);
+       msleep(2);
+
+       if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+               dev_err(fsl_chan->dev, "selftest: Time out!\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+       /* Test free and re-alloc channel resources */
+       fsl_dma_free_chan_resources(chan);
+
+       if (fsl_dma_alloc_chan_resources(chan) < 1) {
+               dev_err(fsl_chan->dev,
+                               "selftest: Cannot alloc resources for DMA\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+       /* Continue the test with
+        * TX 2
+        */
+       dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
+                                       test_size / 4, DMA_TO_DEVICE);
+       dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
+                                       test_size / 4, DMA_FROM_DEVICE);
+       tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
+       async_tx_ack(tx2);
+
+       /* TX 3 */
+       dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
+                                       test_size / 4, DMA_TO_DEVICE);
+       dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
+                                       test_size / 4, DMA_FROM_DEVICE);
+       tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
+       async_tx_ack(tx3);
+
+       /* Test submitting the prepared descriptors out of order */
+       cookie = fsl_dma_tx_submit(tx3);
+       cookie = fsl_dma_tx_submit(tx2);
+
+#ifdef FSL_DMA_CALLBACKTEST
+       if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
+           dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
+               tx3->callback = fsl_dma_callback_test;
+               tx3->callback_param = fsl_chan;
+       }
+#endif
+       fsl_dma_memcpy_issue_pending(chan);
+       msleep(2);
+
+       if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+               dev_err(fsl_chan->dev, "selftest: Time out!\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+       err = memcmp(src, dest, test_size);
+       if (err) {
+               for (i = 0; (i < test_size) && (src[i] == dest[i]); i++)
+                       ;
+               dev_err(fsl_chan->dev, "selftest: Test failed, byte %d of %zu "
+                               "differs! src 0x%x, dest 0x%x\n",
+                               i, test_size, src[i], dest[i]);
+       }
+
+free_resources:
+       fsl_dma_free_chan_resources(chan);
+out:
+       kfree(src);
+       return err;
+}
+
+static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
+                       const struct of_device_id *match)
+{
+       struct fsl_dma_device *fdev;
+       struct fsl_dma_chan *new_fsl_chan;
+       int err;
+
+       fdev = dev_get_drvdata(dev->dev.parent);
+       BUG_ON(!fdev);
+
+       /* alloc channel */
+       new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
+       if (!new_fsl_chan) {
+               dev_err(&dev->dev, "No free memory for allocating "
+                               "dma channels!\n");
+               err = -ENOMEM;
+               goto err;
+       }
+
+       /* get dma channel register base */
+       err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
+       if (err) {
+               dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+                               dev->node->full_name);
+               goto err;
+       }
+
+       new_fsl_chan->feature = *(u32 *)match->data;
+
+       if (!fdev->feature)
+               fdev->feature = new_fsl_chan->feature;
+
+       /* If the DMA device's features differ from those of its
+        * channels, report the bug.
+        */
+       WARN_ON(fdev->feature != new_fsl_chan->feature);
+
+       new_fsl_chan->dev = &dev->dev;
+       new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
+                       new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
+
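+       /* Channel register blocks are 0x80 bytes apart, starting at offset
+        * 0x100 from the controller base, so the channel id can be derived
+        * from the 'reg' offset.
+        */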
+       new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
+       if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
+               dev_err(&dev->dev, "There is no channel %d!\n",
+                               new_fsl_chan->id);
+               err = -EINVAL;
+               goto err;
+       }
+       fdev->chan[new_fsl_chan->id] = new_fsl_chan;
+       tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
+                       (unsigned long)new_fsl_chan);
+
+       /* Init the channel */
+       dma_init(new_fsl_chan);
+
+       /* Clear cdar registers */
+       set_cdar(new_fsl_chan, 0);
+
+       switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
+       case FSL_DMA_IP_85XX:
+               new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
+               new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
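+               /* fall through */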
+       case FSL_DMA_IP_83XX:
+               new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
+               new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
+       }
+
+       spin_lock_init(&new_fsl_chan->desc_lock);
+       INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
+
+       new_fsl_chan->common.device = &fdev->common;
+
+       /* Add the channel to DMA device channel list */
+       list_add_tail(&new_fsl_chan->common.device_node,
+                       &fdev->common.channels);
+       fdev->common.chancnt++;
+
+       new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
+       if (new_fsl_chan->irq != NO_IRQ) {
+               err = request_irq(new_fsl_chan->irq,
+                                       &fsl_dma_chan_do_interrupt, IRQF_SHARED,
+                                       "fsldma-channel", new_fsl_chan);
+               if (err) {
+                       dev_err(&dev->dev, "DMA channel %s request_irq error "
+                               "with return %d\n", dev->node->full_name, err);
+                       goto err;
+               }
+       }
+
+#ifdef CONFIG_FSL_DMA_SELFTEST
+       err = fsl_dma_self_test(new_fsl_chan);
+       if (err)
+               goto err;
+#endif
+
+       dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
+                               match->compatible, new_fsl_chan->irq);
+
+       return 0;
+err:
+       dma_halt(new_fsl_chan);
+       iounmap(new_fsl_chan->reg_base);
+       free_irq(new_fsl_chan->irq, new_fsl_chan);
+       list_del(&new_fsl_chan->common.device_node);
+       kfree(new_fsl_chan);
+       return err;
+}
+
+const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
+const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
+
+static struct of_device_id of_fsl_dma_chan_ids[] = {
+       {
+               .compatible = "fsl,mpc8540-dma-channel",
+               .data = (void *)&mpc8540_dma_ip_feature,
+       },
+       {
+               .compatible = "fsl,mpc8349-dma-channel",
+               .data = (void *)&mpc8349_dma_ip_feature,
+       },
+       {}
+};
+
+static struct of_platform_driver of_fsl_dma_chan_driver = {
+       .name = "of-fsl-dma-channel",
+       .match_table = of_fsl_dma_chan_ids,
+       .probe = of_fsl_dma_chan_probe,
+};
+
+static __init int of_fsl_dma_chan_init(void)
+{
+       return of_register_platform_driver(&of_fsl_dma_chan_driver);
+}
+
+static int __devinit of_fsl_dma_probe(struct of_device *dev,
+                       const struct of_device_id *match)
+{
+       int err;
+       unsigned int irq;
+       struct fsl_dma_device *fdev;
+
+       fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+       if (!fdev) {
+               dev_err(&dev->dev, "Not enough memory for 'priv'\n");
+               err = -ENOMEM;
+               goto err;
+       }
+       fdev->dev = &dev->dev;
+       INIT_LIST_HEAD(&fdev->common.channels);
+
+       /* get DMA controller register base */
+       err = of_address_to_resource(dev->node, 0, &fdev->reg);
+       if (err) {
+               dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+                               dev->node->full_name);
+               goto err;
+       }
+
+       dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
+                       "controller at 0x%08x...\n",
+                       match->compatible, fdev->reg.start);
+       fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
+                                               - fdev->reg.start + 1);
+
+       dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
+       dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+       fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
+       fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
+       fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+       fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+       fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
+       fdev->common.device_dependency_added = fsl_dma_dependency_added;
+       fdev->common.dev = &dev->dev;
+
+       irq = irq_of_parse_and_map(dev->node, 0);
+       if (irq != NO_IRQ) {
+               err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
+                                       "fsldma-device", fdev);
+               if (err) {
+                       dev_err(&dev->dev, "DMA device request_irq error "
+                               "with return %d\n", err);
+                       goto err;
+               }
+       }
+
+       dev_set_drvdata(&(dev->dev), fdev);
+       of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);
+
+       dma_async_device_register(&fdev->common);
+       return 0;
+
+err:
+       iounmap(fdev->reg_base);
+       kfree(fdev);
+       return err;
+}
+
+static struct of_device_id of_fsl_dma_ids[] = {
+       { .compatible = "fsl,mpc8540-dma", },
+       { .compatible = "fsl,mpc8349-dma", },
+       {}
+};
+
+static struct of_platform_driver of_fsl_dma_driver = {
+       .name = "of-fsl-dma",
+       .match_table = of_fsl_dma_ids,
+       .probe = of_fsl_dma_probe,
+};
+
+static __init int of_fsl_dma_init(void)
+{
+       return of_register_platform_driver(&of_fsl_dma_driver);
+}
+
+subsys_initcall(of_fsl_dma_chan_init);
+subsys_initcall(of_fsl_dma_init);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
new file mode 100644 (file)
index 0000000..ba78c42
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author:
+ *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
+ *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DMA_FSLDMA_H
+#define __DMA_FSLDMA_H
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+
+/* Register and descriptor definitions for the Freescale
+ * MPC8540 and MPC8349 DMA controllers.
+ */
+#define FSL_DMA_MR_CS          0x00000001
+#define FSL_DMA_MR_CC          0x00000002
+#define FSL_DMA_MR_CA          0x00000008
+#define FSL_DMA_MR_EIE         0x00000040
+#define FSL_DMA_MR_XFE         0x00000020
+#define FSL_DMA_MR_EOLNIE      0x00000100
+#define FSL_DMA_MR_EOLSIE      0x00000080
+#define FSL_DMA_MR_EOSIE       0x00000200
+#define FSL_DMA_MR_CDSM                0x00000010
+#define FSL_DMA_MR_CTM         0x00000004
+#define FSL_DMA_MR_EMP_EN      0x00200000
+#define FSL_DMA_MR_EMS_EN      0x00040000
+#define FSL_DMA_MR_DAHE                0x00002000
+#define FSL_DMA_MR_SAHE                0x00001000
+
+/* Special MR definition for MPC8349 */
+#define FSL_DMA_MR_EOTIE       0x00000080
+
+#define FSL_DMA_SR_CH          0x00000020
+#define FSL_DMA_SR_CB          0x00000004
+#define FSL_DMA_SR_TE          0x00000080
+#define FSL_DMA_SR_EOSI                0x00000002
+#define FSL_DMA_SR_EOLSI       0x00000001
+#define FSL_DMA_SR_EOCDI       0x00000001
+#define FSL_DMA_SR_EOLNI       0x00000008
+
+#define FSL_DMA_SATR_SBPATMU                   0x20000000
+#define FSL_DMA_SATR_STRANSINT_RIO             0x00c00000
+#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ      0x00050000
+#define FSL_DMA_SATR_SREADTYPE_BP_IORH         0x00020000
+#define FSL_DMA_SATR_SREADTYPE_BP_NREAD                0x00040000
+#define FSL_DMA_SATR_SREADTYPE_BP_MREAD                0x00070000
+
+#define FSL_DMA_DATR_DBPATMU                   0x20000000
+#define FSL_DMA_DATR_DTRANSINT_RIO             0x00c00000
+#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE    0x00050000
+#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH       0x00010000
+
+#define FSL_DMA_EOL            ((u64)0x1)
+#define FSL_DMA_SNEN           ((u64)0x10)
+#define FSL_DMA_EOSIE          0x8
+#define FSL_DMA_NLDA_MASK      (~(u64)0x1f)
+
+#define FSL_DMA_BCR_MAX_CNT    0x03ffffffu
+
+#define FSL_DMA_DGSR_TE                0x80
+#define FSL_DMA_DGSR_CH                0x20
+#define FSL_DMA_DGSR_PE                0x10
+#define FSL_DMA_DGSR_EOLNI     0x08
+#define FSL_DMA_DGSR_CB                0x04
+#define FSL_DMA_DGSR_EOSI      0x02
+#define FSL_DMA_DGSR_EOLSI     0x01
+
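+/* In-memory layout of a hardware link descriptor; the 32-byte alignment
+ * matches the next-descriptor address mask (FSL_DMA_NLDA_MASK).
+ */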
+struct fsl_dma_ld_hw {
+       u64 __bitwise   src_addr;
+       u64 __bitwise   dst_addr;
+       u64 __bitwise   next_ln_addr;
+       u32 __bitwise   count;
+       u32 __bitwise   reserve;
+} __attribute__((aligned(32)));
+
+struct fsl_desc_sw {
+       struct fsl_dma_ld_hw hw;
+       struct list_head node;
+       struct dma_async_tx_descriptor async_tx;
+       struct list_head *ld;
+       void *priv;
+} __attribute__((aligned(32)));
+
+struct fsl_dma_chan_regs {
+       u32 __bitwise   mr;     /* 0x00 - Mode Register */
+       u32 __bitwise   sr;     /* 0x04 - Status Register */
+       u64 __bitwise   cdar;   /* 0x08 - Current descriptor address register */
+       u64 __bitwise   sar;    /* 0x10 - Source Address Register */
+       u64 __bitwise   dar;    /* 0x18 - Destination Address Register */
+       u32 __bitwise   bcr;    /* 0x20 - Byte Count Register */
+       u64 __bitwise   ndar;   /* 0x24 - Next Descriptor Address Register */
+};
+
+struct fsl_dma_chan;
+#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
+
+struct fsl_dma_device {
+       void __iomem *reg_base; /* DGSR register base */
+       struct resource reg;    /* Resource for register */
+       struct device *dev;
+       struct dma_device common;
+       struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+       u32 feature;            /* The same as DMA channels */
+};
+
+/* Define macros for fsl_dma_chan->feature property */
+#define FSL_DMA_LITTLE_ENDIAN  0x00000000
+#define FSL_DMA_BIG_ENDIAN     0x00000001
+
+#define FSL_DMA_IP_MASK                0x00000ff0
+#define FSL_DMA_IP_85XX                0x00000010
+#define FSL_DMA_IP_83XX                0x00000020
+
+#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
+#define FSL_DMA_CHAN_START_EXT 0x00002000
+
+struct fsl_dma_chan {
+       struct fsl_dma_chan_regs __iomem *reg_base;
+       dma_cookie_t completed_cookie;  /* The maximum cookie completed */
+       spinlock_t desc_lock;           /* Descriptor operation lock */
+       struct list_head ld_queue;      /* Link descriptors queue */
+       struct dma_chan common;         /* DMA common channel */
+       struct dma_pool *desc_pool;     /* Descriptors pool */
+       struct device *dev;             /* Channel device */
+       struct resource reg;            /* Resource for register */
+       int irq;                        /* Channel IRQ */
+       int id;                         /* Raw id of this channel */
+       struct tasklet_struct tasklet;
+       u32 feature;
+
+       void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size);
+       void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
+       void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
+       void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
+};
+
+#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
+#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
+
+#ifndef __powerpc64__
+static u64 in_be64(const u64 __iomem *addr)
+{
+       return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1));
+}
+
+static void out_be64(u64 __iomem *addr, u64 val)
+{
+       out_be32((u32 *)addr, val >> 32);
+       out_be32((u32 *)addr + 1, (u32)val);
+}
+
+/* There are no asm instructions for 64-bit byte-reversed loads and stores */
+static u64 in_le64(const u64 __iomem *addr)
+{
+       return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr));
+}
+
+static void out_le64(u64 __iomem *addr, u64 val)
+{
+       out_le32((u32 *)addr + 1, val >> 32);
+       out_le32((u32 *)addr, (u32)val);
+}
+#endif
+
+#define DMA_IN(fsl_chan, addr, width)                                  \
+               (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?           \
+                       in_be##width(addr) : in_le##width(addr))
+#define DMA_OUT(fsl_chan, addr, val, width)                            \
+               (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?           \
+                       out_be##width(addr, val) : out_le##width(addr, val))
+
+#define DMA_TO_CPU(fsl_chan, d, width)                                 \
+               (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?           \
+                       be##width##_to_cpu(d) : le##width##_to_cpu(d))
+#define CPU_TO_DMA(fsl_chan, c, width)                                 \
+               (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?           \
+                       cpu_to_be##width(c) : cpu_to_le##width(c))
+
+#endif /* __DMA_FSLDMA_H */
index dff38accc5c1df47193a73b4fb81e467012d80b6..4017d9e7acd2a2b68020e2584c71dc654dd20cae 100644 (file)
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
                new->len = len;
                new->dst = dma_dest;
                new->src = dma_src;
+               new->async_tx.ack = 0;
                return &new->async_tx;
        } else
                return NULL;
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
                new->len = len;
                new->dst = dma_dest;
                new->src = dma_src;
+               new->async_tx.ack = 0;
                return &new->async_tx;
        } else
                return NULL;
index 3e9719948a8e7817658496929115fe52bda45473..a03462750b95ee0045408a5c8d669ba7bc3d461d 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/crc-itu-t.h>
@@ -214,17 +215,29 @@ static void
 fw_card_bm_work(struct work_struct *work)
 {
        struct fw_card *card = container_of(work, struct fw_card, work.work);
-       struct fw_device *root;
+       struct fw_device *root_device;
+       struct fw_node *root_node, *local_node;
        struct bm_data bmd;
        unsigned long flags;
        int root_id, new_root_id, irm_id, gap_count, generation, grace;
        int do_reset = 0;
 
        spin_lock_irqsave(&card->lock, flags);
+       local_node = card->local_node;
+       root_node  = card->root_node;
+
+       if (local_node == NULL) {
+               spin_unlock_irqrestore(&card->lock, flags);
+               return;
+       }
+       fw_node_get(local_node);
+       fw_node_get(root_node);
 
        generation = card->generation;
-       root = card->root_node->data;
-       root_id = card->root_node->node_id;
+       root_device = root_node->data;
+       if (root_device)
+               fw_device_get(root_device);
+       root_id = root_node->node_id;
        grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
 
        if (card->bm_generation + 1 == generation ||
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work)
 
                irm_id = card->irm_node->node_id;
                if (!card->irm_node->link_on) {
-                       new_root_id = card->local_node->node_id;
+                       new_root_id = local_node->node_id;
                        fw_notify("IRM has link off, making local node (%02x) root.\n",
                                  new_root_id);
                        goto pick_me;
                }
 
                bmd.lock.arg = cpu_to_be32(0x3f);
-               bmd.lock.data = cpu_to_be32(card->local_node->node_id);
+               bmd.lock.data = cpu_to_be32(local_node->node_id);
 
                spin_unlock_irqrestore(&card->lock, flags);
 
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work)
                         * Another bus reset happened. Just return,
                         * the BM work has been rescheduled.
                         */
-                       return;
+                       goto out;
                }
 
                if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
                        /* Somebody else is BM, let them do the work. */
-                       return;
+                       goto out;
 
                spin_lock_irqsave(&card->lock, flags);
                if (bmd.rcode != RCODE_COMPLETE) {
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work)
                         * do a bus reset and pick the local node as
                         * root, and thus, IRM.
                         */
-                       new_root_id = card->local_node->node_id;
+                       new_root_id = local_node->node_id;
                        fw_notify("BM lock failed, making local node (%02x) root.\n",
                                  new_root_id);
                        goto pick_me;
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work)
                 */
                spin_unlock_irqrestore(&card->lock, flags);
                schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
-               return;
+               goto out;
        }
 
        /*
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work)
         */
        card->bm_generation = generation;
 
-       if (root == NULL) {
+       if (root_device == NULL) {
                /*
                 * Either link_on is false, or we failed to read the
                 * config rom.  In either case, pick another root.
                 */
-               new_root_id = card->local_node->node_id;
-       } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
+               new_root_id = local_node->node_id;
+       } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) {
                /*
                 * If we haven't probed this device yet, bail out now
                 * and let's try again once that's done.
                 */
                spin_unlock_irqrestore(&card->lock, flags);
-               return;
-       } else if (root->config_rom[2] & BIB_CMC) {
+               goto out;
+       } else if (root_device->config_rom[2] & BIB_CMC) {
                /*
                 * FIXME: I suppose we should set the cmstr bit in the
                 * STATE_CLEAR register of this node, as described in
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work)
                 * successfully read the config rom, but it's not
                 * cycle master capable.
                 */
-               new_root_id = card->local_node->node_id;
+               new_root_id = local_node->node_id;
        }
 
  pick_me:
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work)
         * the typically much larger 1394b beta repeater delays though.
         */
        if (!card->beta_repeaters_present &&
-           card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
-               gap_count = gap_count_table[card->root_node->max_hops];
+           root_node->max_hops < ARRAY_SIZE(gap_count_table))
+               gap_count = gap_count_table[root_node->max_hops];
        else
                gap_count = 63;
 
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work)
                fw_send_phy_config(card, new_root_id, generation, gap_count);
                fw_core_initiate_bus_reset(card, 1);
        }
+ out:
+       if (root_device)
+               fw_device_put(root_device);
+       fw_node_put(root_node);
+       fw_node_put(local_node);
 }
 
 static void
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
        static atomic_t index = ATOMIC_INIT(-1);
 
        kref_init(&card->kref);
+       atomic_set(&card->device_count, 0);
        card->index = atomic_inc_return(&index);
        card->driver = driver;
        card->device = device;
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card)
        card->driver = &dummy_driver;
 
        fw_destroy_nodes(card);
-       flush_scheduled_work();
+       /*
+        * Wait for all device workqueue jobs to finish.  Otherwise the
+        * firewire-core module could be unloaded before the jobs ran.
+        */
+       while (atomic_read(&card->device_count) > 0)
+               msleep(100);
 
+       cancel_delayed_work_sync(&card->work);
        fw_flush_transactions(card);
        del_timer_sync(&card->flush_timer);
 
index 7e73cbaa4121047ffe6eb7c5f137357e0739c58e..46bc197a047fe84329f4ec868e351ae0438cac2e 100644 (file)
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
        struct client *client;
        unsigned long flags;
 
-       device = fw_device_from_devt(inode->i_rdev);
+       device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;
 
        client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (client == NULL)
+       if (client == NULL) {
+               fw_device_put(device);
                return -ENOMEM;
+       }
 
-       client->device = fw_device_get(device);
+       client->device = device;
        INIT_LIST_HEAD(&client->event_list);
        INIT_LIST_HEAD(&client->resource_list);
        spin_lock_init(&client->lock);
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
        struct fw_cdev_create_iso_context *request = buffer;
        struct fw_iso_context *context;
 
+       /* We only support one context at this time. */
+       if (client->iso_context != NULL)
+               return -EBUSY;
+
        if (request->channel > 63)
                return -EINVAL;
 
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer)
 {
        struct fw_cdev_start_iso *request = buffer;
 
-       if (request->handle != 0)
+       if (client->iso_context == NULL || request->handle != 0)
                return -EINVAL;
+
        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
                if (request->tags == 0 || request->tags > 15)
                        return -EINVAL;
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer)
 {
        struct fw_cdev_stop_iso *request = buffer;
 
-       if (request->handle != 0)
+       if (client->iso_context == NULL || request->handle != 0)
                return -EINVAL;
 
        return fw_iso_context_stop(client->iso_context);
index de9066e69adfbeea440d31cec1c0945f337db728..870125a3638e5a8e647684de358b4be5f4290491 100644 (file)
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = {
 };
 EXPORT_SYMBOL(fw_bus_type);
 
-struct fw_device *fw_device_get(struct fw_device *device)
-{
-       get_device(&device->device);
-
-       return device;
-}
-
-void fw_device_put(struct fw_device *device)
-{
-       put_device(&device->device);
-}
-
 static void fw_device_release(struct device *dev)
 {
        struct fw_device *device = fw_device(dev);
+       struct fw_card *card = device->card;
        unsigned long flags;
 
        /*
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev)
        spin_unlock_irqrestore(&device->card->lock, flags);
 
        fw_node_put(device->node);
-       fw_card_put(device->card);
        kfree(device->config_rom);
        kfree(device);
+       atomic_dec(&card->device_count);
 }
 
 int fw_device_enable_phys_dma(struct fw_device *device)
@@ -358,12 +347,9 @@ static ssize_t
 guid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct fw_device *device = fw_device(dev);
-       u64 guid;
-
-       guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
 
-       return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
-                       (unsigned long long)guid);
+       return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
+                       device->config_rom[3], device->config_rom[4]);
 }
 
 static struct device_attribute fw_device_attributes[] = {
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem);
 static DEFINE_IDR(fw_device_idr);
 int fw_cdev_major;
 
-struct fw_device *fw_device_from_devt(dev_t devt)
+struct fw_device *fw_device_get_by_devt(dev_t devt)
 {
        struct fw_device *device;
 
        down_read(&idr_rwsem);
        device = idr_find(&fw_device_idr, MINOR(devt));
+       if (device)
+               fw_device_get(device);
        up_read(&idr_rwsem);
 
        return device;
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work)
                container_of(work, struct fw_device, work.work);
        int minor = MINOR(device->device.devt);
 
-       down_write(&idr_rwsem);
-       idr_remove(&fw_device_idr, minor);
-       up_write(&idr_rwsem);
-
        fw_device_cdev_remove(device);
        device_for_each_child(&device->device, NULL, shutdown_unit);
        device_unregister(&device->device);
+
+       down_write(&idr_rwsem);
+       idr_remove(&fw_device_idr, minor);
+       up_write(&idr_rwsem);
+       fw_device_put(device);
 }
 
 static struct device_type fw_device_type = {
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work)
         */
 
        if (read_bus_info_block(device, device->generation) < 0) {
-               if (device->config_rom_retries < MAX_RETRIES) {
+               if (device->config_rom_retries < MAX_RETRIES &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
                        schedule_delayed_work(&device->work, RETRY_DELAY);
                } else {
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work)
        }
 
        err = -ENOMEM;
+
+       fw_device_get(device);
        down_write(&idr_rwsem);
        if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
                err = idr_get_new(&fw_device_idr, device, &minor);
        up_write(&idr_rwsem);
+
        if (err < 0)
                goto error;
 
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work)
         */
        if (atomic_cmpxchg(&device->state,
                    FW_DEVICE_INITIALIZING,
-                   FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+                   FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
                fw_device_shutdown(&device->work.work);
-       else
-               fw_notify("created new fw device %s "
-                         "(%d config rom retries, S%d00)\n",
-                         device->device.bus_id, device->config_rom_retries,
-                         1 << device->max_speed);
+       } else {
+               if (device->config_rom_retries)
+                       fw_notify("created device %s: GUID %08x%08x, S%d00, "
+                                 "%d config ROM retries\n",
+                                 device->device.bus_id,
+                                 device->config_rom[3], device->config_rom[4],
+                                 1 << device->max_speed,
+                                 device->config_rom_retries);
+               else
+                       fw_notify("created device %s: GUID %08x%08x, S%d00\n",
+                                 device->device.bus_id,
+                                 device->config_rom[3], device->config_rom[4],
+                                 1 << device->max_speed);
+       }
 
        /*
         * Reschedule the IRM work if we just finished reading the
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work)
        idr_remove(&fw_device_idr, minor);
        up_write(&idr_rwsem);
  error:
-       put_device(&device->device);
+       fw_device_put(device);          /* fw_device_idr's reference */
+
+       put_device(&device->device);    /* our reference */
 }
 
 static int update_unit(struct device *dev, void *data)
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                 */
                device_initialize(&device->device);
                atomic_set(&device->state, FW_DEVICE_INITIALIZING);
-               device->card = fw_card_get(card);
+               atomic_inc(&card->device_count);
+               device->card = card;
                device->node = fw_node_get(node);
                device->node_id = node->node_id;
                device->generation = card->generation;
index 0854fe2bc11085943d0b7edcdef5884d8996c60c..78ecd3991b7f230ac83aa07ac6ef47e98dbcf310 100644 (file)
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device)
        return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
 }
 
-struct fw_device *fw_device_get(struct fw_device *device);
-void fw_device_put(struct fw_device *device);
+static inline struct fw_device *
+fw_device_get(struct fw_device *device)
+{
+       get_device(&device->device);
+
+       return device;
+}
+
+static inline void
+fw_device_put(struct fw_device *device)
+{
+       put_device(&device->device);
+}
+
+struct fw_device *fw_device_get_by_devt(dev_t devt);
 int fw_device_enable_phys_dma(struct fw_device *device);
 
 void fw_device_cdev_update(struct fw_device *device);
 void fw_device_cdev_remove(struct fw_device *device);
 
-struct fw_device *fw_device_from_devt(dev_t devt);
 extern int fw_cdev_major;
 
 struct fw_unit {
index 19ece9b6d7425906d6cff48cee2ebfba172f5141..03069a454c07c57ec9eb53c177f586d0565981ac 100644 (file)
  * and many others.
  */
 
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/device.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
 #include <linux/string.h>
 #include <linux/stringify.h>
 #include <linux/timer.h>
@@ -47,9 +48,9 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 
-#include "fw-transaction.h"
-#include "fw-topology.h"
 #include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
 
 /*
  * So far only bridges from Oxford Semiconductor are known to support
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
  *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
  *   Don't use this with devices which don't have this bug.
  *
+ * - delay inquiry
+ *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
+ *
  * - override internal blacklist
  *   Instead of adding to the built-in blacklist, use only the workarounds
  *   specified in the module load parameter.
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
 #define SBP2_WORKAROUND_INQUIRY_36     0x2
 #define SBP2_WORKAROUND_MODE_SENSE_8   0x4
 #define SBP2_WORKAROUND_FIX_CAPACITY   0x8
+#define SBP2_WORKAROUND_DELAY_INQUIRY  0x10
+#define SBP2_INQUIRY_DELAY             12
 #define SBP2_WORKAROUND_OVERRIDE       0x100
 
 static int sbp2_param_workarounds;
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
        ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
        ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
        ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+       ", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
        ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
        ", or a combination)");
 
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2";
 struct sbp2_logical_unit {
        struct sbp2_target *tgt;
        struct list_head link;
-       struct scsi_device *sdev;
        struct fw_address_handler address_handler;
        struct list_head orb_list;
 
@@ -132,6 +138,8 @@ struct sbp2_logical_unit {
        int generation;
        int retries;
        struct delayed_work work;
+       bool has_sdev;
+       bool blocked;
 };
 
 /*
@@ -141,16 +149,18 @@ struct sbp2_logical_unit {
 struct sbp2_target {
        struct kref kref;
        struct fw_unit *unit;
+       const char *bus_id;
+       struct list_head lu_list;
 
        u64 management_agent_address;
        int directory_id;
        int node_id;
        int address_high;
-
-       unsigned workarounds;
-       struct list_head lu_list;
-
+       unsigned int workarounds;
        unsigned int mgt_orb_timeout;
+
+       int dont_block; /* counter for each logical unit */
+       int blocked;    /* ditto */
 };
 
 /*
@@ -160,7 +170,7 @@ struct sbp2_target {
  */
 #define SBP2_MIN_LOGIN_ORB_TIMEOUT     5000U   /* Timeout in ms */
 #define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
-#define SBP2_ORB_TIMEOUT               2000    /* Timeout in ms */
+#define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
 #define SBP2_ORB_NULL                  0x80000000
 #define SBP2_MAX_SG_ELEMENT_LENGTH     0xf000
 
@@ -297,7 +307,7 @@ struct sbp2_command_orb {
 static const struct {
        u32 firmware_revision;
        u32 model;
-       unsigned workarounds;
+       unsigned int workarounds;
 } sbp2_workarounds_table[] = {
        /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
                .firmware_revision      = 0x002800,
@@ -305,6 +315,11 @@ static const struct {
                .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
                                          SBP2_WORKAROUND_MODE_SENSE_8,
        },
+       /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
+               .firmware_revision      = 0x002800,
+               .model                  = 0x000000,
+               .workarounds            = SBP2_WORKAROUND_DELAY_INQUIRY,
+       },
        /* Initio bridges, actually only needed for some older ones */ {
                .firmware_revision      = 0x000200,
                .model                  = ~0,
@@ -501,6 +516,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
        unsigned int timeout;
        int retval = -ENOMEM;
 
+       if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
+               return 0;
+
        orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
        if (orb == NULL)
                return -ENOMEM;
@@ -553,20 +571,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
 
        retval = -EIO;
        if (sbp2_cancel_orbs(lu) == 0) {
-               fw_error("orb reply timed out, rcode=0x%02x\n",
-                        orb->base.rcode);
+               fw_error("%s: orb reply timed out, rcode=0x%02x\n",
+                        lu->tgt->bus_id, orb->base.rcode);
                goto out;
        }
 
        if (orb->base.rcode != RCODE_COMPLETE) {
-               fw_error("management write failed, rcode 0x%02x\n",
-                        orb->base.rcode);
+               fw_error("%s: management write failed, rcode 0x%02x\n",
+                        lu->tgt->bus_id, orb->base.rcode);
                goto out;
        }
 
        if (STATUS_GET_RESPONSE(orb->status) != 0 ||
            STATUS_GET_SBP_STATUS(orb->status) != 0) {
-               fw_error("error status: %d:%d\n",
+               fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
                         STATUS_GET_RESPONSE(orb->status),
                         STATUS_GET_SBP_STATUS(orb->status));
                goto out;
@@ -590,29 +608,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
 
 static void
 complete_agent_reset_write(struct fw_card *card, int rcode,
-                          void *payload, size_t length, void *data)
+                          void *payload, size_t length, void *done)
 {
-       struct fw_transaction *t = data;
+       complete(done);
+}
 
-       kfree(t);
+static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+       DECLARE_COMPLETION_ONSTACK(done);
+       struct fw_transaction t;
+       static u32 z;
+
+       fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
+                       lu->tgt->node_id, lu->generation, device->max_speed,
+                       lu->command_block_agent_address + SBP2_AGENT_RESET,
+                       &z, sizeof(z), complete_agent_reset_write, &done);
+       wait_for_completion(&done);
+}
+
+static void
+complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
+                                  void *payload, size_t length, void *data)
+{
+       kfree(data);
 }
 
-static int sbp2_agent_reset(struct sbp2_logical_unit *lu)
+static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
 {
        struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
        struct fw_transaction *t;
-       static u32 zero;
+       static u32 z;
 
-       t = kzalloc(sizeof(*t), GFP_ATOMIC);
+       t = kmalloc(sizeof(*t), GFP_ATOMIC);
        if (t == NULL)
-               return -ENOMEM;
+               return;
 
        fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
                        lu->tgt->node_id, lu->generation, device->max_speed,
                        lu->command_block_agent_address + SBP2_AGENT_RESET,
-                       &zero, sizeof(zero), complete_agent_reset_write, t);
+                       &z, sizeof(z), complete_agent_reset_write_no_wait, t);
+}
 
-       return 0;
+static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
+{
+       struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card;
+       unsigned long flags;
+
+       /* serialize with comparisons of lu->generation and card->generation */
+       spin_lock_irqsave(&card->lock, flags);
+       lu->generation = generation;
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+{
+       /*
+        * We may access dont_block without taking card->lock here:
+        * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
+        * are currently serialized against each other.
+        * And a wrong result in sbp2_conditionally_block()'s access of
+        * dont_block is rather harmless, it simply misses its first chance.
+        * dont_block is rather harmless; it simply misses its first chance.
+       --lu->tgt->dont_block;
+}
+
+/*
+ * Blocks lu->tgt if all of the following conditions are met:
+ *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
+ *     logical units have been finished (indicated by dont_block == 0).
+ *   - lu->generation is stale.
+ *
+ * Note, scsi_block_requests() must be called while holding card->lock,
+ * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
+ * unblock the target.
+ */
+static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
+{
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       if (!tgt->dont_block && !lu->blocked &&
+           lu->generation != card->generation) {
+               lu->blocked = true;
+               if (++tgt->blocked == 1) {
+                       scsi_block_requests(shost);
+                       fw_notify("blocked %s\n", lu->tgt->bus_id);
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+ * Unblocks lu->tgt as soon as all its logical units can be unblocked.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section.  On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
+{
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+       bool unblock = false;
+
+       spin_lock_irqsave(&card->lock, flags);
+       if (lu->blocked && lu->generation == card->generation) {
+               lu->blocked = false;
+               unblock = --tgt->blocked == 0;
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (unblock) {
+               scsi_unblock_requests(shost);
+               fw_notify("unblocked %s\n", lu->tgt->bus_id);
+       }
+}
+
+/*
+ * Prevents future blocking of tgt and unblocks it.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section.  On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_unblock(struct sbp2_target *tgt)
+{
+       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       ++tgt->dont_block;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       scsi_unblock_requests(shost);
+}
+
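+/* Map a 16-bit SBP-2 LUN to the integer LUN representation used by the SCSI core */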
+static int sbp2_lun2int(u16 lun)
+{
+       struct scsi_lun eight_bytes_lun;
+
+       memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
+       eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
+       eight_bytes_lun.scsi_lun[1] = lun & 0xff;
+
+       return scsilun_to_int(&eight_bytes_lun);
 }
 
 static void sbp2_release_target(struct kref *kref)
@@ -621,26 +768,31 @@ static void sbp2_release_target(struct kref *kref)
        struct sbp2_logical_unit *lu, *next;
        struct Scsi_Host *shost =
                container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       struct scsi_device *sdev;
        struct fw_device *device = fw_device(tgt->unit->device.parent);
 
-       list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
-               if (lu->sdev)
-                       scsi_remove_device(lu->sdev);
+       /* prevent deadlocks */
+       sbp2_unblock(tgt);
 
-               if (!fw_device_is_shutdown(device))
-                       sbp2_send_management_orb(lu, tgt->node_id,
-                                       lu->generation, SBP2_LOGOUT_REQUEST,
-                                       lu->login_id, NULL);
+       list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+               sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+               if (sdev) {
+                       scsi_remove_device(sdev);
+                       scsi_device_put(sdev);
+               }
+               sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
+                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
 
                fw_core_remove_address_handler(&lu->address_handler);
                list_del(&lu->link);
                kfree(lu);
        }
        scsi_remove_host(shost);
-       fw_notify("released %s\n", tgt->unit->device.bus_id);
+       fw_notify("released %s\n", tgt->bus_id);
 
        put_device(&tgt->unit->device);
        scsi_host_put(shost);
+       fw_device_put(device);
 }
 
 static struct workqueue_struct *sbp2_wq;
@@ -666,33 +818,42 @@ static void sbp2_login(struct work_struct *work)
 {
        struct sbp2_logical_unit *lu =
                container_of(work, struct sbp2_logical_unit, work.work);
-       struct Scsi_Host *shost =
-               container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]);
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_device *device = fw_device(tgt->unit->device.parent);
+       struct Scsi_Host *shost;
        struct scsi_device *sdev;
-       struct scsi_lun eight_bytes_lun;
-       struct fw_unit *unit = lu->tgt->unit;
-       struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_login_response response;
        int generation, node_id, local_node_id;
 
+       if (fw_device_is_shutdown(device))
+               goto out;
+
        generation    = device->generation;
        smp_rmb();    /* node_id must not be older than generation */
        node_id       = device->node_id;
        local_node_id = device->card->node_id;
 
+       /* If this is a re-login attempt, log out, or we might be rejected. */
+       if (lu->has_sdev)
+               sbp2_send_management_orb(lu, device->node_id, generation,
+                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+
        if (sbp2_send_management_orb(lu, node_id, generation,
                                SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
-               if (lu->retries++ < 5)
+               if (lu->retries++ < 5) {
                        sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-               else
-                       fw_error("failed to login to %s LUN %04x\n",
-                                unit->device.bus_id, lu->lun);
+               } else {
+                       fw_error("%s: failed to login to LUN %04x\n",
+                                tgt->bus_id, lu->lun);
+                       /* Let any waiting I/O fail from now on. */
+                       sbp2_unblock(lu->tgt);
+               }
                goto out;
        }
 
-       lu->generation        = generation;
-       lu->tgt->node_id      = node_id;
-       lu->tgt->address_high = local_node_id << 16;
+       tgt->node_id      = node_id;
+       tgt->address_high = local_node_id << 16;
+       sbp2_set_generation(lu, generation);
 
        /* Get command block agent offset and login id. */
        lu->command_block_agent_address =
@@ -700,8 +861,8 @@ static void sbp2_login(struct work_struct *work)
                response.command_block_agent.low;
        lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
 
-       fw_notify("logged in to %s LUN %04x (%d retries)\n",
-                 unit->device.bus_id, lu->lun, lu->retries);
+       fw_notify("%s: logged in to LUN %04x (%d retries)\n",
+                 tgt->bus_id, lu->lun, lu->retries);
 
 #if 0
        /* FIXME: The linux1394 sbp2 does this last step. */
@@ -711,26 +872,58 @@ static void sbp2_login(struct work_struct *work)
        PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
        sbp2_agent_reset(lu);
 
-       memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
-       eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff;
-       eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff;
+       /* This was a re-login. */
+       if (lu->has_sdev) {
+               sbp2_cancel_orbs(lu);
+               sbp2_conditionally_unblock(lu);
+               goto out;
+       }
 
-       sdev = __scsi_add_device(shost, 0, 0,
-                                scsilun_to_int(&eight_bytes_lun), lu);
-       if (IS_ERR(sdev)) {
-               sbp2_send_management_orb(lu, node_id, generation,
-                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
-               /*
-                * Set this back to sbp2_login so we fall back and
-                * retry login on bus reset.
-                */
-               PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
-       } else {
-               lu->sdev = sdev;
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
+               ssleep(SBP2_INQUIRY_DELAY);
+
+       shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
+       /*
+        * FIXME:  We are unable to perform reconnects while in sbp2_login().
+        * Therefore __scsi_add_device() will get into trouble if a bus reset
+        * happens in parallel.  It will either fail or leave us with an
+        * unusable sdev.  As a workaround we check for this and retry the
+        * whole login and SCSI probing.
+        */
+
+       /* Reported error during __scsi_add_device() */
+       if (IS_ERR(sdev))
+               goto out_logout_login;
+
+       /* Unreported error during __scsi_add_device() */
+       smp_rmb(); /* get current card generation */
+       if (generation != device->card->generation) {
+               scsi_remove_device(sdev);
                scsi_device_put(sdev);
+               goto out_logout_login;
        }
+
+       /* No error during __scsi_add_device() */
+       lu->has_sdev = true;
+       scsi_device_put(sdev);
+       sbp2_allow_block(lu);
+       goto out;
+
+ out_logout_login:
+       smp_rmb(); /* generation may have changed */
+       generation = device->generation;
+       smp_rmb(); /* node_id must not be older than generation */
+
+       sbp2_send_management_orb(lu, device->node_id, generation,
+                                SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+       /*
+        * If a bus reset happened, sbp2_update will have requeued
+        * lu->work already.  Reset the work from reconnect to login.
+        */
+       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
  out:
-       sbp2_target_put(lu->tgt);
+       sbp2_target_put(tgt);
 }
 
 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -751,10 +944,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
                return -ENOMEM;
        }
 
-       lu->tgt  = tgt;
-       lu->sdev = NULL;
-       lu->lun  = lun_entry & 0xffff;
-       lu->retries = 0;
+       lu->tgt      = tgt;
+       lu->lun      = lun_entry & 0xffff;
+       lu->retries  = 0;
+       lu->has_sdev = false;
+       lu->blocked  = false;
+       ++tgt->dont_block;
        INIT_LIST_HEAD(&lu->orb_list);
        INIT_DELAYED_WORK(&lu->work, sbp2_login);
 
@@ -813,7 +1008,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
                        if (timeout > tgt->mgt_orb_timeout)
                                fw_notify("%s: config rom contains %ds "
                                          "management ORB timeout, limiting "
-                                         "to %ds\n", tgt->unit->device.bus_id,
+                                         "to %ds\n", tgt->bus_id,
                                          timeout / 1000,
                                          tgt->mgt_orb_timeout / 1000);
                        break;
@@ -836,12 +1031,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
                                  u32 firmware_revision)
 {
        int i;
-       unsigned w = sbp2_param_workarounds;
+       unsigned int w = sbp2_param_workarounds;
 
        if (w)
                fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
                          "if you need the workarounds parameter for %s\n",
-                         tgt->unit->device.bus_id);
+                         tgt->bus_id);
 
        if (w & SBP2_WORKAROUND_OVERRIDE)
                goto out;
@@ -863,8 +1058,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
        if (w)
                fw_notify("Workarounds for %s: 0x%x "
                          "(firmware_revision 0x%06x, model_id 0x%06x)\n",
-                         tgt->unit->device.bus_id,
-                         w, firmware_revision, model);
+                         tgt->bus_id, w, firmware_revision, model);
        tgt->workarounds = w;
 }
 
@@ -888,6 +1082,7 @@ static int sbp2_probe(struct device *dev)
        tgt->unit = unit;
        kref_init(&tgt->kref);
        INIT_LIST_HEAD(&tgt->lu_list);
+       tgt->bus_id = unit->device.bus_id;
 
        if (fw_device_enable_phys_dma(device) < 0)
                goto fail_shost_put;
@@ -895,6 +1090,8 @@ static int sbp2_probe(struct device *dev)
        if (scsi_add_host(shost, &unit->device) < 0)
                goto fail_shost_put;
 
+       fw_device_get(device);
+
        /* Initialize to values that won't match anything in our table. */
        firmware_revision = 0xff000000;
        model = 0xff000000;
@@ -938,10 +1135,13 @@ static void sbp2_reconnect(struct work_struct *work)
 {
        struct sbp2_logical_unit *lu =
                container_of(work, struct sbp2_logical_unit, work.work);
-       struct fw_unit *unit = lu->tgt->unit;
-       struct fw_device *device = fw_device(unit->device.parent);
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_device *device = fw_device(tgt->unit->device.parent);
        int generation, node_id, local_node_id;
 
+       if (fw_device_is_shutdown(device))
+               goto out;
+
        generation    = device->generation;
        smp_rmb();    /* node_id must not be older than generation */
        node_id       = device->node_id;
@@ -950,10 +1150,17 @@ static void sbp2_reconnect(struct work_struct *work)
        if (sbp2_send_management_orb(lu, node_id, generation,
                                     SBP2_RECONNECT_REQUEST,
                                     lu->login_id, NULL) < 0) {
-               if (lu->retries++ >= 5) {
-                       fw_error("failed to reconnect to %s\n",
-                                unit->device.bus_id);
-                       /* Fall back and try to log in again. */
+               /*
+                * If reconnect was impossible even though we are in the
+                * current generation, fall back and try to log in again.
+                *
+                * We could check for "Function rejected" status, but
+                * looking at the bus generation is simpler and more general.
+                */
+               smp_rmb(); /* get current card generation */
+               if (generation == device->card->generation ||
+                   lu->retries++ >= 5) {
+                       fw_error("%s: failed to reconnect\n", tgt->bus_id);
                        lu->retries = 0;
                        PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
                }
@@ -961,17 +1168,18 @@ static void sbp2_reconnect(struct work_struct *work)
                goto out;
        }
 
-       lu->generation        = generation;
-       lu->tgt->node_id      = node_id;
-       lu->tgt->address_high = local_node_id << 16;
+       tgt->node_id      = node_id;
+       tgt->address_high = local_node_id << 16;
+       sbp2_set_generation(lu, generation);
 
-       fw_notify("reconnected to %s LUN %04x (%d retries)\n",
-                 unit->device.bus_id, lu->lun, lu->retries);
+       fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+                 tgt->bus_id, lu->lun, lu->retries);
 
        sbp2_agent_reset(lu);
        sbp2_cancel_orbs(lu);
+       sbp2_conditionally_unblock(lu);
  out:
-       sbp2_target_put(lu->tgt);
+       sbp2_target_put(tgt);
 }
 
 static void sbp2_update(struct fw_unit *unit)
@@ -986,6 +1194,7 @@ static void sbp2_update(struct fw_unit *unit)
         * Iteration over tgt->lu_list is therefore safe here.
         */
        list_for_each_entry(lu, &tgt->lu_list, link) {
+               sbp2_conditionally_block(lu);
                lu->retries = 0;
                sbp2_queue_work(lu, 0);
        }
@@ -1063,7 +1272,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 
        if (status != NULL) {
                if (STATUS_GET_DEAD(*status))
-                       sbp2_agent_reset(orb->lu);
+                       sbp2_agent_reset_no_wait(orb->lu);
 
                switch (STATUS_GET_RESPONSE(*status)) {
                case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1089,6 +1298,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
                 * or when sending the write (less likely).
                 */
                result = DID_BUS_BUSY << 16;
+               sbp2_conditionally_block(orb->lu);
        }
 
        dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1197,7 +1407,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
        struct sbp2_logical_unit *lu = cmd->device->hostdata;
        struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
        struct sbp2_command_orb *orb;
-       unsigned max_payload;
+       unsigned int max_payload;
        int retval = SCSI_MLQUEUE_HOST_BUSY;
 
        /*
@@ -1275,6 +1485,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
 {
        struct sbp2_logical_unit *lu = sdev->hostdata;
 
+       /* (Re-)Adding logical units via the SCSI stack is not supported. */
+       if (!lu)
+               return -ENOSYS;
+
        sdev->allow_restart = 1;
 
        /*
@@ -1319,7 +1533,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
 {
        struct sbp2_logical_unit *lu = cmd->device->hostdata;
 
-       fw_notify("sbp2_scsi_abort\n");
+       fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
        sbp2_agent_reset(lu);
        sbp2_cancel_orbs(lu);
 
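The sbp2_login() and sbp2_reconnect() hunks above both lean on the same race check: snapshot the bus generation before the slow step, then re-read it afterwards; if it moved, a bus reset raced with the step, so the result is thrown away and the whole step is retried. A minimal userspace sketch of that pattern, with hypothetical names and a simulated reset, looks roughly like this:

#include <stdio.h>

struct card { int generation; };

static int slow_probe(struct card *c)
{
        static int reset_done;

        if (!reset_done) {              /* simulate one bus reset racing us */
                reset_done = 1;
                c->generation++;
        }
        return 0;                       /* the probe itself reports success */
}

static int login_and_probe(struct card *c)
{
        int retries;

        for (retries = 0; retries < 5; retries++) {
                int generation = c->generation; /* snapshot before the work */

                if (slow_probe(c) < 0)
                        continue;       /* reported error: retry */

                if (generation != c->generation)
                        continue;       /* unreported error: a reset raced, retry */

                return 0;               /* generation stable, result is usable */
        }
        return -1;
}

int main(void)
{
        struct card c = { .generation = 1 };

        printf("login %s\n", login_and_probe(&c) ? "failed" : "ok");
        return 0;
}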
index 172c1867e9aa358c19cbb47afa85478a448ef510..e47bb040197afb229f8c1078bca1741c18e6c7fc 100644 (file)
@@ -383,6 +383,7 @@ void fw_destroy_nodes(struct fw_card *card)
        card->color++;
        if (card->local_node != NULL)
                for_each_fw_node(card, card->local_node, report_lost_node);
+       card->local_node = NULL;
        spin_unlock_irqrestore(&card->lock, flags);
 }
 
index fa7967b57408586359f70ae5bc610f39d7a55223..09cb72870454ea72ceee81f5139c3460665d9ae8 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/dma-mapping.h>
 #include <linux/firewire-constants.h>
+#include <asm/atomic.h>
 
 #define TCODE_IS_READ_REQUEST(tcode)   (((tcode) & ~1) == 4)
 #define TCODE_IS_BLOCK_PACKET(tcode)   (((tcode) &  1) != 0)
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type;
 struct fw_card {
        const struct fw_card_driver *driver;
        struct device *device;
+       atomic_t device_count;
        struct kref kref;
 
        int node_id;
index 310e497b58380d5d348c29d9d890204e1cf47edf..c8d0e8715997475abe51e683ee5b5aff2f44109c 100644 (file)
@@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
  * and attempt to recover if there are problems.  Returns  0 if everything's
  * ok; nonzero if the request has been terminated.
  */
-static
-int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw)
+static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
+                               int len, int ireason, int rw)
 {
        /*
         * ireason == 0: the drive wants to receive data from us
@@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw)
                                drive->name, __FUNCTION__, ireason);
        }
 
+       if (rq->cmd_type == REQ_TYPE_ATA_PC)
+               rq->cmd_flags |= REQ_FAILED;
+
        cdrom_end_request(drive, 0);
        return -1;
 }
@@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        /*
         * check which way to transfer data
         */
-       if (blk_fs_request(rq) || blk_pc_request(rq)) {
-               if (ide_cd_check_ireason(drive, len, ireason, write))
-                       return ide_stopped;
+       if (ide_cd_check_ireason(drive, rq, len, ireason, write))
+               return ide_stopped;
 
-               if (blk_fs_request(rq) && write == 0) {
+       if (blk_fs_request(rq)) {
+               if (write == 0) {
                        int nskip;
 
                        if (ide_cd_check_transfer_size(drive, len)) {
@@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        if (ireason == 0) {
                write = 1;
                xferfunc = HWIF(drive)->atapi_output_bytes;
-       } else if (ireason == 2 || (ireason == 1 &&
-                  (blk_fs_request(rq) || blk_pc_request(rq)))) {
+       } else {
                write = 0;
                xferfunc = HWIF(drive)->atapi_input_bytes;
-       } else {
-               printk(KERN_ERR "%s: %s: The drive "
-                               "appears confused (ireason = 0x%02x). "
-                               "Trying to recover by ending request.\n",
-                               drive->name, __FUNCTION__, ireason);
-               goto end_request;
        }
 
        /*
@@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                        else
                                rq->data += blen;
                }
+               if (!write && blk_sense_request(rq))
+                       rq->sense_len += blen;
        }
 
-       if (write && blk_sense_request(rq))
-               rq->sense_len += thislen;
-
        /*
         * pad, if necessary
         */
@@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = {
        { "MATSHITADVD-ROM SR-8186", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
        { "MATSHITADVD-ROM SR-8176", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
        { "MATSHITADVD-ROM SR-8174", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
+       { "Optiarc DVD RW AD-5200A", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
        { NULL, NULL, 0 }
 };
 
index 8f5bed471050d955713f5d5230aa7327ad993dec..39501d130256109ad50d06a6b904249e9163f08a 100644 (file)
@@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive)
 
        /* Only print cache size when it was specified */
        if (id->buf_size)
-               printk (" w/%dKiB Cache", id->buf_size/2);
+               printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2);
 
        printk(KERN_CONT ", CHS=%d/%d/%d\n",
                         drive->bios_cyl, drive->bios_head, drive->bios_sect);
@@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive)
                return;
        }
 
-       printk("Shutdown: %s\n", drive->name);
+       printk(KERN_INFO "Shutdown: %s\n", drive->name);
+
        drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
 }
 
index d0e7b537353e38ea1bb23ff6f0604f0d93777752..2de99e4be5c9b963ebfbf263884d6f55f1d82d56 100644 (file)
@@ -1,9 +1,13 @@
 /*
+ *  IDE DMA support (including IDE PCI BM-DMA).
+ *
  *  Copyright (C) 1995-1998   Mark Lord
  *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
  *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
  *
  *  May be copied or modified under the terms of the GNU General Public License
+ *
+ *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
  */
 
 /*
  */
 
 /*
- * This module provides support for the bus-master IDE DMA functions
- * of various PCI chipsets, including the Intel PIIX (i82371FB for
- * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and 
- * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset)
- * ("PIIX" stands for "PCI ISA IDE Xcellerator").
- *
- * Pretty much the same code works for other IDE PCI bus-mastering chipsets.
- *
- * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
- *
- * By default, DMA support is prepared for use, but is currently enabled only
- * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single),
- * or which are recognized as "good" (see table below).  Drives with only mode0
- * or mode1 (multi/single) DMA should also work with this chipset/driver
- * (eg. MC2112A) but are not enabled by default.
- *
- * Use "hdparm -i" to view modes supported by a given drive.
- *
- * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling
- * DMA support, but must be (re-)compiled against this kernel version or later.
- *
- * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
- * If problems arise, ide.c will disable DMA operation after a few retries.
- * This error recovery mechanism works and has been extremely well exercised.
- *
- * IDE drives, depending on their vintage, may support several different modes
- * of DMA operation.  The boot-time modes are indicated with a "*" in
- * the "hdparm -i" listing, and can be changed with *knowledgeable* use of
- * the "hdparm -X" feature.  There is seldom a need to do this, as drives
- * normally power-up with their "best" PIO/DMA modes enabled.
- *
- * Testing has been done with a rather extensive number of drives,
- * with Quantum & Western Digital models generally outperforming the pack,
- * and Fujitsu & Conner (and some Seagate which are really Conner) drives
- * showing more lackluster throughput.
- *
- * Keep an eye on /var/adm/messages for "DMA disabled" messages.
- *
- * Some people have reported trouble with Intel Zappa motherboards.
- * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
- * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
- * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
- *
  * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
  * fixing the problem with the BIOS on some Acer motherboards.
  *
  *
  * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
  * for supplying a Promise UDMA board & WD UDMA drive for this work!
- *
- * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports.
- *
- * ATA-66/100 and recovery functions, I forgot the rest......
- *
  */
 
 #include <linux/module.h>
index 4a2cb28682263d6a3c3a861490dfbe770d96c2d6..194ecb0049eb1ed37c3dde4c00d3a83273c1dc88 100644 (file)
@@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif)
 
        BUG_ON(hwif->present);
 
-       if (hwif->noprobe)
+       if (hwif->noprobe ||
+           (hwif->drives[0].noprobe && hwif->drives[1].noprobe))
                return -EACCES;
 
        /*
index 0598ecfd5f3706b0402182875f324474361bbf5b..43e0e05577763b4bcd166dc4d14128dd94dac9d2 100644 (file)
@@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive)
        g->fops = &idetape_block_ops;
        ide_register_region(g);
 
+       printk(KERN_WARNING "It is possible that this driver does not have any"
+               " users anymore and, as a result, it will be REMOVED soon."
+               " Please notify Bart <bzolnier@gmail.com> or Boris"
+               " <petkovbb@gmail.com> in case you still need it.\n");
+
        return 0;
 
 out_free_tape:
index 477833f0daf501cbb1a67150e6523818a1fc38f6..fa16bc30bbc985efe4efc4ad9c91d93b3d7b0ed7 100644 (file)
@@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore)
                hwif->extra_ports = 0;
        }
 
-       /*
-        * Note that we only release the standard ports,
-        * and do not even try to handle any extra ports
-        * allocated for weird IDE interface chipsets.
-        */
        ide_hwif_release_regions(hwif);
 
        /* copy original settings */
@@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
                        drive->nice1 = (arg >> IDE_NICE_1) & 1;
                        return 0;
                case HDIO_DRIVE_RESET:
-               {
-                       unsigned long flags;
-                       if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-                       
+                       if (!capable(CAP_SYS_ADMIN))
+                               return -EACCES;
+
                        /*
                         *      Abort the current command on the
                         *      group if there is one, taking
@@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
                        ide_abort(drive, "drive reset");
 
                        BUG_ON(HWGROUP(drive)->handler);
-                               
+
                        /* Ensure nothing gets queued after we
                           drop the lock. Reset will clear the busy */
-                  
+
                        HWGROUP(drive)->busy = 1;
                        spin_unlock_irqrestore(&ide_lock, flags);
                        (void) ide_do_reset(drive);
 
                        return 0;
-               }
-
                case HDIO_GET_BUSSTATE:
                        if (!capable(CAP_SYS_ADMIN))
                                return -EACCES;
@@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s)
 
                        case -1: /* "noprobe" */
                                hwif->noprobe = 1;
-                               goto done;
+                               goto obsolete_option;
 
                        case 1: /* base */
                                vals[1] = vals[0] + 0x206; /* default ctl */
index bba29df5f21d6094660c777724c38ef9d2de454b..2f4f47ad602f61ca8ec3614d484dcc5939a46c05 100644 (file)
@@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
        hwif->drives[1].drive_data = t2;
 }
 
-/*
- * qd_unsetup:
- *
- * called to unsetup an ata channel : back to default values, unlinks tuning
- */
-/*
-static void __exit qd_unsetup(ide_hwif_t *hwif)
-{
-       u8 config = hwif->config_data;
-       int base = hwif->select_data;
-       void *set_pio_mode = (void *)hwif->set_pio_mode;
-
-       if (hwif->chipset != ide_qd65xx)
-               return;
-
-       printk(KERN_NOTICE "%s: back to defaults\n", hwif->name);
-
-       hwif->selectproc = NULL;
-       hwif->set_pio_mode = NULL;
-
-       if (set_pio_mode == (void *)qd6500_set_pio_mode) {
-               // will do it for both
-               outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0]));
-       } else if (set_pio_mode == (void *)qd6580_set_pio_mode) {
-               if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) {
-                       outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0]));
-                       outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1]));
-               } else {
-                       outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0]));
-               }
-       } else {
-               printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n");
-               printk(KERN_WARNING "keeping settings !\n");
-       }
-}
-*/
-
 static const struct ide_port_info qd65xx_port_info __initdata = {
        .chipset                = ide_qd65xx,
        .host_flags             = IDE_HFLAG_IO_32BIT |
@@ -444,6 +407,8 @@ static int __init qd_probe(int base)
                printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n",
                        config, control, QD_ID3);
 
+               outb(QD_DEF_CONTR, QD_CONTROL_PORT);
+
                if (control & QD_CONTR_SEC_DISABLED) {
                        /* secondary disabled */
 
@@ -460,8 +425,6 @@ static int __init qd_probe(int base)
 
                        ide_device_add(idx, &qd65xx_port_info);
 
-                       outb(QD_DEF_CONTR, QD_CONTROL_PORT);
-
                        return 1;
                } else {
                        ide_hwif_t *mate;
@@ -487,8 +450,6 @@ static int __init qd_probe(int base)
 
                        ide_device_add(idx, &qd65xx_port_info);
 
-                       outb(QD_DEF_CONTR, QD_CONTROL_PORT);
-
                        return 0; /* no other qd65xx possible */
                }
        }
index bd24dad3cfc6b2a82c990120ff7e10ac567599ab..ec667982809c18ae8ddf59bc5023190134cb33c1 100644 (file)
@@ -787,7 +787,8 @@ static int __init cmd640x_init(void)
        /*
         * Try to enable the secondary interface, if not already enabled
         */
-       if (cmd_hwif1->noprobe) {
+       if (cmd_hwif1->noprobe ||
+           (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) {
                port2 = "not probed";
        } else {
                b = get_cmd640_reg(CNTRL);
index d0f7bb8b8adf1d881fe4d2f7c5f86106dc4b63d1..6357bb6269ab42bfc14623006e0fef089484ae08 100644 (file)
@@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
                if (rev < 3)
                        info = &hpt36x;
                else {
-                       static const struct hpt_info *hpt37x_info[] =
-                               { &hpt370, &hpt370a, &hpt372, &hpt372n };
-
-                       info = hpt37x_info[min_t(u8, rev, 6) - 3];
+                       switch (min_t(u8, rev, 6)) {
+                       case 3: info = &hpt370;  break;
+                       case 4: info = &hpt370a; break;
+                       case 5: info = &hpt372;  break;
+                       case 6: info = &hpt372n; break;
+                       }
                        idx++;
                }
                break;
@@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
        return ide_setup_pci_device(dev, &d);
 }
 
-static const struct pci_device_id hpt366_pci_tbl[] = {
+static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = {
        { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366),  0 },
        { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372),  1 },
        { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302),  2 },
index 28e155a9e2a5a6bc8e424e2e1dc235f94fac2c26..9e2b1964d71add2103a735164cc7560de5ca3b38 100644 (file)
@@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
  *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
  *   Don't use this with devices which don't have this bug.
  *
+ * - delay inquiry
+ *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
+ *
  * - override internal blacklist
  *   Instead of adding to the built-in blacklist, use only the workarounds
  *   specified in the module load parameter.
@@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
        ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
        ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
        ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+       ", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
        ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
        ", or a combination)");
 
@@ -357,6 +361,11 @@ static const struct {
                .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
                                          SBP2_WORKAROUND_MODE_SENSE_8,
        },
+       /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
+               .firmware_revision      = 0x002800,
+               .model_id               = 0x000000,
+               .workarounds            = SBP2_WORKAROUND_DELAY_INQUIRY,
+       },
        /* Initio bridges, actually only needed for some older ones */ {
                .firmware_revision      = 0x000200,
                .model_id               = SBP2_ROM_VALUE_WILDCARD,
@@ -914,6 +923,9 @@ static int sbp2_start_device(struct sbp2_lu *lu)
        sbp2_agent_reset(lu, 1);
        sbp2_max_speed_and_size(lu);
 
+       if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
+               ssleep(SBP2_INQUIRY_DELAY);
+
        error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
        if (error) {
                SBP2_ERR("scsi_add_device failed");
@@ -1962,6 +1974,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
 {
        struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
 
+       if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0)
+               return -ENODEV;
+
        lu->sdev = sdev;
        sdev->allow_restart = 1;
 
index d2ecb0d8a1bba4e577eef7c03dbcbb8df5ee0eee..80d8e097b0651487a17d8b6e1fd5105ce79706aa 100644 (file)
@@ -343,6 +343,8 @@ enum sbp2lu_state_types {
 #define SBP2_WORKAROUND_INQUIRY_36     0x2
 #define SBP2_WORKAROUND_MODE_SENSE_8   0x4
 #define SBP2_WORKAROUND_FIX_CAPACITY   0x8
+#define SBP2_WORKAROUND_DELAY_INQUIRY  0x10
+#define SBP2_INQUIRY_DELAY             12
 #define SBP2_WORKAROUND_OVERRIDE       0x100
 
 #endif /* SBP2_H */
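The new DViCO entry and the delay-inquiry flag above extend the driver's quirk table: each entry names a firmware_revision and model_id (with a wildcard sentinel) plus the workaround bits to apply. A rough, self-contained sketch of that kind of lookup, using hypothetical names rather than the driver's exact matching rules:

#include <stdio.h>

#define ROM_VALUE_WILDCARD       ~0u
#define WORKAROUND_INQUIRY_36    0x2
#define WORKAROUND_DELAY_INQUIRY 0x10

struct quirk {
        unsigned int firmware_revision;
        unsigned int model_id;
        unsigned int workarounds;
};

static const struct quirk quirks[] = {
        /* bridge needing a settle delay before the first INQUIRY */
        { 0x002800, 0x000000,           WORKAROUND_DELAY_INQUIRY },
        /* older bridge family, any model */
        { 0x000200, ROM_VALUE_WILDCARD, WORKAROUND_INQUIRY_36 },
};

static unsigned int lookup_workarounds(unsigned int fw, unsigned int model)
{
        unsigned int i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                if (quirks[i].firmware_revision != ROM_VALUE_WILDCARD &&
                    quirks[i].firmware_revision != fw)
                        continue;
                if (quirks[i].model_id != ROM_VALUE_WILDCARD &&
                    quirks[i].model_id != model)
                        continue;
                return quirks[i].workarounds;
        }
        return 0;
}

int main(void)
{
        printf("workarounds: 0x%x\n", lookup_workarounds(0x002800, 0x000000));
        return 0;
}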
index 73bfd1656f86434bae2c36215a6d073607a07a7c..b8797c66676d6837b265f816e55d923d450047be 100644 (file)
@@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
 
        /* Find largest page shift we can use to cover buffers */
        for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
-               if (num_phys_buf > 1) {
-                       if ((1ULL << *shift) & mask)
-                               break;
-               } else
-                       if (1ULL << *shift >=
-                           buffer_list[0].size +
-                           (buffer_list[0].addr & ((1ULL << *shift) - 1)))
-                               break;
+               if ((1ULL << *shift) & mask)
+                       break;
 
        buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
        buffer_list[0].addr &= ~0ull << *shift;
index 7f8853b44ee173579d2af89d54c09acfa5117ff9..b2112f5a422fbd8bcf6d993f707a0b8b604ad5e1 100644 (file)
@@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 
        /* Init the adapter */
        nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
-       nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
        if (!nesdev->nesadapter) {
                printk(KERN_ERR PFX "Unable to initialize adapter.\n");
                ret = -ENOMEM;
                goto bail5;
        }
+       nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
 
        /* nesdev->base_doorbell_index =
                        nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
index fd57e8a1582f210383044ab7748eabbccdcf662d..a48b288618ece5d569644915455e51251e4d5e97 100644 (file)
@@ -285,6 +285,21 @@ struct nes_device {
 };
 
 
+static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad)
+{
+       u32 crc_value;
+       crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
+
+       /*
+        * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc
+        * state in cpu order"), behavior of crc32c changes on
+        * big-endian platforms.  Our algorithm expects the previous
+        * behavior; otherwise we have RDMA connection establishment
+        * issue on big-endian.
+        */
+       return cpu_to_le32(crc_value);
+}
+
 static inline void
 set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
 {
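get_crc_value() above pins the byte order of the crc32c() result: since commit ef19454b the kernel keeps the intermediate CRC state in CPU order, so the raw return value changed on big-endian hosts, and wrapping it in cpu_to_le32() makes the bytes fed into the connection hash independent of host endianness again, matching the behaviour the algorithm was built around. A rough userspace illustration, with a plain software CRC-32C and htole32() standing in for the kernel helpers, could be:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>                     /* htole32(), glibc */

/* bitwise CRC-32C, reflected polynomial 0x82F63B78, no final inversion */
static uint32_t crc32c_sw(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;
        int k;

        while (len--) {
                crc ^= *p++;
                for (k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return crc;
}

int main(void)
{
        const char quad[] = "192.168.0.1:1234->192.168.0.2:5678";
        uint32_t crc = crc32c_sw(~0u, quad, strlen(quad));
        uint32_t key = htole32(crc);    /* byte pattern no longer host-dependent */

        printf("crc=%08" PRIx32 " key(le)=%08" PRIx32 "\n", crc, key);
        return 0;
}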
index bd5cfeaac203df2da0f09ea63ff2fc5e5c9b0c0f..39adb267fb1553fb45d96104b361eb6442ba8658 100644 (file)
@@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
        int ret = 0;
        u32 was_timer_set;
 
+       if (!cm_node)
+               return -EINVAL;
        new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
        if (!new_send)
                return -1;
-       if (!cm_node)
-               return -EINVAL;
 
        /* new_send->timetosend = currenttime */
        new_send->retrycount = NES_DEFAULT_RETRYS;
@@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
                nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
 
                kfree(listener);
+               listener = NULL;
                ret = 0;
                cm_listens_destroyed++;
        } else {
@@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct iw_cm_event cm_event;
        struct nes_hw_qp_wqe *wqe;
        struct nes_v4_quad nes_quad;
+       u32 crc_value;
        int ret;
 
        ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
@@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        nes_quad.TcpPorts[1]   = cm_id->local_addr.sin_port;
 
        /* Produce hash key */
-       nesqp->hte_index = cpu_to_be32(
-                       crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
+       crc_value = get_crc_value(&nes_quad);
+       nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
        nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
                        nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
 
@@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event)
        struct iw_cm_event cm_event;
        struct nes_hw_qp_wqe *wqe;
        struct nes_v4_quad nes_quad;
+       u32 crc_value;
        int ret;
 
        /* get all our handles */
@@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event)
        nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
 
        /* Produce hash key */
-       nesqp->hte_index = cpu_to_be32(
-                       crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
+       crc_value = get_crc_value(&nes_quad);
+       nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
        nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
                        nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
 
index 7c4c0fbf0abd3ff9322a07888afe5cb00d4c34d2..49e53e4c1ebef475bf5337bb346472c98a8ec72f 100644 (file)
@@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev)
 
        spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
 
-       if (shared_timer->cq_count_old < cq_count) {
-               if (cq_count > shared_timer->threshold_low)
-                       shared_timer->cq_direction_downward=0;
-       }
-       if (shared_timer->cq_count_old >= cq_count)
+       if (shared_timer->cq_count_old <= cq_count)
+               shared_timer->cq_direction_downward = 0;
+       else
                shared_timer->cq_direction_downward++;
        shared_timer->cq_count_old = cq_count;
        if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) {
-               if (cq_count <= shared_timer->threshold_low) {
+               if (cq_count <= shared_timer->threshold_low &&
+                   shared_timer->threshold_low > 4) {
                        shared_timer->threshold_low = shared_timer->threshold_low/2;
                        shared_timer->cq_direction_downward=0;
                        nesdev->currcq_count = 0;
@@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev)
                        nesdev->int_req &= ~NES_INT_TIMER;
                        nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
                        nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
-                       nesadapter->tune_timer.timer_in_use_old = 0;
                }
                nesdev->deepcq_count = 0;
                return 1;
@@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param)
                                        nesdev->int_req &= ~NES_INT_TIMER;
                                        nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
                                        nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
-                                       nesdev->nesadapter->tune_timer.timer_in_use_old = 0;
                                } else {
                                        nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
                                }
index 1e10df550c9ea49e3a2f04414a61bbb941f97ac2..b7e2844f096b620489c0b338e65e400937c16c4a 100644 (file)
@@ -962,7 +962,7 @@ struct nes_arp_entry {
 #define DEFAULT_JUMBO_NES_QL_LOW    12
 #define DEFAULT_JUMBO_NES_QL_TARGET 40
 #define DEFAULT_JUMBO_NES_QL_HIGH   128
-#define NES_NIC_CQ_DOWNWARD_TREND   8
+#define NES_NIC_CQ_DOWNWARD_TREND   16
 
 struct nes_hw_tune_timer {
     //u16 cq_count;
index 4dafbe16e82a8366aed841269c3d54535c091cfc..a651e9d9f0efdba6e38612b738f9455ccf2a1bcf 100644 (file)
@@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
                                NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
                nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",
                                nespd->mmap_db_index, nespd->pd_id);
-               if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) {
+               if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
                        nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
                        nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
                        kfree(nespd);
@@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                                                                  (long long unsigned int)req.user_wqe_buffers);
                                                        nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
                                                        kfree(nesqp->allocated_buffer);
-                                                       return ERR_PTR(-ENOMEM);
+                                                       return ERR_PTR(-EFAULT);
                                                }
                                        }
 
@@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                }
                nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
                                (unsigned long)req.user_cq_buffer, entries);
+               err = 1;
                list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) {
                        if (nespbl->user_base == (unsigned long )req.user_cq_buffer) {
                                list_del(&nespbl->list);
@@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                if (err) {
                        nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
                        kfree(nescq);
-                       return ERR_PTR(err);
+                       return ERR_PTR(-EFAULT);
                }
 
                pbl_entries = nespbl->pbl_size >> 3;
@@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                                spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
                        }
                }
-               nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X,"
-                               " minor code = 0x%04X\n",
-                               nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code);
                if (!context)
                        pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
                                        nescq->hw_cq.cq_pbase);
index 8b10d9f23bef29c7fd3acb8c3cd56566447ee96f..c5263d63aca3a2cc56914f7651f44d56bf8c8a32 100644 (file)
@@ -42,14 +42,14 @@ config INPUT_M68K_BEEP
 
 config INPUT_APANEL
        tristate "Fujitsu Lifebook Application Panel buttons"
-       depends on X86
-       select I2C_I801
+       depends on X86 && I2C && LEDS_CLASS
        select INPUT_POLLDEV
        select CHECK_SIGNATURE
        help
         Say Y here for support of the Application Panel buttons, used on
         Fujitsu Lifebook. These are attached to the mainboard through
-        an SMBus interface managed by the I2C Intel ICH (i801) driver.
+        an SMBus interface managed by the I2C Intel ICH (i801) driver,
+        which you should also build for this kernel.
 
         To compile this driver as a module, choose M here: the module will
         be called apanel.
index 7993e01f9fc5cd08e22e0caac469e7208b2e3930..76043dedba5b442bc0e66e70a59bb1772a63400d 100644 (file)
@@ -723,23 +723,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter)
        if (!request_region(adapter->io, 32, "fcpcipnp"))
                goto err;
 
-       switch (adapter->type) {
-       case AVM_FRITZ_PCIV2:
-               retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED,
-                                    "fcpcipnp", adapter);
-               break;
-       case AVM_FRITZ_PCI:
-               retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED,
-                                    "fcpcipnp", adapter);
-               break;
-       case AVM_FRITZ_PNP:
-               retval = request_irq(adapter->irq, fcpci_irq, 0,
-                                    "fcpcipnp", adapter);
-               break;
-       }
-       if (retval)
-               goto err_region;
-
        switch (adapter->type) {
        case AVM_FRITZ_PCIV2:
        case AVM_FRITZ_PCI:
@@ -794,6 +777,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter)
        outb(0, adapter->io + AVM_STATUS0);
        mdelay(10);
 
+       switch (adapter->type) {
+       case AVM_FRITZ_PCIV2:
+               retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED,
+                                    "fcpcipnp", adapter);
+               break;
+       case AVM_FRITZ_PCI:
+               retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED,
+                                    "fcpcipnp", adapter);
+               break;
+       case AVM_FRITZ_PNP:
+               retval = request_irq(adapter->irq, fcpci_irq, 0,
+                                    "fcpcipnp", adapter);
+               break;
+       }
+       if (retval)
+               goto err_region;
+
        switch (adapter->type) {
        case AVM_FRITZ_PCIV2:
                fcpci2_init(adapter);
index f93de4a303550afa4ea48b3ca6af7f7e4568a446..78f7660c1d0ea9c66153cfc1a1c1af9515d95580 100644 (file)
@@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info)
                        sprintf(rs, "\r\n0-2");
                        isdn_tty_at_cout(rs, info);
                } else {
-                       if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1))
+                       if ((f->phase != ISDN_FAX_PHASE_D) ||
+                           (!(info->faxonline & 1)))
                                PARSE_ERROR1;
                        par = isdn_getnum(p);
                        if ((par < 0) || (par > 2))
index 655ef9a3f4df2a023d319967e9aa99dda93e4dc8..a335c85a736e25a472c3d66157ef40a0313304da 100644 (file)
@@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
                                }
                                break;
                case ISDN_CMD_CLREAZ:
-                               if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+                               if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
                                        return -ENODEV;
                                if (card->leased)
                                        break;
@@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
                                }
                                break;
                case ISDN_CMD_SETL3:
-                               if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+                               if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
                                        return -ENODEV;
                                return 0;
                default:
@@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
        isdnloop_card *card = isdnloop_findcard(id);
 
        if (card) {
-               if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+               if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
                        return -ENODEV;
                return (isdnloop_writecmd(buf, len, 1, card));
        }
index 7aeceedcf7d46de93824a14ce9b93888350930dd..831aed9c56ffce68384365f95b432634ababa6f3 100644 (file)
@@ -1047,6 +1047,11 @@ void bitmap_daemon_work(struct bitmap *bitmap)
        if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
                return;
        bitmap->daemon_lastrun = jiffies;
+       if (bitmap->allclean) {
+               bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+               return;
+       }
+       bitmap->allclean = 1;
 
        for (j = 0; j < bitmap->chunks; j++) {
                bitmap_counter_t *bmc;
@@ -1068,8 +1073,10 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                                        clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
 
                                spin_unlock_irqrestore(&bitmap->lock, flags);
-                               if (need_write)
+                               if (need_write) {
                                        write_page(bitmap, page, 0);
+                                       bitmap->allclean = 0;
+                               }
                                continue;
                        }
 
@@ -1098,6 +1105,9 @@ void bitmap_daemon_work(struct bitmap *bitmap)
 /*
   if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
 */
+                       if (*bmc)
+                               bitmap->allclean = 0;
+
                        if (*bmc == 2) {
                                *bmc=1; /* maybe clear the bit next time */
                                set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
@@ -1132,6 +1142,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                }
        }
 
+       if (bitmap->allclean == 0)
+               bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
 }
 
 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1226,6 +1238,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
                        sectors -= blocks;
                else sectors = 0;
        }
+       bitmap->allclean = 0;
        return 0;
 }
 
@@ -1296,6 +1309,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
                }
        }
        spin_unlock_irq(&bitmap->lock);
+       bitmap->allclean = 0;
        return rv;
 }
 
@@ -1332,6 +1346,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab
        }
  unlock:
        spin_unlock_irqrestore(&bitmap->lock, flags);
+       bitmap->allclean = 0;
 }
 
 void bitmap_close_sync(struct bitmap *bitmap)
@@ -1399,7 +1414,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
                set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
        }
        spin_unlock_irq(&bitmap->lock);
-
+       bitmap->allclean = 0;
 }
 
 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
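The allclean flag added above lets the bitmap daemon park itself: each pass begins by assuming the bitmap is clean, any page write-out or non-zero counter clears that assumption, and once an entire pass stays clean the daemon stretches its wakeup to MAX_SCHEDULE_TIMEOUT; every path that dirties the bitmap clears allclean and re-arms the normal daemon_sleep timeout. A stripped-down, single-threaded sketch of the same idea, with hypothetical userspace names:

#include <stdbool.h>
#include <stdio.h>

#define CHUNKS 8

static bool dirty[CHUNKS];
static bool allclean;                   /* a whole pass found nothing to do */
static bool timer_armed = true;         /* periodic daemon wakeups enabled */

static void daemon_pass(void)
{
        int i;

        if (allclean) {                 /* nothing happened since last pass */
                timer_armed = false;    /* stop waking up until re-armed */
                return;
        }
        allclean = true;                /* assume clean; any work clears it */
        for (i = 0; i < CHUNKS; i++) {
                if (dirty[i]) {
                        dirty[i] = false;       /* "write the chunk back" */
                        allclean = false;       /* we did work, keep polling */
                }
        }
}

static void write_to_chunk(int i)
{
        dirty[i] = true;
        allclean = false;
        timer_armed = true;             /* re-arm the daemon */
}

int main(void)
{
        int tick;

        write_to_chunk(3);
        for (tick = 0; tick < 5 && timer_armed; tick++)
                daemon_pass();
        printf("daemon parked after the bitmap went clean: %s\n",
               timer_armed ? "no" : "yes");
        return 0;
}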
index 7da6ec244e15de75184478818b553a66ec59454f..827824a9f3e917de4ae1ce38fa6017efbb49602d 100644 (file)
@@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
-               rdev-> sb_size = (rdev->sb_size | bmask)+1;
+               rdev->sb_size = (rdev->sb_size | bmask) + 1;
+
+       if (minor_version
+           && rdev->data_offset < sb_offset + (rdev->sb_size/512))
+               return -EINVAL;
 
        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
                rdev->desc_nr = -1;
@@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
                else
                        ret = 0;
        }
-       if (minor_version) 
+       if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
                rdev->size = rdev->sb_offset;
@@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev)
        free_disk_sb(rdev);
        list_del_init(&rdev->same_set);
 #ifndef MODULE
-       md_autodetect_dev(rdev->bdev->bd_dev);
+       if (test_bit(AutoDetected, &rdev->flags))
+               md_autodetect_dev(rdev->bdev->bd_dev);
 #endif
        unlock_rdev(rdev);
        kobject_put(&rdev->kobj);
@@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
        char *e;
        unsigned long long size = simple_strtoull(buf, &e, 10);
        unsigned long long oldsize = rdev->size;
+       mddev_t *my_mddev = rdev->mddev;
+
        if (e==buf || (*e && *e != '\n'))
                return -EINVAL;
-       if (rdev->mddev->pers)
+       if (my_mddev->pers)
                return -EBUSY;
        rdev->size = size;
        if (size > oldsize && rdev->mddev->external) {
@@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                int overlap = 0;
                struct list_head *tmp, *tmp2;
 
-               mddev_unlock(rdev->mddev);
+               mddev_unlock(my_mddev);
                for_each_mddev(mddev, tmp) {
                        mdk_rdev_t *rdev2;
 
@@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                                break;
                        }
                }
-               mddev_lock(rdev->mddev);
+               mddev_lock(my_mddev);
                if (overlap) {
                        /* Someone else could have slipped in a size
                         * change here, but doing so is just silly.
@@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                        return -EBUSY;
                }
        }
-       if (size < rdev->mddev->size || rdev->mddev->size == 0)
-               rdev->mddev->size = size;
+       if (size < my_mddev->size || my_mddev->size == 0)
+               my_mddev->size = size;
        return len;
 }
 
@@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
        mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+       mddev_t *mddev = rdev->mddev;
+       ssize_t rv;
 
        if (!entry->show)
                return -EIO;
-       return entry->show(rdev, page);
+
+       rv = mddev ? mddev_lock(mddev) : -EBUSY;
+       if (!rv) {
+               if (rdev->mddev == NULL)
+                       rv = -EBUSY;
+               else
+                       rv = entry->show(rdev, page);
+               mddev_unlock(mddev);
+       }
+       return rv;
 }
 
 static ssize_t
@@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
 {
        struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
        mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
-       int rv;
+       ssize_t rv;
+       mddev_t *mddev = rdev->mddev;
 
        if (!entry->store)
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
-       rv = mddev_lock(rdev->mddev);
+       rv = mddev ? mddev_lock(mddev): -EBUSY;
        if (!rv) {
-               rv = entry->store(rdev, page, length);
+               if (rdev->mddev == NULL)
+                       rv = -EBUSY;
+               else
+                       rv = entry->store(rdev, page, length);
                mddev_unlock(rdev->mddev);
        }
        return rv;
@@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                mddev->ro = 0;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
+               md_wakeup_thread(mddev->sync_thread);
        }
        atomic_inc(&mddev->writes_pending);
        if (mddev->in_sync) {
@@ -6021,6 +6044,7 @@ static void autostart_arrays(int part)
                        MD_BUG();
                        continue;
                }
+               set_bit(AutoDetected, &rdev->flags);
                list_add(&rdev->same_set, &pending_raid_disks);
                i_passed++;
        }
index 5c7fef091cec800da3f3cb482f7e97f574cb43c6..ff61b309129aa8ffa9dbd00987a71c1b6eb35bf5 100644 (file)
@@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits)
 }
 
 
+static int flush_pending_writes(conf_t *conf)
+{
+       /* Any writes that have been queued but are awaiting
+        * bitmap updates get flushed here.
+        * We return 1 if any requests were actually submitted.
+        */
+       int rv = 0;
+
+       spin_lock_irq(&conf->device_lock);
+
+       if (conf->pending_bio_list.head) {
+               struct bio *bio;
+               bio = bio_list_get(&conf->pending_bio_list);
+               blk_remove_plug(conf->mddev->queue);
+               spin_unlock_irq(&conf->device_lock);
+               /* flush any pending bitmap writes to
+                * disk before proceeding w/ I/O */
+               bitmap_unplug(conf->mddev->bitmap);
+
+               while (bio) { /* submit pending writes */
+                       struct bio *next = bio->bi_next;
+                       bio->bi_next = NULL;
+                       generic_make_request(bio);
+                       bio = next;
+               }
+               rv = 1;
+       } else
+               spin_unlock_irq(&conf->device_lock);
+       return rv;
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf)
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
-        * wait until barrier+nr_pending match nr_queued+2
+        * wait until nr_pending matches nr_queued+1
+        * This is called in the context of one normal IO request
+        * that has failed. Thus any sync request that might be pending
+        * will be blocked by nr_pending, and we need to wait for
+        * pending IO requests to complete or be queued for re-try.
+        * Thus the number queued (nr_queued) plus this request (1)
+        * must match the number of pending IOs (nr_pending) before
+        * we continue.
         */
        spin_lock_irq(&conf->resync_lock);
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq(conf->wait_barrier,
-                           conf->barrier+conf->nr_pending == conf->nr_queued+2,
+                           conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           raid1_unplug(conf->mddev->queue));
+                           ({ flush_pending_writes(conf);
+                              raid1_unplug(conf->mddev->queue); }));
        spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
        blk_plug_device(mddev->queue);
        spin_unlock_irqrestore(&conf->device_lock, flags);
 
+       /* In case raid1d snuck into freeze_array */
+       wake_up(&conf->wait_barrier);
+
        if (do_sync)
                md_wakeup_thread(mddev->thread);
 #if 0
@@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev)
        
        for (;;) {
                char b[BDEVNAME_SIZE];
-               spin_lock_irqsave(&conf->device_lock, flags);
-
-               if (conf->pending_bio_list.head) {
-                       bio = bio_list_get(&conf->pending_bio_list);
-                       blk_remove_plug(mddev->queue);
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
-                       /* flush any pending bitmap writes to disk before proceeding w/ I/O */
-                       bitmap_unplug(mddev->bitmap);
 
-                       while (bio) { /* submit pending writes */
-                               struct bio *next = bio->bi_next;
-                               bio->bi_next = NULL;
-                               generic_make_request(bio);
-                               bio = next;
-                       }
-                       unplug = 1;
+               unplug += flush_pending_writes(conf);
 
-                       continue;
-               }
-
-               if (list_empty(head))
+               spin_lock_irqsave(&conf->device_lock, flags);
+               if (list_empty(head)) {
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
                        break;
+               }
                r1_bio = list_entry(head->prev, r1bio_t, retry_list);
                list_del(head->prev);
                conf->nr_queued--;
@@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev)
                        }
                }
        }
-       spin_unlock_irqrestore(&conf->device_lock, flags);
        if (unplug)
                unplug_slaves(mddev);
 }
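flush_pending_writes() above factors out a pattern now shared by raid1d() and the freeze_array() wait condition: detach the whole queued bio list while holding device_lock, drop the lock, flush the bitmap, then submit the detached requests outside the lock, reporting whether anything was submitted. The same shape in a self-contained userspace sketch (a pthread mutex standing in for the spinlock, hypothetical names, and a toy LIFO list instead of the kernel's ordered bio_list):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bio { int sector; struct bio *next; };

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bio *pending_head;

static void queue_write(int sector)
{
        struct bio *b = malloc(sizeof(*b));

        b->sector = sector;
        pthread_mutex_lock(&device_lock);
        b->next = pending_head;
        pending_head = b;
        pthread_mutex_unlock(&device_lock);
}

static int flush_pending_writes(void)
{
        struct bio *bio;
        int submitted = 0;

        pthread_mutex_lock(&device_lock);
        bio = pending_head;                     /* grab the whole list ... */
        pending_head = NULL;
        pthread_mutex_unlock(&device_lock);     /* ... and submit it unlocked */

        while (bio) {                           /* submit pending writes */
                struct bio *next = bio->next;

                printf("submit sector %d\n", bio->sector);
                free(bio);
                bio = next;
                submitted = 1;
        }
        return submitted;       /* 1 if any requests were actually submitted */
}

int main(void)
{
        queue_write(8);
        queue_write(16);
        return flush_pending_writes() ? 0 : 1;
}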
index 017f58113c33604e381a5fcc7c75f01344f67841..32389d2f18fcdfcadc87137936441e6d51560c05 100644 (file)
@@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
        current_distance = abs(r10_bio->devs[slot].addr -
                               conf->mirrors[disk].head_position);
 
-       /* Find the disk whose head is closest */
+       /* Find the disk whose head is closest,
+        * or - for far > 1 - find the closest to partition beginning */
 
        for (nslot = slot; nslot < conf->copies; nslot++) {
                int ndisk = r10_bio->devs[nslot].devnum;
@@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
                        slot = nslot;
                        break;
                }
-               new_distance = abs(r10_bio->devs[nslot].addr -
-                                  conf->mirrors[ndisk].head_position);
+
+               /* for far > 1 always use the lowest address */
+               if (conf->far_copies > 1)
+                       new_distance = r10_bio->devs[nslot].addr;
+               else
+                       new_distance = abs(r10_bio->devs[nslot].addr -
+                                          conf->mirrors[ndisk].head_position);
                if (new_distance < current_distance) {
                        current_distance = new_distance;
                        disk = ndisk;
@@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits)
        return ret;
 }
 
-
+static int flush_pending_writes(conf_t *conf)
+{
+       /* Any writes that have been queued but are awaiting
+        * bitmap updates get flushed here.
+        * We return 1 if any requests were actually submitted.
+        */
+       int rv = 0;
+
+       spin_lock_irq(&conf->device_lock);
+
+       if (conf->pending_bio_list.head) {
+               struct bio *bio;
+               bio = bio_list_get(&conf->pending_bio_list);
+               blk_remove_plug(conf->mddev->queue);
+               spin_unlock_irq(&conf->device_lock);
+               /* flush any pending bitmap writes to disk
+                * before proceeding w/ I/O */
+               bitmap_unplug(conf->mddev->bitmap);
+
+               while (bio) { /* submit pending writes */
+                       struct bio *next = bio->bi_next;
+                       bio->bi_next = NULL;
+                       generic_make_request(bio);
+                       bio = next;
+               }
+               rv = 1;
+       } else
+               spin_unlock_irq(&conf->device_lock);
+       return rv;
+}
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf)
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
-        * wait until barrier+nr_pending match nr_queued+2
+        * wait until nr_pending matches nr_queued+1
+        * This is called in the context of one normal IO request
+        * that has failed. Thus any sync request that might be pending
+        * will be blocked by nr_pending, and we need to wait for
+        * pending IO requests to complete or be queued for re-try.
+        * Thus the number queued (nr_queued) plus this request (1)
+        * must match the number of pending IOs (nr_pending) before
+        * we continue.
         */
        spin_lock_irq(&conf->resync_lock);
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq(conf->wait_barrier,
-                           conf->barrier+conf->nr_pending == conf->nr_queued+2,
+                           conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           raid10_unplug(conf->mddev->queue));
+                           ({ flush_pending_writes(conf);
+                              raid10_unplug(conf->mddev->queue); }));
        spin_unlock_irq(&conf->resync_lock);
 }
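
The reworked wait condition above is an accounting identity: every outstanding normal IO must either sit on the retry list or be the one failed request that triggered the freeze. A minimal illustration of just that predicate, not the md code:

#include <stdio.h>

/* illustration only: the condition freeze_array() now waits for */
static int array_quiesced(int nr_pending, int nr_queued)
{
        /* all pending normal IO is either queued for retry (nr_queued)
         * or is the single failed request behind the freeze (the "+1") */
        return nr_pending == nr_queued + 1;
}

int main(void)
{
        /* e.g. three requests parked for retry, four still pending:
         * the extra pending IO is the failing request itself */
        printf("%d\n", array_quiesced(4, 3));   /* prints 1 */
        return 0;
}
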
 
@@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
        blk_plug_device(mddev->queue);
        spin_unlock_irqrestore(&conf->device_lock, flags);
 
+       /* In case raid10d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
+
        if (do_sync)
                md_wakeup_thread(mddev->thread);
 
@@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev)
 
        for (;;) {
                char b[BDEVNAME_SIZE];
-               spin_lock_irqsave(&conf->device_lock, flags);
 
-               if (conf->pending_bio_list.head) {
-                       bio = bio_list_get(&conf->pending_bio_list);
-                       blk_remove_plug(mddev->queue);
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
-                       /* flush any pending bitmap writes to disk before proceeding w/ I/O */
-                       bitmap_unplug(mddev->bitmap);
-
-                       while (bio) { /* submit pending writes */
-                               struct bio *next = bio->bi_next;
-                               bio->bi_next = NULL;
-                               generic_make_request(bio);
-                               bio = next;
-                       }
-                       unplug = 1;
-
-                       continue;
-               }
+               unplug += flush_pending_writes(conf);
 
-               if (list_empty(head))
+               spin_lock_irqsave(&conf->device_lock, flags);
+               if (list_empty(head)) {
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
                        break;
+               }
                r10_bio = list_entry(head->prev, r10bio_t, retry_list);
                list_del(head->prev);
                conf->nr_queued--;
@@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev)
                        }
                }
        }
-       spin_unlock_irqrestore(&conf->device_lock, flags);
        if (unplug)
                unplug_slaves(mddev);
 }
@@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                                if (j == conf->copies) {
                                        /* Cannot recover, so abort the recovery */
                                        put_buf(r10_bio);
+                                       if (rb2)
+                                               atomic_dec(&rb2->remaining);
                                        r10_bio = rb2;
                                        if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery))
                                                printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
index 0c303c84b37bbcebf86b8f29f8c4167de6b3db37..6b6df8679585dfae3715f327683a8f677e17eae8 100644 (file)
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_event_register - Register protocol-specific event callback
- *     handler.
+ *     mpt_event_register - Register protocol-specific event callback handler.
  *     @cb_idx: previously registered (via mpt_register) callback handle
  *     @ev_cbfunc: callback function
  *
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_event_deregister - Deregister protocol-specific event callback
- *     handler.
+ *     mpt_event_deregister - Deregister protocol-specific event callback handler
  *     @cb_idx: previously registered callback handle
  *
  *     Each protocol-specific driver should call this routine
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024)
- *     allocated per MPT adapter.
+ *     mpt_get_msg_frame - Obtain an MPT request frame from the pool
  *     @cb_idx: Handle of registered MPT protocol driver
  *     @ioc: Pointer to MPT adapter structure
  *
+ *     Obtain an MPT request frame from the pool (of 1024) that are
+ *     allocated per MPT adapter.
+ *
  *     Returns pointer to a MPT request frame or %NULL if none are available
  *     or IOC is not active.
  */
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_put_msg_frame - Send a protocol specific MPT request frame
- *     to a IOC.
+ *     mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
  *     @cb_idx: Handle of registered MPT protocol driver
  *     @ioc: Pointer to MPT adapter structure
  *     @mf: Pointer to MPT request frame
  *
- *     This routine posts a MPT request frame to the request post FIFO of a
+ *     This routine posts an MPT request frame to the request post FIFO of a
  *     specific MPT adapter.
  */
 void
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 }
 
 /**
- *     mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame
- *     to a IOC using hi priority request queue.
+ *     mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
  *     @cb_idx: Handle of registered MPT protocol driver
  *     @ioc: Pointer to MPT adapter structure
  *     @mf: Pointer to MPT request frame
  *
- *     This routine posts a MPT request frame to the request post FIFO of a
+ *     Send a protocol-specific MPT request frame to an IOC using
+ *     hi-priority request queue.
+ *
+ *     This routine posts an MPT request frame to the request post FIFO of a
  *     specific MPT adapter.
  **/
 void
index af1de0ccee2f54c37c3567585e9e73db6f83bfe4..0c252f60c4c1ba2c5ee4e7012b21dc6dddf36129 100644 (file)
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
  *
  *     Remark: Currently invoked from a non-interrupt thread (_bh).
  *
- *     Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC
+ *     Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC
  *     will be active.
  *
  *     Returns 0 for SUCCESS, or %FAILED.
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
 
 /**
  * mptscsih_get_scsi_lookup
- *
- * retrieves scmd entry from ScsiLookup[] array list
- *
  * @ioc: Pointer to MPT_ADAPTER structure
  * @i: index into the array
  *
- * Returns the scsi_cmd pointer
+ * retrieves scmd entry from ScsiLookup[] array list
  *
+ * Returns the scsi_cmd pointer
  **/
 static struct scsi_cmnd *
 mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
 
 /**
  * mptscsih_getclear_scsi_lookup
- *
- * retrieves and clears scmd entry from ScsiLookup[] array list
- *
  * @ioc: Pointer to MPT_ADAPTER structure
  * @i: index into the array
  *
- * Returns the scsi_cmd pointer
+ * retrieves and clears scmd entry from ScsiLookup[] array list
  *
+ * Returns the scsi_cmd pointer
  **/
 static struct scsi_cmnd *
 mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
index afd82966f9a0782ad5624f45fcf4538e3755a73f..13bac53db69a1ffe58726720a461ee0bbac9edd0 100644 (file)
@@ -48,31 +48,13 @@ struct sm501_devdata {
        unsigned int                     pdev_id;
        unsigned int                     irq;
        void __iomem                    *regs;
+       unsigned int                     rev;
 };
 
 #define MHZ (1000 * 1000)
 
 #ifdef DEBUG
-static const unsigned int misc_div[] = {
-       [0]             = 1,
-       [1]             = 2,
-       [2]             = 4,
-       [3]             = 8,
-       [4]             = 16,
-       [5]             = 32,
-       [6]             = 64,
-       [7]             = 128,
-       [8]             = 3,
-       [9]             = 6,
-       [10]            = 12,
-       [11]            = 24,
-       [12]            = 48,
-       [13]            = 96,
-       [14]            = 192,
-       [15]            = 384,
-};
-
-static const unsigned int px_div[] = {
+static const unsigned int div_tab[] = {
        [0]             = 1,
        [1]             = 2,
        [2]             = 4,
@@ -101,12 +83,12 @@ static const unsigned int px_div[] = {
 
 static unsigned long decode_div(unsigned long pll2, unsigned long val,
                                unsigned int lshft, unsigned int selbit,
-                               unsigned long mask, const unsigned int *dtab)
+                               unsigned long mask)
 {
        if (val & selbit)
                pll2 = 288 * MHZ;
 
-       return pll2 / dtab[(val >> lshft) & mask];
+       return pll2 / div_tab[(val >> lshft) & mask];
 }
 
 #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x)
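
A small stand-alone illustration of the lookup decode_div() performs after the table consolidation above; the table holds the first sixteen divider entries, taken from the misc_div[] table removed in this hunk, and the sample register value is made up:

#include <stdio.h>

static const unsigned int div_tab[] = {
        1, 2, 4, 8, 16, 32, 64, 128, 3, 6, 12, 24, 48, 96, 192, 384,
};

int main(void)
{
        unsigned long pll2 = 336 * 1000 * 1000; /* assume PLL2 runs at 336 MHz */
        unsigned long val  = 0x0900;            /* made-up MISC timing value   */
        unsigned int  idx  = (val >> 8) & 0xf;  /* 4-bit divider select field  */

        /* the select bit is clear in this sample, so PLL2 stays the source */
        printf("clock = %lu Hz\n", pll2 / div_tab[idx]);  /* 336 MHz / 6 */
        return 0;
}
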
@@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm)
        }
 
        sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ;
-       sdclk0 /= misc_div[((misct >> 8) & 0xf)];
+       sdclk0 /= div_tab[((misct >> 8) & 0xf)];
 
        sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ;
-       sdclk1 /= misc_div[((misct >> 16) & 0xf)];
+       sdclk1 /= div_tab[((misct >> 16) & 0xf)];
 
        dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n",
                misct, pm0, pm1);
@@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm)
                 "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), "
                 "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n",
                 (pmc & 3 ) == 0 ? '*' : '-',
-                fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)),
-                fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)),
-                fmt_freq(decode_div(pll2, pm0, 8,  1<<12, 15, misc_div)),
-                fmt_freq(decode_div(pll2, pm0, 0,  1<<4,  15, misc_div)));
+                fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)),
+                fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)),
+                fmt_freq(decode_div(pll2, pm0, 8,  1<<12, 15)),
+                fmt_freq(decode_div(pll2, pm0, 0,  1<<4,  15)));
 
        dev_dbg(sm->dev, "PM1[%c]: "
                "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), "
                "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n",
                (pmc & 3 ) == 1 ? '*' : '-',
-               fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)),
-               fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)),
-               fmt_freq(decode_div(pll2, pm1, 8,  1<<12, 15, misc_div)),
-               fmt_freq(decode_div(pll2, pm1, 0,  1<<4,  15, misc_div)));
+               fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)),
+               fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)),
+               fmt_freq(decode_div(pll2, pm1, 8,  1<<12, 15)),
+               fmt_freq(decode_div(pll2, pm1, 0,  1<<4,  15)));
 }
 
 static void sm501_dump_regs(struct sm501_devdata *sm)
@@ -436,46 +418,108 @@ struct sm501_clock {
        unsigned long mclk;
        int divider;
        int shift;
+       unsigned int m, n, k;
 };
 
+/* sm501_calc_clock
+ *
+ * Calculates the nearest discrete clock frequency that
+ * can be achieved with the specified input clock.
+ *   the maximum divisor is 3 or 5
+ */
+
+static int sm501_calc_clock(unsigned long freq,
+                           struct sm501_clock *clock,
+                           int max_div,
+                           unsigned long mclk,
+                           long *best_diff)
+{
+       int ret = 0;
+       int divider;
+       int shift;
+       long diff;
+
+       /* try dividers 1 and 3 for CRT and for panel,
+          try divider 5 for panel only.*/
+
+       for (divider = 1; divider <= max_div; divider += 2) {
+               /* try all 8 shift values.*/
+               for (shift = 0; shift < 8; shift++) {
+                       /* Calculate difference to requested clock */
+                       diff = sm501fb_round_div(mclk, divider << shift) - freq;
+                       if (diff < 0)
+                               diff = -diff;
+
+                       /* If it is less than the current, use it */
+                       if (diff < *best_diff) {
+                               *best_diff = diff;
+
+                               clock->mclk = mclk;
+                               clock->divider = divider;
+                               clock->shift = shift;
+                               ret = 1;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+/* sm501_calc_pll
+ *
+ * Calculates the nearest discrete clock frequency that can be
+ * achieved using the programmable PLL.
+ *   the maximum divisor is 3 or 5
+ */
+
+static unsigned long sm501_calc_pll(unsigned long freq,
+                                       struct sm501_clock *clock,
+                                       int max_div)
+{
+       unsigned long mclk;
+       unsigned int m, n, k;
+       long best_diff = 999999999;
+
+       /*
+        * The SM502 datasheet doesn't specify the min/max values for M and N.
+        * N = 1 at least doesn't work in practice.
+        */
+       for (m = 2; m <= 255; m++) {
+               for (n = 2; n <= 127; n++) {
+                       for (k = 0; k <= 1; k++) {
+                               mclk = (24000000UL * m / n) >> k;
+
+                               if (sm501_calc_clock(freq, clock, max_div,
+                                                    mclk, &best_diff)) {
+                                       clock->m = m;
+                                       clock->n = n;
+                                       clock->k = k;
+                               }
+                       }
+               }
+       }
+
+       /* Return best clock. */
+       return clock->mclk / (clock->divider << clock->shift);
+}
+
 /* sm501_select_clock
  *
- * selects nearest discrete clock frequency the SM501 can achive
+ * Calculates the nearest discrete clock frequency that can be
+ * achieved using the 288MHz and 336MHz PLLs.
  *   the maximum divisor is 3 or 5
  */
+
 static unsigned long sm501_select_clock(unsigned long freq,
                                        struct sm501_clock *clock,
                                        int max_div)
 {
        unsigned long mclk;
-       int divider;
-       int shift;
-       long diff;
        long best_diff = 999999999;
 
        /* Try 288MHz and 336MHz clocks. */
        for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) {
-               /* try dividers 1 and 3 for CRT and for panel,
-                  try divider 5 for panel only.*/
-
-               for (divider = 1; divider <= max_div; divider += 2) {
-                       /* try all 8 shift values.*/
-                       for (shift = 0; shift < 8; shift++) {
-                               /* Calculate difference to requested clock */
-                               diff = sm501fb_round_div(mclk, divider << shift) - freq;
-                               if (diff < 0)
-                                       diff = -diff;
-
-                               /* If it is less than the current, use it */
-                               if (diff < best_diff) {
-                                       best_diff = diff;
-
-                                       clock->mclk = mclk;
-                                       clock->divider = divider;
-                                       clock->shift = shift;
-                               }
-                       }
-               }
+               sm501_calc_clock(freq, clock, max_div, mclk, &best_diff);
        }
 
        /* Return best clock. */
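
sm501_calc_pll() above searches M, N and K for the SM502 programmable PLL; the arithmetic reduces to mclk = (24 MHz * M / N) >> K, and the resulting clock is mclk divided by (divider << shift). A stand-alone illustration with made-up but in-range values, including the register word that sm501_set_clock() builds from the winning M/N/K further down:

#include <stdio.h>

int main(void)
{
        /* example values only; the driver searches m = 2..255, n = 2..127,
         * k = 0..1 for the combination closest to the requested clock */
        unsigned int m = 168, n = 7, k = 1;
        unsigned long mclk = (24000000UL * m / n) >> k;   /* 288000000 Hz */

        int divider = 3, shift = 2;
        unsigned long pclk = mclk / (divider << shift);   /* 24000000 Hz */

        /* layout used when programming the PLL: M in the low byte,
         * N at bits 8..14, K at bit 15, plus the 0x20000 bit the driver
         * sets whenever it builds this value */
        unsigned int pll_reg = 0x20000 | (k << 15) | (n << 8) | m;

        printf("mclk %lu Hz, pixel clock %lu Hz, pll_reg 0x%05x\n",
               mclk, pclk, pll_reg);
        return 0;
}
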
@@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev,
        unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE);
        unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK);
        unsigned char reg;
+       unsigned int pll_reg = 0;
        unsigned long sm501_freq; /* the actual frequency achieved */
 
        struct sm501_clock to;
@@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev,
                 * requested frequency the value must be multiplied by
                 * 2. This clock also has an additional pre divisor */
 
-               sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2);
-               reg=to.shift & 0x07;/* bottom 3 bits are shift */
-               if (to.divider == 3)
-                       reg |= 0x08; /* /3 divider required */
-               else if (to.divider == 5)
-                       reg |= 0x10; /* /5 divider required */
-               if (to.mclk != 288000000)
-                       reg |= 0x20; /* which mclk pll is source */
+               if (sm->rev >= 0xC0) {
+                       /* SM502 -> use the programmable PLL */
+                       sm501_freq = (sm501_calc_pll(2 * req_freq,
+                                                    &to, 5) / 2);
+                       reg = to.shift & 0x07;/* bottom 3 bits are shift */
+                       if (to.divider == 3)
+                               reg |= 0x08; /* /3 divider required */
+                       else if (to.divider == 5)
+                               reg |= 0x10; /* /5 divider required */
+                       reg |= 0x40; /* select the programmable PLL */
+                       pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m;
+               } else {
+                       sm501_freq = (sm501_select_clock(2 * req_freq,
+                                                        &to, 5) / 2);
+                       reg = to.shift & 0x07;/* bottom 3 bits are shift */
+                       if (to.divider == 3)
+                               reg |= 0x08; /* /3 divider required */
+                       else if (to.divider == 5)
+                               reg |= 0x10; /* /5 divider required */
+                       if (to.mclk != 288000000)
+                               reg |= 0x20; /* which mclk pll is source */
+               }
                break;
 
        case SM501_CLOCK_V2XCLK:
@@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev,
        }
 
        writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
+
+       if (pll_reg)
+               writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL);
+
        sm501_sync_regs(sm);
 
        dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
@@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock);
  * finds the closest available frequency for a given clock
 */
 
-unsigned long sm501_find_clock(int clksrc,
+unsigned long sm501_find_clock(struct device *dev,
+                              int clksrc,
                               unsigned long req_freq)
 {
+       struct sm501_devdata *sm = dev_get_drvdata(dev);
        unsigned long sm501_freq; /* the frequency achievable by the 501 */
        struct sm501_clock to;
 
        switch (clksrc) {
        case SM501_CLOCK_P2XCLK:
-               sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2);
+               if (sm->rev >= 0xC0) {
+                       /* SM502 -> use the programmable PLL */
+                       sm501_freq = (sm501_calc_pll(2 * req_freq,
+                                                    &to, 5) / 2);
+               } else {
+                       sm501_freq = (sm501_select_clock(2 * req_freq,
+                                                        &to, 5) / 2);
+               }
                break;
 
        case SM501_CLOCK_V2XCLK:
@@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm)
        dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n",
                 sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq);
 
+       sm->rev = devid & SM501_DEVICEID_REVMASK;
+
        sm501_dump_gate(sm);
 
        ret = device_create_file(sm->dev, &dev_attr_dbg_regs);
index bb269d0c677edbc4cadd268dc634e90b6120a80e..6cb781262f947611a20916bf6b996c9ff3e713cb 100644 (file)
@@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status)
        if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
                return -EIO;
 
-       return ((s & TP_HOTKEY_TABLET_MASK) != 0);
+       *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+       return 0;
 }
 
 /*
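
The thinkpad_acpi fix above stops returning the tablet state directly, which a caller could mistake for an error code, and reports it through the status pointer instead, reserving the return value for 0 or a negative errno. A stubbed, stand-alone sketch of the resulting calling convention; get_tablet_mode() below is a stand-in, not the driver function:

#include <stdio.h>

static int get_tablet_mode(int *status)
{
        *status = 1;            /* pretend the hardware reports tablet mode */
        return 0;               /* 0 on success, negative errno on failure  */
}

int main(void)
{
        int tablet;

        if (get_tablet_mode(&tablet) < 0)
                return 1;
        printf("tablet mode: %d\n", tablet);
        return 0;
}
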
index f337800076c0fdba7fe986e8dd27d897f8cce1ef..a0f0e605d630389bbed8450c1446905002e1a836 100644 (file)
@@ -90,6 +90,11 @@ config MACVLAN
          This allows one to create virtual interfaces that map packets to
          or from specific MAC addresses to a particular interface.
 
+         Macvlan devices can be added using the "ip" command from the
+         iproute2 package starting with the iproute2-2.6.23 release:
+
+         "ip link add link <real dev> [ address MAC ] [ NAME ] type macvlan"
+
          To compile this driver as a module, choose M here: the module
          will be called macvlan.
 
@@ -2363,6 +2368,7 @@ config GELIC_NET
 config GELIC_WIRELESS
        bool "PS3 Wireless support"
        depends on GELIC_NET
+       select WIRELESS_EXT
        help
         This option adds the support for the wireless feature of PS3.
         If you have the wireless-less model of PS3 or have no plan to
index afc7f34b1dcf3afbf8dd6eb02b272abe94c8edf5..8af142ccf373feb060b82fae91e7a5fe9adae10b 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2x.c: Broadcom Everest network driver.
  *
- * Copyright (c) 2007 Broadcom Corporation
+ * Copyright (c) 2007-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
  * Slowpath rework by Vladislav Zolotarov
- * Statistics and Link managment by Yitchak Gertner
+ * Statistics and Link management by Yitchak Gertner
  *
  */
 
 /* define this to make the driver freeze on error
  * to allow getting debug info
- * (you will need to reboot afterwords)
+ * (you will need to reboot afterwards)
  */
 /*#define BNX2X_STOP_ON_ERROR*/
 
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION      "0.40.15"
-#define DRV_MODULE_RELDATE      "$DateTime: 2007/11/15 07:28:37 $"
-#define BNX2X_BC_VER           0x040009
+#define DRV_MODULE_VERSION      "1.40.22"
+#define DRV_MODULE_RELDATE      "2007/11/27"
+#define BNX2X_BC_VER           0x040200
 
 /* Time in jiffies before concluding the transmitter is hung. */
 #define TX_TIMEOUT             (5*HZ)
 
 static char version[] __devinitdata =
-       "Broadcom NetXtreme II 577xx 10Gigabit Ethernet Driver "
+       "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
-MODULE_INFO(cvs_version, "$Revision: #356 $");
 
 static int use_inta;
 static int poll;
@@ -94,8 +93,8 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(onefunc, "enable only first function");
-MODULE_PARM_DESC(nomcp, "ignore managment CPU (Implies onefunc)");
-MODULE_PARM_DESC(debug, "defualt debug msglevel");
+MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
+MODULE_PARM_DESC(debug, "default debug msglevel");
 
 #ifdef BNX2X_MULTI
 module_param(use_multi, int, 0);
@@ -298,8 +297,7 @@ static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 
 static int bnx2x_mc_assert(struct bnx2x *bp)
 {
-       int i, j;
-       int rc = 0;
+       int i, j, rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
@@ -313,8 +311,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
-               BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
-                         storm[i], last_idx);
+               if (last_idx)
+                       BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
+                                 storm[i], last_idx);
 
                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
@@ -330,7 +329,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
                                      intmem_base[i]);
 
                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-                               BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
+                               BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
@@ -341,6 +340,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
        }
        return rc;
 }
+
 static void bnx2x_fw_dump(struct bnx2x *bp)
 {
        u32 mark, offset;
@@ -348,21 +348,22 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
        int word;
 
        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
-       printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
+       mark = ((mark + 0x3) & ~0x3);
+       printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
 
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
-               printk(KERN_ERR PFX "%s", (char *)data);
+               printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
-               printk(KERN_ERR PFX "%s", (char *)data);
+               printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
 }
@@ -427,10 +428,10 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
                }
        }
 
-       BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_t_idx(%u)"
-                 "  def_x_idx(%u)  def_att_idx(%u)  attn_state(%u)"
+       BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
+                 "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
-                 bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
+                 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
 
@@ -441,7 +442,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
 }
 
-static void bnx2x_enable_int(struct bnx2x *bp)
+static void bnx2x_int_enable(struct bnx2x *bp)
 {
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -454,18 +455,26 @@ static void bnx2x_enable_int(struct bnx2x *bp)
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+                       HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+               /* Errata A0.158 workaround */
+               DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
+                  val, port, addr, msix);
+
+               REG_WR(bp, addr, val);
+
                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }
 
-       DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  msi %d\n",
+       DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);
 
        REG_WR(bp, addr, val);
 }
 
-static void bnx2x_disable_int(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
 {
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -484,15 +493,15 @@ static void bnx2x_disable_int(struct bnx2x *bp)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-static void bnx2x_disable_int_sync(struct bnx2x *bp)
+static void bnx2x_int_disable_sync(struct bnx2x *bp)
 {
 
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;
 
        atomic_inc(&bp->intr_sem);
-       /* prevent the HW from sending interrupts*/
-       bnx2x_disable_int(bp);
+       /* prevent the HW from sending interrupts */
+       bnx2x_int_disable(bp);
 
        /* make sure all ISRs are done */
        if (msix) {
@@ -775,6 +784,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }
+
        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
@@ -787,20 +797,20 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                fp->state = BNX2X_FP_STATE_HALTED;
                break;
 
-       case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
-               DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
-               bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
-               break;
-
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
-               DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
-               bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
+               DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
+                  cid);
+               bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;
 
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;
 
+       case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
+               DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
+               break;
+
        default:
                BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
                          command, bp->state);
@@ -1179,12 +1189,175 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
        return val;
 }
 
+static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+{
+       u32 cnt;
+       u32 lock_status;
+       u32 resource_bit = (1 << resource);
+       u8 func = bp->port;
+
+       /* Validating that the resource is within range */
+       if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+               DP(NETIF_MSG_HW,
+                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+                  resource, HW_LOCK_MAX_RESOURCE_VALUE);
+               return -EINVAL;
+       }
+
+       /* Validating that the resource is not already taken */
+       lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
+       if (lock_status & resource_bit) {
+               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+                  lock_status, resource_bit);
+               return -EEXIST;
+       }
+
+       /* Try for 1 second every 5ms */
+       for (cnt = 0; cnt < 200; cnt++) {
+               /* Try to acquire the lock */
+               REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
+                      resource_bit);
+               lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
+               if (lock_status & resource_bit)
+                       return 0;
+
+               msleep(5);
+       }
+       DP(NETIF_MSG_HW, "Timeout\n");
+       return -EAGAIN;
+}
+
+static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+{
+       u32 lock_status;
+       u32 resource_bit = (1 << resource);
+       u8 func = bp->port;
+
+       /* Validating that the resource is within range */
+       if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+               DP(NETIF_MSG_HW,
+                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+                  resource, HW_LOCK_MAX_RESOURCE_VALUE);
+               return -EINVAL;
+       }
+
+       /* Validating that the resource is currently taken */
+       lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
+       if (!(lock_status & resource_bit)) {
+               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+                  lock_status, resource_bit);
+               return -EFAULT;
+       }
+
+       REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
+       return 0;
+}
+
+static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+{
+       /* The GPIO should be swapped if swap register is set and active */
+       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
+       int gpio_shift = gpio_num +
+                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+       u32 gpio_mask = (1 << gpio_shift);
+       u32 gpio_reg;
+
+       if (gpio_num > MISC_REGISTERS_GPIO_3) {
+               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+               return -EINVAL;
+       }
+
+       bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+       /* read GPIO and mask except the float bits */
+       gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+       switch (mode) {
+       case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+                  gpio_num, gpio_shift);
+               /* clear FLOAT and set CLR */
+               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+               break;
+
+       case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+                  gpio_num, gpio_shift);
+               /* clear FLOAT and set SET */
+               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+               break;
+
+       case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+                  gpio_num, gpio_shift);
+               /* set FLOAT */
+               gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+               break;
+
+       default:
+               break;
+       }
+
+       REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+       bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+
+       return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+{
+       u32 spio_mask = (1 << spio_num);
+       u32 spio_reg;
+
+       if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+           (spio_num > MISC_REGISTERS_SPIO_7)) {
+               BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+               return -EINVAL;
+       }
+
+       bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+       /* read SPIO and mask except the float bits */
+       spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+
+       switch (mode) {
+       case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+               DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+               /* clear FLOAT and set CLR */
+               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+               break;
+
+       case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+               DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+               /* clear FLOAT and set SET */
+               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+               break;
+
+       case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+               DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+               /* set FLOAT */
+               spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               break;
+
+       default:
+               break;
+       }
+
+       REG_WR(bp, MISC_REG_SPIO, spio_reg);
+       bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+
+       return 0;
+}
+
 static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
 {
-       int rc;
-       u32 tmp, i;
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+       u32 tmp;
+       int i, rc;
 
 /*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
           bp->phy_addr, reg, val); */
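
bnx2x_hw_lock() added above polls the driver-control register 200 times with a 5 ms sleep between attempts, giving roughly a one-second timeout. A stand-alone sketch of that poll-with-timeout pattern; try_acquire() is a stub, not the register access:

#include <stdio.h>
#include <unistd.h>

static int try_acquire(void)
{
        static int attempts_left = 3;   /* pretend the lock frees on try 4 */
        return --attempts_left < 0;
}

int main(void)
{
        for (int cnt = 0; cnt < 200; cnt++) {   /* 200 * 5 ms ~= 1 second */
                if (try_acquire()) {
                        printf("acquired after %d retries\n", cnt);
                        return 0;
                }
                usleep(5000);                   /* stands in for msleep(5) */
        }
        printf("timed out\n");
        return 1;
}
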
@@ -1236,8 +1409,8 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
 {
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-       u32 val, i;
-       int rc;
+       u32 val;
+       int i, rc;
 
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
 
@@ -1286,58 +1459,54 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
        return rc;
 }
 
-static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
+static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
+                                  u32 phy_addr, u32 reg, u32 addr, u32 val)
 {
-       int rc = 0;
-       u32 tmp, i;
-       int port = bp->port;
-       u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+       u32 tmp;
+       int i, rc = 0;
 
-       if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
-
-               tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-               tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
-               EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
-               REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-               udelay(40);
-       }
-
-       /* set clause 45 mode */
-       tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-       tmp |= EMAC_MDIO_MODE_CLAUSE_45;
-       EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+       /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+        * (a value of 49==0x31) and make sure that the AUTO poll is off
+        */
+       tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+       tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+       tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
+               (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
+       REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
+       REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+       udelay(40);
 
        /* address */
-       tmp = ((bp->phy_addr << 21) | (reg << 16) | addr |
+       tmp = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
-       EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
+       REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
 
        for (i = 0; i < 50; i++) {
                udelay(10);
 
-               tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+               tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
-
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");
 
                rc = -EBUSY;
+
        } else {
                /* data */
-               tmp = ((bp->phy_addr << 21) | (reg << 16) | val |
+               tmp = ((phy_addr << 21) | (reg << 16) | val |
                       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
                       EMAC_MDIO_COMM_START_BUSY);
-               EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
+               REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
 
                for (i = 0; i < 50; i++) {
                        udelay(10);
 
-                       tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+                       tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
@@ -1351,75 +1520,78 @@ static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
                }
        }
 
-       /* unset clause 45 mode */
-       tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-       tmp &= ~EMAC_MDIO_MODE_CLAUSE_45;
-       EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
-
-       if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
-
-               tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+       /* unset clause 45 mode, set the MDIO clock to a faster value
+        * (0x13 => 6.25MHz) and restore the AUTO poll if needed
+        */
+       tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+       tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
+       tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+       if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
-               EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
-       }
+       REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
 
        return rc;
 }
 
-static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
-                            u32 *ret_val)
+static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
+                             u32 addr, u32 val)
 {
-       int port = bp->port;
-       u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-       u32 val, i;
-       int rc = 0;
+       u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 
-       if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+       return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
+                                      reg, addr, val);
+}
 
-               val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-               val &= ~EMAC_MDIO_MODE_AUTO_POLL;
-               EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
-               REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-               udelay(40);
-       }
+static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
+                                 u32 phy_addr, u32 reg, u32 addr,
+                                 u32 *ret_val)
+{
+       u32 val;
+       int i, rc = 0;
 
-       /* set clause 45 mode */
-       val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-       val |= EMAC_MDIO_MODE_CLAUSE_45;
-       EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+       /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+        * (a value of 49==0x31) and make sure that the AUTO poll is off
+        */
+       val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+       val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+       val |= (EMAC_MDIO_MODE_CLAUSE_45 |
+               (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
+       REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
+       REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+       udelay(40);
 
        /* address */
-       val = ((bp->phy_addr << 21) | (reg << 16) | addr |
+       val = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
-       EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
+       REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
 
        for (i = 0; i < 50; i++) {
                udelay(10);
 
-               val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+               val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
-
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");
 
                *ret_val = 0;
                rc = -EBUSY;
+
        } else {
                /* data */
-               val = ((bp->phy_addr << 21) | (reg << 16) |
+               val = ((phy_addr << 21) | (reg << 16) |
                       EMAC_MDIO_COMM_COMMAND_READ_45 |
                       EMAC_MDIO_COMM_START_BUSY);
-               EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
+               REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
 
                for (i = 0; i < 50; i++) {
                        udelay(10);
 
-                       val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+                       val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                val &= EMAC_MDIO_COMM_DATA;
                                break;
@@ -1436,31 +1608,39 @@ static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
                *ret_val = val;
        }
 
-       /* unset clause 45 mode */
-       val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-       val &= ~EMAC_MDIO_MODE_CLAUSE_45;
-       EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
-
-       if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
-
-               val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+       /* unset clause 45 mode, set the MDIO clock to a faster value
+        * (0x13 => 6.25MHz) and restore the AUTO poll if needed
+        */
+       val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+       val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
+       val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+       if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                val |= EMAC_MDIO_MODE_AUTO_POLL;
-               EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
-       }
+       REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
 
        return rc;
 }
 
-static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
+static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
+                            u32 addr, u32 *ret_val)
+{
+       u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+       return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
+                                     reg, addr, ret_val);
+}
+
+static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
+                              u32 addr, u32 val)
 {
        int i;
        u32 rd_val;
 
        might_sleep();
        for (i = 0; i < 10; i++) {
-               bnx2x_mdio45_write(bp, reg, addr, val);
+               bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
                msleep(5);
-               bnx2x_mdio45_read(bp, reg, addr, &rd_val);
+               bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
                /* if the read value is not the same as the value we wrote,
                   we should write it again */
                if (rd_val == val)
@@ -1471,18 +1651,81 @@ static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
 }
 
 /*
- * link managment
+ * link management
  */
 
+static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
+{
+       switch (pause_result) {                 /* ASYM P ASYM P */
+       case 0xb:                               /*   1  0   1  1 */
+               bp->flow_ctrl = FLOW_CTRL_TX;
+               break;
+
+       case 0xe:                               /*   1  1   1  0 */
+               bp->flow_ctrl = FLOW_CTRL_RX;
+               break;
+
+       case 0x5:                               /*   0  1   0  1 */
+       case 0x7:                               /*   0  1   1  1 */
+       case 0xd:                               /*   1  1   0  1 */
+       case 0xf:                               /*   1  1   1  1 */
+               bp->flow_ctrl = FLOW_CTRL_BOTH;
+               break;
+
+       default:
+               break;
+       }
+}
+
+static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
+{
+       u32 ext_phy_addr;
+       u32 ld_pause;   /* local */
+       u32 lp_pause;   /* link partner */
+       u32 an_complete; /* AN complete */
+       u32 pause_result;
+       u8 ret = 0;
+
+       ext_phy_addr = ((bp->ext_phy_config &
+                        PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+                                       PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+
+       /* read twice */
+       bnx2x_mdio45_read(bp, ext_phy_addr,
+                         EXT_PHY_KR_AUTO_NEG_DEVAD,
+                         EXT_PHY_KR_STATUS, &an_complete);
+       bnx2x_mdio45_read(bp, ext_phy_addr,
+                         EXT_PHY_KR_AUTO_NEG_DEVAD,
+                         EXT_PHY_KR_STATUS, &an_complete);
+
+       if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
+               ret = 1;
+               bnx2x_mdio45_read(bp, ext_phy_addr,
+                                 EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                 EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
+               bnx2x_mdio45_read(bp, ext_phy_addr,
+                                 EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                 EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
+               pause_result = (ld_pause &
+                               EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
+               pause_result |= (lp_pause &
+                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
+               DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
+                  pause_result);
+               bnx2x_pause_resolve(bp, pause_result);
+       }
+       return ret;
+}
+
 static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
 {
-       u32 ld_pause;   /* local driver */
-       u32 lp_pause;   /* link partner */
+       u32 ld_pause;   /* local driver */
+       u32 lp_pause;   /* link partner */
        u32 pause_result;
 
        bp->flow_ctrl = 0;
 
-       /* reolve from gp_status in case of AN complete and not sgmii */
+       /* resolve from gp_status in case of AN complete and not sgmii */
        if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
            (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
            (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
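
bnx2x_pause_resolve() above collapses the locally advertised and link-partner pause bits into a four-bit pause_result and maps it to a flow-control mode. A stand-alone illustration of that mapping, with the bit layout as in the driver's ASYM/PAUSE table and made-up sample inputs:

#include <stdio.h>

static const char *resolve(unsigned int pause_result)
{
        switch (pause_result) {         /* ASYM P ASYM P */
        case 0xb:                       /*   1  0   1  1 */
                return "TX only";
        case 0xe:                       /*   1  1   1  0 */
                return "RX only";
        case 0x5: case 0x7: case 0xd: case 0xf:
                return "TX and RX";
        default:
                return "none";
        }
}

int main(void)
{
        /* both ends advertise plain symmetric PAUSE -> 0b0101 */
        printf("0x5 -> %s\n", resolve(0x5));
        /* asymmetric case from the table above */
        printf("0xe -> %s\n", resolve(0xe));
        return 0;
}
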
@@ -1499,45 +1742,57 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
+               bnx2x_pause_resolve(bp, pause_result);
+       } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
+                  !(bnx2x_ext_phy_resove_fc(bp))) {
+               /* forced speed */
+               if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
+                       switch (bp->req_flow_ctrl) {
+                       case FLOW_CTRL_AUTO:
+                               if (bp->dev->mtu <= 4500)
+                                       bp->flow_ctrl = FLOW_CTRL_BOTH;
+                               else
+                                       bp->flow_ctrl = FLOW_CTRL_TX;
+                               break;
 
-               switch (pause_result) {                 /* ASYM P ASYM P */
-               case 0xb:                               /*   1  0   1  1 */
-                       bp->flow_ctrl = FLOW_CTRL_TX;
-                       break;
-
-               case 0xe:                               /*   1  1   1  0 */
-                       bp->flow_ctrl = FLOW_CTRL_RX;
-                       break;
+                       case FLOW_CTRL_TX:
+                               bp->flow_ctrl = FLOW_CTRL_TX;
+                               break;
 
-               case 0x5:                               /*   0  1   0  1 */
-               case 0x7:                               /*   0  1   1  1 */
-               case 0xd:                               /*   1  1   0  1 */
-               case 0xf:                               /*   1  1   1  1 */
-                       bp->flow_ctrl = FLOW_CTRL_BOTH;
-                       break;
+                       case FLOW_CTRL_RX:
+                               if (bp->dev->mtu <= 4500)
+                                       bp->flow_ctrl = FLOW_CTRL_RX;
+                               break;
 
-               default:
-                       break;
-               }
+                       case FLOW_CTRL_BOTH:
+                               if (bp->dev->mtu <= 4500)
+                                       bp->flow_ctrl = FLOW_CTRL_BOTH;
+                               else
+                                       bp->flow_ctrl = FLOW_CTRL_TX;
+                               break;
 
-       } else { /* forced mode */
-               switch (bp->req_flow_ctrl) {
-               case FLOW_CTRL_AUTO:
-                       if (bp->dev->mtu <= 4500)
-                               bp->flow_ctrl = FLOW_CTRL_BOTH;
-                       else
-                               bp->flow_ctrl = FLOW_CTRL_TX;
-                       break;
+                       case FLOW_CTRL_NONE:
+                       default:
+                               break;
+                       }
+               } else { /* forced mode */
+                       switch (bp->req_flow_ctrl) {
+                       case FLOW_CTRL_AUTO:
+                               DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
+                                                  " req_autoneg 0x%x\n",
+                                  bp->req_flow_ctrl, bp->req_autoneg);
+                               break;
 
-               case FLOW_CTRL_TX:
-               case FLOW_CTRL_RX:
-               case FLOW_CTRL_BOTH:
-                       bp->flow_ctrl = bp->req_flow_ctrl;
-                       break;
+                       case FLOW_CTRL_TX:
+                       case FLOW_CTRL_RX:
+                       case FLOW_CTRL_BOTH:
+                               bp->flow_ctrl = bp->req_flow_ctrl;
+                               break;
 
-               case FLOW_CTRL_NONE:
-               default:
-                       break;
+                       case FLOW_CTRL_NONE:
+                       default:
+                               break;
+                       }
                }
        }
        DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
@@ -1548,9 +1803,9 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
        bp->link_status = 0;
 
        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
-               DP(NETIF_MSG_LINK, "link up\n");
+               DP(NETIF_MSG_LINK, "phy link up\n");
 
-               bp->link_up = 1;
+               bp->phy_link_up = 1;
                bp->link_status |= LINK_STATUS_LINK_UP;
 
                if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
@@ -1659,20 +1914,20 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
                       bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
 
        } else { /* link_down */
-               DP(NETIF_MSG_LINK, "link down\n");
+               DP(NETIF_MSG_LINK, "phy link down\n");
 
-               bp->link_up = 0;
+               bp->phy_link_up = 0;
 
                bp->line_speed = 0;
                bp->duplex = DUPLEX_FULL;
                bp->flow_ctrl = 0;
        }
 
-       DP(NETIF_MSG_LINK, "gp_status 0x%x  link_up %d\n"
+       DP(NETIF_MSG_LINK, "gp_status 0x%x  phy_link_up %d\n"
           DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
                    "  link_status 0x%x\n",
-          gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl,
-          bp->link_status);
+          gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
+          bp->flow_ctrl, bp->link_status);
 }
 
 static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
@@ -1680,40 +1935,40 @@ static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
        int port = bp->port;
 
        /* first reset all status
-        * we asume only one line will be change at a time */
+        * we assume only one line will be changed at a time */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-                      (NIG_XGXS0_LINK_STATUS |
-                       NIG_SERDES0_LINK_STATUS |
-                       NIG_STATUS_INTERRUPT_XGXS0_LINK10G));
-       if (bp->link_up) {
+                      (NIG_STATUS_XGXS0_LINK10G |
+                       NIG_STATUS_XGXS0_LINK_STATUS |
+                       NIG_STATUS_SERDES0_LINK_STATUS));
+       if (bp->phy_link_up) {
                if (is_10g) {
                        /* Disable the 10G link interrupt
                         * by writing 1 to the status register
                         */
-                       DP(NETIF_MSG_LINK, "10G XGXS link up\n");
+                       DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-                                     NIG_STATUS_INTERRUPT_XGXS0_LINK10G);
+                                     NIG_STATUS_XGXS0_LINK10G);
 
                } else if (bp->phy_flags & PHY_XGXS_FLAG) {
                        /* Disable the link interrupt
                         * by writing 1 to the relevant lane
                         * in the status register
                         */
-                       DP(NETIF_MSG_LINK, "1G XGXS link up\n");
+                       DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      ((1 << bp->ser_lane) <<
-                                      NIG_XGXS0_LINK_STATUS_SIZE));
+                                      NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
 
                } else { /* SerDes */
-                       DP(NETIF_MSG_LINK, "SerDes link up\n");
+                       DP(NETIF_MSG_LINK, "SerDes phy link up\n");
                        /* Disable the link interrupt
                         * by writing 1 to the status register
                         */
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-                                     NIG_SERDES0_LINK_STATUS);
+                                     NIG_STATUS_SERDES0_LINK_STATUS);
                }
 
        } else { /* link_down */
@@ -1724,91 +1979,182 @@ static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
 {
        u32 ext_phy_type;
        u32 ext_phy_addr;
-       u32 local_phy;
-       u32 val = 0;
+       u32 val1 = 0, val2;
        u32 rx_sd, pcs_status;
 
        if (bp->phy_flags & PHY_XGXS_FLAG) {
-               local_phy = bp->phy_addr;
                ext_phy_addr = ((bp->ext_phy_config &
                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                                PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
-               bp->phy_addr = (u8)ext_phy_addr;
 
                ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                        DP(NETIF_MSG_LINK, "XGXS Direct\n");
-                       val = 1;
+                       val1 = 1;
                        break;
 
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                        DP(NETIF_MSG_LINK, "XGXS 8705\n");
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
-                                         EXT_PHY_OPT_LASI_STATUS, &val);
-                       DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
-
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
-                                         EXT_PHY_OPT_LASI_STATUS, &val);
-                       DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
-
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_WIS_DEVAD,
+                                         EXT_PHY_OPT_LASI_STATUS, &val1);
+                       DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_WIS_DEVAD,
+                                         EXT_PHY_OPT_LASI_STATUS, &val1);
+                       DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
-                       val = (rx_sd & 0x1);
+                       DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
+                       val1 = (rx_sd & 0x1);
                        break;
 
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                        DP(NETIF_MSG_LINK, "XGXS 8706\n");
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
-                                         EXT_PHY_OPT_LASI_STATUS, &val);
-                       DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
-
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
-                                         EXT_PHY_OPT_LASI_STATUS, &val);
-                       DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
-
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_OPT_LASI_STATUS, &val1);
+                       DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
+
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_OPT_LASI_STATUS, &val1);
+                       DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
+
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
-                       bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD,
-                                        EXT_PHY_OPT_PCS_STATUS, &pcs_status);
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PCS_DEVAD,
+                                         EXT_PHY_OPT_PCS_STATUS, &pcs_status);
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_AUTO_NEG_DEVAD,
+                                         EXT_PHY_OPT_AN_LINK_STATUS, &val2);
+
                        DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
-                          "  pcs_status 0x%x\n", rx_sd, pcs_status);
-                       /* link is up if both bit 0 of pmd_rx and
-                        * bit 0 of pcs_status are set
+                          "  pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
+                          rx_sd, pcs_status, val2, (val2 & (1<<1)));
+                       /* link is up if both bit 0 of pmd_rx_sd and
+                        * bit 0 of pcs_status are set, or if the autoneg bit
+                        * 1 is set
+                        */
+                       val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+                       bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+
+                       /* clear the interrupt LASI status register */
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                              ext_phy_addr,
+                                              EXT_PHY_KR_PCS_DEVAD,
+                                              EXT_PHY_KR_LASI_STATUS, &val2);
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                              ext_phy_addr,
+                                              EXT_PHY_KR_PCS_DEVAD,
+                                              EXT_PHY_KR_LASI_STATUS, &val1);
+                       DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
+                          val2, val1);
+                       /* Check the LASI */
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                              ext_phy_addr,
+                                              EXT_PHY_KR_PMA_PMD_DEVAD,
+                                              0x9003, &val2);
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                              ext_phy_addr,
+                                              EXT_PHY_KR_PMA_PMD_DEVAD,
+                                              0x9003, &val1);
+                       DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
+                          val2, val1);
+                       /* Check the link status */
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                              ext_phy_addr,
+                                              EXT_PHY_KR_PCS_DEVAD,
+                                              EXT_PHY_KR_PCS_STATUS, &val2);
+                       DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+                       /* Check the link status on 1.1.2 */
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                         ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_KR_STATUS, &val2);
+                       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                         ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_KR_STATUS, &val1);
+                       DP(NETIF_MSG_LINK,
+                          "KR PMA status 0x%x->0x%x\n", val2, val1);
+                       val1 = ((val1 & 4) == 4);
+                       /* If 1G was requested assume the link is up */
+                       if (!(bp->req_autoneg & AUTONEG_SPEED) &&
+                           (bp->req_line_speed == SPEED_1000))
+                               val1 = 1;
+                       bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_OPT_LASI_STATUS, &val2);
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_OPT_LASI_STATUS, &val1);
+                       DP(NETIF_MSG_LINK,
+                          "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_KR_STATUS, &val2);
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                         EXT_PHY_KR_STATUS, &val1);
+                       DP(NETIF_MSG_LINK,
+                          "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
+                       val1 = ((val1 & 4) == 4);
+                       /* if link is up
+                        * print the AN outcome of the SFX7101 PHY
                         */
-                       val = (rx_sd & pcs_status);
+                       if (val1) {
+                               bnx2x_mdio45_read(bp, ext_phy_addr,
+                                                 EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                                 0x21, &val2);
+                               DP(NETIF_MSG_LINK,
+                                  "SFX7101 AN status 0x%x->%s\n", val2,
+                                  (val2 & (1<<14)) ? "Master" : "Slave");
+                       }
                        break;
 
                default:
                        DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
                           bp->ext_phy_config);
-                       val = 0;
+                       val1 = 0;
                        break;
                }
-               bp->phy_addr = local_phy;
 
        } else { /* SerDes */
                ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
                switch (ext_phy_type) {
                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
                        DP(NETIF_MSG_LINK, "SerDes Direct\n");
-                       val = 1;
+                       val1 = 1;
                        break;
 
                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
                        DP(NETIF_MSG_LINK, "SerDes 5482\n");
-                       val = 1;
+                       val1 = 1;
                        break;
 
                default:
                        DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
                           bp->ext_phy_config);
-                       val = 0;
+                       val1 = 0;
                        break;
                }
        }
 
-       return val;
+       return val1;
 }
 
 static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
@@ -1819,7 +2165,7 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
        u32 wb_write[2];
        u32 val;
 
-       DP(NETIF_MSG_LINK, "enableing BigMAC\n");
+       DP(NETIF_MSG_LINK, "enabling BigMAC\n");
        /* reset and unreset the BigMac */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -1933,6 +2279,35 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
        bp->stats_state = STATS_STATE_ENABLE;
 }
 
+static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
+{
+       int port = bp->port;
+       u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+                              NIG_REG_INGRESS_BMAC0_MEM;
+       u32 wb_write[2];
+
+       /* Only if the bmac is out of reset */
+       if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+                       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
+               /* Clear Rx Enable bit in BMAC_CONTROL register */
+#ifdef BNX2X_DMAE_RD
+               bnx2x_read_dmae(bp, bmac_addr +
+                               BIGMAC_REGISTER_BMAC_CONTROL, 2);
+               wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
+               wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
+#else
+               wb_write[0] = REG_RD(bp,
+                               bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
+               wb_write[1] = REG_RD(bp,
+                               bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
+#endif
+               wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
+               REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
+                           wb_write, 2);
+               msleep(1);
+       }
+}
+
 static void bnx2x_emac_enable(struct bnx2x *bp)
 {
        int port = bp->port;
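For reference, the bnx2x_bmac_rx_disable() added above clears the Rx enable flag in a BigMAC control register that is 64 bits wide and read as two 32-bit words. A minimal, driver-independent sketch of that read-modify-write pattern follows; reg_rd, reg_wr and flag_in_low_word are hypothetical stand-ins, not the driver's REG_RD/REG_WR_DMAE helpers or BMAC_CONTROL_RX_ENABLE:

#include <stdint.h>

/* Illustrative only: clear one flag in a 64-bit register exposed as two
 * consecutive 32-bit words (low word at 'base', high word at 'base + 4'). */
static void wide_reg_clear_flag(uint32_t (*reg_rd)(uint32_t off),
                                void (*reg_wr)(uint32_t off, uint32_t val),
                                uint32_t base, uint32_t flag_in_low_word)
{
        uint32_t lo = reg_rd(base);        /* low 32 bits */
        uint32_t hi = reg_rd(base + 4);    /* high 32 bits */

        lo &= ~flag_in_low_word;           /* e.g. the Rx enable bit */

        reg_wr(base, lo);                  /* write both halves back */
        reg_wr(base + 4, hi);
}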
@@ -1940,7 +2315,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp)
        u32 val;
        int timeout;
 
-       DP(NETIF_MSG_LINK, "enableing EMAC\n");
+       DP(NETIF_MSG_LINK, "enabling EMAC\n");
        /* reset and unreset the emac core */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
               (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
@@ -2033,7 +2408,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp)
                                      EMAC_TX_MODE_EXT_PAUSE_EN);
        }
 
-       /* KEEP_VLAN_TAG, promiscous */
+       /* KEEP_VLAN_TAG, promiscuous */
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
        val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
        EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
@@ -2161,7 +2536,6 @@ static void bnx2x_pbf_update(struct bnx2x *bp)
        u32 count = 1000;
        u32 pause = 0;
 
-
        /* disable port */
        REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
 
@@ -2232,7 +2606,7 @@ static void bnx2x_pbf_update(struct bnx2x *bp)
 static void bnx2x_update_mng(struct bnx2x *bp)
 {
        if (!nomcp)
-               SHMEM_WR(bp, drv_fw_mb[bp->port].link_status,
+               SHMEM_WR(bp, port_mb[bp->port].link_status,
                         bp->link_status);
 }
 
@@ -2294,19 +2668,19 @@ static void bnx2x_link_down(struct bnx2x *bp)
                DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
        }
 
-       /* indicate link down */
+       /* indicate no mac active */
        bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
 
-       /* reset BigMac */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+       /* update shared memory */
+       bnx2x_update_mng(bp);
 
-       /* ignore drain flag interrupt */
        /* activate nig drain */
        NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
 
-       /* update shared memory */
-       bnx2x_update_mng(bp);
+       /* reset BigMac */
+       bnx2x_bmac_rx_disable(bp);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 
        /* indicate link down */
        bnx2x_link_report(bp);
@@ -2317,14 +2691,15 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp);
 /* This function is called upon link interrupt */
 static void bnx2x_link_update(struct bnx2x *bp)
 {
-       u32 gp_status;
        int port = bp->port;
        int i;
+       u32 gp_status;
        int link_10g;
 
-       DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x,"
+       DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
           " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
-          " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG),
+          " 10G %x, XGXS_LINK %x\n", port,
+          (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
           REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
           REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
           REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
@@ -2336,7 +2711,7 @@ static void bnx2x_link_update(struct bnx2x *bp)
        might_sleep();
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
        /* avoid fast toggling */
-       for (i = 0 ; i < 10 ; i++) {
+       for (i = 0; i < 10; i++) {
                msleep(10);
                bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
                                  &gp_status);
@@ -2351,7 +2726,8 @@ static void bnx2x_link_update(struct bnx2x *bp)
        bnx2x_link_int_ack(bp, link_10g);
 
        /* link is up only if both local phy and external phy are up */
-       if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) {
+       bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
+       if (bp->link_up) {
                if (link_10g) {
                        bnx2x_bmac_enable(bp, 0);
                        bnx2x_leds_set(bp, SPEED_10000);
@@ -2427,7 +2803,9 @@ static void bnx2x_reset_unicore(struct bnx2x *bp)
                }
        }
 
-       BNX2X_ERR("BUG! unicore is still in reset!\n");
+       BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
+                 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
+                 bp->phy_addr);
 }
 
 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
@@ -2475,12 +2853,12 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp)
                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
 
                bnx2x_mdio22_write(bp,
-                                  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
 
                bnx2x_mdio22_read(bp,
-                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-                                 &control2);
+                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+                               &control2);
 
                if (bp->autoneg & AUTONEG_PARALLEL) {
                        control2 |=
@@ -2490,8 +2868,14 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp)
                   ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
                }
                bnx2x_mdio22_write(bp,
-                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-                                  control2);
+                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+                               control2);
+
+               /* Disable parallel detection of HiG */
+               MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
+               bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
        }
 }
 
@@ -2625,7 +3009,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
 
        /* set extended capabilities */
-       if (bp->advertising & ADVERTISED_2500baseT_Full)
+       if (bp->advertising & ADVERTISED_2500baseX_Full)
                val |= MDIO_OVER_1G_UP1_2_5G;
        if (bp->advertising & ADVERTISED_10000baseT_Full)
                val |= MDIO_OVER_1G_UP1_10G;
@@ -2638,23 +3022,94 @@ static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
 {
        u32 an_adv;
 
-       /* for AN, we are always publishing full duplex */
-       an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+       /* for AN, we are always publishing full duplex */
+       an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+
+       /* resolve pause mode and advertisement
+        * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+       if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
+               switch (bp->req_flow_ctrl) {
+               case FLOW_CTRL_AUTO:
+                       if (bp->dev->mtu <= 4500) {
+                               an_adv |=
+                                    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+                               bp->advertising |= (ADVERTISED_Pause |
+                                                   ADVERTISED_Asym_Pause);
+                       } else {
+                               an_adv |=
+                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+                               bp->advertising |= ADVERTISED_Asym_Pause;
+                       }
+                       break;
+
+               case FLOW_CTRL_TX:
+                       an_adv |=
+                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+                       bp->advertising |= ADVERTISED_Asym_Pause;
+                       break;
+
+               case FLOW_CTRL_RX:
+                       if (bp->dev->mtu <= 4500) {
+                               an_adv |=
+                                    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+                               bp->advertising |= (ADVERTISED_Pause |
+                                                   ADVERTISED_Asym_Pause);
+                       } else {
+                               an_adv |=
+                                    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+                               bp->advertising &= ~(ADVERTISED_Pause |
+                                                    ADVERTISED_Asym_Pause);
+                       }
+                       break;
+
+               case FLOW_CTRL_BOTH:
+                       if (bp->dev->mtu <= 4500) {
+                               an_adv |=
+                                    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+                               bp->advertising |= (ADVERTISED_Pause |
+                                                   ADVERTISED_Asym_Pause);
+                       } else {
+                               an_adv |=
+                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+                               bp->advertising |= ADVERTISED_Asym_Pause;
+                       }
+                       break;
+
+               case FLOW_CTRL_NONE:
+               default:
+                       an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+                       bp->advertising &= ~(ADVERTISED_Pause |
+                                            ADVERTISED_Asym_Pause);
+                       break;
+               }
+       } else { /* forced mode */
+               switch (bp->req_flow_ctrl) {
+               case FLOW_CTRL_AUTO:
+                       DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
+                                          " req_autoneg 0x%x\n",
+                          bp->req_flow_ctrl, bp->req_autoneg);
+                       break;
+
+               case FLOW_CTRL_TX:
+                       an_adv |=
+                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+                       bp->advertising |= ADVERTISED_Asym_Pause;
+                       break;
 
-       /* set pause */
-       switch (bp->pause_mode) {
-       case PAUSE_SYMMETRIC:
-               an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
-               break;
-       case PAUSE_ASYMMETRIC:
-               an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-               break;
-       case PAUSE_BOTH:
-               an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-               break;
-       case PAUSE_NONE:
-               an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
-               break;
+               case FLOW_CTRL_RX:
+               case FLOW_CTRL_BOTH:
+                       an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+                       bp->advertising |= (ADVERTISED_Pause |
+                                           ADVERTISED_Asym_Pause);
+                       break;
+
+               case FLOW_CTRL_NONE:
+               default:
+                       an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+                       bp->advertising &= ~(ADVERTISED_Pause |
+                                            ADVERTISED_Asym_Pause);
+                       break;
+               }
        }
 
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
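For reference, the advertisement built above is what clause 37 autoneg later resolves against the link partner's, following IEEE 802.3 Annex 28B (the Table 28B-3 that the comment cites). A self-contained sketch of that resolution, using illustrative names rather than the driver's MDIO_COMBO_IEEE0_* constants:

#include <stdbool.h>

/* Illustrative only: resolve TX/RX pause from the local and link-partner
 * pause advertisement bits per IEEE 802.3 Table 28B-3.  'pause' is the
 * symmetric-pause bit, 'asym_dir' the asymmetric-direction bit. */
struct pause_adv {
        bool pause;
        bool asym_dir;
};

static void resolve_pause(struct pause_adv local, struct pause_adv partner,
                          bool *tx_pause, bool *rx_pause)
{
        *tx_pause = false;
        *rx_pause = false;

        if (local.pause && partner.pause) {
                /* both sides symmetric-capable: pause in both directions */
                *tx_pause = true;
                *rx_pause = true;
        } else if (!local.pause && local.asym_dir &&
                   partner.pause && partner.asym_dir) {
                /* local side only sends pause frames */
                *tx_pause = true;
        } else if (local.pause && local.asym_dir &&
                   !partner.pause && partner.asym_dir) {
                /* local side only honours received pause frames */
                *rx_pause = true;
        }
}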
@@ -2752,47 +3207,162 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
 static void bnx2x_link_int_enable(struct bnx2x *bp)
 {
        int port = bp->port;
+       u32 ext_phy_type;
+       u32 mask;
 
        /* setting the status to report on link up
           for either XGXS or SerDes */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-                      (NIG_XGXS0_LINK_STATUS |
-                       NIG_STATUS_INTERRUPT_XGXS0_LINK10G |
-                       NIG_SERDES0_LINK_STATUS));
+                      (NIG_STATUS_XGXS0_LINK10G |
+                       NIG_STATUS_XGXS0_LINK_STATUS |
+                       NIG_STATUS_SERDES0_LINK_STATUS));
 
        if (bp->phy_flags & PHY_XGXS_FLAG) {
-               /* TBD -
-                * in force mode (not AN) we can enable just the relevant
-                * interrupt
-                * Even in AN we might enable only one according to the AN
-                * speed mask
-                */
-               bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                             (NIG_MASK_XGXS0_LINK_STATUS |
-                              NIG_MASK_XGXS0_LINK10G));
-               DP(NETIF_MSG_LINK, "enable XGXS interrupt\n");
+               mask = (NIG_MASK_XGXS0_LINK10G |
+                       NIG_MASK_XGXS0_LINK_STATUS);
+               DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+               ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+               if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+                   (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+                   (ext_phy_type !=
+                               PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
+                       mask |= NIG_MASK_MI_INT;
+                       DP(NETIF_MSG_LINK, "enabled external phy int\n");
+               }
 
        } else { /* SerDes */
-               bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                             NIG_MASK_SERDES0_LINK_STATUS);
-               DP(NETIF_MSG_LINK, "enable SerDes interrupt\n");
+               mask = NIG_MASK_SERDES0_LINK_STATUS;
+               DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+               ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+               if ((ext_phy_type !=
+                               PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
+                   (ext_phy_type !=
+                               PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
+                       mask |= NIG_MASK_MI_INT;
+                       DP(NETIF_MSG_LINK, "enabled external phy int\n");
+               }
        }
+       bnx2x_bits_en(bp,
+                     NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+                     mask);
+       DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
+          " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
+          " 10G %x, XGXS_LINK %x\n", port,
+          (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
+          REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
+          REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+          REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+          REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
+          REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+          REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
+       );
+}
+
+static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
+{
+       u32 ext_phy_addr = ((bp->ext_phy_config &
+                            PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+                           PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+       u32 fw_ver1, fw_ver2;
+
+       /* Need to wait 200ms after reset */
+       msleep(200);
+       /* Boot port from external ROM
+        * Set ser_boot_ctl bit in the MISC_CTRL1 register
+        */
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD,
+                               EXT_PHY_KR_MISC_CTRL1, 0x0001);
+
+       /* Reset internal microprocessor */
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
+                               EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
+       /* set micro reset = 0 */
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
+                               EXT_PHY_KR_ROM_MICRO_RESET);
+       /* Reset internal microprocessor */
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
+                               EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
+       /* wait for 100ms for code download via SPI port */
+       msleep(100);
+
+       /* Clear ser_boot_ctl bit */
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD,
+                               EXT_PHY_KR_MISC_CTRL1, 0x0000);
+       /* Wait 100ms */
+       msleep(100);
+
+       /* Print the PHY FW version */
+       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
+                              EXT_PHY_KR_PMA_PMD_DEVAD,
+                              0xca19, &fw_ver1);
+       bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
+                              EXT_PHY_KR_PMA_PMD_DEVAD,
+                              0xca1a, &fw_ver2);
+       DP(NETIF_MSG_LINK,
+          "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
+}
+
+static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
+{
+       u32 ext_phy_addr = ((bp->ext_phy_config &
+                            PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+                           PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+
+       /* Force KR or KX */
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
+                               0x2040);
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
+                               0x000b);
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
+                               0x0000);
+       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+                               EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
+                               0x0000);
 }
 
 static void bnx2x_ext_phy_init(struct bnx2x *bp)
 {
-       int port = bp->port;
        u32 ext_phy_type;
        u32 ext_phy_addr;
-       u32 local_phy;
+       u32 cnt;
+       u32 ctrl;
+       u32 val = 0;
 
        if (bp->phy_flags & PHY_XGXS_FLAG) {
-               local_phy = bp->phy_addr;
                ext_phy_addr = ((bp->ext_phy_config &
                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                                PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
 
                ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+               /* Make sure that the soft reset is off (except for the 8072:
+                * due to the lock, it will be done inside the specific
+                * handling)
+                */
+               if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+                   (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
+                   (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
+                       /* Wait for soft reset to get cleared up to 1 sec */
+                       for (cnt = 0; cnt < 1000; cnt++) {
+                               bnx2x_mdio45_read(bp, ext_phy_addr,
+                                                 EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                                 EXT_PHY_OPT_CNTL, &ctrl);
+                               if (!(ctrl & (1<<15)))
+                                       break;
+                               msleep(1);
+                       }
+                       DP(NETIF_MSG_LINK,
+                          "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+               }
+
                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                        DP(NETIF_MSG_LINK, "XGXS Direct\n");
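For reference, before touching most external PHYs the code above waits for bit 15 (soft reset) of the clause 45 PMA/PMD control register to clear, polling for up to roughly one second. A generic sketch of that wait, with hypothetical read and sleep callbacks standing in for bnx2x_mdio45_read() and msleep():

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: poll a PHY control register until its soft-reset bit
 * (bit 15) clears, giving up after roughly one second (1000 x 1 ms). */
typedef int (*phy_read_ctrl_fn)(void *ctx, uint16_t *val);

static bool phy_wait_reset_clear(phy_read_ctrl_fn read_ctrl, void *ctx,
                                 void (*sleep_ms)(unsigned int ms))
{
        uint16_t ctrl = 0;
        int cnt;

        for (cnt = 0; cnt < 1000; cnt++) {
                if (read_ctrl(ctx, &ctrl))
                        return false;           /* MDIO read failed */
                if (!(ctrl & (1 << 15)))        /* soft reset finished */
                        return true;
                sleep_ms(1);
        }
        return false;                           /* still in reset */
}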
@@ -2800,49 +3370,235 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
 
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                        DP(NETIF_MSG_LINK, "XGXS 8705\n");
-                       bnx2x_bits_en(bp,
-                                     NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                                     NIG_MASK_MI_INT);
-                       DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
 
-                       bp->phy_addr = ext_phy_type;
-                       bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_OPT_PMA_PMD_DEVAD,
                                            EXT_PHY_OPT_PMD_MISC_CNTL,
                                            0x8288);
-                       bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_OPT_PMA_PMD_DEVAD,
                                            EXT_PHY_OPT_PHY_IDENTIFIER,
                                            0x7fbf);
-                       bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_OPT_PMA_PMD_DEVAD,
                                            EXT_PHY_OPT_CMU_PLL_BYPASS,
                                            0x0100);
-                       bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD,
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_OPT_WIS_DEVAD,
                                            EXT_PHY_OPT_LASI_CNTL, 0x1);
                        break;
 
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                        DP(NETIF_MSG_LINK, "XGXS 8706\n");
-                       bnx2x_bits_en(bp,
-                                     NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                                     NIG_MASK_MI_INT);
-                       DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
-
-                       bp->phy_addr = ext_phy_type;
-                       bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
-                                           EXT_PHY_OPT_PMD_DIGITAL_CNT,
-                                           0x400);
-                       bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+
+                       if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+                               /* Force speed */
+                               if (bp->req_line_speed == SPEED_10000) {
+                                       DP(NETIF_MSG_LINK,
+                                          "XGXS 8706 force 10Gbps\n");
+                                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                               EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                               EXT_PHY_OPT_PMD_DIGITAL_CNT,
+                                               0x400);
+                               } else {
+                                       /* Force 1Gbps */
+                                       DP(NETIF_MSG_LINK,
+                                          "XGXS 8706 force 1Gbps\n");
+
+                                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                               EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                               EXT_PHY_OPT_CNTL,
+                                               0x0040);
+
+                                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                               EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                               EXT_PHY_OPT_CNTL2,
+                                               0x000D);
+                               }
+
+                               /* Enable LASI */
+                               bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                                   EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                                   EXT_PHY_OPT_LASI_CNTL,
+                                                   0x1);
+                       } else {
+                               /* AUTONEG */
+                               /* Allow CL37 through CL73 */
+                               DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+                               bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                                   EXT_PHY_AUTO_NEG_DEVAD,
+                                                   EXT_PHY_OPT_AN_CL37_CL73,
+                                                   0x040c);
+
+                               /* Enable Full-Duplex advertisement on CL37 */
+                               bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                                   EXT_PHY_AUTO_NEG_DEVAD,
+                                                   EXT_PHY_OPT_AN_CL37_FD,
+                                                   0x0020);
+                               /* Enable CL37 AN */
+                               bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                                   EXT_PHY_AUTO_NEG_DEVAD,
+                                                   EXT_PHY_OPT_AN_CL37_AN,
+                                                   0x1000);
+                               /* Advertise 10G/1G support */
+                               if (bp->advertising &
+                                   ADVERTISED_1000baseT_Full)
+                                       val = (1<<5);
+                               if (bp->advertising &
+                                   ADVERTISED_10000baseT_Full)
+                                       val |= (1<<7);
+
+                               bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                                   EXT_PHY_AUTO_NEG_DEVAD,
+                                                   EXT_PHY_OPT_AN_ADV, val);
+                               /* Enable LASI */
+                               bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                                   EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                                   EXT_PHY_OPT_LASI_CNTL,
+                                                   0x1);
+
+                               /* Enable clause 73 AN */
+                               bnx2x_mdio45_write(bp, ext_phy_addr,
+                                                  EXT_PHY_AUTO_NEG_DEVAD,
+                                                  EXT_PHY_OPT_CNTL,
+                                                  0x1200);
+                       }
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+                       bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+                       /* Wait for soft reset to get cleared up to 1 sec */
+                       for (cnt = 0; cnt < 1000; cnt++) {
+                               bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                               EXT_PHY_OPT_CNTL, &ctrl);
+                               if (!(ctrl & (1<<15)))
+                                       break;
+                               msleep(1);
+                       }
+                       DP(NETIF_MSG_LINK,
+                          "8072 control reg 0x%x (after %d ms)\n",
+                          ctrl, cnt);
+
+                       bnx2x_bcm8072_external_rom_boot(bp);
+                       DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
+
+                       /* enable LASI */
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_KR_PMA_PMD_DEVAD,
+                                               0x9000, 0x0400);
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_KR_PMA_PMD_DEVAD,
+                                               EXT_PHY_KR_LASI_CNTL, 0x0004);
+
+                       /* If this is forced speed, set to KR or KX
+                        * (all others are not supported)
+                        */
+                       if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+                               if (bp->req_line_speed == SPEED_10000) {
+                                       bnx2x_bcm8072_force_10G(bp);
+                                       DP(NETIF_MSG_LINK,
+                                          "Forced speed 10G on 8072\n");
+                                       /* unlock */
+                                       bnx2x_hw_unlock(bp,
+                                               HW_LOCK_RESOURCE_8072_MDIO);
+                                       break;
+                               } else
+                                       val = (1<<5);
+                       } else {
+
+                               /* Advertise 10G/1G support */
+                               if (bp->advertising &
+                                               ADVERTISED_1000baseT_Full)
+                                       val = (1<<5);
+                               if (bp->advertising &
+                                               ADVERTISED_10000baseT_Full)
+                                       val |= (1<<7);
+                       }
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                       ext_phy_addr,
+                                       EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                       0x11, val);
+                       /* Add support for CL37 ( passive mode ) I */
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                               0x8370, 0x040c);
+                       /* Add support for CL37 ( passive mode ) II */
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                               0xffe4, 0x20);
+                       /* Add support for CL37 ( passive mode ) III */
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                               0xffe0, 0x1000);
+                       /* Restart autoneg */
+                       msleep(500);
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                       ext_phy_addr,
+                                       EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                       EXT_PHY_KR_CTRL, 0x1200);
+                       DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
+                          "1G %ssupported  10G %ssupported\n",
+                          (val & (1<<5)) ? "" : "not ",
+                          (val & (1<<7)) ? "" : "not ");
+
+                       /* unlock */
+                       bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       DP(NETIF_MSG_LINK,
+                          "Setting the SFX7101 LASI indication\n");
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_OPT_PMA_PMD_DEVAD,
                                            EXT_PHY_OPT_LASI_CNTL, 0x1);
+                       DP(NETIF_MSG_LINK,
+                          "Setting the SFX7101 LED to blink on traffic\n");
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_OPT_PMA_PMD_DEVAD,
+                                           0xC007, (1<<3));
+
+                       /* read modify write pause advertising */
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                         EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
+                       val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
+                       /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+                       if (bp->advertising & ADVERTISED_Pause)
+                               val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
+
+                       if (bp->advertising & ADVERTISED_Asym_Pause) {
+                               val |=
+                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
+                       }
+                       DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
+                       bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+                                           EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                           EXT_PHY_KR_AUTO_NEG_ADVERT, val);
+                       /* Restart autoneg */
+                       bnx2x_mdio45_read(bp, ext_phy_addr,
+                                         EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                         EXT_PHY_KR_CTRL, &val);
+                       val |= 0x200;
+                       bnx2x_mdio45_write(bp, ext_phy_addr,
+                                           EXT_PHY_KR_AUTO_NEG_DEVAD,
+                                           EXT_PHY_KR_CTRL, val);
                        break;
 
                default:
-                       DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
-                          bp->ext_phy_config);
+                       BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
+                                 bp->ext_phy_config);
                        break;
                }
-               bp->phy_addr = local_phy;
 
        } else { /* SerDes */
-/*             ext_phy_addr = ((bp->ext_phy_config &
+/*             ext_phy_addr = ((bp->ext_phy_config &
                                 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
                                PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
 */
@@ -2854,10 +3610,6 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
 
                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
                        DP(NETIF_MSG_LINK, "SerDes 5482\n");
-                       bnx2x_bits_en(bp,
-                                     NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                                     NIG_MASK_MI_INT);
-                       DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
                        break;
 
                default:
@@ -2871,8 +3623,22 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
 {
        u32 ext_phy_type;
-       u32 ext_phy_addr;
-       u32 local_phy;
+       u32 ext_phy_addr = ((bp->ext_phy_config &
+                            PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+                           PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+       u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
+
+       /* The PHY reset is controlled by GPIO 1
+        * Give it 1ms of reset pulse
+        */
+       if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
+           (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
+               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+                              MISC_REGISTERS_GPIO_OUTPUT_LOW);
+               msleep(1);
+               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+                              MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+       }
 
        if (bp->phy_flags & PHY_XGXS_FLAG) {
                ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
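For reference, on boards where GPIO 1 drives the external PHY reset, the code above releases the PHY with a 1 ms active-low pulse. A trivial sketch of that pulse, with a hypothetical gpio_set callback standing in for bnx2x_set_gpio():

/* Illustrative only: assert an active-low reset for 1 ms, then release it. */
static void phy_reset_pulse(void (*gpio_set)(int pin, int level),
                            void (*sleep_ms)(unsigned int ms), int pin)
{
        gpio_set(pin, 0);       /* drive reset low */
        sleep_ms(1);            /* hold for 1 ms */
        gpio_set(pin, 1);       /* release reset */
}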
@@ -2883,15 +3649,24 @@ static void bnx2x_ext_phy_reset(struct bnx2x *bp)
 
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-                       DP(NETIF_MSG_LINK, "XGXS 8705/6\n");
-                       local_phy = bp->phy_addr;
-                       ext_phy_addr = ((bp->ext_phy_config &
-                                       PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
-                                       PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
-                       bp->phy_addr = (u8)ext_phy_addr;
-                       bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
+                       DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
+                       bnx2x_mdio45_write(bp, ext_phy_addr,
+                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                           EXT_PHY_OPT_CNTL, 0xa040);
-                       bp->phy_addr = local_phy;
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+                       DP(NETIF_MSG_LINK, "XGXS 8072\n");
+                       bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+                       bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+                                               ext_phy_addr,
+                                               EXT_PHY_KR_PMA_PMD_DEVAD,
+                                               0, 1<<15);
+                       bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
                        break;
 
                default:
@@ -2930,6 +3705,7 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
                        NIG_MASK_SERDES0_LINK_STATUS |
                        NIG_MASK_MI_INT));
 
+       /* Activate the external PHY */
        bnx2x_ext_phy_reset(bp);
 
        bnx2x_set_aer_mmd(bp);
@@ -2994,13 +3770,13 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
                        /* AN enabled */
                        bnx2x_set_brcm_cl37_advertisment(bp);
 
-                       /* program duplex & pause advertisment (for aneg) */
+                       /* program duplex & pause advertisement (for aneg) */
                        bnx2x_set_ieee_aneg_advertisment(bp);
 
                        /* enable autoneg */
                        bnx2x_set_autoneg(bp);
 
-                       /* enalbe and restart AN */
+                       /* enable and restart AN */
                        bnx2x_restart_autoneg(bp);
                }
 
@@ -3010,11 +3786,11 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
                bnx2x_initialize_sgmii_process(bp);
        }
 
-       /* enable the interrupt */
-       bnx2x_link_int_enable(bp);
-
        /* init ext phy and enable link state int */
        bnx2x_ext_phy_init(bp);
+
+       /* enable the interrupt */
+       bnx2x_link_int_enable(bp);
 }
 
 static void bnx2x_phy_deassert(struct bnx2x *bp)
@@ -3073,6 +3849,11 @@ static int bnx2x_phy_init(struct bnx2x *bp)
 static void bnx2x_link_reset(struct bnx2x *bp)
 {
        int port = bp->port;
+       u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
+
+       /* update shared memory */
+       bp->link_status = 0;
+       bnx2x_update_mng(bp);
 
        /* disable attentions */
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
@@ -3081,21 +3862,45 @@ static void bnx2x_link_reset(struct bnx2x *bp)
                        NIG_MASK_SERDES0_LINK_STATUS |
                        NIG_MASK_MI_INT));
 
-       bnx2x_ext_phy_reset(bp);
+       /* activate nig drain */
+       NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+       /* disable nig egress interface */
+       NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
+       NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+
+       /* Stop BigMac rx */
+       bnx2x_bmac_rx_disable(bp);
+
+       /* disable emac */
+       NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+       msleep(10);
+
+       /* The PHY reset is controlled by GPIO 1
+        * Hold it as output low
+        */
+       if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
+           (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
+               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+                              MISC_REGISTERS_GPIO_OUTPUT_LOW);
+               DP(NETIF_MSG_LINK, "reset external PHY\n");
+       }
 
        /* reset the SerDes/XGXS */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
               (0x1ff << (port*16)));
 
-       /* reset EMAC / BMAC and disable NIG interfaces */
-       NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
-       NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
+       /* reset BigMac */
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 
-       NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
+       /* disable nig ingress interface */
+       NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
        NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
-       NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
 
-       NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+       /* set link down */
+       bp->link_up = 0;
 }
 
 #ifdef BNX2X_XGXS_LB
@@ -3158,7 +3963,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
        int port = bp->port;
 
        DP(NETIF_MSG_TIMER,
-          "spe (%x:%x)  command %x  hw_cid %x  data (%x:%x)  left %x\n",
+          "spe (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
           (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command,
           HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
@@ -3176,6 +3981,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                bnx2x_panic();
                return -EBUSY;
        }
+
        /* CID needs port number to be encoded int it */
        bp->spq_prod_bd->hdr.conn_and_cmd_data =
                        cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
@@ -3282,8 +4088,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
        u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
        u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                              MISC_REG_AEU_MASK_ATTN_FUNC_0;
-       u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
-                                  NIG_REG_MASK_INTERRUPT_PORT0;
+       u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+                                      NIG_REG_MASK_INTERRUPT_PORT0;
 
        if (~bp->aeu_mask & (asserted & 0xff))
                BNX2X_ERR("IGU ERROR\n");
@@ -3301,15 +4107,11 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
        if (asserted & ATTN_HARD_WIRED_MASK) {
                if (asserted & ATTN_NIG_FOR_FUNC) {
-                       u32 nig_status_port;
-                       u32 nig_int_addr = port ?
-                                       NIG_REG_STATUS_INTERRUPT_PORT1 :
-                                       NIG_REG_STATUS_INTERRUPT_PORT0;
 
-                       bp->nig_mask = REG_RD(bp, nig_mask_addr);
-                       REG_WR(bp, nig_mask_addr, 0);
+                       /* save nig interrupt mask */
+                       bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
+                       REG_WR(bp, nig_int_mask_addr, 0);
 
-                       nig_status_port = REG_RD(bp, nig_int_addr);
                        bnx2x_link_update(bp);
 
                        /* handle unicore attn? */
@@ -3362,15 +4164,132 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
        /* now set back the mask */
        if (asserted & ATTN_NIG_FOR_FUNC)
-               REG_WR(bp, nig_mask_addr, bp->nig_mask);
+               REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
 }
 
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 {
        int port = bp->port;
-       int index;
+       int reg_offset;
+       u32 val;
+
+       if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+               reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+               val = REG_RD(bp, reg_offset);
+               val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+               REG_WR(bp, reg_offset, val);
+
+               BNX2X_ERR("SPIO5 hw attention\n");
+
+               switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+               case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
+                       /* Fan failure attention */
+
+                       /* The PHY reset is controlled by GPIO 1 */
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+                                      MISC_REGISTERS_GPIO_OUTPUT_LOW);
+                       /* Low power mode is controlled by GPIO 2 */
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+                                      MISC_REGISTERS_GPIO_OUTPUT_LOW);
+                       /* mark the failure */
+                       bp->ext_phy_config &=
+                                       ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+                       bp->ext_phy_config |=
+                                       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+                       SHMEM_WR(bp,
+                                dev_info.port_hw_config[port].
+                                                       external_phy_config,
+                                bp->ext_phy_config);
+                       /* log the failure */
+                       printk(KERN_ERR PFX "Fan Failure on Network"
+                              " Controller %s has caused the driver to"
+                              " shut down the card to prevent permanent"
+                              " damage.  Please contact Dell Support for"
+                              " assistance\n", bp->dev->name);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+       u32 val;
+
+       if (attn & BNX2X_DOORQ_ASSERT) {
+
+               val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+               BNX2X_ERR("DB hw attention 0x%x\n", val);
+               /* DORQ discard attention */
+               if (val & 0x2)
+                       BNX2X_ERR("FATAL error from DORQ\n");
+       }
+}
+
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+       u32 val;
+
+       if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+               val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+               BNX2X_ERR("CFC hw attention 0x%x\n", val);
+               /* CFC error attention */
+               if (val & 0x2)
+                       BNX2X_ERR("FATAL error from CFC\n");
+       }
+
+       if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+
+               val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+               BNX2X_ERR("PXP hw attention 0x%x\n", val);
+               /* RQ_USDMDP_FIFO_OVERFLOW */
+               if (val & 0x18000)
+                       BNX2X_ERR("FATAL error from PXP\n");
+       }
+}
+
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+       if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+               if (attn & BNX2X_MC_ASSERT_BITS) {
+
+                       BNX2X_ERR("MC assert!\n");
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+                       bnx2x_panic();
+
+               } else if (attn & BNX2X_MCP_ASSERT) {
+
+                       BNX2X_ERR("MCP assert!\n");
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+                       bnx2x_mc_assert(bp);
+
+               } else
+                       BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+       }
+
+       if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+
+               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+               BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
+       }
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
        struct attn_route attn;
        struct attn_route group_mask;
+       int port = bp->port;
+       int index;
        u32 reg_addr;
        u32 val;
 
@@ -3391,64 +4310,14 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
                        DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
                           (unsigned long long)group_mask.sig[0]);
 
-                       if (attn.sig[3] & group_mask.sig[3] &
-                           EVEREST_GEN_ATTN_IN_USE_MASK) {
-
-                               if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
-
-                                       BNX2X_ERR("MC assert!\n");
-                                       bnx2x_panic();
-
-                               } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
-
-                                       BNX2X_ERR("MCP assert!\n");
-                                       REG_WR(bp,
-                                            MISC_REG_AEU_GENERAL_ATTN_11, 0);
-                                       bnx2x_mc_assert(bp);
-
-                               } else {
-                                       BNX2X_ERR("UNKOWEN HW ASSERT!\n");
-                               }
-                       }
-
-                       if (attn.sig[1] & group_mask.sig[1] &
-                           BNX2X_DOORQ_ASSERT) {
-
-                               val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
-                               BNX2X_ERR("DB hw attention 0x%x\n", val);
-                               /* DORQ discard attention */
-                               if (val & 0x2)
-                                       BNX2X_ERR("FATAL error from DORQ\n");
-                       }
-
-                       if (attn.sig[2] & group_mask.sig[2] &
-                           AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
-
-                               val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
-                               BNX2X_ERR("CFC hw attention 0x%x\n", val);
-                               /* CFC error attention */
-                               if (val & 0x2)
-                                       BNX2X_ERR("FATAL error from CFC\n");
-                       }
-
-                       if (attn.sig[2] & group_mask.sig[2] &
-                           AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
-
-                               val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
-                               BNX2X_ERR("PXP hw attention 0x%x\n", val);
-                               /* RQ_USDMDP_FIFO_OVERFLOW */
-                               if (val & 0x18000)
-                                       BNX2X_ERR("FATAL error from PXP\n");
-                       }
-
-                       if (attn.sig[3] & group_mask.sig[3] &
-                           EVEREST_LATCHED_ATTN_IN_USE_MASK) {
-
-                               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
-                                      0x7ff);
-                               DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
-                                  attn.sig[3]);
-                       }
+                       bnx2x_attn_int_deasserted3(bp,
+                                       attn.sig[3] & group_mask.sig[3]);
+                       bnx2x_attn_int_deasserted1(bp,
+                                       attn.sig[1] & group_mask.sig[1]);
+                       bnx2x_attn_int_deasserted2(bp,
+                                       attn.sig[2] & group_mask.sig[2]);
+                       bnx2x_attn_int_deasserted0(bp,
+                                       attn.sig[0] & group_mask.sig[0]);
 
                        if ((attn.sig[0] & group_mask.sig[0] &
                                                HW_INTERRUT_ASSERT_SET_0) ||
@@ -3456,7 +4325,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
                                                HW_INTERRUT_ASSERT_SET_1) ||
                            (attn.sig[2] & group_mask.sig[2] &
                                                HW_INTERRUT_ASSERT_SET_2))
-                               BNX2X_ERR("FATAL HW block attention\n");
+                               BNX2X_ERR("FATAL HW block attention"
+                                         "  set0 0x%x  set1 0x%x"
+                                         "  set2 0x%x\n",
+                                         (attn.sig[0] & group_mask.sig[0] &
+                                          HW_INTERRUT_ASSERT_SET_0),
+                                         (attn.sig[1] & group_mask.sig[1] &
+                                          HW_INTERRUT_ASSERT_SET_1),
+                                         (attn.sig[2] & group_mask.sig[2] &
+                                          HW_INTERRUT_ASSERT_SET_2));
 
                        if ((attn.sig[0] & group_mask.sig[0] &
                                                HW_PRTY_ASSERT_SET_0) ||
@@ -3464,7 +4341,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
                                                HW_PRTY_ASSERT_SET_1) ||
                            (attn.sig[2] & group_mask.sig[2] &
                                                HW_PRTY_ASSERT_SET_2))
-                               BNX2X_ERR("FATAL HW block parity atention\n");
+                              BNX2X_ERR("FATAL HW block parity attention\n");
                }
        }
 
@@ -3529,7 +4406,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 
        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+               DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
                return;
        }
 
@@ -3539,12 +4416,11 @@ static void bnx2x_sp_task(struct work_struct *work)
 
        DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
 
-       if (status & 0x1) {
-               /* HW attentions */
+       /* HW attentions */
+       if (status & 0x1)
                bnx2x_attn_int(bp);
-       }
 
-       /* CStorm events: query_stats, cfc delete ramrods */
+       /* CStorm events: query_stats, port delete ramrod */
        if (status & 0x2)
                bp->stat_pending = 0;
 
@@ -3558,6 +4434,7 @@ static void bnx2x_sp_task(struct work_struct *work)
                     IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
                     IGU_INT_ENABLE, 1);
+
 }
 
 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3567,11 +4444,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 
        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+               DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }
 
-       bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
+       bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
@@ -3906,7 +4783,7 @@ static void bnx2x_stop_stats(struct bnx2x *bp)
 
                while (bp->stats_state != STATS_STATE_DISABLE) {
                        if (!timeout) {
-                               BNX2X_ERR("timeout wating for stats stop\n");
+                               BNX2X_ERR("timeout waiting for stats stop\n");
                                break;
                        }
                        timeout--;
@@ -4173,39 +5050,37 @@ static void bnx2x_update_net_stats(struct bnx2x *bp)
 
        nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 
-       nstats->tx_bytes =
-               bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+       nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
-       nstats->rx_dropped = estats->checksum_discard +
-                                  estats->mac_discard;
+       nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
        nstats->tx_dropped = 0;
 
        nstats->multicast =
                bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
 
-       nstats->collisions =
-               estats->single_collision_transmit_frames +
-               estats->multiple_collision_transmit_frames +
-               estats->late_collision_frames +
-               estats->excessive_collision_frames;
+       nstats->collisions = estats->single_collision_transmit_frames +
+                            estats->multiple_collision_transmit_frames +
+                            estats->late_collision_frames +
+                            estats->excessive_collision_frames;
 
        nstats->rx_length_errors = estats->runt_packets_received +
                                   estats->jabber_packets_received;
-       nstats->rx_over_errors = estats->no_buff_discard;
+       nstats->rx_over_errors = estats->brb_discard +
+                                estats->brb_truncate_discard;
        nstats->rx_crc_errors = estats->crc_receive_errors;
        nstats->rx_frame_errors = estats->alignment_errors;
-       nstats->rx_fifo_errors = estats->brb_discard +
-                                      estats->brb_truncate_discard;
+       nstats->rx_fifo_errors = estats->no_buff_discard;
        nstats->rx_missed_errors = estats->xxoverflow_discard;
 
        nstats->rx_errors = nstats->rx_length_errors +
                            nstats->rx_over_errors +
                            nstats->rx_crc_errors +
                            nstats->rx_frame_errors +
-                           nstats->rx_fifo_errors;
+                           nstats->rx_fifo_errors +
+                           nstats->rx_missed_errors;
 
        nstats->tx_aborted_errors = estats->late_collision_frames +
-                                         estats->excessive_collision_frames;
+                                   estats->excessive_collision_frames;
        nstats->tx_carrier_errors = estats->false_carrier_detections;
        nstats->tx_fifo_errors = 0;
        nstats->tx_heartbeat_errors = 0;
@@ -4334,7 +5209,7 @@ static void bnx2x_timer(unsigned long data)
                return;
 
        if (atomic_read(&bp->intr_sem) != 0)
-               goto bnx2x_restart_timer;
+               goto timer_restart;
 
        if (poll) {
                struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -4344,7 +5219,7 @@ static void bnx2x_timer(unsigned long data)
                rc = bnx2x_rx_int(fp, 1000);
        }
 
-       if (!nomcp && (bp->bc_ver >= 0x040003)) {
+       if (!nomcp) {
                int port = bp->port;
                u32 drv_pulse;
                u32 mcp_pulse;
@@ -4353,9 +5228,9 @@ static void bnx2x_timer(unsigned long data)
                bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
                /* TBD - add SYSTEM_TIME */
                drv_pulse = bp->fw_drv_pulse_wr_seq;
-               SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse);
+               SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
 
-               mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) &
+               mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
                             MCP_PULSE_SEQ_MASK);
                /* The delta between driver pulse and mcp response
                 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -4369,11 +5244,11 @@ static void bnx2x_timer(unsigned long data)
        }
 
        if (bp->stats_state == STATS_STATE_DISABLE)
-               goto bnx2x_restart_timer;
+               goto timer_restart;
 
        bnx2x_update_stats(bp);
 
-bnx2x_restart_timer:
+timer_restart:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
@@ -4438,6 +5313,9 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
                                            atten_status_block);
        def_sb->atten_status_block.status_block_id = id;
 
+       bp->def_att_idx = 0;
+       bp->attn_state = 0;
+
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 
@@ -4472,6 +5350,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
                                            u_def_status_block);
        def_sb->u_def_status_block.status_block_id = id;
 
+       bp->def_u_idx = 0;
+
        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -4489,6 +5369,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
                                            c_def_status_block);
        def_sb->c_def_status_block.status_block_id = id;
 
+       bp->def_c_idx = 0;
+
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
@@ -4506,6 +5388,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
                                            t_def_status_block);
        def_sb->t_def_status_block.status_block_id = id;
 
+       bp->def_t_idx = 0;
+
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
@@ -4523,6 +5407,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
                                            x_def_status_block);
        def_sb->x_def_status_block.status_block_id = id;
 
+       bp->def_x_idx = 0;
+
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -4535,6 +5421,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
                REG_WR16(bp, BAR_XSTRORM_INTMEM +
                         XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
 
+       bp->stat_pending = 0;
+
        bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
 
@@ -4626,7 +5514,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
                fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
                fp->rx_pkt = fp->rx_calls = 0;
 
-               /* Warning! this will genrate an interrupt (to the TSTORM) */
+               /* Warning! this will generate an interrupt (to the TSTORM) */
                /* must only be done when chip is initialized */
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
@@ -4678,7 +5566,6 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp)
 
        bp->spq_left = MAX_SPQ_PENDING;
        bp->spq_prod_idx = 0;
-       bp->dsb_sp_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
        bp->spq_prod_bd = bp->spq;
        bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
@@ -4755,6 +5642,42 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
        REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
 }
 
+static void bnx2x_set_client_config(struct bnx2x *bp)
+{
+#ifdef BCM_VLAN
+       int mode = bp->rx_mode;
+#endif
+       int i, port = bp->port;
+       struct tstorm_eth_client_config tstorm_client = {0};
+
+       tstorm_client.mtu = bp->dev->mtu;
+       tstorm_client.statistics_counter_id = 0;
+       tstorm_client.config_flags =
+                               TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
+#ifdef BCM_VLAN
+       if (mode && bp->vlgrp) {
+               tstorm_client.config_flags |=
+                               TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
+               DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+       }
+#endif
+       if (bp->rx_mode != BNX2X_RX_MODE_PROMISC)
+               tstorm_client.drop_flags =
+                               TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
+
+       for_each_queue(bp, i) {
+               REG_WR(bp, BAR_TSTRORM_INTMEM +
+                      TSTORM_CLIENT_CONFIG_OFFSET(port, i),
+                      ((u32 *)&tstorm_client)[0]);
+               REG_WR(bp, BAR_TSTRORM_INTMEM +
+                      TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
+                      ((u32 *)&tstorm_client)[1]);
+       }
+
+/*     DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
+          ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
+}
+
 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 {
        int mode = bp->rx_mode;
@@ -4794,41 +5717,9 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 /*             DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
                   ((u32 *)&tstorm_mac_filter)[i]); */
        }
-}
-
-static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
-{
-#ifdef BCM_VLAN
-       int mode = bp->rx_mode;
-#endif
-       int port = bp->port;
-       struct tstorm_eth_client_config tstorm_client = {0};
-
-       tstorm_client.mtu = bp->dev->mtu;
-       tstorm_client.statistics_counter_id = 0;
-       tstorm_client.config_flags =
-               TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
-#ifdef BCM_VLAN
-       if (mode && bp->vlgrp) {
-               tstorm_client.config_flags |=
-                               TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
-               DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
-       }
-#endif
-       tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
-                                   TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
-                                   TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
-                                   TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
-
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
-              ((u32 *)&tstorm_client)[0]);
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
-              ((u32 *)&tstorm_client)[1]);
 
-/*      DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
-          ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
+       if (mode != BNX2X_RX_MODE_NONE)
+               bnx2x_set_client_config(bp);
 }
 
 static void bnx2x_init_internal(struct bnx2x *bp)
@@ -4836,7 +5727,6 @@ static void bnx2x_init_internal(struct bnx2x *bp)
        int port = bp->port;
        struct tstorm_eth_function_common_config tstorm_config = {0};
        struct stats_indication_flags stats_flags = {0};
-       int i;
 
        if (is_multi(bp)) {
                tstorm_config.config_flags = MULTI_FLAGS;
@@ -4850,13 +5740,9 @@ static void bnx2x_init_internal(struct bnx2x *bp)
 /*      DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
           (*(u32 *)&tstorm_config)); */
 
-       bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx untill link is up */
+       bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
        bnx2x_set_storm_rx_mode(bp);
 
-       for_each_queue(bp, i)
-               bnx2x_set_client_config(bp, i);
-
-
        stats_flags.collect_eth = cpu_to_le32(1);
 
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
@@ -4902,7 +5788,7 @@ static void bnx2x_nic_init(struct bnx2x *bp)
        bnx2x_init_internal(bp);
        bnx2x_init_stats(bp);
        bnx2x_init_ind_table(bp);
-       bnx2x_enable_int(bp);
+       bnx2x_int_enable(bp);
 
 }
 
@@ -5265,8 +6151,10 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
        if (mode & 0x1) {       /* init common */
                DP(BNX2X_MSG_MCP, "starting common init  func %d  mode %x\n",
                   func, mode);
-               REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff);
-               REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc);
+               REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+                      0xffffffff);
+               REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+                      0xfffc);
                bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
 
                REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
@@ -5359,7 +6247,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
                REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
 #endif
                bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
-               /* softrest pulse */
+               /* soft reset pulse */
                REG_WR(bp, QM_REG_SOFT_RESET, 1);
                REG_WR(bp, QM_REG_SOFT_RESET, 0);
 
@@ -5413,7 +6301,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
                REG_WR(bp, SRC_REG_SOFT_RST, 1);
                for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
                        REG_WR(bp, i, 0xc0cac01a);
-                       /* TODO: repleace with something meaningfull */
+                       /* TODO: replace with something meaningful */
                }
                /* SRCH COMMON comes here */
                REG_WR(bp, SRC_REG_SOFT_RST, 0);
@@ -5486,6 +6374,28 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
                enable_blocks_attention(bp);
                /* enable_blocks_parity(bp); */
 
+               switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+               case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
+                       /* Fan failure is indicated by SPIO 5 */
+                       bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+                                      MISC_REGISTERS_SPIO_INPUT_HI_Z);
+
+                       /* set to active low mode */
+                       val = REG_RD(bp, MISC_REG_SPIO_INT);
+                       val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+                                       MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+                       REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+                       /* enable interrupt to signal the IGU */
+                       val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+                       val |= (1 << MISC_REGISTERS_SPIO_5);
+                       REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+                       break;
+
+               default:
+                       break;
+               }
+
        } /* end of common init */
 
        /* per port init */
@@ -5645,9 +6555,21 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
        /* Port MCP comes here */
        /* Port DMAE comes here */
 
+       switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+       case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
+               /* add SPIO 5 to group 0 */
+               val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+               val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+               REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
+               break;
+
+       default:
+               break;
+       }
+
        bnx2x_link_reset(bp);
 
-       /* Reset pciex errors for debug */
+       /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2114, 0xffffffff);
        REG_WR(bp, 0x2120, 0xffffffff);
        REG_WR(bp, 0x2814, 0xffffffff);
@@ -5669,9 +6591,9 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
                port = bp->port;
 
                bp->fw_drv_pulse_wr_seq =
-                               (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) &
+                               (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
                                 DRV_PULSE_SEQ_MASK);
-               bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param);
+               bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
                DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  fw_mb 0x%x\n",
                   bp->fw_drv_pulse_wr_seq, bp->fw_mb);
        } else {
@@ -5681,16 +6603,15 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
        return 0;
 }
 
-
-/* send the MCP a request, block untill there is a reply */
+/* send the MCP a request, block until there is a reply */
 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 {
-       u32 rc = 0;
-       u32 seq = ++bp->fw_seq;
        int port = bp->port;
+       u32 seq = ++bp->fw_seq;
+       u32 rc = 0;
 
-       SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq);
-       DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq);
+       SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
+       DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
        /* let the FW do it's magic ... */
        msleep(100); /* TBD */
@@ -5698,19 +6619,20 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
        if (CHIP_REV_IS_SLOW(bp))
                msleep(900);
 
-       rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header);
-
+       rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
        DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
 
        /* is this a reply to our command? */
        if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
                rc &= FW_MSG_CODE_MASK;
+
        } else {
                /* FW BUG! */
                BNX2X_ERR("FW failed to respond!\n");
                bnx2x_fw_dump(bp);
                rc = 0;
        }
+
        return rc;
 }
 
@@ -5869,7 +6791,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
        for (i = 0; i < 16*1024; i += 64)
                * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
 
-       /* now sixup the last line in the block to point to the next block */
+       /* now fixup the last line in the block to point to the next block */
        *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
 
        /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
@@ -5950,22 +6872,19 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
        int i;
 
        free_irq(bp->msix_table[0].vector, bp->dev);
-       DP(NETIF_MSG_IFDOWN, "rleased sp irq (%d)\n",
+       DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
           bp->msix_table[0].vector);
 
        for_each_queue(bp, i) {
-               DP(NETIF_MSG_IFDOWN, "about to rlease fp #%d->%d irq  "
+               DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
                   "state(%x)\n", i, bp->msix_table[i + 1].vector,
                   bnx2x_fp(bp, i, state));
 
-               if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
-
-                       free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
-                       bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
-
-               } else
-                       DP(NETIF_MSG_IFDOWN, "irq not freed\n");
+               if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
+                       BNX2X_ERR("IRQ of fp #%d being freed while "
+                                 "state != closed\n", i);
 
+               free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
        }
 
 }
@@ -5995,7 +6914,7 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 
        if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
                                     bp->num_queues + 1)){
-               BNX2X_ERR("failed to enable msix\n");
+               BNX2X_LOG("failed to enable MSI-X\n");
                return -1;
 
        }
@@ -6010,11 +6929,8 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 {
 
-
        int i, rc;
 
-       DP(NETIF_MSG_IFUP, "about to request sp irq\n");
-
        rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
                         bp->dev->name, bp->dev);
 
@@ -6029,7 +6945,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
                                 bp->dev->name, &bp->fp[i]);
 
                if (rc) {
-                       BNX2X_ERR("request fp #%d irq failed\n", i);
+                       BNX2X_ERR("request fp #%d irq failed  "
+                                 "rc %d\n", i, rc);
                        bnx2x_free_msix_irqs(bp);
                        return -EBUSY;
                }
@@ -6109,8 +7026,8 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
        /* can take a while if any port is running */
        int timeout = 500;
 
-       /* DP("waiting for state to become %d on IDX [%d]\n",
-       state, sb_idx); */
+       DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
+          poll ? "polling" : "waiting", state, idx);
 
        might_sleep();
 
@@ -6128,7 +7045,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
 
                mb(); /* state is changed by bnx2x_sp_event()*/
 
-               if (*state_p != state)
+               if (*state_p == state)
                        return 0;
 
                timeout--;
@@ -6136,17 +7053,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
 
        }
 
-
        /* timeout! */
-       BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx);
-       return -EBUSY;
+       BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
+                 poll ? "polling" : "waiting", state, idx);
 
+       return -EBUSY;
 }
 
 static int bnx2x_setup_leading(struct bnx2x *bp)
 {
 
-       /* reset IGU staae */
+       /* reset IGU state */
        bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 
        /* SETUP ramrod */
@@ -6162,12 +7079,13 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
        /* reset IGU state */
        bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 
+       /* SETUP ramrod */
        bp->fp[index].state = BNX2X_FP_STATE_OPENING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
 
        /* Wait for completion */
        return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
-                                &(bp->fp[index].state), 1);
+                                &(bp->fp[index].state), 0);
 
 }
 
@@ -6177,8 +7095,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
 
 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
 {
-       int rc;
-       int i = 0;
+       u32 load_code;
+       int i;
 
        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
@@ -6188,26 +7106,28 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
           initialized, otherwise - not.
        */
        if (!nomcp) {
-               rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
-               if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, unloading\n");
+                       return -EBUSY;
+               }
+               if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+                       BNX2X_ERR("MCP refused load request, unloading\n");
                        return -EBUSY; /* other port in diagnostic mode */
                }
        } else {
-               rc = FW_MSG_CODE_DRV_LOAD_COMMON;
+               load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
        }
 
-       DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
-
        /* if we can't use msix we only need one fp,
         * so try to enable msix with the requested number of fp's
         * and fallback to inta with one fp
         */
        if (req_irq) {
-
                if (use_inta) {
                        bp->num_queues = 1;
                } else {
-                       if (use_multi > 1 && use_multi <= 16)
+                       if ((use_multi > 1) && (use_multi <= 16))
                                /* user requested number */
                                bp->num_queues = use_multi;
                        else if (use_multi == 1)
@@ -6216,15 +7136,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
                                bp->num_queues = 1;
 
                        if (bnx2x_enable_msix(bp)) {
-                               /* faild to enable msix */
+                               /* failed to enable msix */
                                bp->num_queues = 1;
                                if (use_multi)
-                                       BNX2X_ERR("Muti requested but failed"
+                                       BNX2X_ERR("Multi requested but failed"
                                                  " to enable MSI-X\n");
                        }
                }
        }
 
+       DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
+
        if (bnx2x_alloc_mem(bp))
                return -ENOMEM;
 
@@ -6232,13 +7154,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
                if (bp->flags & USING_MSIX_FLAG) {
                        if (bnx2x_req_msix_irqs(bp)) {
                                pci_disable_msix(bp->pdev);
-                               goto out_error;
+                               goto load_error;
                        }
 
                } else {
                        if (bnx2x_req_irq(bp)) {
                                BNX2X_ERR("IRQ request failed, aborting\n");
-                               goto out_error;
+                               goto load_error;
                        }
                }
        }
@@ -6249,31 +7171,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
 
 
        /* Initialize HW */
-       if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
+       if (bnx2x_function_init(bp,
+                               (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
                BNX2X_ERR("HW init failed, aborting\n");
-               goto out_error;
+               goto load_error;
        }
 
 
        atomic_set(&bp->intr_sem, 0);
 
-       /* Reenable SP tasklet */
-       /*if (bp->sp_task_en) {                */
-       /*        tasklet_enable(&bp->sp_task);*/
-       /*} else {                             */
-       /*        bp->sp_task_en = 1;          */
-       /*}                                    */
 
        /* Setup NIC internals and enable interrupts */
        bnx2x_nic_init(bp);
 
        /* Send LOAD_DONE command to MCP */
        if (!nomcp) {
-               rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
-               DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
-               if (!rc) {
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+               if (!load_code) {
                        BNX2X_ERR("MCP response failure, unloading\n");
-                       goto int_disable;
+                       goto load_int_disable;
                }
        }
 
@@ -6285,11 +7201,11 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
                napi_enable(&bnx2x_fp(bp, i, napi));
 
        if (bnx2x_setup_leading(bp))
-               goto stop_netif;
+               goto load_stop_netif;
 
        for_each_nondefault_queue(bp, i)
                if (bnx2x_setup_multi(bp, i))
-                       goto stop_netif;
+                       goto load_stop_netif;
 
        bnx2x_set_mac_addr(bp);
 
@@ -6313,42 +7229,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
 
        return 0;
 
-stop_netif:
+load_stop_netif:
        for_each_queue(bp, i)
                napi_disable(&bnx2x_fp(bp, i, napi));
 
-int_disable:
-       bnx2x_disable_int_sync(bp);
+load_int_disable:
+       bnx2x_int_disable_sync(bp);
 
        bnx2x_free_skbs(bp);
        bnx2x_free_irq(bp);
 
-out_error:
+load_error:
        bnx2x_free_mem(bp);
 
        /* TBD we really need to reset the chip
           if we want to recover from this */
-       return rc;
+       return -EBUSY;
 }
 
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-       int i;
-
-       bp->rx_mode = BNX2X_RX_MODE_NONE;
-       bnx2x_set_storm_rx_mode(bp);
-
-       bnx2x_disable_int_sync(bp);
-       bnx2x_link_reset(bp);
-
-       for_each_queue(bp, i)
-               napi_disable(&bnx2x_fp(bp, i, napi));
-
-       if (netif_running(bp->dev)) {
-               netif_tx_disable(bp->dev);
-               bp->dev->trans_start = jiffies; /* prevent tx timeout */
-       }
-}
 
 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
 {
@@ -6401,20 +7299,20 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
        int rc;
 
-       /* halt the connnection */
+       /* halt the connection */
        bp->fp[index].state = BNX2X_FP_STATE_HALTING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
 
 
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
                                       &(bp->fp[index].state), 1);
-       if (rc) /* timout */
+       if (rc) /* timeout */
                return rc;
 
        /* delete cfc entry */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
 
-       return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index,
+       return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
                                 &(bp->fp[index].state), 1);
 
 }
@@ -6422,8 +7320,8 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 static void bnx2x_stop_leading(struct bnx2x *bp)
 {
-
-       /* if the other port is hadling traffic,
+       u16 dsb_sp_prod_idx;
+       /* if the other port is handling traffic,
           this can take a lot of time */
        int timeout = 500;
 
@@ -6437,52 +7335,71 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
                               &(bp->fp[0].state), 1))
                return;
 
-       bp->dsb_sp_prod_idx = *bp->dsb_sp_prod;
+       dsb_sp_prod_idx = *bp->dsb_sp_prod;
 
-       /* Send CFC_DELETE ramrod */
+       /* Send PORT_DELETE ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
 
-       /*
-          Wait for completion.
+       /* Wait for completion to arrive on default status block
           we are going to reset the chip anyway
           so there is not much to do if this times out
         */
-       while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) {
-                       timeout--;
-                       msleep(1);
+       while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
+               timeout--;
+               msleep(1);
        }
-
+       if (!timeout) {
+               DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
+                  "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
+                  *bp->dsb_sp_prod, dsb_sp_prod_idx);
+       }
+       bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+       bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
 }
 
-static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
+
+static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
 {
        u32 reset_code = 0;
-       int rc;
-       int i;
+       int i, timeout;
 
        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 
-       /* Calling flush_scheduled_work() may deadlock because
-        * linkwatch_event() may be on the workqueue and it will try to get
-        * the rtnl_lock which we are holding.
-        */
+       del_timer_sync(&bp->timer);
 
-       while (bp->in_reset_task)
-               msleep(1);
+       bp->rx_mode = BNX2X_RX_MODE_NONE;
+       bnx2x_set_storm_rx_mode(bp);
 
-       /* Delete the timer: do it before disabling interrupts, as it
-          may be stil STAT_QUERY ramrod pending after stopping the timer */
-       del_timer_sync(&bp->timer);
+       if (netif_running(bp->dev)) {
+               netif_tx_disable(bp->dev);
+               bp->dev->trans_start = jiffies; /* prevent tx timeout */
+       }
+
+       /* Wait until all fast path tasks complete */
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+
+               timeout = 1000;
+               while (bnx2x_has_work(fp) && (timeout--))
+                       msleep(1);
+               if (!timeout)
+                       BNX2X_ERR("timeout waiting for queue[%d]\n", i);
+       }
 
        /* Wait until stat ramrod returns and all SP tasks complete */
-       while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
+       timeout = 1000;
+       while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
+              (timeout--))
                msleep(1);
 
-       /* Stop fast path, disable MAC, disable interrupts, disable napi */
-       bnx2x_netif_stop(bp);
+       for_each_queue(bp, i)
+               napi_disable(&bnx2x_fp(bp, i, napi));
+       /* Disable interrupts after Tx and Rx are disabled on stack level */
+       bnx2x_int_disable_sync(bp);
 
        if (bp->flags & NO_WOL_FLAG)
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
        else if (bp->wol) {
                u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
                u8 *mac_addr = bp->dev->dev_addr;
@@ -6499,28 +7416,37 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
                EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
 
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
        } else
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
+       /* Close multi and leading connections */
        for_each_nondefault_queue(bp, i)
                if (bnx2x_stop_multi(bp, i))
-                       goto error;
-
+                       goto unload_error;
 
        bnx2x_stop_leading(bp);
+       if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
+           (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
+               DP(NETIF_MSG_IFDOWN, "failed to close leading properly!"
+                  "state 0x%x  fp[0].state 0x%x",
+                  bp->state, bp->fp[0].state);
+       }
+
+unload_error:
+       bnx2x_link_reset(bp);
 
-error:
        if (!nomcp)
-               rc = bnx2x_fw_command(bp, reset_code);
+               reset_code = bnx2x_fw_command(bp, reset_code);
        else
-               rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+               reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
 
        /* Release IRQs */
-       if (fre_irq)
+       if (free_irq)
                bnx2x_free_irq(bp);
 
        /* Reset the chip */
-       bnx2x_reset_chip(bp, rc);
+       bnx2x_reset_chip(bp, reset_code);
 
        /* Report UNLOAD_DONE to MCP */
        if (!nomcp)
@@ -6531,8 +7457,7 @@ error:
        bnx2x_free_mem(bp);
 
        bp->state = BNX2X_STATE_CLOSED;
-       /* Set link down */
-       bp->link_up = 0;
+
        netif_carrier_off(bp->dev);
 
        return 0;
@@ -6568,7 +7493,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
                                          SUPPORTED_100baseT_Half |
                                          SUPPORTED_100baseT_Full |
                                          SUPPORTED_1000baseT_Full |
-                                         SUPPORTED_2500baseT_Full |
+                                         SUPPORTED_2500baseX_Full |
                                          SUPPORTED_TP | SUPPORTED_FIBRE |
                                          SUPPORTED_Autoneg |
                                          SUPPORTED_Pause |
@@ -6581,10 +7506,10 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
 
                        bp->phy_flags |= PHY_SGMII_FLAG;
 
-                       bp->supported |= (/* SUPPORTED_10baseT_Half |
-                                            SUPPORTED_10baseT_Full |
-                                            SUPPORTED_100baseT_Half |
-                                            SUPPORTED_100baseT_Full |*/
+                       bp->supported |= (SUPPORTED_10baseT_Half |
+                                         SUPPORTED_10baseT_Full |
+                                         SUPPORTED_100baseT_Half |
+                                         SUPPORTED_100baseT_Full |
                                          SUPPORTED_1000baseT_Full |
                                          SUPPORTED_TP | SUPPORTED_FIBRE |
                                          SUPPORTED_Autoneg |
@@ -6620,7 +7545,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
                                          SUPPORTED_100baseT_Half |
                                          SUPPORTED_100baseT_Full |
                                          SUPPORTED_1000baseT_Full |
-                                         SUPPORTED_2500baseT_Full |
+                                         SUPPORTED_2500baseX_Full |
                                          SUPPORTED_10000baseT_Full |
                                          SUPPORTED_TP | SUPPORTED_FIBRE |
                                          SUPPORTED_Autoneg |
@@ -6629,12 +7554,46 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
                        break;
 
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
+                                       ext_phy_type);
+
+                       bp->supported |= (SUPPORTED_10000baseT_Full |
+                                         SUPPORTED_FIBRE |
+                                         SUPPORTED_Pause |
+                                         SUPPORTED_Asym_Pause);
+                       break;
+
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n",
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
+                                      ext_phy_type);
+
+                       bp->supported |= (SUPPORTED_10000baseT_Full |
+                                         SUPPORTED_1000baseT_Full |
+                                         SUPPORTED_Autoneg |
+                                         SUPPORTED_FIBRE |
+                                         SUPPORTED_Pause |
+                                         SUPPORTED_Asym_Pause);
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
                                       ext_phy_type);
 
                        bp->supported |= (SUPPORTED_10000baseT_Full |
+                                         SUPPORTED_1000baseT_Full |
                                          SUPPORTED_FIBRE |
+                                         SUPPORTED_Autoneg |
+                                         SUPPORTED_Pause |
+                                         SUPPORTED_Asym_Pause);
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
+                                      ext_phy_type);
+
+                       bp->supported |= (SUPPORTED_10000baseT_Full |
+                                         SUPPORTED_TP |
+                                         SUPPORTED_Autoneg |
                                          SUPPORTED_Pause |
                                          SUPPORTED_Asym_Pause);
                        break;
@@ -6691,7 +7650,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
                                   SUPPORTED_1000baseT_Full);
 
        if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
-               bp->supported &= ~SUPPORTED_2500baseT_Full;
+               bp->supported &= ~SUPPORTED_2500baseX_Full;
 
        if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
                bp->supported &= ~SUPPORTED_10000baseT_Full;
@@ -6711,13 +7670,8 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                        bp->req_line_speed = 0;
                        bp->advertising = bp->supported;
                } else {
-                       u32 ext_phy_type;
-
-                       ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
-                       if ((ext_phy_type ==
-                               PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
-                           (ext_phy_type ==
-                               PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
+                       if (XGXS_EXT_PHY_TYPE(bp) ==
+                               PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
                                /* force 10G, no AN */
                                bp->req_line_speed = SPEED_10000;
                                bp->advertising =
@@ -6734,8 +7688,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                break;
 
        case PORT_FEATURE_LINK_SPEED_10M_FULL:
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
+               if (bp->supported & SUPPORTED_10baseT_Full) {
                        bp->req_line_speed = SPEED_10;
                        bp->advertising = (ADVERTISED_10baseT_Full |
                                           ADVERTISED_TP);
@@ -6749,8 +7702,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                break;
 
        case PORT_FEATURE_LINK_SPEED_10M_HALF:
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+               if (bp->supported & SUPPORTED_10baseT_Half) {
                        bp->req_line_speed = SPEED_10;
                        bp->req_duplex = DUPLEX_HALF;
                        bp->advertising = (ADVERTISED_10baseT_Half |
@@ -6765,8 +7717,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                break;
 
        case PORT_FEATURE_LINK_SPEED_100M_FULL:
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+               if (bp->supported & SUPPORTED_100baseT_Full) {
                        bp->req_line_speed = SPEED_100;
                        bp->advertising = (ADVERTISED_100baseT_Full |
                                           ADVERTISED_TP);
@@ -6780,8 +7731,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                break;
 
        case PORT_FEATURE_LINK_SPEED_100M_HALF:
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+               if (bp->supported & SUPPORTED_100baseT_Half) {
                        bp->req_line_speed = SPEED_100;
                        bp->req_duplex = DUPLEX_HALF;
                        bp->advertising = (ADVERTISED_100baseT_Half |
@@ -6796,8 +7746,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                break;
 
        case PORT_FEATURE_LINK_SPEED_1G:
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
+               if (bp->supported & SUPPORTED_1000baseT_Full) {
                        bp->req_line_speed = SPEED_1000;
                        bp->advertising = (ADVERTISED_1000baseT_Full |
                                           ADVERTISED_TP);
@@ -6811,10 +7760,9 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                break;
 
        case PORT_FEATURE_LINK_SPEED_2_5G:
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
+               if (bp->supported & SUPPORTED_2500baseX_Full) {
                        bp->req_line_speed = SPEED_2500;
-                       bp->advertising = (ADVERTISED_2500baseT_Full |
+                       bp->advertising = (ADVERTISED_2500baseX_Full |
                                           ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
@@ -6828,15 +7776,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
        case PORT_FEATURE_LINK_SPEED_10G_CX4:
        case PORT_FEATURE_LINK_SPEED_10G_KX4:
        case PORT_FEATURE_LINK_SPEED_10G_KR:
-               if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
-                       BNX2X_ERR("NVRAM config error. "
-                                 "Invalid link_config 0x%x"
-                                 "  phy_flags 0x%x\n",
-                                 bp->link_config, bp->phy_flags);
-                       return;
-               }
-               if (bp->speed_cap_mask &
-                   PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
+               if (bp->supported & SUPPORTED_10000baseT_Full) {
                        bp->req_line_speed = SPEED_10000;
                        bp->advertising = (ADVERTISED_10000baseT_Full |
                                           ADVERTISED_FIBRE);
@@ -6863,43 +7803,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
 
        bp->req_flow_ctrl = (bp->link_config &
                             PORT_FEATURE_FLOW_CONTROL_MASK);
-       /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
-       switch (bp->req_flow_ctrl) {
-       case FLOW_CTRL_AUTO:
+       if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
+           (bp->supported & SUPPORTED_Autoneg))
                bp->req_autoneg |= AUTONEG_FLOW_CTRL;
-               if (bp->dev->mtu <= 4500) {
-                       bp->pause_mode = PAUSE_BOTH;
-                       bp->advertising |= (ADVERTISED_Pause |
-                                           ADVERTISED_Asym_Pause);
-               } else {
-                       bp->pause_mode = PAUSE_ASYMMETRIC;
-                       bp->advertising |= ADVERTISED_Asym_Pause;
-               }
-               break;
-
-       case FLOW_CTRL_TX:
-               bp->pause_mode = PAUSE_ASYMMETRIC;
-               bp->advertising |= ADVERTISED_Asym_Pause;
-               break;
-
-       case FLOW_CTRL_RX:
-       case FLOW_CTRL_BOTH:
-               bp->pause_mode = PAUSE_BOTH;
-               bp->advertising |= (ADVERTISED_Pause |
-                                   ADVERTISED_Asym_Pause);
-               break;
 
-       case FLOW_CTRL_NONE:
-       default:
-               bp->pause_mode = PAUSE_NONE;
-               bp->advertising &= ~(ADVERTISED_Pause |
-                                    ADVERTISED_Asym_Pause);
-               break;
-       }
-       BNX2X_DEV_INFO("req_autoneg 0x%x  req_flow_ctrl 0x%x\n"
-            KERN_INFO "  pause_mode %d  advertising 0x%x\n",
-                      bp->req_autoneg, bp->req_flow_ctrl,
-                      bp->pause_mode, bp->advertising);
+       BNX2X_DEV_INFO("req_autoneg 0x%x  req_flow_ctrl 0x%x"
+                      "  advertising 0x%x\n",
+                      bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
 }
 
 static void bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -6933,15 +7843,15 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
        val = SHMEM_RD(bp, validity_map[port]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-               BNX2X_ERR("MCP validity signature bad\n");
+               BNX2X_ERR("BAD MCP validity signature\n");
 
-       bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) &
+       bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
                      DRV_MSG_SEQ_NUMBER_MASK);
 
        bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
-
+       bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
        bp->serdes_config =
-               SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config);
+               SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
        bp->lane_config =
                SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
        bp->ext_phy_config =
@@ -6954,13 +7864,13 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
        bp->link_config =
                SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
 
-       BNX2X_DEV_INFO("hw_config (%08x)  serdes_config (%08x)\n"
+       BNX2X_DEV_INFO("hw_config (%08x) board (%08x)  serdes_config (%08x)\n"
             KERN_INFO "  lane_config (%08x)  ext_phy_config (%08x)\n"
             KERN_INFO "  speed_cap_mask (%08x)  link_config (%08x)"
                       "  fw_seq (%08x)\n",
-                      bp->hw_config, bp->serdes_config, bp->lane_config,
-                      bp->ext_phy_config, bp->speed_cap_mask,
-                      bp->link_config, bp->fw_seq);
+                      bp->hw_config, bp->board, bp->serdes_config,
+                      bp->lane_config, bp->ext_phy_config,
+                      bp->speed_cap_mask, bp->link_config, bp->fw_seq);
 
        switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
        bnx2x_link_settings_supported(bp, switch_cfg);
@@ -7014,14 +7924,8 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
        return;
 
 set_mac: /* only supposed to happen on emulation/FPGA */
-       BNX2X_ERR("warning constant MAC workaround active\n");
-       bp->dev->dev_addr[0] = 0;
-       bp->dev->dev_addr[1] = 0x50;
-       bp->dev->dev_addr[2] = 0xc2;
-       bp->dev->dev_addr[3] = 0x2c;
-       bp->dev->dev_addr[4] = 0x71;
-       bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
-
+       BNX2X_ERR("warning random MAC workaround active\n");
+       random_ether_addr(bp->dev->dev_addr);
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
 
 }
@@ -7048,19 +7952,34 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        }
 
        if (bp->phy_flags & PHY_XGXS_FLAG) {
-               cmd->port = PORT_FIBRE;
-       } else {
+               u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+
+               switch (ext_phy_type) {
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+                       cmd->port = PORT_FIBRE;
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       cmd->port = PORT_TP;
+                       break;
+
+               default:
+                       DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+                          bp->ext_phy_config);
+               }
+       } else
                cmd->port = PORT_TP;
-       }
 
        cmd->phy_address = bp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
 
-       if (bp->req_autoneg & AUTONEG_SPEED) {
+       if (bp->req_autoneg & AUTONEG_SPEED)
                cmd->autoneg = AUTONEG_ENABLE;
-       } else {
+       else
                cmd->autoneg = AUTONEG_DISABLE;
-       }
 
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
@@ -7091,8 +8010,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        switch (cmd->port) {
        case PORT_TP:
-               if (!(bp->supported & SUPPORTED_TP))
+               if (!(bp->supported & SUPPORTED_TP)) {
+                       DP(NETIF_MSG_LINK, "TP not supported\n");
                        return -EINVAL;
+               }
 
                if (bp->phy_flags & PHY_XGXS_FLAG) {
                        bnx2x_link_reset(bp);
@@ -7102,8 +8023,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                break;
 
        case PORT_FIBRE:
-               if (!(bp->supported & SUPPORTED_FIBRE))
+               if (!(bp->supported & SUPPORTED_FIBRE)) {
+                       DP(NETIF_MSG_LINK, "FIBRE not supported\n");
                        return -EINVAL;
+               }
 
                if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
                        bnx2x_link_reset(bp);
@@ -7113,12 +8036,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                break;
 
        default:
+               DP(NETIF_MSG_LINK, "Unknown port type\n");
                return -EINVAL;
        }
 
        if (cmd->autoneg == AUTONEG_ENABLE) {
-               if (!(bp->supported & SUPPORTED_Autoneg))
+               if (!(bp->supported & SUPPORTED_Autoneg)) {
+                       DP(NETIF_MSG_LINK, "Autoneg not supported\n");
                        return -EINVAL;
+               }
 
                /* advertise the requested speed and duplex if supported */
                cmd->advertising &= bp->supported;
@@ -7133,14 +8059,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_FULL) {
-                               if (!(bp->supported & SUPPORTED_10baseT_Full))
+                               if (!(bp->supported &
+                                     SUPPORTED_10baseT_Full)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "10M full not supported\n");
                                        return -EINVAL;
+                               }
 
                                advertising = (ADVERTISED_10baseT_Full |
                                               ADVERTISED_TP);
                        } else {
-                               if (!(bp->supported & SUPPORTED_10baseT_Half))
+                               if (!(bp->supported &
+                                     SUPPORTED_10baseT_Half)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "10M half not supported\n");
                                        return -EINVAL;
+                               }
 
                                advertising = (ADVERTISED_10baseT_Half |
                                               ADVERTISED_TP);
@@ -7150,15 +8084,21 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                case SPEED_100:
                        if (cmd->duplex == DUPLEX_FULL) {
                                if (!(bp->supported &
-                                               SUPPORTED_100baseT_Full))
+                                               SUPPORTED_100baseT_Full)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "100M full not supported\n");
                                        return -EINVAL;
+                               }
 
                                advertising = (ADVERTISED_100baseT_Full |
                                               ADVERTISED_TP);
                        } else {
                                if (!(bp->supported &
-                                               SUPPORTED_100baseT_Half))
+                                               SUPPORTED_100baseT_Half)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "100M half not supported\n");
                                        return -EINVAL;
+                               }
 
                                advertising = (ADVERTISED_100baseT_Half |
                                               ADVERTISED_TP);
@@ -7166,39 +8106,54 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
 
                case SPEED_1000:
-                       if (cmd->duplex != DUPLEX_FULL)
+                       if (cmd->duplex != DUPLEX_FULL) {
+                               DP(NETIF_MSG_LINK, "1G half not supported\n");
                                return -EINVAL;
+                       }
 
-                       if (!(bp->supported & SUPPORTED_1000baseT_Full))
+                       if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
+                               DP(NETIF_MSG_LINK, "1G full not supported\n");
                                return -EINVAL;
+                       }
 
                        advertising = (ADVERTISED_1000baseT_Full |
                                       ADVERTISED_TP);
                        break;
 
                case SPEED_2500:
-                       if (cmd->duplex != DUPLEX_FULL)
+                       if (cmd->duplex != DUPLEX_FULL) {
+                               DP(NETIF_MSG_LINK,
+                                  "2.5G half not supported\n");
                                return -EINVAL;
+                       }
 
-                       if (!(bp->supported & SUPPORTED_2500baseT_Full))
+                       if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
+                               DP(NETIF_MSG_LINK,
+                                  "2.5G full not supported\n");
                                return -EINVAL;
+                       }
 
-                       advertising = (ADVERTISED_2500baseT_Full |
+                       advertising = (ADVERTISED_2500baseX_Full |
                                       ADVERTISED_TP);
                        break;
 
                case SPEED_10000:
-                       if (cmd->duplex != DUPLEX_FULL)
+                       if (cmd->duplex != DUPLEX_FULL) {
+                               DP(NETIF_MSG_LINK, "10G half not supported\n");
                                return -EINVAL;
+                       }
 
-                       if (!(bp->supported & SUPPORTED_10000baseT_Full))
+                       if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
+                               DP(NETIF_MSG_LINK, "10G full not supported\n");
                                return -EINVAL;
+                       }
 
                        advertising = (ADVERTISED_10000baseT_Full |
                                       ADVERTISED_FIBRE);
                        break;
 
                default:
+                       DP(NETIF_MSG_LINK, "Unsupported speed\n");
                        return -EINVAL;
                }
 
@@ -7398,8 +8353,7 @@ static void bnx2x_disable_nvram_access(struct bnx2x *bp)
 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
                                  u32 cmd_flags)
 {
-       int rc;
-       int count, i;
+       int count, i, rc;
        u32 val;
 
        /* build the command word */
@@ -7452,13 +8406,13 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
 
        if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
                DP(NETIF_MSG_NVM,
-                  "Invalid paramter: offset 0x%x  buf_size 0x%x\n",
+                  "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
                   offset, buf_size);
                return -EINVAL;
        }
 
        if (offset + buf_size > bp->flash_size) {
-               DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +"
+               DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
                                  " buf_size (0x%x) > flash_size (0x%x)\n",
                   offset, buf_size, bp->flash_size);
                return -EINVAL;
@@ -7519,8 +8473,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
                                   u32 cmd_flags)
 {
-       int rc;
-       int count, i;
+       int count, i, rc;
 
        /* build the command word */
        cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
@@ -7557,7 +8510,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
        return rc;
 }
 
-#define BYTE_OFFSET(offset)            (8 * (offset & 0x03))
+#define BYTE_OFFSET(offset)            (8 * (offset & 0x03))
 
 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
                              int buf_size)
@@ -7568,7 +8521,7 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
        u32 val;
 
        if (offset + buf_size > bp->flash_size) {
-               DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +"
+               DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
                                  " buf_size (0x%x) > flash_size (0x%x)\n",
                   offset, buf_size, bp->flash_size);
                return -EINVAL;
@@ -7621,13 +8574,13 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
 
        if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
                DP(NETIF_MSG_NVM,
-                  "Invalid paramter: offset 0x%x  buf_size 0x%x\n",
+                  "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
                   offset, buf_size);
                return -EINVAL;
        }
 
        if (offset + buf_size > bp->flash_size) {
-               DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +"
+               DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
                                  " buf_size (0x%x) > flash_size (0x%x)\n",
                   offset, buf_size, bp->flash_size);
                return -EINVAL;
@@ -7788,52 +8741,29 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
           DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
           epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
 
-       bp->req_flow_ctrl = FLOW_CTRL_AUTO;
        if (epause->autoneg) {
-               bp->req_autoneg |= AUTONEG_FLOW_CTRL;
-               if (bp->dev->mtu <= 4500) {
-                       bp->pause_mode = PAUSE_BOTH;
-                       bp->advertising |= (ADVERTISED_Pause |
-                                           ADVERTISED_Asym_Pause);
-               } else {
-                       bp->pause_mode = PAUSE_ASYMMETRIC;
-                       bp->advertising |= ADVERTISED_Asym_Pause;
+               if (!(bp->supported & SUPPORTED_Autoneg)) {
+                       DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+                       return -EINVAL;
                }
 
-       } else {
+               bp->req_autoneg |= AUTONEG_FLOW_CTRL;
+       } else
                bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
 
-               if (epause->rx_pause)
-                       bp->req_flow_ctrl |= FLOW_CTRL_RX;
-               if (epause->tx_pause)
-                       bp->req_flow_ctrl |= FLOW_CTRL_TX;
-
-               switch (bp->req_flow_ctrl) {
-               case FLOW_CTRL_AUTO:
-                       bp->req_flow_ctrl = FLOW_CTRL_NONE;
-                       bp->pause_mode = PAUSE_NONE;
-                       bp->advertising &= ~(ADVERTISED_Pause |
-                                            ADVERTISED_Asym_Pause);
-                       break;
+       bp->req_flow_ctrl = FLOW_CTRL_AUTO;
 
-               case FLOW_CTRL_TX:
-                       bp->pause_mode = PAUSE_ASYMMETRIC;
-                       bp->advertising |= ADVERTISED_Asym_Pause;
-                       break;
+       if (epause->rx_pause)
+               bp->req_flow_ctrl |= FLOW_CTRL_RX;
+       if (epause->tx_pause)
+               bp->req_flow_ctrl |= FLOW_CTRL_TX;
 
-               case FLOW_CTRL_RX:
-               case FLOW_CTRL_BOTH:
-                       bp->pause_mode = PAUSE_BOTH;
-                       bp->advertising |= (ADVERTISED_Pause |
-                                           ADVERTISED_Asym_Pause);
-                       break;
-               }
-       }
+       if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
+           (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
+               bp->req_flow_ctrl = FLOW_CTRL_NONE;
 
-       DP(NETIF_MSG_LINK, "req_autoneg 0x%x  req_flow_ctrl 0x%x\n"
-          DP_LEVEL "  pause_mode %d  advertising 0x%x\n",
-          bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
-          bp->advertising);
+       DP(NETIF_MSG_LINK, "req_autoneg 0x%x  req_flow_ctrl 0x%x\n",
+          bp->req_autoneg, bp->req_flow_ctrl);
 
        bnx2x_stop_stats(bp);
        bnx2x_link_initialize(bp);
@@ -7906,81 +8836,87 @@ static void bnx2x_self_test(struct net_device *dev,
 static struct {
        char string[ETH_GSTRING_LEN];
 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
-       { "rx_bytes"},                           /*  0 */
-       { "rx_error_bytes"},                     /*  1 */
-       { "tx_bytes"},                           /*  2 */
-       { "tx_error_bytes"},                     /*  3 */
-       { "rx_ucast_packets"},                   /*  4 */
-       { "rx_mcast_packets"},                   /*  5 */
-       { "rx_bcast_packets"},                   /*  6 */
-       { "tx_ucast_packets"},                   /*  7 */
-       { "tx_mcast_packets"},                   /*  8 */
-       { "tx_bcast_packets"},                   /*  9 */
-       { "tx_mac_errors"},                      /* 10 */
-       { "tx_carrier_errors"},                  /* 11 */
-       { "rx_crc_errors"},                      /* 12 */
-       { "rx_align_errors"},                    /* 13 */
-       { "tx_single_collisions"},               /* 14 */
-       { "tx_multi_collisions"},                /* 15 */
-       { "tx_deferred"},                        /* 16 */
-       { "tx_excess_collisions"},               /* 17 */
-       { "tx_late_collisions"},                 /* 18 */
-       { "tx_total_collisions"},                /* 19 */
-       { "rx_fragments"},                       /* 20 */
-       { "rx_jabbers"},                         /* 21 */
-       { "rx_undersize_packets"},               /* 22 */
-       { "rx_oversize_packets"},                /* 23 */
-       { "rx_xon_frames"},                      /* 24 */
-       { "rx_xoff_frames"},                     /* 25 */
-       { "tx_xon_frames"},                      /* 26 */
-       { "tx_xoff_frames"},                     /* 27 */
-       { "rx_mac_ctrl_frames"},                 /* 28 */
-       { "rx_filtered_packets"},                /* 29 */
-       { "rx_discards"},                        /* 30 */
+       { "rx_bytes"},
+       { "rx_error_bytes"},
+       { "tx_bytes"},
+       { "tx_error_bytes"},
+       { "rx_ucast_packets"},
+       { "rx_mcast_packets"},
+       { "rx_bcast_packets"},
+       { "tx_ucast_packets"},
+       { "tx_mcast_packets"},
+       { "tx_bcast_packets"},
+       { "tx_mac_errors"},     /* 10 */
+       { "tx_carrier_errors"},
+       { "rx_crc_errors"},
+       { "rx_align_errors"},
+       { "tx_single_collisions"},
+       { "tx_multi_collisions"},
+       { "tx_deferred"},
+       { "tx_excess_collisions"},
+       { "tx_late_collisions"},
+       { "tx_total_collisions"},
+       { "rx_fragments"},      /* 20 */
+       { "rx_jabbers"},
+       { "rx_undersize_packets"},
+       { "rx_oversize_packets"},
+       { "rx_xon_frames"},
+       { "rx_xoff_frames"},
+       { "tx_xon_frames"},
+       { "tx_xoff_frames"},
+       { "rx_mac_ctrl_frames"},
+       { "rx_filtered_packets"},
+       { "rx_discards"},       /* 30 */
+       { "brb_discard"},
+       { "brb_truncate"},
+       { "xxoverflow"}
 };
 
 #define STATS_OFFSET32(offset_name) \
        (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
 
 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
-       STATS_OFFSET32(total_bytes_received_hi),                     /*  0 */
-       STATS_OFFSET32(stat_IfHCInBadOctets_hi),                     /*  1 */
-       STATS_OFFSET32(total_bytes_transmitted_hi),                  /*  2 */
-       STATS_OFFSET32(stat_IfHCOutBadOctets_hi),                    /*  3 */
-       STATS_OFFSET32(total_unicast_packets_received_hi),           /*  4 */
-       STATS_OFFSET32(total_multicast_packets_received_hi),         /*  5 */
-       STATS_OFFSET32(total_broadcast_packets_received_hi),         /*  6 */
-       STATS_OFFSET32(total_unicast_packets_transmitted_hi),        /*  7 */
-       STATS_OFFSET32(total_multicast_packets_transmitted_hi),      /*  8 */
-       STATS_OFFSET32(total_broadcast_packets_transmitted_hi),      /*  9 */
-       STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors),     /* 10 */
-       STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),            /* 11 */
-       STATS_OFFSET32(crc_receive_errors),                          /* 12 */
-       STATS_OFFSET32(alignment_errors),                            /* 13 */
-       STATS_OFFSET32(single_collision_transmit_frames),            /* 14 */
-       STATS_OFFSET32(multiple_collision_transmit_frames),          /* 15 */
-       STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),         /* 16 */
-       STATS_OFFSET32(excessive_collision_frames),                  /* 17 */
-       STATS_OFFSET32(late_collision_frames),                       /* 18 */
-       STATS_OFFSET32(number_of_bugs_found_in_stats_spec),          /* 19 */
-       STATS_OFFSET32(runt_packets_received),                       /* 20 */
-       STATS_OFFSET32(jabber_packets_received),                     /* 21 */
-       STATS_OFFSET32(error_runt_packets_received),                 /* 22 */
-       STATS_OFFSET32(error_jabber_packets_received),               /* 23 */
-       STATS_OFFSET32(pause_xon_frames_received),                   /* 24 */
-       STATS_OFFSET32(pause_xoff_frames_received),                  /* 25 */
-       STATS_OFFSET32(pause_xon_frames_transmitted),                /* 26 */
-       STATS_OFFSET32(pause_xoff_frames_transmitted),               /* 27 */
-       STATS_OFFSET32(control_frames_received),                     /* 28 */
-       STATS_OFFSET32(mac_filter_discard),                          /* 29 */
-       STATS_OFFSET32(no_buff_discard),                             /* 30 */
+       STATS_OFFSET32(total_bytes_received_hi),
+       STATS_OFFSET32(stat_IfHCInBadOctets_hi),
+       STATS_OFFSET32(total_bytes_transmitted_hi),
+       STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
+       STATS_OFFSET32(total_unicast_packets_received_hi),
+       STATS_OFFSET32(total_multicast_packets_received_hi),
+       STATS_OFFSET32(total_broadcast_packets_received_hi),
+       STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+       STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+       STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+       STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
+       STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+       STATS_OFFSET32(crc_receive_errors),
+       STATS_OFFSET32(alignment_errors),
+       STATS_OFFSET32(single_collision_transmit_frames),
+       STATS_OFFSET32(multiple_collision_transmit_frames),
+       STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+       STATS_OFFSET32(excessive_collision_frames),
+       STATS_OFFSET32(late_collision_frames),
+       STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
+       STATS_OFFSET32(runt_packets_received),                  /* 20 */
+       STATS_OFFSET32(jabber_packets_received),
+       STATS_OFFSET32(error_runt_packets_received),
+       STATS_OFFSET32(error_jabber_packets_received),
+       STATS_OFFSET32(pause_xon_frames_received),
+       STATS_OFFSET32(pause_xoff_frames_received),
+       STATS_OFFSET32(pause_xon_frames_transmitted),
+       STATS_OFFSET32(pause_xoff_frames_transmitted),
+       STATS_OFFSET32(control_frames_received),
+       STATS_OFFSET32(mac_filter_discard),
+       STATS_OFFSET32(no_buff_discard),                        /* 30 */
+       STATS_OFFSET32(brb_discard),
+       STATS_OFFSET32(brb_truncate_discard),
+       STATS_OFFSET32(xxoverflow_discard)
 };
 
 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
        8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
        4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-       4,
+       4, 4, 4, 4
 };
 
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -8138,9 +9074,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
  * net_device service functions
  */
 
-/* Called with rtnl_lock from vlan functions and also netif_tx_lock
- * from set_multicast.
- */
+/* called with netif_tx_lock from set_multicast */
 static void bnx2x_set_rx_mode(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -8314,7 +9248,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                               ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
        tx_bd->general_data |= 1; /* header nbd */
 
-       /* remeber the first bd of the packet */
+       /* remember the first bd of the packet */
        tx_buf->first_bd = bd_prod;
 
        DP(NETIF_MSG_TX_QUEUED,
@@ -8334,7 +9268,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                /* for now NS flag is not used in Linux */
                pbd->global_data = (len |
-                                   ((skb->protocol == ETH_P_8021Q) <<
+                                   ((skb->protocol == ntohs(ETH_P_8021Q)) <<
                                     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
                pbd->ip_hlen = ip_hdrlen(skb) / 2;
                pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
@@ -8343,7 +9277,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                        tx_bd->bd_flags.as_bitfield |=
                                                ETH_TX_BD_FLAGS_TCP_CSUM;
-                       pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
+                       pbd->tcp_flags = pbd_tcp_flags(skb);
                        pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
                        pbd->tcp_pseudo_csum = swab16(th->check);
 
@@ -8387,7 +9321,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (skb_shinfo(skb)->gso_size &&
            (skb->len > (bp->dev->mtu + ETH_HLEN))) {
-               int hlen = 2 * le32_to_cpu(pbd->total_hlen);
+               int hlen = 2 * le16_to_cpu(pbd->total_hlen);
 
                DP(NETIF_MSG_TX_QUEUED,
                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
@@ -8427,7 +9361,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        tx_bd->vlan = cpu_to_le16(pkt_prod);
                        /* this marks the bd
                         * as one that has no individual mapping
-                        * the FW ignors this flag in a bd not maked start
+                        * the FW ignores this flag in a bd not marked start
                         */
                        tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
                        DP(NETIF_MSG_TX_QUEUED,
@@ -8504,9 +9438,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u  bd %d\n", nbd, bd_prod);
 
-       fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
+       fp->hw_tx_prods->bds_prod =
+               cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
        mb(); /* FW restriction: must not reorder writing nbd and packets */
-       fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
+       fp->hw_tx_prods->packets_prod =
+               cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
        DOORBELL(bp, fp_index, 0);
 
        mmiowb();
@@ -8525,11 +9461,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
-{
-       return &dev->stats;
-}
-
 /* Called with rtnl_lock */
 static int bnx2x_open(struct net_device *dev)
 {
@@ -8543,16 +9474,13 @@ static int bnx2x_open(struct net_device *dev)
 /* Called with rtnl_lock */
 static int bnx2x_close(struct net_device *dev)
 {
-       int rc;
        struct bnx2x *bp = netdev_priv(dev);
 
        /* Unload the driver, release IRQs */
-       rc = bnx2x_nic_unload(bp, 1);
-       if (rc) {
-               BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
-               return rc;
-       }
-       bnx2x_set_power_state(bp, PCI_D3hot);
+       bnx2x_nic_unload(bp, 1);
+
+       if (!CHIP_REV_IS_SLOW(bp))
+               bnx2x_set_power_state(bp, PCI_D3hot);
 
        return 0;
 }
@@ -8584,7 +9512,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;
 
-               /* fallthru */
+               /* fallthrough */
        case SIOCGMIIREG: {
                u32 mii_regval;
 
@@ -8633,7 +9561,7 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
                return -EINVAL;
 
        /* This does not race with packet allocation
-        * because the actuall alloc size is
+        * because the actual alloc size is
         * only updated as part of load
         */
        dev->mtu = new_mtu;
@@ -8666,7 +9594,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
 
        bp->vlgrp = vlgrp;
        if (netif_running(dev))
-               bnx2x_set_rx_mode(dev);
+               bnx2x_set_client_config(bp);
 }
 #endif
 
@@ -8695,14 +9623,18 @@ static void bnx2x_reset_task(struct work_struct *work)
        if (!netif_running(bp->dev))
                return;
 
-       bp->in_reset_task = 1;
+       rtnl_lock();
 
-       bnx2x_netif_stop(bp);
+       if (bp->state != BNX2X_STATE_OPEN) {
+               DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
+               goto reset_task_exit;
+       }
 
        bnx2x_nic_unload(bp, 0);
        bnx2x_nic_load(bp, 0);
 
-       bp->in_reset_task = 0;
+reset_task_exit:
+       rtnl_unlock();
 }
 
 static int __devinit bnx2x_init_board(struct pci_dev *pdev,
@@ -8783,8 +9715,6 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev,
 
        spin_lock_init(&bp->phy_lock);
 
-       bp->in_reset_task = 0;
-
        INIT_WORK(&bp->reset_task, bnx2x_reset_task);
        INIT_WORK(&bp->sp_task, bnx2x_sp_task);
 
@@ -8813,7 +9743,7 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev,
        bnx2x_get_hwinfo(bp);
 
        if (CHIP_REV(bp) == CHIP_REV_FPGA) {
-               printk(KERN_ERR PFX "FPGA detacted. MCP disabled,"
+               printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
                       " will only init first device\n");
                onefunc = 1;
                nomcp = 1;
@@ -8882,14 +9812,32 @@ err_out:
        return rc;
 }
 
+static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+       val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+       return val;
+}
+
+/* return value of 1=2.5GHz 2=5GHz */
+static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+       val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+       return val;
+}
+
 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
 {
        static int version_printed;
        struct net_device *dev = NULL;
        struct bnx2x *bp;
-       int rc, i;
+       int rc;
        int port = PCI_FUNC(pdev->devfn);
+       DECLARE_MAC_BUF(mac);
 
        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);
@@ -8906,6 +9854,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
        if (port && onefunc) {
                printk(KERN_ERR PFX "second function disabled. exiting\n");
+               free_netdev(dev);
                return 0;
        }
 
@@ -8918,7 +9867,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        dev->hard_start_xmit = bnx2x_start_xmit;
        dev->watchdog_timeo = TX_TIMEOUT;
 
-       dev->get_stats = bnx2x_get_stats;
        dev->ethtool_ops = &bnx2x_ethtool_ops;
        dev->open = bnx2x_open;
        dev->stop = bnx2x_close;
@@ -8944,7 +9892,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
        rc = register_netdev(dev);
        if (rc) {
-               printk(KERN_ERR PFX "Cannot register net device\n");
+               dev_err(&pdev->dev, "Cannot register net device\n");
                if (bp->regview)
                        iounmap(bp->regview);
                if (bp->doorbells)
@@ -8959,32 +9907,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        pci_set_drvdata(pdev, dev);
 
        bp->name = board_info[ent->driver_data].name;
-       printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz "
-              "found at mem %lx, IRQ %d, ",
-              dev->name, bp->name,
+       printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
+              " IRQ %d, ", dev->name, bp->name,
               ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
               ((CHIP_ID(bp) & 0x0ff0) >> 4),
-              ((bp->flags & PCIX_FLAG) ? "-X" : ""),
-              ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
-              bp->bus_speed_mhz,
-              dev->base_addr,
-              bp->pdev->irq);
-
-       printk("node addr ");
-       for (i = 0; i < 6; i++)
-               printk("%2.2x", dev->dev_addr[i]);
-       printk("\n");
-
+              bnx2x_get_pcie_width(bp),
+              (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
+              dev->base_addr, bp->pdev->irq);
+       printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
        return 0;
 }
 
 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
-       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x *bp;
+
+       if (!dev) {
+               /* we get here if init_one() fails */
+               printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+               return;
+       }
+
+       bp = netdev_priv(dev);
 
-       flush_scheduled_work();
-       /*tasklet_kill(&bp->sp_task);*/
        unregister_netdev(dev);
 
        if (bp->regview)
@@ -9002,34 +9948,43 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
-       struct bnx2x *bp = netdev_priv(dev);
-       int rc;
+       struct bnx2x *bp;
+
+       if (!dev)
+               return 0;
 
        if (!netif_running(dev))
                return 0;
 
-       rc = bnx2x_nic_unload(bp, 0);
-       if (!rc)
-               return rc;
+       bp = netdev_priv(dev);
+
+       bnx2x_nic_unload(bp, 0);
 
        netif_device_detach(dev);
-       pci_save_state(pdev);
 
+       pci_save_state(pdev);
        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
        return 0;
 }
 
 static int bnx2x_resume(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
-       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x *bp;
        int rc;
 
+       if (!dev) {
+               printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+               return -ENODEV;
+       }
+
        if (!netif_running(dev))
                return 0;
 
-       pci_restore_state(pdev);
+       bp = netdev_priv(dev);
 
+       pci_restore_state(pdev);
        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
 
index 4f7ae6f77452b89bfb6d303adc2e6f437d001e37..4f0c0d31e7c1d70e00f1aa6ecffbe345034f0faa 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2x.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2007 Broadcom Corporation
+ * Copyright (c) 2007-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -24,6 +24,8 @@
 #define BNX2X_MSG_STATS                0x20000 /* was: NETIF_MSG_TIMER */
 #define NETIF_MSG_NVM                  0x40000 /* was: NETIF_MSG_HW */
 #define NETIF_MSG_DMAE                 0x80000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP                   0x100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP                   0x200000 /* was: NETIF_MSG_INTR */
 
 #define DP_LEVEL                       KERN_NOTICE     /* was: KERN_DEBUG */
 
                __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
        } while (0)
 
+/* for logging (never masked) */
+#define BNX2X_LOG(__fmt, __args...) do { \
+       printk(KERN_NOTICE "[%s:%d(%s)]" __fmt, __FUNCTION__, \
+               __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
+       } while (0)
+
 /* before we have a dev->name use dev_info() */
 #define BNX2X_DEV_INFO(__fmt, __args...) do { \
        if (bp->msglevel & NETIF_MSG_PROBE) \
@@ -423,8 +431,6 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_OPEN            0xa0000
 #define BNX2X_FP_STATE_HALTING         0xb0000
 #define BNX2X_FP_STATE_HALTED          0xc0000
-#define BNX2X_FP_STATE_DELETED         0xd0000
-#define BNX2X_FP_STATE_CLOSE_IRQ       0xe0000
 
        int                     index;
 
@@ -505,7 +511,6 @@ struct bnx2x {
        struct eth_spe          *spq;
        dma_addr_t              spq_mapping;
        u16                     spq_prod_idx;
-       u16                     dsb_sp_prod_idx;
        struct eth_spe          *spq_prod_bd;
        struct eth_spe          *spq_last_bd;
        u16                     *dsb_sp_prod;
@@ -517,7 +522,7 @@ struct bnx2x {
         */
        u8                      stat_pending;
 
-       /* End of fileds used in the performance code paths */
+       /* End of fields used in the performance code paths */
 
        int                     panic;
        int                     msglevel;
@@ -540,8 +545,6 @@ struct bnx2x {
        spinlock_t              phy_lock;
 
        struct work_struct      reset_task;
-       u16                     in_reset_task;
-
        struct work_struct      sp_task;
 
        struct timer_list       timer;
@@ -555,7 +558,6 @@ struct bnx2x {
 #define CHIP_ID(bp)                    (((bp)->chip_id) & 0xfffffff0)
 
 #define CHIP_NUM(bp)                   (((bp)->chip_id) & 0xffff0000)
-#define CHIP_NUM_5710                  0x57100000
 
 #define CHIP_REV(bp)                   (((bp)->chip_id) & 0x0000f000)
 #define CHIP_REV_Ax                    0x00000000
@@ -574,7 +576,8 @@ struct bnx2x {
        u32                     fw_mb;
 
        u32                     hw_config;
-       u32                     serdes_config;
+       u32                     board;
+       u32                     serdes_config;
        u32                     lane_config;
        u32                     ext_phy_config;
 #define XGXS_EXT_PHY_TYPE(bp)          (bp->ext_phy_config & \
@@ -595,11 +598,11 @@ struct bnx2x {
        u8                      tx_lane_swap;
 
        u8                      link_up;
+       u8                      phy_link_up;
 
        u32                     supported;
 /* link settings - missing defines */
 #define SUPPORTED_2500baseT_Full       (1 << 15)
-#define SUPPORTED_CX4                  (1 << 16)
 
        u32                     phy_flags;
 /*#define PHY_SERDES_FLAG                      0x1*/
@@ -644,16 +647,9 @@ struct bnx2x {
 #define FLOW_CTRL_BOTH                 PORT_FEATURE_FLOW_CONTROL_BOTH
 #define FLOW_CTRL_NONE                 PORT_FEATURE_FLOW_CONTROL_NONE
 
-       u32                     pause_mode;
-#define PAUSE_NONE                     0
-#define PAUSE_SYMMETRIC                1
-#define PAUSE_ASYMMETRIC               2
-#define PAUSE_BOTH                     3
-
        u32                     advertising;
 /* link settings - missing defines */
 #define ADVERTISED_2500baseT_Full       (1 << 15)
-#define ADVERTISED_CX4                 (1 << 16)
 
        u32                     link_status;
        u32                     line_speed;
@@ -667,6 +663,8 @@ struct bnx2x {
 #define NVRAM_TIMEOUT_COUNT            30000
 #define NVRAM_PAGE_SIZE                256
 
+       u8                      wol;
+
        int                     rx_ring_size;
 
        u16                     tx_quick_cons_trip_int;
@@ -718,9 +716,6 @@ struct bnx2x {
 #endif
 
        char                    *name;
-       u16                     bus_speed_mhz;
-       u8                      wol;
-       u8                      pad;
 
        /* used to synchronize stats collecting */
        int                     stats_state;
@@ -856,8 +851,8 @@ struct bnx2x {
 #define MAX_SPQ_PENDING                8
 
 
-#define BNX2X_NUM_STATS                31
-#define BNX2X_NUM_TESTS                2
+#define BNX2X_NUM_STATS                        34
+#define BNX2X_NUM_TESTS                        1
 
 
 #define DPM_TRIGER_TYPE                0x40
@@ -867,6 +862,15 @@ struct bnx2x {
                       DPM_TRIGER_TYPE); \
        } while (0)
 
+/* PCIE link and speed */
+#define PCICFG_LINK_WIDTH              0x1f00000
+#define PCICFG_LINK_WIDTH_SHIFT                20
+#define PCICFG_LINK_SPEED              0xf0000
+#define PCICFG_LINK_SPEED_SHIFT                16
+
+#define BMAC_CONTROL_RX_ENABLE         2
+
+#define pbd_tcp_flags(skb)     (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
 
 /* stuff added to make the code fit 80Col */
 
@@ -939,13 +943,13 @@ struct bnx2x {
 #define LINK_16GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
 #define LINK_16GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
 
-#define NIG_STATUS_INTERRUPT_XGXS0_LINK10G \
+#define NIG_STATUS_XGXS0_LINK10G \
                NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
-#define NIG_XGXS0_LINK_STATUS \
+#define NIG_STATUS_XGXS0_LINK_STATUS \
                NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
-#define NIG_XGXS0_LINK_STATUS_SIZE \
+#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
                NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
-#define NIG_SERDES0_LINK_STATUS \
+#define NIG_STATUS_SERDES0_LINK_STATUS \
                NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
 #define NIG_MASK_MI_INT \
                NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
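
The new pbd_tcp_flags() macro added to bnx2x.h above takes the 32-bit TCP header word that carries the data offset, the eight flag bits and the window (bytes 12..15, in network byte order), converts it to host order and masks out the flags byte for the parsing BD. Below is a standalone sketch of the same extraction, assuming only a raw TCP header buffer instead of the kernel's skb/tcp_hdr()/tcp_flag_word() helpers.

#include <stdint.h>

/* Bytes 12..15 of a TCP header hold, in network byte order: data offset
 * plus reserved bits, the 8 flag bits (CWR..FIN), and the 16-bit window.
 * Extract the flags byte the same way the macro does: assemble the word
 * in host order, shift the flags down, mask. */
static uint8_t tcp_flags_from_header(const uint8_t *tcp_hdr_bytes)
{
	uint32_t word3 = ((uint32_t)tcp_hdr_bytes[12] << 24) |
			 ((uint32_t)tcp_hdr_bytes[13] << 16) |
			 ((uint32_t)tcp_hdr_bytes[14] << 8)  |
			  (uint32_t)tcp_hdr_bytes[15];

	return (uint8_t)((word3 >> 16) & 0xff);
}
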
index 62a6eb81025ad3ad3166d8f91ca473907e088e13..3b968904ca659ced3e1877f17a497ca865eaebf6 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2x_fw_defs.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2007 Broadcom Corporation
+ * Copyright (c) 2007-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 6fd959c34d1f34cff73a6f7a9c0daa8fe5d43577..b21075ccb52ecf72945508863390d2a0268cac15 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2x_hsi.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2007 Broadcom Corporation
+ * Copyright (c) 2007-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -8,169 +8,9 @@
  */
 
 
-#define FUNC_0                         0
-#define FUNC_1                         1
-#define FUNC_MAX                       2
-
-
-/* This value (in milliseconds) determines the frequency of the driver
- * issuing the PULSE message code.  The firmware monitors this periodic
- * pulse to determine when to switch to an OS-absent mode. */
-#define DRV_PULSE_PERIOD_MS            250
-
-/* This value (in milliseconds) determines how long the driver should
- * wait for an acknowledgement from the firmware before timing out.  Once
- * the firmware has timed out, the driver will assume there is no firmware
- * running and there won't be any firmware-driver synchronization during a
- * driver reset. */
-#define FW_ACK_TIME_OUT_MS             5000
-
-#define FW_ACK_POLL_TIME_MS            1
-
-#define FW_ACK_NUM_OF_POLL     (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
-
-/* LED Blink rate that will achieve ~15.9Hz */
-#define LED_BLINK_RATE_VAL             480
-
-/****************************************************************************
- * Driver <-> FW Mailbox                                                   *
- ****************************************************************************/
-struct drv_fw_mb {
-       u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK                      0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ                  0x10000000
-#define DRV_MSG_CODE_LOAD_DONE                 0x11000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN         0x20000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS        0x20010000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP        0x20020000
-#define DRV_MSG_CODE_UNLOAD_DONE               0x21000000
-#define DRV_MSG_CODE_DIAG_ENTER_REQ            0x50000000
-#define DRV_MSG_CODE_DIAG_EXIT_REQ             0x60000000
-#define DRV_MSG_CODE_VALIDATE_KEY              0x70000000
-#define DRV_MSG_CODE_GET_CURR_KEY              0x80000000
-#define DRV_MSG_CODE_GET_UPGRADE_KEY           0x81000000
-#define DRV_MSG_CODE_GET_MANUF_KEY             0x82000000
-#define DRV_MSG_CODE_LOAD_L2B_PRAM             0x90000000
-
-#define DRV_MSG_SEQ_NUMBER_MASK                0x0000ffff
-
-       u32 drv_mb_param;
-
-       u32 fw_mb_header;
-#define FW_MSG_CODE_MASK                       0xffff0000
-#define FW_MSG_CODE_DRV_LOAD_COMMON            0x11000000
-#define FW_MSG_CODE_DRV_LOAD_PORT              0x12000000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED           0x13000000
-#define FW_MSG_CODE_DRV_LOAD_DONE              0x14000000
-#define FW_MSG_CODE_DRV_UNLOAD_COMMON          0x21000000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT            0x22000000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE            0x23000000
-#define FW_MSG_CODE_DIAG_ENTER_DONE            0x50000000
-#define FW_MSG_CODE_DIAG_REFUSE                0x51000000
-#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS       0x70000000
-#define FW_MSG_CODE_VALIDATE_KEY_FAILURE       0x71000000
-#define FW_MSG_CODE_GET_KEY_DONE               0x80000000
-#define FW_MSG_CODE_NO_KEY                     0x8f000000
-#define FW_MSG_CODE_LIC_INFO_NOT_READY         0x8f800000
-#define FW_MSG_CODE_L2B_PRAM_LOADED            0x90000000
-#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE    0x91000000
-#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE    0x92000000
-#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE    0x93000000
-#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE    0x94000000
-
-#define FW_MSG_SEQ_NUMBER_MASK                 0x0000ffff
-
-       u32 fw_mb_param;
-
-       u32 link_status;
-       /* Driver should update this field on any link change event */
-
-#define LINK_STATUS_LINK_FLAG_MASK             0x00000001
-#define LINK_STATUS_LINK_UP                    0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK      0x0000001E
-#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE   (0<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10THD             (1<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD             (2<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD           (3<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100T4             (4<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD           (5<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD           (6<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD           (7<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD           (7<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD           (8<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD           (9<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD           (9<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD            (10<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD            (10<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD            (11<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD            (11<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD          (12<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD          (12<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD            (13<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD            (13<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD            (14<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD            (14<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD            (15<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD            (15<<1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK           0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED             0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE            0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK       0x00000080
-#define LINK_STATUS_PARALLEL_DETECTION_USED            0x00000080
-
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE       0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE       0x00000400
-#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE         0x00000800
-#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE       0x00001000
-#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE       0x00002000
-#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE         0x00004000
-#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE         0x00008000
-
-#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK          0x00010000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED            0x00010000
-
-#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK          0x00020000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED            0x00020000
-
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000C0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE     (0<<18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       (1<<18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE      (2<<18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE            (3<<18)
-
-#define LINK_STATUS_SERDES_LINK                        0x00100000
-
-#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE       0x00200000
-#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE       0x00400000
-#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE        0x00800000
-#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE        0x01000000
-#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE      0x02000000
-#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE        0x04000000
-#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE        0x08000000
-#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE        0x10000000
-
-       u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK                             0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK                     0xffff0000
-       /* The system time is in the format of
-        * (year-2001)*12*32 + month*32 + day. */
-#define DRV_PULSE_ALWAYS_ALIVE                         0x00008000
-       /* Indicate to the firmware not to go into the
-        * OS-absent when it is not getting driver pulse.
-        * This is used for debugging as well for PXE(MBA). */
-
-       u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK                             0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE                         0x00008000
-       /* Indicates to the driver not to assert due to lack
-        * of MCP response */
-#define MCP_EVENT_MASK                                 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ               0x00010000
-
-};
-
+#define PORT_0                         0
+#define PORT_1                         1
+#define PORT_MAX                       2
 
 /****************************************************************************
  * Shared HW configuration                                                 *
@@ -249,7 +89,7 @@ struct shared_hw_cfg {                                        /* NVRAM Offset */
 #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ          0x00000000
 #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ          0x00001000
 
-#define SHARED_HW_CFG_HIDE_FUNC1                   0x00002000
+#define SHARED_HW_CFG_HIDE_PORT1                   0x00002000
 
        u32 power_dissipated;                                   /* 0x11c */
 #define SHARED_HW_CFG_POWER_DIS_CMN_MASK           0xff000000
@@ -290,6 +130,8 @@ struct shared_hw_cfg {                                       /* NVRAM Offset */
 #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1015G    0x00000006
 #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1020G    0x00000007
 #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G    0x00000008
+#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G    0x00000009
+#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G    0x0000000a
 
 #define SHARED_HW_CFG_BOARD_VER_MASK               0xffff0000
 #define SHARED_HW_CFG_BOARD_VER_SHIFT              16
@@ -304,13 +146,12 @@ struct shared_hw_cfg {                                     /* NVRAM Offset */
 
 };
 
+
 /****************************************************************************
  * Port HW configuration                                                   *
  ****************************************************************************/
-struct port_hw_cfg {   /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */
+struct port_hw_cfg {                       /* port 0: 0x12c  port 1: 0x2bc */
 
-       /* Fields below are port specific (in anticipation of dual port
-          devices */
        u32 pci_id;
 #define PORT_HW_CFG_PCI_VENDOR_ID_MASK             0xffff0000
 #define PORT_HW_CFG_PCI_DEVICE_ID_MASK             0x0000ffff
@@ -420,6 +261,8 @@ struct port_hw_cfg {        /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706      0x00000500
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8276      0x00000600
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481      0x00000700
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101      0x00000800
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE      0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN     0x0000ff00
 
 #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK         0x000000ff
@@ -462,11 +305,13 @@ struct port_hw_cfg {      /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */
 
 };
 
+
 /****************************************************************************
  * Shared Feature configuration                                            *
  ****************************************************************************/
 struct shared_feat_cfg {                                /* NVRAM Offset */
-       u32 bmc_common;                                         /* 0x450 */
+
+       u32 config;                                             /* 0x450 */
 #define SHARED_FEATURE_BMC_ECHO_MODE_EN            0x00000001
 
 };
@@ -475,7 +320,8 @@ struct shared_feat_cfg {                             /* NVRAM Offset */
 /****************************************************************************
  * Port Feature configuration                                              *
  ****************************************************************************/
-struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */
+struct port_feat_cfg {                     /* port 0: 0x454  port 1: 0x4c8 */
+
        u32 config;
 #define PORT_FEATURE_BAR1_SIZE_MASK                0x0000000f
 #define PORT_FEATURE_BAR1_SIZE_SHIFT               0
@@ -609,8 +455,7 @@ struct port_feat_cfg {      /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */
 #define PORT_FEATURE_SMBUS_ADDR_MASK               0x000000fe
 #define PORT_FEATURE_SMBUS_ADDR_SHIFT              1
 
-       u32 iscsib_boot_cfg;
-#define PORT_FEATURE_ISCSIB_SKIP_TARGET_BOOT       0x00000001
+       u32 reserved1;
 
        u32 link_config;    /* Used as HW defaults for the driver */
 #define PORT_FEATURE_CONNECTED_SWITCH_MASK         0x03000000
@@ -657,20 +502,201 @@ struct port_feat_cfg {   /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */
 };
 
 
+/*****************************************************************************
+ * Device Information                                                       *
+ *****************************************************************************/
+struct dev_info {                                                   /* size */
+
+       u32    bc_rev; /* 8 bits each: major, minor, build */           /* 4 */
+
+       struct shared_hw_cfg     shared_hw_config;                     /* 40 */
+
+       struct port_hw_cfg       port_hw_config[PORT_MAX];      /* 400*2=800 */
+
+       struct shared_feat_cfg   shared_feature_config;                 /* 4 */
+
+       struct port_feat_cfg     port_feature_config[PORT_MAX]; /* 116*2=232 */
+
+};
+
+
+#define FUNC_0                         0
+#define FUNC_1                         1
+#define E1_FUNC_MAX                    2
+#define FUNC_MAX                       E1_FUNC_MAX
+
+
+/* This value (in milliseconds) determines the frequency of the driver
+ * issuing the PULSE message code.  The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS            250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out.  Once
+ * the firmware has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS             5000
+
+#define FW_ACK_POLL_TIME_MS            1
+
+#define FW_ACK_NUM_OF_POLL     (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
+
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL             480
+
 /****************************************************************************
- * Device Information                                                      *
+ * Driver <-> FW Mailbox                                                   *
  ****************************************************************************/
-struct dev_info {                                                  /* size */
+struct drv_port_mb {
+
+       u32 link_status;
+       /* Driver should update this field on any link change event */
+
+#define LINK_STATUS_LINK_FLAG_MASK                     0x00000001
+#define LINK_STATUS_LINK_UP                            0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK              0x0000001E
+#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE   (0<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10THD             (1<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD             (2<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD           (3<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100T4             (4<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD           (5<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD           (6<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD           (7<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD           (7<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD           (8<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD           (9<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD           (9<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD            (10<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD            (10<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD            (11<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD            (11<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD          (12<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD          (12<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD            (13<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD            (13<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD            (14<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD            (14<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD            (15<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD            (15<<1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK           0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED             0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE            0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK       0x00000080
+#define LINK_STATUS_PARALLEL_DETECTION_USED            0x00000080
+
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE       0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE       0x00000400
+#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE         0x00000800
+#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE       0x00001000
+#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE       0x00002000
+#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE         0x00004000
+#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE         0x00008000
+
+#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK          0x00010000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED            0x00010000
+
+#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK          0x00020000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED            0x00020000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE     (0<<18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       (1<<18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE      (2<<18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE            (3<<18)
+
+#define LINK_STATUS_SERDES_LINK                        0x00100000
+
+#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE       0x00200000
+#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE       0x00400000
+#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE        0x00800000
+#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE        0x01000000
+#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE      0x02000000
+#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE        0x04000000
+#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE        0x08000000
+#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE        0x10000000
 
-       u32    bc_rev; /* 8 bits each: major, minor, build */          /* 4 */
+       u32 reserved[3];
 
-       struct shared_hw_cfg     shared_hw_config;                    /* 40 */
+};
+
+
+struct drv_func_mb {
+
+       u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK                              0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ                          0x10000000
+#define DRV_MSG_CODE_LOAD_DONE                         0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN                 0x20000000
+#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS                0x20010000
+#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP                0x20020000
+#define DRV_MSG_CODE_UNLOAD_DONE                       0x21000000
+#define DRV_MSG_CODE_DIAG_ENTER_REQ                    0x50000000
+#define DRV_MSG_CODE_DIAG_EXIT_REQ                     0x60000000
+#define DRV_MSG_CODE_VALIDATE_KEY                      0x70000000
+#define DRV_MSG_CODE_GET_CURR_KEY                      0x80000000
+#define DRV_MSG_CODE_GET_UPGRADE_KEY                   0x81000000
+#define DRV_MSG_CODE_GET_MANUF_KEY                     0x82000000
+#define DRV_MSG_CODE_LOAD_L2B_PRAM                     0x90000000
+
+#define DRV_MSG_SEQ_NUMBER_MASK                        0x0000ffff
+
+       u32 drv_mb_param;
+
+       u32 fw_mb_header;
+#define FW_MSG_CODE_MASK                               0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_COMMON                    0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT                      0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION                  0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED                   0x10200000
+#define FW_MSG_CODE_DRV_LOAD_DONE                      0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_COMMON                  0x20100000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT                    0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION                0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE                    0x21100000
+#define FW_MSG_CODE_DIAG_ENTER_DONE                    0x50100000
+#define FW_MSG_CODE_DIAG_REFUSE                        0x50200000
+#define FW_MSG_CODE_DIAG_EXIT_DONE                     0x60100000
+#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS               0x70100000
+#define FW_MSG_CODE_VALIDATE_KEY_FAILURE               0x70200000
+#define FW_MSG_CODE_GET_KEY_DONE                       0x80100000
+#define FW_MSG_CODE_NO_KEY                             0x80f00000
+#define FW_MSG_CODE_LIC_INFO_NOT_READY                 0x80f80000
+#define FW_MSG_CODE_L2B_PRAM_LOADED                    0x90100000
+#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE            0x90210000
+#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE            0x90220000
+#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE            0x90230000
+#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE            0x90240000
+
+#define FW_MSG_SEQ_NUMBER_MASK                         0x0000ffff
+
+       u32 fw_mb_param;
+
+       u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                             0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK                     0xffff0000
+       /* The system time is in the format of
+        * (year-2001)*12*32 + month*32 + day. */
+#define DRV_PULSE_ALWAYS_ALIVE                         0x00008000
+       /* Indicate to the firmware not to go into the
+        * OS-absent when it is not getting driver pulse.
+        * This is used for debugging as well for PXE(MBA). */
 
-       struct port_hw_cfg       port_hw_config[FUNC_MAX];     /* 400*2=800 */
+       u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                             0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                         0x00008000
+       /* Indicates to the driver not to assert due to lack
+        * of MCP response */
+#define MCP_EVENT_MASK                                 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ               0x00010000
 
-       struct shared_feat_cfg   shared_feature_config;                /* 4 */
+       u32 iscsi_boot_signature;
+       u32 iscsi_boot_block_offset;
 
-       struct port_feat_cfg     port_feature_config[FUNC_MAX];/* 116*2=232 */
+       u32 reserved[3];
 
 };
 
@@ -678,9 +704,8 @@ struct dev_info {                                               /* size */
 /****************************************************************************
  * Management firmware state                                               *
  ****************************************************************************/
-/* Allocate 320 bytes for management firmware: still not known exactly
- * how much IMD needs. */
-#define MGMTFW_STATE_WORD_SIZE                             80
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE                             110
 
 struct mgmtfw_state {
        u32 opaque[MGMTFW_STATE_WORD_SIZE];
@@ -691,31 +716,40 @@ struct mgmtfw_state {
  * Shared Memory Region                                                    *
  ****************************************************************************/
 struct shmem_region {                         /*   SharedMem Offset (size) */
-       u32                 validity_map[FUNC_MAX];    /* 0x0 (4 * 2 = 0x8) */
-#define SHR_MEM_VALIDITY_PCI_CFG                   0x00000001
-#define SHR_MEM_VALIDITY_MB                        0x00000002
-#define SHR_MEM_VALIDITY_DEV_INFO                  0x00000004
+
+       u32                     validity_map[PORT_MAX];  /* 0x0 (4*2 = 0x8) */
+#define SHR_MEM_FORMAT_REV_ID                      ('A'<<24)
+#define SHR_MEM_FORMAT_REV_MASK                    0xff000000
+       /* validity bits */
+#define SHR_MEM_VALIDITY_PCI_CFG                   0x00100000
+#define SHR_MEM_VALIDITY_MB                        0x00200000
+#define SHR_MEM_VALIDITY_DEV_INFO                  0x00400000
+#define SHR_MEM_VALIDITY_RESERVED                  0x00000007
        /* One licensing bit should be set */
 #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
 #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
 #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
 #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT      0x00000020
+       /* Active MFW */
+#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN        0x00000000
+#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI           0x00000040
+#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP            0x00000080
+#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI           0x000000c0
+#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE           0x000001c0
+#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK           0x000001c0
 
-       struct drv_fw_mb    drv_fw_mb[FUNC_MAX];     /* 0x8 (28 * 2 = 0x38) */
-
-       struct dev_info     dev_info;                       /* 0x40 (0x438) */
+       struct dev_info         dev_info;                /* 0x8     (0x438) */
 
-#ifdef _LICENSE_H
-       license_key_t       drv_lic_key[FUNC_MAX]; /* 0x478 (52 * 2 = 0x68) */
-#else /* Linux! */
-       u8                  reserved[52*FUNC_MAX];
-#endif
+       u8                      reserved[52*PORT_MAX];
 
        /* FW information (for internal FW use) */
-       u32                 fw_info_fio_offset;            /* 0x4e0 (0x4)   */
-       struct mgmtfw_state mgmtfw_state;                  /* 0x4e4 (0x140) */
+       u32                     fw_info_fio_offset;    /* 0x4a8       (0x4) */
+       struct mgmtfw_state     mgmtfw_state;          /* 0x4ac     (0x1b8) */
+
+       struct drv_port_mb      port_mb[PORT_MAX];     /* 0x664 (16*2=0x20) */
+       struct drv_func_mb      func_mb[FUNC_MAX];     /* 0x684 (44*2=0x58) */
 
-};                                                        /* 0x624 */
+};                                                    /* 0x6dc */
 
 
 #define BCM_5710_FW_MAJOR_VERSION                      4
index 04f93bff2ef430dda0e3ea3ba7f02ca0af4c2f1b..dcaecc53bdb13c8dd3ee0f46400d981400dc1dd4 100644
@@ -1,6 +1,6 @@
 /* bnx2x_init.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2007 Broadcom Corporation
+ * Copyright (c) 2007-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -409,7 +409,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
 
        pci_read_config_word(bp->pdev,
                             bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val);
-       DP(NETIF_MSG_HW, "read 0x%x from devctl\n", val);
+       DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val);
        w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
        r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12);
 
@@ -472,10 +472,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
        REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
 
        REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
-       REG_WR(bp, PXP2_REG_RQ_WR_MBS0 + 8, w_order);
+       REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
        REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
-       REG_WR(bp, PXP2_REG_RQ_RD_MBS0 + 8, r_order);
+       REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
 
+       if (r_order == MAX_RD_ORD)
+               REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+       REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
        REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16);
 }
 
index 86055297ab021372fbe40f2132484b88da28143e..5a1aa0b55044426d2391d47e4105e362a24924c6 100644
@@ -1,6 +1,6 @@
 /* bnx2x_reg.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2007 Broadcom Corporation
+ * Copyright (c) 2007-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -24,6 +24,8 @@
 #define BRB1_REG_BRB1_INT_STS                                   0x6011c
 /* [RW 4] Parity mask register #0 read/write */
 #define BRB1_REG_BRB1_PRTY_MASK                                 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS                                  0x6012c
 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
    address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
    BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */
 #define CDU_REG_CDU_INT_STS                                     0x101030
 /* [RW 5] Parity mask register #0 read/write */
 #define CDU_REG_CDU_PRTY_MASK                                   0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS                                    0x101040
 /* [RC 32] logging of error data in case of a CDU load error:
    {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
    ype_error; ctual_active; ctual_compressed_context}; */
 #define CFC_REG_CFC_INT_STS_CLR                                 0x104100
 /* [RW 4] Parity mask register #0 read/write */
 #define CFC_REG_CFC_PRTY_MASK                                   0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS                                    0x10410c
 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */
 #define CFC_REG_CID_CAM                                         0x104800
 #define CFC_REG_CONTROL0                                        0x104028
 #define CSDM_REG_CSDM_INT_MASK_1                                0xc22ac
 /* [RW 11] Parity mask register #0 read/write */
 #define CSDM_REG_CSDM_PRTY_MASK                                 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS                                  0xc22b0
 #define CSDM_REG_ENABLE_IN1                                     0xc2238
 #define CSDM_REG_ENABLE_IN2                                     0xc223c
 #define CSDM_REG_ENABLE_OUT1                                    0xc2240
 /* [RW 32] Parity mask register #0 read/write */
 #define CSEM_REG_CSEM_PRTY_MASK_0                               0x200130
 #define CSEM_REG_CSEM_PRTY_MASK_1                               0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0                                0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1                                0x200134
 #define CSEM_REG_ENABLE_IN                                      0x2000a4
 #define CSEM_REG_ENABLE_OUT                                     0x2000a8
 /* [RW 32] This address space contains all registers and memories that are
 #define CSEM_REG_TS_9_AS                                        0x20005c
 /* [RW 1] Parity mask register #0 read/write */
 #define DBG_REG_DBG_PRTY_MASK                                   0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS                                    0xc09c
 /* [RW 2] debug only: These bits indicate the credit for PCI request type 4
    interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are
    configured */
 #define DMAE_REG_DMAE_INT_MASK                                  0x102054
 /* [RW 4] Parity mask register #0 read/write */
 #define DMAE_REG_DMAE_PRTY_MASK                                 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS                                  0x102058
 /* [RW 1] Command 0 go. */
 #define DMAE_REG_GO_C0                                          0x102080
 /* [RW 1] Command 1 go. */
 #define DORQ_REG_DORQ_INT_STS_CLR                               0x170178
 /* [RW 2] Parity mask register #0 read/write */
 #define DORQ_REG_DORQ_PRTY_MASK                                 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS                                  0x170184
 /* [RW 8] The address to write the DPM CID to STORM. */
 #define DORQ_REG_DPM_CID_ADDR                                   0x170044
 /* [RW 5] The DPM mode CID extraction offset. */
 #define HC_REG_CONFIG_1                                         0x108004
 /* [RW 3] Parity mask register #0 read/write */
 #define HC_REG_HC_PRTY_MASK                                     0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS                                      0x108094
 /* [RW 17] status block interrupt mask; one in each bit means unmask; zerow
    in each bit means mask; bit 0 - default SB; bit 1 - SB_0; bit 2 - SB_1...
    bit 16- SB_15; addr 0 - port 0; addr 1 - port 1 */
 #define MISC_REG_AEU_GENERAL_ATTN_17                            0xa044
 #define MISC_REG_AEU_GENERAL_ATTN_18                            0xa048
 #define MISC_REG_AEU_GENERAL_ATTN_19                            0xa04c
+#define MISC_REG_AEU_GENERAL_ATTN_10                            0xa028
 #define MISC_REG_AEU_GENERAL_ATTN_11                            0xa02c
 #define MISC_REG_AEU_GENERAL_ATTN_2                             0xa008
 #define MISC_REG_AEU_GENERAL_ATTN_20                            0xa050
 #define MISC_REG_AEU_GENERAL_ATTN_4                             0xa010
 #define MISC_REG_AEU_GENERAL_ATTN_5                             0xa014
 #define MISC_REG_AEU_GENERAL_ATTN_6                             0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7                             0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8                             0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9                             0xa024
 /* [RW 32] first 32b for inverting the input for function 0; for each bit:
    0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
    function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
    starts at 0x0 for the A0 tape-out and increments by one for each
    all-layer tape-out. */
 #define MISC_REG_CHIP_REV                                       0xa40c
+/* [RW 32] The following driver registers(1..6) represent 6 drivers and 32
+   clients. Each client can be controlled by one driver only. One in each
+   bit represent that this driver control the appropriate client (Ex: bit 5
+   is set means this driver control client number 5). addr1 = set; addr0 =
+   clear; read from both addresses will give the same result = status. write
+   to address 1 will set a request to control all the clients that their
+   appropriate bit (in the write command) is set. if the client is free (the
+   appropriate bit in all the other drivers is clear) one will be written to
+   that driver register; if the client isn't free the bit will remain zero.
+   if the appropriate bit is set (the driver request to gain control on a
+   client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
+   interrupt will be asserted). write to address 0 will set a request to
+   free all the clients that their appropriate bit (in the write command) is
+   set. if the appropriate bit is clear (the driver request to free a client
+   it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
+   be asserted). */
+#define MISC_REG_DRIVER_CONTROL_1                               0xa510
+/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
+   these bits is written as a '1'; the corresponding SPIO bit will turn off
+   it's drivers and become an input. This is the reset state of all GPIO
+   pins. The read value of these bits will be a '1' if that last command
+   (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+   [23-20] CLR port 1; 19-16] CLR port 0; When any of these bits is written
+   as a '1'; the corresponding GPIO bit will drive low. The read value of
+   these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+   this bit was a #CLR. (reset value 0). [15-12] SET port 1; 11-8] port 0;
+   SET When any of these bits is written as a '1'; the corresponding GPIO
+   bit will drive high (if it has that capability). The read value of these
+   bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+   bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
+   RO; These bits indicate the read value of each of the eight GPIO pins.
+   This is the result value of the pin; not the drive value. Writing these
+   bits will have not effect. */
+#define MISC_REG_GPIO                                           0xa490
 /* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
    access that does not finish within
    ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
 #define MISC_REG_MISC_INT_MASK                                  0xa388
 /* [RW 1] Parity mask register #0 read/write */
 #define MISC_REG_MISC_PRTY_MASK                                 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS                                  0xa38c
 /* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911.
    inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
    divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
 /* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
    shared with the driver resides */
 #define MISC_REG_SHARED_MEM_ADDR                                0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+   the corresponding SPIO bit will turn off it's drivers and become an
+   input. This is the reset state of all SPIO pins. The read value of these
+   bits will be a '1' if that last command (#SET; #CL; or #FLOAT) for this
+   bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+   is written as a '1'; the corresponding SPIO bit will drive low. The read
+   value of these bits will be a '1' if that last command (#SET; #CLR; or
+#FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+   these bits is written as a '1'; the corresponding SPIO bit will drive
+   high (if it has that capability). The read value of these bits will be a
+   '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+   (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+   each of the eight SPIO pins. This is the result value of the pin; not the
+   drive value. Writing these bits will have not effect. Each 8 bits field
+   is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+   from VAUX. (This is an output pin only; the FLOAT field is not applicable
+   for this pin); [1] VAUX Disable; when pulsed low; disables supply form
+   VAUX. (This is an output pin only; FLOAT field is not applicable for this
+   pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+   select VAUX supply. (This is an output pin only; it is not controlled by
+   the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+   field is not applicable for this pin; only the VALUE fields is relevant -
+   it reflects the output value); [3] reserved; [4] spio_4; [5] spio_5; [6]
+   Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+   device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO                                           0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signals event to the IGU/MC.
+   according to the following map: [3:0] reserved; [4] spio_4 [5] spio_5;
+   [7:0] reserved */
+#define MISC_REG_SPIO_EVENT_EN                                  0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bit clears the
+   corresponding bit in the #OLD_VALUE register. This will acknowledge an
+   interrupt on the falling edge of corresponding SPIO input (reset value
+   0). [23-16] OLD_SET Writing a '1' to these bit sets the corresponding bit
+   in the #OLD_VALUE register. This will acknowledge an interrupt on the
+   rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
+   RO; These bits indicate the old value of the SPIO input value. When the
+   ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
+   that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
+   to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
+   interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
+   RO; These bits indicate the current SPIO interrupt state for each SPIO
+   pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
+   command bit is written. This bit is set when the SPIO input does not
+   match the current value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_SPIO_INT                                       0xa500
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+   loaded; 0-prepare; -unprepare */
+#define MISC_REG_UNPREPARED                                     0xa424
 #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT     (0x1<<0)
 #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS   (0x1<<9)
 #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G         (0x1<<15)
 #define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC                        0x10044
 /* [RW 1] Input enable for RX PBF LP IF */
 #define NIG_REG_PBF_LB_IN_EN                                    0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+   ~nig_registers_strap_override.strap_override =1 */
+#define NIG_REG_PORT_SWAP                                       0x10394
 /* [RW 1] output enable for RX parser descriptor IF */
 #define NIG_REG_PRS_EOP_OUT_EN                                  0x10104
 /* [RW 1] Input enable for RX parser request IF */
 #define NIG_REG_STAT2_BRB_OCTET                                 0x107e0
 #define NIG_REG_STATUS_INTERRUPT_PORT0                          0x10328
 #define NIG_REG_STATUS_INTERRUPT_PORT1                          0x1032c
+/* [RW 1] port swap mux selection. If this register equal to 0 then port
+   swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+   ort swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE                                  0x10398
 /* [RW 1] output enable for RX_XCM0 IF */
 #define NIG_REG_XCM0_OUT_EN                                     0x100f0
 /* [RW 1] output enable for RX_XCM1 IF */
 #define PB_REG_PB_INT_STS                                       0x1c
 /* [RW 4] Parity mask register #0 read/write */
 #define PB_REG_PB_PRTY_MASK                                     0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS                                      0x2c
 #define PRS_REG_A_PRSU_20                                       0x40134
 /* [R 8] debug only: CFC load request current credit. Transaction based. */
 #define PRS_REG_CFC_LD_CURRENT_CREDIT                           0x40164
 #define PRS_REG_PRS_INT_STS                                     0x40188
 /* [RW 8] Parity mask register #0 read/write */
 #define PRS_REG_PRS_PRTY_MASK                                   0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS                                    0x40198
 /* [RW 8] Context region for pure acknowledge packets. Used in CFC load
    request message */
 #define PRS_REG_PURE_REGIONS                                    0x40024
 /* [RW 32] Parity mask register #0 read/write */
 #define PXP2_REG_PXP2_PRTY_MASK_0                               0x120588
 #define PXP2_REG_PXP2_PRTY_MASK_1                               0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0                                0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1                                0x12058c
 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives
    indication about backpressure) */
 #define PXP2_REG_RD_ALMOST_FULL_0                               0x120424
 #define PXP2_REG_RQ_HC_ENDIAN_M                                 0x1201a8
 /* [WB 53] Onchip address table */
 #define PXP2_REG_RQ_ONCHIP_AT                                   0x122000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT                                   0x12033c
 /* [RW 2] Endian mode for qm */
 #define PXP2_REG_RQ_QM_ENDIAN_M                                 0x120194
 /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
 /* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
    001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
 #define PXP2_REG_RQ_RD_MBS0                                     0x120160
+/* [RW 3] Max burst size filed for read requests port 1; 000 - 128B;
+   001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
+#define PXP2_REG_RQ_RD_MBS1                                     0x120168
 /* [RW 2] Endian mode for src */
 #define PXP2_REG_RQ_SRC_ENDIAN_M                                0x12019c
 /* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k;
 /* [RW 3] Max burst size filed for write requests port 0; 000 - 128B;
    001:256B; 010: 512B; */
 #define PXP2_REG_RQ_WR_MBS0                                     0x12015c
+/* [RW 3] Max burst size filed for write requests port 1; 000 - 128B;
+   001:256B; 010: 512B; */
+#define PXP2_REG_RQ_WR_MBS1                                     0x120164
 /* [RW 10] if Number of entries in dmae fifo will be higer than this
    threshold then has_payload indication will be asserted; the default value
    should be equal to >  write MBS size! */
 #define PXP2_REG_WR_DMAE_TH                                     0x120368
+/* [RW 10] if Number of entries in usdmdp fifo will be higer than this
+   threshold then has_payload indication will be asserted; the default value
+   should be equal to >  write MBS size! */
+#define PXP2_REG_WR_USDMDP_TH                                   0x120348
 /* [R 1] debug only: Indication if PSWHST arbiter is idle */
 #define PXP_REG_HST_ARB_IS_IDLE                                 0x103004
 /* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
 #define PXP_REG_PXP_INT_STS_CLR_0                               0x10306c
 /* [RW 26] Parity mask register #0 read/write */
 #define PXP_REG_PXP_PRTY_MASK                                   0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS                                    0x103088
 /* [RW 4] The activity counter initial increment value sent in the load
    request */
 #define QM_REG_ACTCTRINITVAL_0                                  0x168040
 #define QM_REG_QM_INT_STS                                       0x168438
 /* [RW 9] Parity mask register #0 read/write */
 #define QM_REG_QM_PRTY_MASK                                     0x168454
+/* [R 9] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS                                      0x168448
 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
 #define QM_REG_QSTATUS_HIGH                                     0x16802c
 /* [R 32] Current queues in pipeline: Queues from 0 to 31 */
 #define SRC_REG_SRC_INT_STS                                     0x404ac
 /* [RW 3] Parity mask register #0 read/write */
 #define SRC_REG_SRC_PRTY_MASK                                   0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS                                    0x404bc
 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
 #define TCM_REG_CAM_OCCUP                                       0x5017c
 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
 #define TSDM_REG_TSDM_INT_MASK_1                                0x422ac
 /* [RW 11] Parity mask register #0 read/write */
 #define TSDM_REG_TSDM_PRTY_MASK                                 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS                                  0x422b0
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define TSEM_REG_ARB_CYCLE_SIZE                                 0x180034
 /* [RW 3] The source that is associated with arbitration element 0. Source
 /* [RW 32] Parity mask register #0 read/write */
 #define TSEM_REG_TSEM_PRTY_MASK_0                               0x180120
 #define TSEM_REG_TSEM_PRTY_MASK_1                               0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0                                0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1                                0x180124
 /* [R 5] Used to read the XX protection CAM occupancy counter. */
 #define UCM_REG_CAM_OCCUP                                       0xe0170
 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
 #define USDM_REG_USDM_INT_MASK_1                                0xc42b0
 /* [RW 11] Parity mask register #0 read/write */
 #define USDM_REG_USDM_PRTY_MASK                                 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS                                  0xc42b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define USEM_REG_ARB_CYCLE_SIZE                                 0x300034
 /* [RW 3] The source that is associated with arbitration element 0. Source
 /* [RW 32] Parity mask register #0 read/write */
 #define USEM_REG_USEM_PRTY_MASK_0                               0x300130
 #define USEM_REG_USEM_PRTY_MASK_1                               0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0                                0x300124
+#define USEM_REG_USEM_PRTY_STS_1                                0x300134
 /* [RW 2] The queue index for registration on Aux1 counter flag. */
 #define XCM_REG_AUX1_Q                                          0x20134
 /* [RW 2] Per each decision rule the queue index to register to. */
 #define XSDM_REG_XSDM_INT_MASK_1                                0x1662ac
 /* [RW 11] Parity mask register #0 read/write */
 #define XSDM_REG_XSDM_PRTY_MASK                                 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS                                  0x1662b0
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define XSEM_REG_ARB_CYCLE_SIZE                                 0x280034
 /* [RW 3] The source that is associated with arbitration element 0. Source
 /* [RW 32] Parity mask register #0 read/write */
 #define XSEM_REG_XSEM_PRTY_MASK_0                               0x280130
 #define XSEM_REG_XSEM_PRTY_MASK_1                               0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0                                0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1                                0x280134
 #define MCPR_NVM_ACCESS_ENABLE_EN                               (1L<<0)
 #define MCPR_NVM_ACCESS_ENABLE_WR_EN                            (1L<<1)
 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE                            (0xffffffL<<0)
 #define EMAC_MDIO_COMM_START_BUSY                               (1L<<29)
 #define EMAC_MDIO_MODE_AUTO_POLL                                (1L<<4)
 #define EMAC_MDIO_MODE_CLAUSE_45                                (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT                                (0x3fL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT                       16
 #define EMAC_MODE_25G_MODE                                      (1L<<5)
 #define EMAC_MODE_ACPI_RCVD                                     (1L<<20)
 #define EMAC_MODE_HALF_DUPLEX                                   (1L<<1)
 #define EMAC_RX_MTU_SIZE_JUMBO_ENA                              (1L<<31)
 #define EMAC_TX_MODE_EXT_PAUSE_EN                               (1L<<3)
 #define EMAC_TX_MODE_RESET                                      (1L<<0)
+#define MISC_REGISTERS_GPIO_1                                   1
+#define MISC_REGISTERS_GPIO_2                                   2
+#define MISC_REGISTERS_GPIO_3                                   3
+#define MISC_REGISTERS_GPIO_CLR_POS                             16
+#define MISC_REGISTERS_GPIO_FLOAT                               (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS                           24
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z                          2
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH                         1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW                          0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT                          4
+#define MISC_REGISTERS_GPIO_SET_POS                             8
 #define MISC_REGISTERS_RESET_REG_1_CLEAR                        0x588
 #define MISC_REGISTERS_RESET_REG_1_SET                          0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR                        0x598
 #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW   (0x1<<4)
 #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
 #define MISC_REGISTERS_RESET_REG_3_SET                          0x5a4
+#define MISC_REGISTERS_SPIO_4                                   4
+#define MISC_REGISTERS_SPIO_5                                   5
+#define MISC_REGISTERS_SPIO_7                                   7
+#define MISC_REGISTERS_SPIO_CLR_POS                             16
+#define MISC_REGISTERS_SPIO_FLOAT                               (0xffL<<24)
+#define GRC_MISC_REGISTERS_SPIO_FLOAT7                          0x80000000
+#define GRC_MISC_REGISTERS_SPIO_FLOAT6                          0x40000000
+#define GRC_MISC_REGISTERS_SPIO_FLOAT5                          0x20000000
+#define GRC_MISC_REGISTERS_SPIO_FLOAT4                          0x10000000
+#define MISC_REGISTERS_SPIO_FLOAT_POS                           24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z                          2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS                     16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH                         1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW                          0
+#define MISC_REGISTERS_SPIO_SET_POS                             8
+#define HW_LOCK_MAX_RESOURCE_VALUE                              31
+#define HW_LOCK_RESOURCE_8072_MDIO                              0
+#define HW_LOCK_RESOURCE_GPIO                                   1
+#define HW_LOCK_RESOURCE_SPIO                                   2
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR                (1<<18)
 #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT                (1<<31)
 #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT                (1<<9)
 #define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT                 (1<<3)
 #define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR                 (1<<2)
 #define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR           (1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5                           (1<<15)
 #define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT                (1<<27)
 #define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT             (1<<5)
 #define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT               (1<<25)
 #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE       0x4000
 #define MDIO_XGXS_BLOCK2_TX_LN_SWAP                    0x11
 #define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE             0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G              0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS     0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS   0x0010
 #define MDIO_XGXS_BLOCK2_TEST_MODE_LANE                0x15
 
 #define MDIO_REG_BANK_GP_STATUS                        0x8120
 #define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE   0x0001
 
 
+#define EXT_PHY_AUTO_NEG_DEVAD                         0x7
 #define EXT_PHY_OPT_PMA_PMD_DEVAD                      0x1
 #define EXT_PHY_OPT_WIS_DEVAD                          0x2
 #define EXT_PHY_OPT_PCS_DEVAD                          0x3
 #define EXT_PHY_OPT_PHY_XS_DEVAD                       0x4
 #define EXT_PHY_OPT_CNTL                               0x0
+#define EXT_PHY_OPT_CNTL2                              0x7
 #define EXT_PHY_OPT_PMD_RX_SD                          0xa
 #define EXT_PHY_OPT_PMD_MISC_CNTL                      0xca0a
 #define EXT_PHY_OPT_PHY_IDENTIFIER                     0xc800
 #define EXT_PHY_OPT_LASI_STATUS                        0x9005
 #define EXT_PHY_OPT_PCS_STATUS                         0x0020
 #define EXT_PHY_OPT_XGXS_LANE_STATUS                   0x0018
+#define EXT_PHY_OPT_AN_LINK_STATUS                     0x8304
+#define EXT_PHY_OPT_AN_CL37_CL73                       0x8370
+#define EXT_PHY_OPT_AN_CL37_FD                         0xffe4
+#define EXT_PHY_OPT_AN_CL37_AN                         0xffe0
+#define EXT_PHY_OPT_AN_ADV                             0x11
 
 #define EXT_PHY_KR_PMA_PMD_DEVAD                       0x1
 #define EXT_PHY_KR_PCS_DEVAD                           0x3
 #define EXT_PHY_KR_AUTO_NEG_DEVAD                      0x7
 #define EXT_PHY_KR_CTRL                                0x0000
+#define EXT_PHY_KR_STATUS                              0x0001
+#define EXT_PHY_KR_AUTO_NEG_COMPLETE                   0x0020
+#define EXT_PHY_KR_AUTO_NEG_ADVERT                     0x0010
+#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE               0x0400
+#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC    0x0800
+#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH          0x0C00
+#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK          0x0C00
+#define EXT_PHY_KR_LP_AUTO_NEG                         0x0013
 #define EXT_PHY_KR_CTRL2                               0x0007
 #define EXT_PHY_KR_PCS_STATUS                          0x0020
 #define EXT_PHY_KR_PMD_CTRL                            0x0096
 #define EXT_PHY_KR_MISC_CTRL1                          0xca85
 #define EXT_PHY_KR_GEN_CTRL                            0xca10
 #define EXT_PHY_KR_ROM_CODE                            0xca19
+#define EXT_PHY_KR_ROM_RESET_INTERNAL_MP               0x0188
+#define EXT_PHY_KR_ROM_MICRO_RESET                     0x018a
+
+#define EXT_PHY_SFX7101_XGXS_TEST1         0xc00a
 
index 571750975137a3adb5ef7d034e42f72ad3341461..348371fda597b0f1072e92cf062f11a4a12d7924 100644
@@ -172,30 +172,30 @@ static char version[] __initdata =
    them to system IRQ numbers. This mapping is card specific and is set to
    the configuration of the Cirrus Eval board for this chip. */
 #ifdef CONFIG_ARCH_CLPS7500
-static unsigned int netcard_portlist[] __initdata =
+static unsigned int netcard_portlist[] __used __initdata =
    { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
 static unsigned int cs8900_irq_map[] = {12,0,0,0};
 #elif defined(CONFIG_SH_HICOSH4)
-static unsigned int netcard_portlist[] __initdata =
+static unsigned int netcard_portlist[] __used __initdata =
    { 0x0300, 0};
 static unsigned int cs8900_irq_map[] = {1,0,0,0};
 #elif defined(CONFIG_MACH_IXDP2351)
-static unsigned int netcard_portlist[] __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
+static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
 static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
 #include <asm/irq.h>
 #elif defined(CONFIG_ARCH_IXDP2X01)
 #include <asm/irq.h>
-static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
+static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
 static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
 #elif defined(CONFIG_ARCH_PNX010X)
 #include <asm/irq.h>
 #include <asm/arch/gpio.h>
 #define CIRRUS_DEFAULT_BASE    IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000)      /* = Physical address 0x48200000 */
 #define CIRRUS_DEFAULT_IRQ     VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
-static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0};
+static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
 static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
 #else
-static unsigned int netcard_portlist[] __initdata =
+static unsigned int netcard_portlist[] __used __initdata =
    { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
 static unsigned int cs8900_irq_map[] = {10,11,12,5};
 #endif
index 3beace55b58d8c25efe1a04a4a6f7ed4b042529b..7fe20310eb5f38514ae2affdb7bd00db7de2d35a 100644
@@ -438,7 +438,7 @@ static void e1000_release_nvm_82571(struct e1000_hw *hw)
  *  For non-82573 silicon, write data to EEPROM at offset using SPI interface.
  *
  *  If e1000e_update_nvm_checksum is not called after this function, the
- *  EEPROM will most likley contain an invalid checksum.
+ *  EEPROM will most likely contain an invalid checksum.
  **/
 static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
                                 u16 *data)
@@ -547,7 +547,7 @@ static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
  *  poll for completion.
  *
  *  If e1000e_update_nvm_checksum is not called after this function, the
- *  EEPROM will most likley contain an invalid checksum.
+ *  EEPROM will most likely contain an invalid checksum.
  **/
 static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
                                      u16 words, u16 *data)
@@ -1053,7 +1053,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
                /* If SerDes loopback mode is entered, there is no form
                 * of reset to take the adapter out of that mode.  So we
                 * have to explicitly take the adapter out of loopback
-                * mode.  This prevents drivers from twidling their thumbs
+                * mode.  This prevents drivers from twiddling their thumbs
                 * if another tool failed to take it out of loopback mode.
                 */
                ew32(SCTL,
@@ -1098,7 +1098,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
  *  e1000e_get_laa_state_82571 - Get locally administered address state
  *  @hw: pointer to the HW structure
  *
- *  Retrieve and return the current locally administed address state.
+ *  Retrieve and return the current locally administered address state.
  **/
 bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
 {
@@ -1113,7 +1113,7 @@ bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
  *  @hw: pointer to the HW structure
  *  @state: enable/disable locally administered address
  *
- *  Enable/Disable the current locally administed address state.
+ *  Enable/Disable the current locally administered address state.
  **/
 void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
 {
@@ -1280,16 +1280,6 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
 };
 
 static struct e1000_nvm_operations e82571_nvm_ops = {
-       .acquire_nvm            = e1000_acquire_nvm_82571,
-       .read_nvm               = e1000e_read_nvm_spi,
-       .release_nvm            = e1000_release_nvm_82571,
-       .update_nvm             = e1000_update_nvm_checksum_82571,
-       .valid_led_default      = e1000_valid_led_default_82571,
-       .validate_nvm           = e1000_validate_nvm_checksum_82571,
-       .write_nvm              = e1000_write_nvm_82571,
-};
-
-static struct e1000_nvm_operations e82573_nvm_ops = {
        .acquire_nvm            = e1000_acquire_nvm_82571,
        .read_nvm               = e1000e_read_nvm_eerd,
        .release_nvm            = e1000_release_nvm_82571,
@@ -1355,6 +1345,6 @@ struct e1000_info e1000_82573_info = {
        .get_invariants         = e1000_get_invariants_82571,
        .mac_ops                = &e82571_mac_ops,
        .phy_ops                = &e82_phy_ops_m88,
-       .nvm_ops                = &e82573_nvm_ops,
+       .nvm_ops                = &e82571_nvm_ops,
 };
 
index 6232c3e96689668cb15748e88a82ddc856c1714f..a4f511f549f789c287c6fa06ae68479beb47d372 100644
@@ -66,7 +66,7 @@
 #define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
 
 /* Extended Device Control */
-#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
 #define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
 #define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
 #define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
 
-/* Receive Decriptor bit definitions */
+/* Receive Descriptor bit definitions */
 #define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
 #define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
 #define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
 #define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum caculated */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
 #define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
 #define E1000_RXD_ERR_CE        0x01    /* CRC Error */
 #define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
 #define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion by NVM */
 #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
 
-/* Constants used to intrepret the masked PCI-X bus speed. */
+/* Constants used to interpret the masked PCI-X bus speed. */
 
 #define HALF_DUPLEX 1
 #define FULL_DUPLEX 2
 /* PHY 1000 MII Register/Bit Definitions */
 /* PHY Registers defined by IEEE */
 #define PHY_CONTROL      0x00 /* Control Register */
-#define PHY_STATUS       0x01 /* Status Regiser */
+#define PHY_STATUS       0x01 /* Status Register */
 #define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
 #define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
 #define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
index 8b88c226e8581908f2d19dfec380ed1cff35ae00..327c0620da310a7815b58836deea5c751256144c 100644 (file)
@@ -42,8 +42,7 @@
 struct e1000_info;
 
 #define ndev_printk(level, netdev, format, arg...) \
-       printk(level "%s: %s: " format, (netdev)->dev.parent->bus_id, \
-              (netdev)->name, ## arg)
+       printk(level "%s: " format, (netdev)->name, ## arg)
 
 #ifdef DEBUG
 #define ndev_dbg(netdev, format, arg...) \
index 3c5862f97dbf3b90a61cc468ab804deb23ca47dc..916025b30fc3cd4dd4af6ea31c0f06bfd27f4077 100644 (file)
@@ -184,7 +184,7 @@ enum e1e_registers {
        E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
        E1000_ICRXOC   = 0x04124, /* Irq Cause Receiver Overrun Count */
        E1000_RXCSUM   = 0x05000, /* RX Checksum Control - RW */
-       E1000_RFCTL    = 0x05008, /* Receive Filter Control*/
+       E1000_RFCTL    = 0x05008, /* Receive Filter Control */
        E1000_MTA      = 0x05200, /* Multicast Table Array - RW Array */
        E1000_RA       = 0x05400, /* Receive Address - RW Array */
        E1000_VFTA     = 0x05600, /* VLAN Filter Table Array - RW Array */
@@ -202,7 +202,7 @@ enum e1e_registers {
        E1000_FACTPS    = 0x05B30, /* Function Active and Power State to MNG */
        E1000_SWSM      = 0x05B50, /* SW Semaphore */
        E1000_FWSM      = 0x05B54, /* FW Semaphore */
-       E1000_HICR      = 0x08F00, /* Host Inteface Control */
+       E1000_HICR      = 0x08F00, /* Host Interface Control */
 };
 
 /* RSS registers */
index 8f8139de1f4841fd7cdb350d14d2e12c6c9d16cc..0ae39550768d5ebdd1ef07664aec329aa25abac9 100644 (file)
@@ -671,7 +671,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
  *  e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
  *  @hw: pointer to the HW structure
  *
- *  Polarity is determined on the polarity reveral feature being enabled.
+ *  Polarity is determined on the polarity reversal feature being enabled.
  *  This function is only called by other family-specific
  *  routines.
  **/
@@ -947,7 +947,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
        /* Either we should have a hardware SPI cycle in progress
         * bit to check against, in order to start a new cycle or
         * FDONE bit should be changed in the hardware so that it
-        * is 1 after harware reset, which can then be used as an
+        * is 1 after hardware reset, which can then be used as an
         * indication whether a cycle is in progress or has been
         * completed.
         */
@@ -1155,7 +1155,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
  *  which writes the checksum to the shadow ram.  The changes in the shadow
  *  ram are then committed to the EEPROM by processing each bank at a time
  *  checking for the modified bit and writing only the pending changes.
- *  After a succesful commit, the shadow ram is cleared and is ready for
+ *  After a successful commit, the shadow ram is cleared and is ready for
  *  future writes.
  **/
 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
@@ -1680,7 +1680,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
  *   - initialize LED identification
  *   - setup receive address registers
  *   - setup flow control
- *   - setup transmit discriptors
+ *   - setup transmit descriptors
  *   - clear statistics
  **/
 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
@@ -1961,7 +1961,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
        ew32(PHY_CTRL, phy_ctrl);
 
-       /* Call gig speed drop workaround on Giga disable before accessing
+       /* Call gig speed drop workaround on Gig disable before accessing
         * any PHY registers */
        e1000e_gig_downshift_workaround_ich8lan(hw);
 
@@ -1972,7 +1972,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
 /**
  *  e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state
  *  @hw: pointer to the HW structure
- *  @state: boolean value used to set the current Kumaran workaround state
+ *  @state: boolean value used to set the current Kumeran workaround state
  *
  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
  *  /disabled - FALSE).
@@ -2017,7 +2017,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
                ew32(PHY_CTRL, reg);
 
-               /* Call gig speed drop workaround on Giga disable before
+               /* Call gig speed drop workaround on Gig disable before
                 * accessing any PHY registers */
                if (hw->mac.type == e1000_ich8lan)
                        e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -2045,7 +2045,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
  *  @hw: pointer to the HW structure
  *
  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
- *  LPLU, Giga disable, MDIC PHY reset):
+ *  LPLU, Gig disable, MDIC PHY reset):
  *    1) Set Kumeran Near-end loopback
  *    2) Clear Kumeran Near-end loopback
  *  Should only be called for ICH8[m] devices with IGP_3 Phy.
@@ -2089,10 +2089,10 @@ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_led_on_ich8lan - Turn LED's on
+ *  e1000_led_on_ich8lan - Turn LEDs on
  *  @hw: pointer to the HW structure
  *
- *  Turn on the LED's.
+ *  Turn on the LEDs.
  **/
 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
 {
@@ -2105,10 +2105,10 @@ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_led_off_ich8lan - Turn LED's off
+ *  e1000_led_off_ich8lan - Turn LEDs off
  *  @hw: pointer to the HW structure
  *
- *  Turn off the LED's.
+ *  Turn off the LEDs.
  **/
 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
 {
index 16f35fadb74b8a9ca4ce005b0617791832890832..95f75a43c9f93fff6602a62a53dd89b0c6ce12b9 100644 (file)
@@ -589,9 +589,6 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
        s32 ret_val;
        u16 nvm_data;
 
-       if (mac->fc != e1000_fc_default)
-               return 0;
-
        /* Read and store word 0x0F of the EEPROM. This word contains bits
         * that determine the hardware's default PAUSE (flow control) mode,
         * a bit that determines whether the HW defaults to enabling or
@@ -1107,34 +1104,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        mac->fc = e1000_fc_rx_pause;
                        hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
-               }
-               /* Per the IEEE spec, at this point flow control should be
-                * disabled.  However, we want to consider that we could
-                * be connected to a legacy switch that doesn't advertise
-                * desired flow control, but can be forced on the link
-                * partner.  So if we advertised no flow control, that is
-                * what we will resolve to.  If we advertised some kind of
-                * receive capability (Rx Pause Only or Full Flow Control)
-                * and the link partner advertised none, we will configure
-                * ourselves to enable Rx Flow Control only.  We can do
-                * this safely for two reasons:  If the link partner really
-                * didn't want flow control enabled, and we enable Rx, no
-                * harm done since we won't be receiving any PAUSE frames
-                * anyway.  If the intent on the link partner was to have
-                * flow control enabled, then by us enabling RX only, we
-                * can at least receive pause frames and process them.
-                * This is a good idea because in most cases, since we are
-                * predominantly a server NIC, more times than not we will
-                * be asked to delay transmission of packets than asking
-                * our link partner to pause transmission of frames.
-                */
-               else if ((mac->original_fc == e1000_fc_none) ||
-                        (mac->original_fc == e1000_fc_tx_pause)) {
+               } else {
+                       /*
+                        * Per the IEEE spec, at this point flow control
+                        * should be disabled.
+                        */
                        mac->fc = e1000_fc_none;
                        hw_dbg(hw, "Flow Control = NONE.\r\n");
-               } else {
-                       mac->fc = e1000_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
                }
 
                /* Now we need to do one last check...  If we auto-
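[Editor's note: the comment block removed in the hunk above spelled out why the driver used to fall back to Rx-only pause when a legacy link partner advertised nothing; the replacement code simply follows the IEEE 802.3 Annex 28B resolution. As a rough, self-contained sketch of that resolution — the enum and function names below are made up for illustration, they are not e1000e symbols:

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve negotiated flow control from the local and link-partner
 * PAUSE/ASM_DIR advertisement bits, roughly per 802.3 Annex 28B.
 * Illustrative only; thresholds and special cases in the driver differ. */
static enum fc_mode resolve_fc(int loc_pause, int loc_asm,
                               int lp_pause, int lp_asm)
{
        if (loc_pause && lp_pause)
                return FC_FULL;        /* both ends advertise symmetric pause */
        if (!loc_pause && loc_asm && lp_pause && lp_asm)
                return FC_TX_PAUSE;    /* we may send PAUSE but won't honour it */
        if (loc_pause && loc_asm && !lp_pause && lp_asm)
                return FC_RX_PAUSE;    /* we honour PAUSE, the partner won't */
        return FC_NONE;                /* per spec: no flow control */
}
]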
@@ -1164,7 +1140,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 }
 
 /**
- *  e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex
+ *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
  *  @hw: pointer to the HW structure
  *  @speed: stores the current speed
  *  @duplex: stores the current duplex
@@ -1200,7 +1176,7 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
 }
 
 /**
- *  e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex
+ *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
  *  @hw: pointer to the HW structure
  *  @speed: stores the current speed
  *  @duplex: stores the current duplex
@@ -1410,7 +1386,7 @@ s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
  *  e1000e_blink_led - Blink LED
  *  @hw: pointer to the HW structure
  *
- *  Blink the led's which are set to be on.
+ *  Blink the LEDs which are set to be on.
  **/
 s32 e1000e_blink_led(struct e1000_hw *hw)
 {
@@ -1515,7 +1491,7 @@ void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
  *  @hw: pointer to the HW structure
  *
  *  Returns 0 if successful, else returns -10
- *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
  *  the master requests to be disabled.
  *
  *  Disables PCI-Express master access and verifies there are no pending
@@ -1876,7 +1852,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 }
 
 /**
- *  e1000e_read_nvm_spi - Read EEPROM's using SPI
+ *  e1000e_read_nvm_spi - Reads EEPROM using SPI
  *  @hw: pointer to the HW structure
  *  @offset: offset of word in the EEPROM to read
  *  @words: number of words to read
@@ -1980,7 +1956,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
  *  Writes data to EEPROM at offset using SPI interface.
  *
  *  If e1000e_update_nvm_checksum is not called after this function , the
- *  EEPROM will most likley contain an invalid checksum.
+ *  EEPROM will most likely contain an invalid checksum.
  **/
 s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
@@ -2222,7 +2198,7 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
  *
  *  Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
  *
- *  This function checks whether the HOST IF is enabled for command operaton
+ *  This function checks whether the HOST IF is enabled for command operation
  *  and also checks whether the previous command is completed.  It busy waits
  *  in case of previous command is not completed.
  **/
@@ -2254,7 +2230,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
 }
 
 /**
- *  e1000e_check_mng_mode - check managament mode
+ *  e1000e_check_mng_mode - check management mode
  *  @hw: pointer to the HW structure
  *
  *  Reads the firmware semaphore register and returns true (>0) if
index 3031d6d16247abf5153884728aad6fe038d8c172..fc5c63f4f5788d8de908e1c72d167a0f96bc6938 100644 (file)
@@ -1006,7 +1006,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
  * e1000_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
  *
- * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is open.
@@ -1032,7 +1032,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
  * e1000_release_hw_control - release control of the h/w to f/w
  * @adapter: address of board private structure
  *
- * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
  * driver is no longer loaded. For AMT version (only with 82573) i
  * of the f/w this means that the network i/f is closed.
@@ -1241,6 +1241,11 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
 
 /**
  * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
  *      Stores a new ITR value based on packets and byte
  *      counts during the last interrupt.  The advantage of per interrupt
  *      computation is faster updates and more accurate ITR for the current
@@ -1250,10 +1255,6 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
  *      while increasing bulk throughput.
  *      this functionality is controlled by the InterruptThrottleRate module
  *      parameter (see e1000_param.c)
- * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
  **/
 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
                                     u16 itr_setting, int packets,
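[Editor's note: the relocated parameter descriptions above belong to the dynamic ITR logic: each interrupt interval the driver looks at how many packets and bytes moved and nudges the throttle rate toward low latency for small, sparse traffic and toward bulk throughput for large streams. A minimal sketch of that classification idea, with made-up thresholds rather than the ones e1000e actually uses:

enum itr_class { ITR_LOWEST_LATENCY, ITR_LOW_LATENCY, ITR_BULK };

/* Classify one measurement interval from its packet/byte counts.
 * Thresholds here are illustrative only. */
static enum itr_class classify_interval(int packets, int bytes)
{
        int avg = packets ? bytes / packets : 0;

        if (!packets)
                return ITR_LOW_LATENCY;          /* idle: stay moderate */
        if (packets < 5 && avg < 512)
                return ITR_LOWEST_LATENCY;       /* few small frames: favour latency */
        if (bytes > 48000 || avg > 1200)
                return ITR_BULK;                 /* heavy streaming: favour throughput */
        return ITR_LOW_LATENCY;
}
]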
@@ -1366,6 +1367,7 @@ set_itr_now:
 /**
  * e1000_clean - NAPI Rx polling callback
  * @adapter: board private structure
+ * @budget: amount of packets driver is allowed to process this poll
  **/
 static int e1000_clean(struct napi_struct *napi, int budget)
 {
@@ -2000,7 +2002,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
            e1000_check_reset_block(hw))
                return;
 
-       /* managebility (AMT) is enabled */
+       /* manageability (AMT) is enabled */
        if (er32(MANC) & E1000_MANC_SMBUS_EN)
                return;
 
@@ -3488,7 +3490,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 static void e1000e_disable_l1aspm(struct pci_dev *pdev)
 {
        int pos;
-       u32 cap;
        u16 val;
 
        /*
@@ -3503,7 +3504,6 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev)
         * active.
         */
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-       pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &cap);
        pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
        if (val & 0x2) {
                dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
index fc6fee112f1c191231f64e23a98dd9ce19d7f574..dab3c468a768c9a2093264886fb58fa81dea9eaf 100644 (file)
@@ -121,7 +121,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
  *  @offset: register offset to be read
  *  @data: pointer to the read data
  *
- *  Reads the MDI control regsiter in the PHY at offset and stores the
+ *  Reads the MDI control register in the PHY at offset and stores the
  *  information read to data.
  **/
 static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
@@ -1172,7 +1172,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 }
 
 /**
- *  e1000e_check_downshift - Checks whether a downshift in speed occured
+ *  e1000e_check_downshift - Checks whether a downshift in speed occurred
  *  @hw: pointer to the HW structure
  *
  *  Success returns 0, Failure returns 1
@@ -1388,8 +1388,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
  *
  *  The automatic gain control (agc) normalizes the amplitude of the
  *  received signal, adjusting for the attenuation produced by the
- *  cable.  By reading the AGC registers, which reperesent the
- *  cobination of course and fine gain value, the value can be put
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of course and fine gain value, the value can be put
  *  into a lookup table to obtain the approximate cable length
  *  for each channel.
  **/
@@ -1619,7 +1619,7 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
  *  Verify the reset block is not blocking us from resetting.  Acquire
  *  semaphore (if necessary) and read/set/write the device control reset
  *  bit in the PHY.  Wait the appropriate delay time for the device to
- *  reset and relase the semaphore (if necessary).
+ *  reset and release the semaphore (if necessary).
  **/
 s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
 {
index 88fb53eba715368e9685ff5dbd6db275bc826626..7c4ead35cfa23824227ce4dd1449dee671628b5c 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "ehea"
-#define DRV_VERSION    "EHEA_0083"
+#define DRV_VERSION    "EHEA_0087"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -386,6 +386,13 @@ struct ehea_port_res {
 
 
 #define EHEA_MAX_PORTS 16
+
+#define EHEA_NUM_PORTRES_FW_HANDLES    6  /* QP handle, SendCQ handle,
+                                            RecvCQ handle, EQ handle,
+                                            SendMR handle, RecvMR handle */
+#define EHEA_NUM_PORT_FW_HANDLES       1  /* EQ handle */
+#define EHEA_NUM_ADAPTER_FW_HANDLES    2  /* MR handle, NEQ handle */
+
 struct ehea_adapter {
        u64 handle;
        struct of_device *ofdev;
@@ -405,6 +412,31 @@ struct ehea_mc_list {
        u64 macaddr;
 };
 
+/* kdump support */
+struct ehea_fw_handle_entry {
+       u64 adh;               /* Adapter Handle */
+       u64 fwh;               /* Firmware Handle */
+};
+
+struct ehea_fw_handle_array {
+       struct ehea_fw_handle_entry *arr;
+       int num_entries;
+       struct semaphore lock;
+};
+
+struct ehea_bcmc_reg_entry {
+       u64 adh;               /* Adapter Handle */
+       u32 port_id;           /* Logical Port Id */
+       u8 reg_type;           /* Registration Type */
+       u64 macaddr;
+};
+
+struct ehea_bcmc_reg_array {
+       struct ehea_bcmc_reg_entry *arr;
+       int num_entries;
+       struct semaphore lock;
+};
+
 #define EHEA_PORT_UP 1
 #define EHEA_PORT_DOWN 0
 #define EHEA_PHY_LINK_UP 1
index c051c7e09b9a0b600f4202e5febba3b5ee0f7723..21af674b764e402e4d0f6ceed6d6df54278d0f15 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/if_ether.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
+#include <asm/kexec.h>
 
 #include <net/ip.h>
 
@@ -98,8 +99,10 @@ static int port_name_cnt;
 static LIST_HEAD(adapter_list);
 u64 ehea_driver_flags;
 struct work_struct ehea_rereg_mr_task;
-
 struct semaphore dlpar_mem_lock;
+struct ehea_fw_handle_array ehea_fw_handles;
+struct ehea_bcmc_reg_array ehea_bcmc_regs;
+
 
 static int __devinit ehea_probe_adapter(struct of_device *dev,
                                        const struct of_device_id *id);
@@ -132,6 +135,160 @@ void ehea_dump(void *adr, int len, char *msg)
        }
 }
 
+static void ehea_update_firmware_handles(void)
+{
+       struct ehea_fw_handle_entry *arr = NULL;
+       struct ehea_adapter *adapter;
+       int num_adapters = 0;
+       int num_ports = 0;
+       int num_portres = 0;
+       int i = 0;
+       int num_fw_handles, k, l;
+
+       /* Determine number of handles */
+       list_for_each_entry(adapter, &adapter_list, list) {
+               num_adapters++;
+
+               for (k = 0; k < EHEA_MAX_PORTS; k++) {
+                       struct ehea_port *port = adapter->port[k];
+
+                       if (!port || (port->state != EHEA_PORT_UP))
+                               continue;
+
+                       num_ports++;
+                       num_portres += port->num_def_qps + port->num_add_tx_qps;
+               }
+       }
+
+       num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
+                        num_ports * EHEA_NUM_PORT_FW_HANDLES +
+                        num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
+
+       if (num_fw_handles) {
+               arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
+               if (!arr)
+                       return;  /* Keep the existing array */
+       } else
+               goto out_update;
+
+       list_for_each_entry(adapter, &adapter_list, list) {
+               for (k = 0; k < EHEA_MAX_PORTS; k++) {
+                       struct ehea_port *port = adapter->port[k];
+
+                       if (!port || (port->state != EHEA_PORT_UP))
+                               continue;
+
+                       for (l = 0;
+                            l < port->num_def_qps + port->num_add_tx_qps;
+                            l++) {
+                               struct ehea_port_res *pr = &port->port_res[l];
+
+                               arr[i].adh = adapter->handle;
+                               arr[i++].fwh = pr->qp->fw_handle;
+                               arr[i].adh = adapter->handle;
+                               arr[i++].fwh = pr->send_cq->fw_handle;
+                               arr[i].adh = adapter->handle;
+                               arr[i++].fwh = pr->recv_cq->fw_handle;
+                               arr[i].adh = adapter->handle;
+                               arr[i++].fwh = pr->eq->fw_handle;
+                               arr[i].adh = adapter->handle;
+                               arr[i++].fwh = pr->send_mr.handle;
+                               arr[i].adh = adapter->handle;
+                               arr[i++].fwh = pr->recv_mr.handle;
+                       }
+                       arr[i].adh = adapter->handle;
+                       arr[i++].fwh = port->qp_eq->fw_handle;
+               }
+
+               arr[i].adh = adapter->handle;
+               arr[i++].fwh = adapter->neq->fw_handle;
+
+               if (adapter->mr.handle) {
+                       arr[i].adh = adapter->handle;
+                       arr[i++].fwh = adapter->mr.handle;
+               }
+       }
+
+out_update:
+       kfree(ehea_fw_handles.arr);
+       ehea_fw_handles.arr = arr;
+       ehea_fw_handles.num_entries = i;
+}
+
+static void ehea_update_bcmc_registrations(void)
+{
+       struct ehea_bcmc_reg_entry *arr = NULL;
+       struct ehea_adapter *adapter;
+       struct ehea_mc_list *mc_entry;
+       int num_registrations = 0;
+       int i = 0;
+       int k;
+
+       /* Determine number of registrations */
+       list_for_each_entry(adapter, &adapter_list, list)
+               for (k = 0; k < EHEA_MAX_PORTS; k++) {
+                       struct ehea_port *port = adapter->port[k];
+
+                       if (!port || (port->state != EHEA_PORT_UP))
+                               continue;
+
+                       num_registrations += 2; /* Broadcast registrations */
+
+                       list_for_each_entry(mc_entry, &port->mc_list->list,list)
+                               num_registrations += 2;
+               }
+
+       if (num_registrations) {
+               arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL);
+               if (!arr)
+                       return;  /* Keep the existing array */
+       } else
+               goto out_update;
+
+       list_for_each_entry(adapter, &adapter_list, list) {
+               for (k = 0; k < EHEA_MAX_PORTS; k++) {
+                       struct ehea_port *port = adapter->port[k];
+
+                       if (!port || (port->state != EHEA_PORT_UP))
+                               continue;
+
+                       arr[i].adh = adapter->handle;
+                       arr[i].port_id = port->logical_port_id;
+                       arr[i].reg_type = EHEA_BCMC_BROADCAST |
+                                         EHEA_BCMC_UNTAGGED;
+                       arr[i++].macaddr = port->mac_addr;
+
+                       arr[i].adh = adapter->handle;
+                       arr[i].port_id = port->logical_port_id;
+                       arr[i].reg_type = EHEA_BCMC_BROADCAST |
+                                         EHEA_BCMC_VLANID_ALL;
+                       arr[i++].macaddr = port->mac_addr;
+
+                       list_for_each_entry(mc_entry,
+                                           &port->mc_list->list, list) {
+                               arr[i].adh = adapter->handle;
+                               arr[i].port_id = port->logical_port_id;
+                               arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
+                                                 EHEA_BCMC_MULTICAST |
+                                                 EHEA_BCMC_UNTAGGED;
+                               arr[i++].macaddr = mc_entry->macaddr;
+
+                               arr[i].adh = adapter->handle;
+                               arr[i].port_id = port->logical_port_id;
+                               arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
+                                                 EHEA_BCMC_MULTICAST |
+                                                 EHEA_BCMC_VLANID_ALL;
+                               arr[i++].macaddr = mc_entry->macaddr;
+                       }
+               }
+       }
+
+out_update:
+       kfree(ehea_bcmc_regs.arr);
+       ehea_bcmc_regs.arr = arr;
+       ehea_bcmc_regs.num_entries = i;
+}
+
 static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
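[Editor's note: the sizing arithmetic in ehea_update_firmware_handles above follows directly from the EHEA_NUM_*_FW_HANDLES constants: for a hypothetical box with one adapter and two ports up, each using three port resources, it allocates 1 * 2 + 2 * 1 + (2 * 3) * 6 = 40 entries, and the whole array is rebuilt from scratch on every topology change rather than being patched in place.]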
@@ -1601,19 +1758,25 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
        memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
 
+       down(&ehea_bcmc_regs.lock);
+
        /* Deregister old MAC in pHYP */
        ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
        if (ret)
-               goto out_free;
+               goto out_upregs;
 
        port->mac_addr = cb0->port_mac_addr << 16;
 
        /* Register new MAC in pHYP */
        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
        if (ret)
-               goto out_free;
+               goto out_upregs;
 
        ret = 0;
+
+out_upregs:
+       ehea_update_bcmc_registrations();
+       up(&ehea_bcmc_regs.lock);
 out_free:
        kfree(cb0);
 out:
@@ -1775,9 +1938,11 @@ static void ehea_set_multicast_list(struct net_device *dev)
        }
        ehea_promiscuous(dev, 0);
 
+       down(&ehea_bcmc_regs.lock);
+
        if (dev->flags & IFF_ALLMULTI) {
                ehea_allmulti(dev, 1);
-               return;
+               goto out;
        }
        ehea_allmulti(dev, 0);
 
@@ -1803,6 +1968,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
 
        }
 out:
+       ehea_update_bcmc_registrations();
+       up(&ehea_bcmc_regs.lock);
        return;
 }
 
@@ -2285,6 +2452,8 @@ static int ehea_up(struct net_device *dev)
        if (port->state == EHEA_PORT_UP)
                return 0;
 
+       down(&ehea_fw_handles.lock);
+
        ret = ehea_port_res_setup(port, port->num_def_qps,
                                  port->num_add_tx_qps);
        if (ret) {
@@ -2321,8 +2490,17 @@ static int ehea_up(struct net_device *dev)
                }
        }
 
-       ret = 0;
+       down(&ehea_bcmc_regs.lock);
+
+       ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+       if (ret) {
+               ret = -EIO;
+               goto out_free_irqs;
+       }
+
        port->state = EHEA_PORT_UP;
+
+       ret = 0;
        goto out;
 
 out_free_irqs:
@@ -2334,6 +2512,12 @@ out:
        if (ret)
                ehea_info("Failed starting %s. ret=%i", dev->name, ret);
 
+       ehea_update_bcmc_registrations();
+       up(&ehea_bcmc_regs.lock);
+
+       ehea_update_firmware_handles();
+       up(&ehea_fw_handles.lock);
+
        return ret;
 }
 
@@ -2382,16 +2566,27 @@ static int ehea_down(struct net_device *dev)
        if (port->state == EHEA_PORT_DOWN)
                return 0;
 
+       down(&ehea_bcmc_regs.lock);
        ehea_drop_multicast_list(dev);
+       ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
        ehea_free_interrupts(dev);
 
+       down(&ehea_fw_handles.lock);
+
        port->state = EHEA_PORT_DOWN;
 
+       ehea_update_bcmc_registrations();
+       up(&ehea_bcmc_regs.lock);
+
        ret = ehea_clean_all_portres(port);
        if (ret)
                ehea_info("Failed freeing resources for %s. ret=%i",
                          dev->name, ret);
 
+       ehea_update_firmware_handles();
+       up(&ehea_fw_handles.lock);
+
        return ret;
 }
 
@@ -2920,19 +3115,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
        INIT_WORK(&port->reset_task, ehea_reset_port);
-
-       ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
-       if (ret) {
-               ret = -EIO;
-               goto out_unreg_port;
-       }
-
        ehea_set_ethtool_ops(dev);
 
        ret = register_netdev(dev);
        if (ret) {
                ehea_error("register_netdev failed. ret=%d", ret);
-               goto out_dereg_bc;
+               goto out_unreg_port;
        }
 
        port->lro_max_aggr = lro_max_aggr;
@@ -2949,9 +3137,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
        return port;
 
-out_dereg_bc:
-       ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-
 out_unreg_port:
        ehea_unregister_port(port);
 
@@ -2971,7 +3156,6 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
 {
        unregister_netdev(port->netdev);
        ehea_unregister_port(port);
-       ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
        kfree(port->mc_list);
        free_netdev(port->netdev);
        port->adapter->active_ports--;
@@ -3014,7 +3198,6 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 
                i++;
        };
-
        return 0;
 }
 
@@ -3159,6 +3342,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
                ehea_error("Invalid ibmebus device probed");
                return -EINVAL;
        }
+       down(&ehea_fw_handles.lock);
 
        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
@@ -3239,7 +3423,10 @@ out_kill_eq:
 
 out_free_ad:
        kfree(adapter);
+
 out:
+       ehea_update_firmware_handles();
+       up(&ehea_fw_handles.lock);
        return ret;
 }
 
@@ -3258,18 +3445,41 @@ static int __devexit ehea_remove(struct of_device *dev)
 
        flush_scheduled_work();
 
+       down(&ehea_fw_handles.lock);
+
        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
        tasklet_kill(&adapter->neq_tasklet);
 
        ehea_destroy_eq(adapter->neq);
        ehea_remove_adapter_mr(adapter);
        list_del(&adapter->list);
-
        kfree(adapter);
 
+       ehea_update_firmware_handles();
+       up(&ehea_fw_handles.lock);
+
        return 0;
 }
 
+void ehea_crash_handler(void)
+{
+       int i;
+
+       if (ehea_fw_handles.arr)
+               for (i = 0; i < ehea_fw_handles.num_entries; i++)
+                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+                                            ehea_fw_handles.arr[i].fwh,
+                                            FORCE_FREE);
+
+       if (ehea_bcmc_regs.arr)
+               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+                                             ehea_bcmc_regs.arr[i].port_id,
+                                             ehea_bcmc_regs.arr[i].reg_type,
+                                             ehea_bcmc_regs.arr[i].macaddr,
+                                             0, H_DEREG_BCMC);
+}
+
 static int ehea_reboot_notifier(struct notifier_block *nb,
                                unsigned long action, void *unused)
 {
@@ -3330,7 +3540,12 @@ int __init ehea_module_init(void)
 
 
        INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+       memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
+       memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
+
        sema_init(&dlpar_mem_lock, 1);
+       sema_init(&ehea_fw_handles.lock, 1);
+       sema_init(&ehea_bcmc_regs.lock, 1);
 
        ret = check_module_parm();
        if (ret)
@@ -3340,12 +3555,18 @@ int __init ehea_module_init(void)
        if (ret)
                goto out;
 
-       register_reboot_notifier(&ehea_reboot_nb);
+       ret = register_reboot_notifier(&ehea_reboot_nb);
+       if (ret)
+               ehea_info("failed registering reboot notifier");
+
+       ret = crash_shutdown_register(&ehea_crash_handler);
+       if (ret)
+               ehea_info("failed registering crash handler");
 
        ret = ibmebus_register_driver(&ehea_driver);
        if (ret) {
                ehea_error("failed registering eHEA device driver on ebus");
-               goto out;
+               goto out2;
        }
 
        ret = driver_create_file(&ehea_driver.driver,
@@ -3353,21 +3574,33 @@ int __init ehea_module_init(void)
        if (ret) {
                ehea_error("failed to register capabilities attribute, ret=%d",
                           ret);
-               unregister_reboot_notifier(&ehea_reboot_nb);
-               ibmebus_unregister_driver(&ehea_driver);
-               goto out;
+               goto out3;
        }
 
+       return ret;
+
+out3:
+       ibmebus_unregister_driver(&ehea_driver);
+out2:
+       unregister_reboot_notifier(&ehea_reboot_nb);
+       crash_shutdown_unregister(&ehea_crash_handler);
 out:
        return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
+       int ret;
+
        flush_scheduled_work();
        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
        unregister_reboot_notifier(&ehea_reboot_nb);
+       ret = crash_shutdown_unregister(&ehea_crash_handler);
+       if (ret)
+               ehea_info("failed unregistering crash handler");
+       kfree(ehea_fw_handles.arr);
+       kfree(ehea_bcmc_regs.arr);
        ehea_destroy_busmap();
 }
 
index 0fbf1bbbaee9972a56ab6f4d13eb31f00ed40c2c..d7a3ea88eddb562be4f632387d30776da84f4ef7 100644 (file)
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
 
        /* Setup interrupt handlers. */
        for (idp = id; idp->name; idp++) {
-               if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0)
+               if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0)
                        printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq);
        }
 
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
 
        /* Setup interrupt handlers. */
        for (idp = id; idp->name; idp++) {
-               if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0)
+               if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0)
                        printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
        }
 
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
 
        /* Setup interrupt handlers. */
        for (idp = id; idp->name; idp++) {
-               if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
+               if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0)
                        printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
        }
 
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
 
        /* Setup interrupt handlers. */
        for (idp = id; idp->name; idp++) {
-               if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
+               if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0)
                        printk("FEC: Could not allocate %s IRQ(%d)!\n",
                                idp->name, b+idp->irq);
        }
index 42d94edeee26f4106170a0385e6767a10fcf99f7..af869cf9ae7d7278fdae1b717431c0f7447476b3 100644 (file)
@@ -946,16 +946,11 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct fs_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
-       unsigned long flags;
-       int rc;
 
        if (!netif_running(dev))
                return -EINVAL;
 
-       spin_lock_irqsave(&fep->lock, flags);
-       rc = phy_mii_ioctl(fep->phydev, mii, cmd);
-       spin_unlock_irqrestore(&fep->lock, flags);
-       return rc;
+       return phy_mii_ioctl(fep->phydev, mii, cmd);
 }
 
 extern int fs_mii_connect(struct net_device *dev);
index 4244fc282f2128143425bfdc64ee8e5e13d153f1..718cf77e345ad0b4e288a2750c5f96f21f14f75e 100644 (file)
@@ -605,7 +605,7 @@ void stop_gfar(struct net_device *dev)
 
        free_skb_resources(priv);
 
-       dma_free_coherent(NULL,
+       dma_free_coherent(&dev->dev,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
@@ -626,7 +626,7 @@ static void free_skb_resources(struct gfar_private *priv)
        for (i = 0; i < priv->tx_ring_size; i++) {
 
                if (priv->tx_skbuff[i]) {
-                       dma_unmap_single(NULL, txbdp->bufPtr,
+                       dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
                                        txbdp->length,
                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
@@ -643,7 +643,7 @@ static void free_skb_resources(struct gfar_private *priv)
        if(priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
-                               dma_unmap_single(NULL, rxbdp->bufPtr,
+                               dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);
 
@@ -708,7 +708,7 @@ int startup_gfar(struct net_device *dev)
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
        /* Allocate memory for the buffer descriptors */
-       vaddr = (unsigned long) dma_alloc_coherent(NULL,
+       vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);
@@ -919,7 +919,7 @@ err_irq_fail:
 rx_skb_fail:
        free_skb_resources(priv);
 tx_skb_fail:
-       dma_free_coherent(NULL,
+       dma_free_coherent(&dev->dev,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
@@ -1053,7 +1053,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* Set buffer length and pointer */
        txbdp->length = skb->len;
-       txbdp->bufPtr = dma_map_single(NULL, skb->data,
+       txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
                        skb->len, DMA_TO_DEVICE);
 
        /* Save the skb pointer so we can free it later */
@@ -1332,7 +1332,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
         */
        skb_reserve(skb, alignamount);
 
-       bdp->bufPtr = dma_map_single(NULL, skb->data,
+       bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);
 
        bdp->length = 0;
index bff280eff5e335ec563c732c7b708b567fc60699..6a1f23092099b5d3f0d974b7ea370a9a73c63b31 100644 (file)
@@ -439,7 +439,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
                err = igb_request_msix(adapter);
                if (!err) {
                        /* enable IAM, auto-mask,
-                        * DO NOT USE EIAME or IAME in legacy mode */
+                        * DO NOT USE EIAM or IAM in legacy mode */
                        wr32(E1000_IAM, IMS_ENABLE_MASK);
                        goto request_done;
                }
@@ -465,14 +465,9 @@ static int igb_request_irq(struct igb_adapter *adapter)
        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
                          netdev->name, netdev);
 
-       if (err) {
+       if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);
-               goto request_done;
-       }
-
-       /* enable IAM, auto-mask */
-       wr32(E1000_IAM, IMS_ENABLE_MASK);
 
 request_done:
        return err;
@@ -821,7 +816,8 @@ void igb_reset(struct igb_adapter *adapter)
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
        igb_reset_adaptive(&adapter->hw);
-       adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+       if (adapter->hw.phy.ops.get_phy_info)
+               adapter->hw.phy.ops.get_phy_info(&adapter->hw);
 }
 
 /**
@@ -2057,7 +2053,8 @@ static void igb_set_multi(struct net_device *netdev)
 static void igb_update_phy_info(unsigned long data)
 {
        struct igb_adapter *adapter = (struct igb_adapter *) data;
-       adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+       if (adapter->hw.phy.ops.get_phy_info)
+               adapter->hw.phy.ops.get_phy_info(&adapter->hw);
 }
 
 /**
index 53a9fd086f960cdacd4704ee1d2d68fe0d5d181c..75f3a68ee354ef23fc33b0848fe5c3bb36e5a64b 100644 (file)
@@ -67,6 +67,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
        {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)},
        {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)},
        {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)},
+       {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
        {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)},
        {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)},
        {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)},
index 81bf005ff280dd4b556c0c599a9670bc473c47bd..1d210ed4613096118b1d4e6035f357028e1860cc 100644 (file)
@@ -148,7 +148,7 @@ static void macb_handle_link_change(struct net_device *dev)
 
                        if (phydev->duplex)
                                reg |= MACB_BIT(FD);
-                       if (phydev->speed)
+                       if (phydev->speed == SPEED_100)
                                reg |= MACB_BIT(SPD);
 
                        macb_writel(bp, NCFGR, reg);
index 6323988dfa1d162423839f5805fda55de61aab00..fd8158a86f6490ce7dbaf389595b8e34a3d51821 100644 (file)
@@ -590,6 +590,13 @@ static int pcnet_config(struct pcmcia_device *link)
        dev->if_port = 0;
     }
 
+    if ((link->conf.ConfigBase == 0x03c0)
+       && (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+       printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
+       printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
+       goto failed;
+    }
+
     local_hw_info = get_hwinfo(link);
     if (local_hw_info == NULL)
        local_hw_info = get_prom(link);
@@ -1567,12 +1574,11 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145),
        PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230),
        PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
-/*     PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), conflict with axnet_cs */
+       PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
        PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
        PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
        PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
        PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
-/*     PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), conflict with axnet_cs */
        PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
        PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307),
        PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
index 6e9f619c491f74837a7f356943683094169e28bf..963630c65ca940d9dfc59ed57370c54dbfed7847 100644 (file)
@@ -49,13 +49,13 @@ int mdiobus_register(struct mii_bus *bus)
        int i;
        int err = 0;
 
-       mutex_init(&bus->mdio_lock);
-
        if (NULL == bus || NULL == bus->name ||
                        NULL == bus->read ||
                        NULL == bus->write)
                return -EINVAL;
 
+       mutex_init(&bus->mdio_lock);
+
        if (bus->reset)
                bus->reset(bus);
 
index e0b072d9fdb7caf571e6594af1f732068cba5dab..86e5dba079fed6d9a62bcd6d9e9e31fb40888999 100644 (file)
@@ -455,6 +455,7 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
                               skb_queue_len(&session->reorder_q));
                        __skb_unlink(skb, &session->reorder_q);
                        kfree_skb(skb);
+                       sock_put(session->sock);
                        continue;
                }
 
@@ -1110,6 +1111,8 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
        for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
 again:
                hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+                       struct sk_buff *skb;
+
                        session = hlist_entry(walk, struct pppol2tp_session, hlist);
 
                        sk = session->sock;
@@ -1138,7 +1141,10 @@ again:
                        /* Purge any queued data */
                        skb_queue_purge(&sk->sk_receive_queue);
                        skb_queue_purge(&sk->sk_write_queue);
-                       skb_queue_purge(&session->reorder_q);
+                       while ((skb = skb_dequeue(&session->reorder_q))) {
+                               kfree_skb(skb);
+                               sock_put(sk);
+                       }
 
                        release_sock(sk);
                        sock_put(sk);
index 750d2a99cb4fd2ca4591091b8645ded4d8a1fc67..daf5abab9534a7e371a872252a0cdfa86c1b1b68 100644 (file)
@@ -2690,6 +2690,7 @@ int gelic_wl_driver_probe(struct gelic_card *card)
                return -ENOMEM;
 
        /* setup net_device structure */
+       SET_NETDEV_DEV(netdev, &card->dev->core);
        gelic_wl_setup_netdev_ops(netdev);
 
        /* setup some of net_device and register it */
index 202fdf35662165c681f18e7abb3cd3c936418394..20745fd4e9738e92050db6d70d9658a2b4e78c1a 100644 (file)
@@ -1633,13 +1633,18 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 
                                         struct net_device *dev)
 {
-       u8 from;
+       int rc;
+
+       rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
+       if (rc < 0) {
+               u8 reg;
 
-       pci_read_config_byte(pdev, 0x73, &from);
+               pci_read_config_byte(pdev, 0x73, &reg);
 
-       return (from & 0x00000001) ?
-               sis190_get_mac_addr_from_apc(pdev, dev) :
-               sis190_get_mac_addr_from_eeprom(pdev, dev);
+               if (reg & 0x00000001)
+                       rc = sis190_get_mac_addr_from_apc(pdev, dev);
+       }
+       return rc;
 }
 
 static void sis190_set_speed_auto(struct net_device *dev)
index 9a6295909e43c318d4fa4da46995d2121e362920..54c662690f65482b613954918ce499121a84ea61 100644 (file)
@@ -572,8 +572,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
        default:
                /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
                ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
+
                /* turn off the Rx LED (LED_RX) */
-               ledover &= ~PHY_M_LED_MO_RX;
+               ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
        }
 
        if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
@@ -602,7 +603,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
                if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
                        /* turn on 100 Mbps LED (LED_LINK100) */
-                       ledover |= PHY_M_LED_MO_100;
+                       ledover |= PHY_M_LED_MO_100(MO_LED_ON);
                }
 
                if (ledover)
@@ -3322,82 +3323,80 @@ static void sky2_set_multicast(struct net_device *dev)
 /* Can have one global because blinking is controlled by
  * ethtool and that is always under RTNL mutex
  */
-static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
+static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
 {
-       u16 pg;
+       struct sky2_hw *hw = sky2->hw;
+       unsigned port = sky2->port;
 
-       switch (hw->chip_id) {
-       case CHIP_ID_YUKON_XL:
+       spin_lock_bh(&sky2->phy_lock);
+       if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+           hw->chip_id == CHIP_ID_YUKON_EX ||
+           hw->chip_id == CHIP_ID_YUKON_SUPR) {
+               u16 pg;
                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
-               gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
-                            on ? (PHY_M_LEDC_LOS_CTRL(1) |
-                                  PHY_M_LEDC_INIT_CTRL(7) |
-                                  PHY_M_LEDC_STA1_CTRL(7) |
-                                  PHY_M_LEDC_STA0_CTRL(7))
-                            : 0);
 
-               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
-               break;
+               switch (mode) {
+               case MO_LED_OFF:
+                       gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+                                    PHY_M_LEDC_LOS_CTRL(8) |
+                                    PHY_M_LEDC_INIT_CTRL(8) |
+                                    PHY_M_LEDC_STA1_CTRL(8) |
+                                    PHY_M_LEDC_STA0_CTRL(8));
+                       break;
+               case MO_LED_ON:
+                       gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+                                    PHY_M_LEDC_LOS_CTRL(9) |
+                                    PHY_M_LEDC_INIT_CTRL(9) |
+                                    PHY_M_LEDC_STA1_CTRL(9) |
+                                    PHY_M_LEDC_STA0_CTRL(9));
+                       break;
+               case MO_LED_BLINK:
+                       gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+                                    PHY_M_LEDC_LOS_CTRL(0xa) |
+                                    PHY_M_LEDC_INIT_CTRL(0xa) |
+                                    PHY_M_LEDC_STA1_CTRL(0xa) |
+                                    PHY_M_LEDC_STA0_CTRL(0xa));
+                       break;
+               case MO_LED_NORM:
+                       gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+                                    PHY_M_LEDC_LOS_CTRL(1) |
+                                    PHY_M_LEDC_INIT_CTRL(8) |
+                                    PHY_M_LEDC_STA1_CTRL(7) |
+                                    PHY_M_LEDC_STA0_CTRL(7));
+               }
 
-       default:
-               gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+       } else
                gm_phy_write(hw, port, PHY_MARV_LED_OVER, 
-                            on ? PHY_M_LED_ALL : 0);
-       }
+                                    PHY_M_LED_MO_DUP(mode) |
+                                    PHY_M_LED_MO_10(mode) |
+                                    PHY_M_LED_MO_100(mode) |
+                                    PHY_M_LED_MO_1000(mode) |
+                                    PHY_M_LED_MO_RX(mode) |
+                                    PHY_M_LED_MO_TX(mode));
+
+       spin_unlock_bh(&sky2->phy_lock);
 }
 
 /* blink LED's for finding board */
 static int sky2_phys_id(struct net_device *dev, u32 data)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
-       struct sky2_hw *hw = sky2->hw;
-       unsigned port = sky2->port;
-       u16 ledctrl, ledover = 0;
-       long ms;
-       int interrupted;
-       int onoff = 1;
+       unsigned int i;
 
-       if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
-               ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
-       else
-               ms = data * 1000;
-
-       /* save initial values */
-       spin_lock_bh(&sky2->phy_lock);
-       if (hw->chip_id == CHIP_ID_YUKON_XL) {
-               u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
-               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
-               ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
-               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
-       } else {
-               ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
-               ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
-       }
-
-       interrupted = 0;
-       while (!interrupted && ms > 0) {
-               sky2_led(hw, port, onoff);
-               onoff = !onoff;
-
-               spin_unlock_bh(&sky2->phy_lock);
-               interrupted = msleep_interruptible(250);
-               spin_lock_bh(&sky2->phy_lock);
-
-               ms -= 250;
-       }
+       if (data == 0)
+               data = UINT_MAX;
 
-       /* resume regularly scheduled programming */
-       if (hw->chip_id == CHIP_ID_YUKON_XL) {
-               u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
-               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
-               gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
-               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
-       } else {
-               gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
-               gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+       for (i = 0; i < data; i++) {
+               sky2_led(sky2, MO_LED_ON);
+               if (msleep_interruptible(500))
+                       break;
+               sky2_led(sky2, MO_LED_OFF);
+               if (msleep_interruptible(500))
+                       break;
        }
-       spin_unlock_bh(&sky2->phy_lock);
+       sky2_led(sky2, MO_LED_NORM);
 
        return 0;
 }
index 5ab5c1c7c5aa76ed30d8c6850b09192afac5d6db..7bb3ba9bcbd8519ea58804dba861e1b6e2001b79 100644 (file)
@@ -1318,18 +1318,21 @@ enum {
        BLINK_670MS     = 4,/* 670 ms */
 };
 
-/**** PHY_MARV_LED_OVER    16 bit r/w LED control */
-enum {
-       PHY_M_LED_MO_DUP  = 3<<10,/* Bit 11..10:  Duplex */
-       PHY_M_LED_MO_10   = 3<<8, /* Bit  9.. 8:  Link 10 */
-       PHY_M_LED_MO_100  = 3<<6, /* Bit  7.. 6:  Link 100 */
-       PHY_M_LED_MO_1000 = 3<<4, /* Bit  5.. 4:  Link 1000 */
-       PHY_M_LED_MO_RX   = 3<<2, /* Bit  3.. 2:  Rx */
-       PHY_M_LED_MO_TX   = 3<<0, /* Bit  1.. 0:  Tx */
-
-       PHY_M_LED_ALL     = PHY_M_LED_MO_DUP | PHY_M_LED_MO_10 
-                           | PHY_M_LED_MO_100 | PHY_M_LED_MO_1000
-                           | PHY_M_LED_MO_RX,
+/*****  PHY_MARV_LED_OVER      16 bit r/w      Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x)  ((x)<<14)       /* Bit 15..14:  SGMII AN Timer */
+
+#define PHY_M_LED_MO_DUP(x)    ((x)<<10)       /* Bit 11..10:  Duplex */
+#define PHY_M_LED_MO_10(x)     ((x)<<8)        /* Bit  9.. 8:  Link 10 */
+#define PHY_M_LED_MO_100(x)    ((x)<<6)        /* Bit  7.. 6:  Link 100 */
+#define PHY_M_LED_MO_1000(x)   ((x)<<4)        /* Bit  5.. 4:  Link 1000 */
+#define PHY_M_LED_MO_RX(x)     ((x)<<2)        /* Bit  3.. 2:  Rx */
+#define PHY_M_LED_MO_TX(x)     ((x)<<0)        /* Bit  1.. 0:  Tx */
+
+enum led_mode {
+       MO_LED_NORM  = 0,
+       MO_LED_BLINK = 1,
+       MO_LED_OFF   = 2,
+       MO_LED_ON    = 3,
 };
 
 /*****  PHY_MARV_EXT_CTRL_2    16 bit r/w      Ext. PHY Specific Ctrl 2 *****/
index 3af5b92b48c8f1c18ba24b75df46e44ed04959e5..0166407d7061b417f9d4276dfea49b15043ee39a 100644 (file)
@@ -1400,7 +1400,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
         *
         **************************************************************/
 
-u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
 {
        /* printk( "TLAN:  Invalid interrupt on %s.\n", dev->name ); */
        return 0;
@@ -1432,7 +1432,7 @@ u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        int             eoc = 0;
@@ -1518,7 +1518,7 @@ u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
 {
        TLan_ReadAndClearStats( dev, TLAN_RECORD );
 
@@ -1554,7 +1554,7 @@ u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u32             ack = 0;
@@ -1689,7 +1689,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
 {
        printk( "TLAN:  Test interrupt on %s.\n", dev->name );
        return 1;
@@ -1719,7 +1719,7 @@ u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        TLanList                *head_list;
@@ -1767,7 +1767,7 @@ u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u32             ack;
@@ -1842,7 +1842,7 @@ u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        dma_addr_t      head_list_phys;
@@ -1902,7 +1902,7 @@ u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
         *
         **************************************************************/
 
-void TLan_Timer( unsigned long data )
+static void TLan_Timer( unsigned long data )
 {
        struct net_device       *dev = (struct net_device *) data;
        TLanPrivateInfo *priv = netdev_priv(dev);
@@ -1983,7 +1983,7 @@ void TLan_Timer( unsigned long data )
         *
         **************************************************************/
 
-void TLan_ResetLists( struct net_device *dev )
+static void TLan_ResetLists( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        int             i;
@@ -2043,7 +2043,7 @@ void TLan_ResetLists( struct net_device *dev )
 } /* TLan_ResetLists */
 
 
-void TLan_FreeLists( struct net_device *dev )
+static void TLan_FreeLists( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        int             i;
@@ -2092,7 +2092,7 @@ void TLan_FreeLists( struct net_device *dev )
         *
         **************************************************************/
 
-void TLan_PrintDio( u16 io_base )
+static void TLan_PrintDio( u16 io_base )
 {
        u32 data0, data1;
        int     i;
@@ -2127,7 +2127,7 @@ void TLan_PrintDio( u16 io_base )
         *
         **************************************************************/
 
-void TLan_PrintList( TLanList *list, char *type, int num)
+static void TLan_PrintList( TLanList *list, char *type, int num)
 {
        int i;
 
@@ -2163,7 +2163,7 @@ void TLan_PrintList( TLanList *list, char *type, int num)
         *
         **************************************************************/
 
-void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void TLan_ReadAndClearStats( struct net_device *dev, int record )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u32             tx_good, tx_under;
@@ -2238,7 +2238,7 @@ void TLan_ReadAndClearStats( struct net_device *dev, int record )
         *
         **************************************************************/
 
-void
+static void
 TLan_ResetAdapter( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
@@ -2324,7 +2324,7 @@ TLan_ResetAdapter( struct net_device *dev )
 
 
 
-void
+static void
 TLan_FinishReset( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
@@ -2448,7 +2448,7 @@ TLan_FinishReset( struct net_device *dev )
         *
         **************************************************************/
 
-void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
 {
        int i;
 
@@ -2490,7 +2490,7 @@ void TLan_SetMac( struct net_device *dev, int areg, char *mac )
         *
         ********************************************************************/
 
-void TLan_PhyPrint( struct net_device *dev )
+static void TLan_PhyPrint( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16 i, data0, data1, data2, data3, phy;
@@ -2539,7 +2539,7 @@ void TLan_PhyPrint( struct net_device *dev )
         *
         ********************************************************************/
 
-void TLan_PhyDetect( struct net_device *dev )
+static void TLan_PhyDetect( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16             control;
@@ -2586,7 +2586,7 @@ void TLan_PhyDetect( struct net_device *dev )
 
 
 
-void TLan_PhyPowerDown( struct net_device *dev )
+static void TLan_PhyPowerDown( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16             value;
@@ -2611,7 +2611,7 @@ void TLan_PhyPowerDown( struct net_device *dev )
 
 
 
-void TLan_PhyPowerUp( struct net_device *dev )
+static void TLan_PhyPowerUp( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16             value;
@@ -2632,7 +2632,7 @@ void TLan_PhyPowerUp( struct net_device *dev )
 
 
 
-void TLan_PhyReset( struct net_device *dev )
+static void TLan_PhyReset( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16             phy;
@@ -2660,7 +2660,7 @@ void TLan_PhyReset( struct net_device *dev )
 
 
 
-void TLan_PhyStartLink( struct net_device *dev )
+static void TLan_PhyStartLink( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16             ability;
@@ -2747,7 +2747,7 @@ void TLan_PhyStartLink( struct net_device *dev )
 
 
 
-void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void TLan_PhyFinishAutoNeg( struct net_device *dev )
 {
        TLanPrivateInfo *priv = netdev_priv(dev);
        u16             an_adv;
@@ -2903,7 +2903,7 @@ void TLan_PhyMonitor( struct net_device *dev )
         *
         **************************************************************/
 
-int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
 {
        u8      nack;
        u16     sio, tmp;
@@ -2993,7 +2993,7 @@ int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
         *
         **************************************************************/
 
-void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
 {
        u16 sio;
        u32 i;
@@ -3035,7 +3035,7 @@ void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
         *
         **************************************************************/
 
-void TLan_MiiSync( u16 base_port )
+static void TLan_MiiSync( u16 base_port )
 {
        int i;
        u16 sio;
@@ -3074,7 +3074,7 @@ void TLan_MiiSync( u16 base_port )
         *
         **************************************************************/
 
-void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
 {
        u16     sio;
        int     minten;
@@ -3144,7 +3144,7 @@ void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
         *
         **************************************************************/
 
-void TLan_EeSendStart( u16 io_base )
+static void TLan_EeSendStart( u16 io_base )
 {
        u16     sio;
 
@@ -3184,7 +3184,7 @@ void TLan_EeSendStart( u16 io_base )
         *
         **************************************************************/
 
-int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
 {
        int     err;
        u8      place;
@@ -3245,7 +3245,7 @@ int TLan_EeSendByte( u16 io_base, u8 data, int stop )
         *
         **************************************************************/
 
-void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
 {
        u8  place;
        u16 sio;
@@ -3303,7 +3303,7 @@ void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
         *
         **************************************************************/
 
-int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
 {
        int err;
        TLanPrivateInfo *priv = netdev_priv(dev);
index a7afeea156bd5f80229b6b8a55b3952f3c764299..a59c1f224aa8ef230f8e42f9aa8200373417b70f 100644 (file)
@@ -482,9 +482,11 @@ static void uli526x_init(struct net_device *dev)
        struct uli526x_board_info *db = netdev_priv(dev);
        unsigned long ioaddr = db->ioaddr;
        u8      phy_tmp;
+       u8      timeout;
        u16     phy_value;
        u16 phy_reg_reset;
 
+
        ULI526X_DBUG(0, "uli526x_init()", 0);
 
        /* Reset M526x MAC controller */
@@ -509,11 +511,19 @@ static void uli526x_init(struct net_device *dev)
        /* Parser SROM and media mode */
        db->media_mode = uli526x_media_mode;
 
-       /* Phyxcer capability setting */
+       /* phyxcer capability setting */
        phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
        phy_reg_reset = (phy_reg_reset | 0x8000);
        phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+
+       /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
+        * functions") or phy data sheet for details on phy reset
+        */
        udelay(500);
+       timeout = 10;
+       while (timeout-- &&
+               phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
+                       udelay(100);
 
        /* Process Phyxcer Media Mode */
        uli526x_set_phyxcer(db);
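
The uli526x hunk above replaces a bare 500 us delay after the PHY soft reset with a bounded poll: per IEEE 802.3 clause 22, bit 15 of the MII control register (BMCR) reads back as 1 until the reset has completed and then self-clears, so the driver now retries up to 10 times at 100 us intervals before moving on. A minimal standalone sketch of that poll-until-clear pattern follows; the accessor and the simulated register are stand-ins for the sketch, not the uli526x helpers.

#include <stdio.h>

#define BMCR_RESET 0x8000u              /* bit 15: self-clearing soft reset (IEEE 802.3 clause 22) */

static unsigned int fake_bmcr = BMCR_RESET;     /* simulated register for the sketch */

static unsigned int mii_read_bmcr(void)
{
        unsigned int v = fake_bmcr;
        fake_bmcr &= ~BMCR_RESET;       /* pretend the reset finishes after one poll */
        return v;
}

int main(void)
{
        int timeout = 10;

        /* real code writes the reset bit, waits ~500 us, then polls;
         * each retry would sleep ~100 us instead of spinning */
        while (timeout-- && (mii_read_bmcr() & BMCR_RESET))
                ;

        printf("%s\n", timeout < 0 ? "PHY reset timed out" : "PHY reset done");
        return 0;
}
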
index 038c1ef94d2e72e7332a4e28a86cbd82be14788a..7b816a032957b61d692b897c33517bc3f7ad81a3 100644 (file)
@@ -663,7 +663,11 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
        case SIOCSIFHWADDR:
        {
                /* try to set the actual net device's hw address */
-               int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+               int ret;
+
+               rtnl_lock();
+               ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+               rtnl_unlock();
 
                if (ret == 0) {
                        /** Set the character device's hardware address. This is used when
index 7c851b1e6daa4ba99e4d107213fca4abb34c76b4..8c9d6ae2bb31dae8eab737ae1105649aecde7336 100644 (file)
@@ -1893,7 +1893,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
 
        /* Make sure we use pattern 0, 1 and not 4, 5 */
        if (rp->quirks & rq6patterns)
-               iowrite8(0x04, ioaddr + 0xA7);
+               iowrite8(0x04, ioaddr + WOLcgClr);
 
        if (rp->wolopts & WAKE_MAGIC) {
                iowrite8(WOLmagic, ioaddr + WOLcrSet);
index fdc23678117bdd31157a13c6d428d2c1951dc778..19fd4cb0ddf8583cbd9235b6085a7ad1100dfa19 100644 (file)
@@ -361,6 +361,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
+       vdev->priv = vi;
 
        /* We expect two virtqueues, receive then send. */
        vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
@@ -395,7 +396,6 @@ static int virtnet_probe(struct virtio_device *vdev)
        }
 
        pr_debug("virtnet: registered device %s\n", dev->name);
-       vdev->priv = vi;
        return 0;
 
 unregister:
index 1a2141dabdc7a533cb9382773b674ddbe7a270bd..8bc4bc4c330e158fbdb880500fb6c144f02b55d5 100644 (file)
@@ -32,6 +32,7 @@ config B43_PCI_AUTOSELECT
        bool
        depends on B43 && SSB_PCIHOST_POSSIBLE
        select SSB_PCIHOST
+       select SSB_B43_PCI_BRIDGE
        default y
 
 # Auto-select SSB PCICORE driver, if possible
index 6745579ba96dc74ab813cb9409f01d6b0e701e25..13c65faf024727eb1caad88642f2f89d94a40b15 100644 (file)
@@ -25,6 +25,7 @@ config B43LEGACY_PCI_AUTOSELECT
        bool
        depends on B43LEGACY && SSB_PCIHOST_POSSIBLE
        select SSB_PCIHOST
+       select SSB_B43_PCI_BRIDGE
        default y
 
 # Auto-select SSB PCICORE driver, if possible
index c39de422e220d85939e44b3a15eed8478e9f0881..5f3f34e1dbfdc140f426c4660258ba54029780fd 100644 (file)
@@ -3829,7 +3829,7 @@ static void b43legacy_print_driverinfo(void)
 #ifdef CONFIG_B43LEGACY_DMA
        feat_dma = "D";
 #endif
-       printk(KERN_INFO "Broadcom 43xx driver loaded "
+       printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
               "[ Features: %s%s%s%s%s, Firmware-ID: "
               B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
               feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
index 0159701e845638de2062859864c8b4b224642c9e..afb8f4305c244e784c0e2e412622038fc37de6a2 100644 (file)
@@ -1,6 +1,6 @@
 config BCM43XX
        tristate "Broadcom BCM43xx wireless support (DEPRECATED)"
-       depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL
+       depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && (!SSB_B43_PCI_BRIDGE || SSB != y) && EXPERIMENTAL
        select WIRELESS_EXT
        select FW_LOADER
        select HW_RANDOM
index eab020338fde2b345c1c795b88d68ac4c360364a..b3c1acbcc655103fe898157dcbb70078d267db6d 100644 (file)
@@ -1040,7 +1040,6 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
        lbs_deb_leave(LBS_DEB_CMD);
        return ret;
 }
-EXPORT_SYMBOL_GPL(lbs_mesh_access);
 
 int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan)
 {
@@ -1576,7 +1575,6 @@ done:
        lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
        return ret;
 }
-EXPORT_SYMBOL_GPL(lbs_prepare_and_send_command);
 
 /**
  *  @brief This function allocates the command buffer and link
index 159216a91903d14e5f5ec5df62321d302f72b504..bdc6a1cc21033f44f1c322e38d2c3a626ab31fc5 100644 (file)
@@ -562,9 +562,7 @@ int lbs_process_rx_command(struct lbs_private *priv)
        }
 
        resp = (void *)priv->upld_buf;
-
-       curcmd = le16_to_cpu(resp->command);
-
+       curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
        respcmd = le16_to_cpu(resp->command);
        result = le16_to_cpu(resp->result);
 
@@ -572,9 +570,9 @@ int lbs_process_rx_command(struct lbs_private *priv)
                     respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies);
        lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len);
 
-       if (resp->seqnum != resp->seqnum) {
+       if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
                lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n",
-                           le16_to_cpu(resp->seqnum), le16_to_cpu(resp->seqnum));
+                           le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum));
                spin_unlock_irqrestore(&priv->driver_lock, flags);
                ret = -1;
                goto done;
index aaacd9bd6bd2bf82d5d604488e04aa44c2b4f2a3..4e22341b4f3df6fc00a321e219530e91859879cd 100644 (file)
@@ -69,7 +69,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
 int lbs_remove_card(struct lbs_private *priv);
 int lbs_start_card(struct lbs_private *priv);
 int lbs_stop_card(struct lbs_private *priv);
-int lbs_reset_device(struct lbs_private *priv);
 void lbs_host_to_card_done(struct lbs_private *priv);
 
 int lbs_update_channel(struct lbs_private *priv);
index 84fb49ca0fae3cbf53713b5f586d82102de30ea9..4d4e2f3b66acbf8480722c5dfaafc70b93d491c5 100644 (file)
@@ -1351,8 +1351,6 @@ done:
        lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
        return ret;
 }
-EXPORT_SYMBOL_GPL(lbs_add_mesh);
-
 
 static void lbs_remove_mesh(struct lbs_private *priv)
 {
@@ -1372,7 +1370,6 @@ static void lbs_remove_mesh(struct lbs_private *priv)
        free_netdev(mesh_dev);
        lbs_deb_leave(LBS_DEB_MESH);
 }
-EXPORT_SYMBOL_GPL(lbs_remove_mesh);
 
 /**
  *  @brief This function finds the CFP in
@@ -1458,20 +1455,6 @@ void lbs_interrupt(struct lbs_private *priv)
 }
 EXPORT_SYMBOL_GPL(lbs_interrupt);
 
-int lbs_reset_device(struct lbs_private *priv)
-{
-       int ret;
-
-       lbs_deb_enter(LBS_DEB_MAIN);
-       ret = lbs_prepare_and_send_command(priv, CMD_802_11_RESET,
-                                   CMD_ACT_HALT, 0, 0, NULL);
-       msleep_interruptible(10);
-
-       lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_reset_device);
-
 static int __init lbs_init_module(void)
 {
        lbs_deb_enter(LBS_DEB_MAIN);
index 5cda49aff3a86a12b0bdfda94b3bab5de3d83344..d191e055a788210e4a2b6343f4815b1bf120180d 100644 (file)
@@ -166,18 +166,23 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
        struct p54_common *priv = dev->priv;
        struct eeprom_pda_wrap *wrap = NULL;
        struct pda_entry *entry;
-       int i = 0;
        unsigned int data_len, entry_len;
        void *tmp;
        int err;
+       u8 *end = (u8 *)eeprom + len;
 
        wrap = (struct eeprom_pda_wrap *) eeprom;
-       entry = (void *)wrap->data + wrap->len;
-       i += 2;
-       i += le16_to_cpu(entry->len)*2;
-       while (i < len) {
+       entry = (void *)wrap->data + le16_to_cpu(wrap->len);
+
+       /* verify that at least the entry length/code fits */
+       while ((u8 *)entry <= end - sizeof(*entry)) {
                entry_len = le16_to_cpu(entry->len);
                data_len = ((entry_len - 1) << 1);
+
+               /* abort if entry exceeds whole structure */
+               if ((u8 *)entry + sizeof(*entry) + data_len > end)
+                       break;
+
                switch (le16_to_cpu(entry->code)) {
                case PDR_MAC_ADDRESS:
                        SET_IEEE80211_PERM_ADDR(dev, entry->data);
@@ -249,13 +254,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
                        priv->version = *(u8 *)(entry->data + 1);
                        break;
                case PDR_END:
-                       i = len;
+                       /* make it overrun */
+                       entry_len = len;
                        break;
                }
 
                entry = (void *)entry + (entry_len + 1)*2;
-               i += 2;
-               i += entry_len*2;
        }
 
        if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) {
index a721334e20d96db17af5e4dd760fdaf90f00c4a0..b67ff34e26fec96dfc8de785fa448a4af3d8ad8c 100644 (file)
@@ -53,10 +53,10 @@ struct pda_entry {
 } __attribute__ ((packed));
 
 struct eeprom_pda_wrap {
-       u32 magic;
-       u16 pad;
-       u16 len;
-       u32 arm_opcode;
+       __le32 magic;
+       __le16 pad;
+       __le16 len;
+       __le32 arm_opcode;
        u8 data[0];
 } __attribute__ ((packed));
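
The p54_parse_eeprom() hunk above turns the old index-counting walk into a pointer walk that checks both the entry header and the entry payload against the end of the buffer before either is dereferenced. The following compile-and-run sketch shows the same bounds-checked TLV walk with simplified stand-in types; unlike the __le16 fields above, it does no byte-order handling and assumes native order.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct pda_entry_sketch {               /* simplified stand-in, native byte order */
        uint16_t len;                   /* length in 16-bit words, incl. the code word */
        uint16_t code;
        uint8_t  data[];
};

static void walk_pda(const uint8_t *buf, size_t buflen)
{
        const uint8_t *end = buf + buflen;
        const struct pda_entry_sketch *entry = (const void *)buf;

        /* the 4-byte entry header must fit before len/code are read */
        while ((const uint8_t *)entry + sizeof(*entry) <= end) {
                size_t entry_len = entry->len;
                size_t data_len, remaining;

                if (entry_len == 0)     /* len counts the code word, so it is >= 1 */
                        break;
                data_len = (entry_len - 1) * 2;

                /* the payload must not run past the end of the buffer */
                remaining = (size_t)(end - ((const uint8_t *)entry + sizeof(*entry)));
                if (data_len > remaining)
                        break;

                printf("entry code=0x%04x, %zu data bytes\n", entry->code, data_len);

                /* advance by (len + 1) 16-bit words: the length word plus len words */
                entry = (const void *)((const uint8_t *)entry + (entry_len + 1) * 2);
        }
}

int main(void)
{
        /* one entry with a 2-byte payload, then a minimal end-style entry */
        uint8_t buf[] = { 2, 0, 0x01, 0x00, 0xaa, 0xbb,
                          1, 0, 0x00, 0x00 };

        walk_pda(buf, sizeof(buf));
        return 0;
}
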
 
index 8ce2ddf8024f60f0c3c424b0dffe1d093b382e76..10b776c1adc5a2ec20034329d6f3fb07cd25a60e 100644 (file)
@@ -228,9 +228,9 @@ struct NDIS_WLAN_BSSID_EX {
        struct NDIS_802_11_SSID Ssid;
        __le32 Privacy;
        __le32 Rssi;
-       enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
+       __le32 NetworkTypeInUse;
        struct NDIS_802_11_CONFIGURATION Configuration;
-       enum NDIS_802_11_NETWORK_INFRASTRUCTURE InfrastructureMode;
+       __le32 InfrastructureMode;
        u8 SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
        __le32 IELength;
        u8 IEs[0];
@@ -260,7 +260,7 @@ struct NDIS_802_11_KEY {
        __le32 KeyLength;
        u8 Bssid[6];
        u8 Padding[6];
-       __le64 KeyRSC;
+       u8 KeyRSC[8];
        u8 KeyMaterial[32];
 } __attribute__((packed));
 
@@ -279,11 +279,11 @@ struct RNDIS_CONFIG_PARAMETER_INFOBUFFER {
 } __attribute__((packed));
 
 /* these have to match what is in wpa_supplicant */
-enum { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP } wpa_alg;
-enum { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, CIPHER_WEP104 }
-       wpa_cipher;
-enum { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE, KEY_MGMT_802_1X_NO_WPA,
-       KEY_MGMT_WPA_NONE } wpa_key_mgmt;
+enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
+enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
+                 CIPHER_WEP104 };
+enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE,
+                   KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE };
 
 /*
  *  private data
@@ -1508,7 +1508,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
        struct usbnet *usbdev = dev->priv;
        struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
        struct NDIS_802_11_KEY ndis_key;
-       int i, keyidx, ret;
+       int keyidx, ret;
        u8 *addr;
 
        keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX;
@@ -1543,9 +1543,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
        ndis_key.KeyIndex = cpu_to_le32(keyidx);
 
        if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
-               for (i = 0; i < 6; i++)
-                       ndis_key.KeyRSC |=
-                               cpu_to_le64(ext->rx_seq[i] << (i * 8));
+               memcpy(ndis_key.KeyRSC, ext->rx_seq, 6);
                ndis_key.KeyIndex |= cpu_to_le32(1 << 29);
        }
 
index d6cba138c7ab7723cd5b039c18ebb6e5e9ce4010..c69f85ed766917e728fed93f4092e37d6bdca1cd 100644 (file)
@@ -960,8 +960,12 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2400pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
+       case STATE_RADIO_RX_ON_LINK:
+               rt2400pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+               break;
        case STATE_RADIO_RX_OFF:
-               rt2400pci_toggle_rx(rt2x00dev, state);
+       case STATE_RADIO_RX_OFF_LINK:
+               rt2400pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
                break;
        case STATE_DEEP_SLEEP:
        case STATE_SLEEP:
index e874fdcae20450d015cea1fd23fa8dc3d03a1cd1..91e87b53374f3f7138bea4c01764545ec07c0117 100644 (file)
@@ -1112,8 +1112,12 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
+       case STATE_RADIO_RX_ON_LINK:
+               rt2500pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+               break;
        case STATE_RADIO_RX_OFF:
-               rt2500pci_toggle_rx(rt2x00dev, state);
+       case STATE_RADIO_RX_OFF_LINK:
+               rt2500pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
                break;
        case STATE_DEEP_SLEEP:
        case STATE_SLEEP:
index 4ca9730e5e92f360074e4b08b5b2e05f50460ec9..638c3d243108c5eb1bfdcdae6f4bb664b654702a 100644 (file)
@@ -1001,8 +1001,12 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
+       case STATE_RADIO_RX_ON_LINK:
+               rt2500usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+               break;
        case STATE_RADIO_RX_OFF:
-               rt2500usb_toggle_rx(rt2x00dev, state);
+       case STATE_RADIO_RX_OFF_LINK:
+               rt2500usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
                break;
        case STATE_DEEP_SLEEP:
        case STATE_SLEEP:
index 72cfe00c1ed7385834e2574fe1e24349785d7dd2..07adc576db49dc28f92819c6ec46f04e926b4b48 100644 (file)
@@ -97,12 +97,16 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
        libconf.ant.rx = rx;
        libconf.ant.tx = tx;
 
+       if (rx == rt2x00dev->link.ant.active.rx &&
+           tx == rt2x00dev->link.ant.active.tx)
+               return;
+
        /*
         * Antenna setup changes require the RX to be disabled,
         * else the changes will be ignored by the device.
         */
        if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
+               rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
 
        /*
         * Write new antenna setup to device and reset the link tuner.
@@ -116,7 +120,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
        rt2x00dev->link.ant.active.tx = libconf.ant.tx;
 
        if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+               rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
 }
 
 void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
index c4be2ac4d7a407887d32e4569ff3baebbc126b48..0d51f478bcdfcd7ba842288548c1b5b43346daa3 100644 (file)
@@ -61,11 +61,33 @@ EXPORT_SYMBOL_GPL(rt2x00lib_get_ring);
 /*
  * Link tuning handlers
  */
-static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
+void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
 {
+       if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+               return;
+
+       /*
+        * Reset link information.
+        * Both the currently active vgc level as well as
+        * the link tuner counter should be reset. Resetting
+        * the counter is important for devices where the
+        * device should only perform link tuning during the
+        * first minute after being enabled.
+        */
        rt2x00dev->link.count = 0;
        rt2x00dev->link.vgc_level = 0;
 
+       /*
+        * Reset the link tuner.
+        */
+       rt2x00dev->ops->lib->reset_tuner(rt2x00dev);
+}
+
+static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
+{
+       /*
+        * Clear all (possibly) pre-existing quality statistics.
+        */
        memset(&rt2x00dev->link.qual, 0, sizeof(rt2x00dev->link.qual));
 
        /*
@@ -79,10 +101,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->link.qual.rx_percentage = 50;
        rt2x00dev->link.qual.tx_percentage = 50;
 
-       /*
-        * Reset the link tuner.
-        */
-       rt2x00dev->ops->lib->reset_tuner(rt2x00dev);
+       rt2x00lib_reset_link_tuner(rt2x00dev);
 
        queue_delayed_work(rt2x00dev->hw->workqueue,
                           &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
@@ -93,15 +112,6 @@ static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev)
        cancel_delayed_work_sync(&rt2x00dev->link.work);
 }
 
-void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
-{
-       if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
-               return;
-
-       rt2x00lib_stop_link_tuner(rt2x00dev);
-       rt2x00lib_start_link_tuner(rt2x00dev);
-}
-
 /*
  * Ring initialization
  */
@@ -260,19 +270,11 @@ static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev)
        if (sample_a == sample_b)
                return;
 
-       if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) {
-               if (sample_a > sample_b && rx == ANTENNA_B)
-                       rx = ANTENNA_A;
-               else if (rx == ANTENNA_A)
-                       rx = ANTENNA_B;
-       }
+       if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY)
+               rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
 
-       if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY) {
-               if (sample_a > sample_b && tx == ANTENNA_B)
-                       tx = ANTENNA_A;
-               else if (tx == ANTENNA_A)
-                       tx = ANTENNA_B;
-       }
+       if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)
+               tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
 
        rt2x00lib_config_antenna(rt2x00dev, rx, tx);
 }
@@ -293,7 +295,7 @@ static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev)
         * sample the rssi from the other antenna to make a valid
         * comparison between the 2 antennas.
         */
-       if ((rssi_curr - rssi_old) > -5 || (rssi_curr - rssi_old) < 5)
+       if (abs(rssi_curr - rssi_old) < 5)
                return;
 
        rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE;
@@ -319,15 +321,15 @@ static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY;
 
        if (rt2x00dev->hw->conf.antenna_sel_rx == 0 &&
-           rt2x00dev->default_ant.rx != ANTENNA_SW_DIVERSITY)
+           rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
                rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY;
        if (rt2x00dev->hw->conf.antenna_sel_tx == 0 &&
-           rt2x00dev->default_ant.tx != ANTENNA_SW_DIVERSITY)
+           rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
                rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY;
 
        if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) &&
            !(rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)) {
-               rt2x00dev->link.ant.flags &= ~ANTENNA_MODE_SAMPLE;
+               rt2x00dev->link.ant.flags = 0;
                return;
        }
 
@@ -440,17 +442,18 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
        if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags))
                rt2x00dev->ops->lib->link_tuner(rt2x00dev);
 
-       /*
-        * Evaluate antenna setup.
-        */
-       rt2x00lib_evaluate_antenna(rt2x00dev);
-
        /*
         * Precalculate a portion of the link signal which is
         * in based on the tx/rx success/failure counters.
         */
        rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual);
 
+       /*
+        * Evaluate antenna setup, make this the last step since this could
+        * possibly reset some statistics.
+        */
+       rt2x00lib_evaluate_antenna(rt2x00dev);
+
        /*
         * Increase tuner counter, and reschedule the next link tuner run.
         */
index 838421216da0d52a0d7aedd0f78ec56a09ec9756..b1915dc7dda18e25f48e19b84f5d4c84c3c49152 100644 (file)
@@ -85,6 +85,8 @@ enum dev_state {
        STATE_RADIO_OFF,
        STATE_RADIO_RX_ON,
        STATE_RADIO_RX_OFF,
+       STATE_RADIO_RX_ON_LINK,
+       STATE_RADIO_RX_OFF_LINK,
        STATE_RADIO_IRQ_ON,
        STATE_RADIO_IRQ_OFF,
 };
index b31f0c26c32b7bb889ba789a562aeb7ae3f889ba..e808db98f2f57b09c3815fb7cb07be91386b09eb 100644 (file)
@@ -1482,8 +1482,12 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt61pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
+       case STATE_RADIO_RX_ON_LINK:
+               rt61pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+               break;
        case STATE_RADIO_RX_OFF:
-               rt61pci_toggle_rx(rt2x00dev, state);
+       case STATE_RADIO_RX_OFF_LINK:
+               rt61pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
                break;
        case STATE_DEEP_SLEEP:
        case STATE_SLEEP:
index 4d576ab3e7f984f503608696d2112eb7d1793f54..4fac2d414d845401067120dbe72ea3af9a13f9c8 100644 (file)
@@ -1208,8 +1208,12 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt73usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
+       case STATE_RADIO_RX_ON_LINK:
+               rt73usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+               break;
        case STATE_RADIO_RX_OFF:
-               rt73usb_toggle_rx(rt2x00dev, state);
+       case STATE_RADIO_RX_OFF_LINK:
+               rt73usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
                break;
        case STATE_DEEP_SLEEP:
        case STATE_SLEEP:
index 1d3b84b4af3fc5fb0937763098f7f65e57fb80fd..553a9905299a9cb80f1171ca24d3f449532ca719 100644 (file)
@@ -103,6 +103,11 @@ config IOMMU_SBA
        depends on PCI_LBA
        default PCI_LBA
 
+config IOMMU_HELPER
+       bool
+       depends on IOMMU_SBA || IOMMU_CCIO
+       default y
+
 #config PCI_EPIC
 #      bool "EPIC/SAGA PCI support"
 #      depends on PCI
index d08b284de196b2e807518ac656d678e585f9560c..60d338cd80090ff02dd20cc9e1fec5b684e8f588 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/byteorder.h>
 #include <asm/cache.h>         /* for L1_CACHE_BYTES */
@@ -302,13 +303,17 @@ static int ioc_count;
 */
 #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
        for(; res_ptr < res_end; ++res_ptr) { \
-               if(0 == (*res_ptr & mask)) { \
-                       *res_ptr |= mask; \
-                       res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
-                       ioc->res_hint = res_idx + (size >> 3); \
-                       goto resource_found; \
-               } \
-       }
+               int ret;\
+               unsigned int idx;\
+               idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
+               ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
+               if ((0 == (*res_ptr & mask)) && !ret) { \
+                       *res_ptr |= mask; \
+                       res_idx = idx;\
+                       ioc->res_hint = res_idx + (size >> 3); \
+                       goto resource_found; \
+               } \
+       }
 
 #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
        u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
@@ -341,10 +346,11 @@ static int ioc_count;
  * of available pages for the requested size.
  */
 static int
-ccio_alloc_range(struct ioc *ioc, size_t size)
+ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
        unsigned int pages_needed = size >> IOVP_SHIFT;
        unsigned int res_idx;
+       unsigned long boundary_size;
 #ifdef CCIO_SEARCH_TIME
        unsigned long cr_start = mfctl(16);
 #endif
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size)
        ** ggg sacrifices another 710 to the computer gods.
        */
 
+       boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT);
+       boundary_size >>= IOVP_SHIFT;
+
        if (pages_needed <= 8) {
                /*
                 * LAN traffic will not thrash the TLB IFF the same NIC
@@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
        ioc->msingle_pages += size >> IOVP_SHIFT;
 #endif
 
-       idx = ccio_alloc_range(ioc, size);
+       idx = ccio_alloc_range(ioc, dev, size);
        iovp = (dma_addr_t)MKIOVP(idx);
 
        pdir_start = &(ioc->pdir_base[idx]);
index 97ba8286c5969285294c2dd886a0e375071f4878..a9c46cc2db3701157485b8194af83a6c03c7b189 100644 (file)
@@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 
 static inline unsigned int
 iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
-                     struct scatterlist *startsg, int nents,
-                     int (*iommu_alloc_range)(struct ioc *, size_t))
+               struct scatterlist *startsg, int nents,
+               int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
 {
        struct scatterlist *contig_sg;     /* contig chunk head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
@@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
                dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
                sg_dma_address(contig_sg) =
                        PIDE_FLAG 
-                       | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
+                       | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
                        | dma_offset;
                n_mappings++;
        }
index d06627c3f353a607f7730b300af68a3d607b83b6..e834127a8505350554806369e3d31e9cba129440 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
 #define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
 
+unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+                         unsigned int bitshiftcnt)
+{
+       return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+               + bitshiftcnt;
+}
 
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+                 unsigned long bits_wanted)
 {
        unsigned long *res_ptr = ioc->res_hint;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-       unsigned long pide = ~0UL;
+       unsigned long pide = ~0UL, tpide;
+       unsigned long boundary_size;
+       unsigned long shift;
+       int ret;
+
+       boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT);
+       boundary_size >>= IOVP_SHIFT;
+
+#if defined(ZX1_SUPPORT)
+       BUG_ON(ioc->ibase & ~IOVP_MASK);
+       shift = ioc->ibase >> IOVP_SHIFT;
+#else
+       shift = 0;
+#endif
 
        if (bits_wanted > (BITS_PER_LONG/2)) {
                /* Search word at a time - no mask needed */
                for(; res_ptr < res_end; ++res_ptr) {
-                       if (*res_ptr == 0) {
+                       tpide = ptr_to_pide(ioc, res_ptr, 0);
+                       ret = iommu_is_span_boundary(tpide, bits_wanted,
+                                                    shift,
+                                                    boundary_size);
+                       if ((*res_ptr == 0) && !ret) {
                                *res_ptr = RESMAP_MASK(bits_wanted);
-                               pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-                               pide <<= 3;     /* convert to bit address */
+                               pide = tpide;
                                break;
                        }
                }
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
                { 
                        DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        WARN_ON(mask == 0);
-                       if(((*res_ptr) & mask) == 0) {
+                       tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+                       ret = iommu_is_span_boundary(tpide, bits_wanted,
+                                                    shift,
+                                                    boundary_size);
+                       if ((((*res_ptr) & mask) == 0) && !ret) {
                                *res_ptr |= mask;     /* mark resources busy! */
-                               pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-                               pide <<= 3;     /* convert to bit address */
-                               pide += bitshiftcnt;
+                               pide = tpide;
                                break;
                        }
                        mask >>= o;
@@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
  * resource bit map.
  */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
        unsigned int pages_needed = size >> IOVP_SHIFT;
 #ifdef SBA_COLLECT_STATS
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 #endif
        unsigned long pide;
 
-       pide = sba_search_bitmap(ioc, pages_needed);
+       pide = sba_search_bitmap(ioc, dev, pages_needed);
        if (pide >= (ioc->res_size << 3)) {
-               pide = sba_search_bitmap(ioc, pages_needed);
+               pide = sba_search_bitmap(ioc, dev, pages_needed);
                if (pide >= (ioc->res_size << 3))
                        panic("%s: I/O MMU @ %p is out of mapping resources\n",
                              __FILE__, ioc->ioc_hpa);
@@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
        ioc->msingle_calls++;
        ioc->msingle_pages += size >> IOVP_SHIFT;
 #endif
-       pide = sba_alloc_range(ioc, size);
+       pide = sba_alloc_range(ioc, dev, size);
        iovp = (dma_addr_t) pide << IOVP_SHIFT;
 
        DBG_RUN("%s() 0x%p -> 0x%lx\n",
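
Both IOMMU hunks above (ccio-dma.c and sba_iommu.c) derive boundary_size from dma_get_seg_boundary(dev) and skip any candidate allocation for which iommu_is_span_boundary() reports that the run of IO pages would cross a DMA segment boundary. As a worked example under assumed defaults of 4 KiB IO pages (IOVP_SHIFT of 12) and a 0xffffffff boundary mask: ALIGN(0xffffffff + 1, 1 << 12) >> 12 = 0x100000 pages, i.e. a mapping may not straddle a 4 GiB line. The standalone sketch below paraphrases the crossing test as used here; it is not the kernel's iommu-helper code.

#include <stdbool.h>
#include <stdio.h>

/* An allocation of nr IO pages starting at bitmap index 'index', offset by
 * 'shift' (the IOVA base in pages), must not straddle a boundary_size-page
 * boundary; boundary_size is assumed to be a power of two. */
static bool crosses_seg_boundary(unsigned long index, unsigned long nr,
                                 unsigned long shift,
                                 unsigned long boundary_size)
{
        unsigned long offset = (shift + index) & (boundary_size - 1);

        return offset + nr > boundary_size;
}

int main(void)
{
        unsigned long boundary = 0x100000;      /* 4 GiB expressed in 4 KiB pages */

        /* ends exactly on the boundary: allowed (prints 0) */
        printf("%d\n", crosses_seg_boundary(0xffff0, 0x10, 0, boundary));
        /* runs one page past the boundary: rejected (prints 1) */
        printf("%d\n", crosses_seg_boundary(0xffff0, 0x11, 0, boundary));
        return 0;
}
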
index ef5a6a245f5fa36363d513e5aa35fef248a499a4..6a9403d79e0c6fdd70ab80daecf85c6fed514488 100644 (file)
@@ -145,13 +145,15 @@ void pci_bus_add_devices(struct pci_bus *bus)
                        child_bus = dev->subordinate;
                        child_bus->dev.parent = child_bus->bridge;
                        retval = device_register(&child_bus->dev);
-                       if (!retval)
+                       if (retval)
+                               dev_err(&dev->dev, "Error registering pci_bus,"
+                                       " continuing...\n");
+                       else
                                retval = device_create_file(&child_bus->dev,
                                                        &dev_attr_cpuaffinity);
                        if (retval)
-                               dev_err(&dev->dev, "Error registering pci_bus"
-                                       " device bridge symlink,"
-                                       " continuing...\n");
+                               dev_err(&dev->dev, "Error creating cpuaffinity"
+                                       " file, continuing...\n");
                }
        }
 }
index a590ef682153ee5d765af348640ce79d39a9d4e8..4d4a64478404650a9640c852b3cf708937fc5ff0 100644 (file)
@@ -4,7 +4,7 @@
 #include "pci.h"
 
 
-unsigned int pci_do_scan_bus(struct pci_bus *bus)
+unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
 {
        unsigned int max;
 
index cf22f9e01e005721257bd61c7a2894008f65b497..5e50008d1181046ea2b77378f68c4fd7596c1914 100644 (file)
@@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle)
  * This function should be called per *physical slot*,
  * not per each slot object in ACPI namespace.
  */
-static int enable_device(struct acpiphp_slot *slot)
+static int __ref enable_device(struct acpiphp_slot *slot)
 {
        struct pci_dev *dev;
        struct pci_bus *bus = slot->bridge->pci_bus;
index 5e9be44817cb5ff4daa3f8bc5b04f6f45f10e6d3..b3515fc4cd38383fd2a25860b346b514006c3c52 100644 (file)
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot)
  * Device configuration functions
  */
 
-int cpci_configure_slot(struct slot* slot)
+int __ref cpci_configure_slot(struct slot *slot)
 {
        struct pci_bus *parent;
        int fn;
index 6eba9b2cfb90b359fc1fb92b45408846b4694f6f..698975a6a21c712b46654cba06b71b641612a143 100644 (file)
@@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot)
        retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
        if (retval) {
                err("%s: Write command failed!\n", __FUNCTION__);
-               return -1;
+               retval = -1;
+               goto out;
        }
        dbg("%s: SLOTCTRL %x write cmd %x\n",
            __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
@@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot)
         * removed from the slot/adapter.
         */
        msleep(1000);
-
+ out:
        if (changed)
                pcie_unmask_bad_dllp(ctrl);
 
index dd50713966d1b011bc480aa9d2cab642c63bb7c9..9372a840b63dbd3d43147dca446c3f078455a2e5 100644 (file)
@@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
        }
 }
 
-static int pciehp_add_bridge(struct pci_dev *dev)
+static int __ref pciehp_add_bridge(struct pci_dev *dev)
 {
        struct pci_bus *parent = dev->bus;
        int pass, busnr, start = parent->secondary;
index 0a6b25ef194c2991e36d3be4e2bb605b61a4bed4..a69a21520895814caf732ab93735b9a950688c60 100644 (file)
@@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
        }
 }
 
-int shpchp_configure_device(struct slot *p_slot)
+int __ref shpchp_configure_device(struct slot *p_slot)
 {
        struct pci_dev *dev;
        struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
index 4d23b9fb551bc129239f07404e04f6a4102ae092..2db2e4bb0d1ed6073b6ec9d3e0dec604868974ab 100644 (file)
@@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
        }
 }
 
-void pci_read_bridge_bases(struct pci_bus *child)
+void __devinit pci_read_bridge_bases(struct pci_bus *child)
 {
        struct pci_dev *dev = child->self;
        u8 io_base_lo, io_limit_lo;
@@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
  * them, we proceed to assigning numbers to the remaining buses in
  * order to avoid overlaps between old and new bus numbers.
  */
-int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
+int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
 {
        struct pci_bus *child;
        int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
@@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
        return nr;
 }
 
-unsigned int pci_scan_child_bus(struct pci_bus *bus)
+unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
 {
        unsigned int devfn, pass, max = bus->secondary;
        struct pci_dev *dev;
@@ -1116,7 +1116,7 @@ err_out:
        return NULL;
 }
 
-struct pci_bus *pci_scan_bus_parented(struct device *parent,
+struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
                int bus, struct pci_ops *ops, void *sysdata)
 {
        struct pci_bus *b;
index bbad4a9f264f0045dfc091cb1f96cb5877eb11ce..e9a333d985526c5a5af22c1e6cc79c3e2dccad10 100644 (file)
@@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
                        pci_write_config_byte(dev, 0x75, 0x1);
                        pci_write_config_byte(dev, 0x77, 0x0);
 
-                       printk(KERN_INFO
-                               "PCI: VIA CX700 PCI parking/caching fixup on %s\n",
-                               pci_name(dev));
+                       dev_info(&dev->dev,
+                               "Disabling VIA CX700 PCI parking/caching\n");
                }
        }
 }
@@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2
                        quirk_msi_ht_cap);
 
 
-/*
- *  Force enable MSI mapping capability on HT bridges
- */
-static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev)
-{
-       int pos, ttl = 48;
-
-       pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
-       while (pos && ttl--) {
-               u8 flags;
-
-               if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) {
-                       printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n",
-                              pci_name(dev));
-
-                       pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
-                                             flags | HT_MSI_FLAGS_ENABLE);
-               }
-               pos = pci_find_next_ht_capability(dev, pos,
-                                                 HT_CAPTYPE_MSI_MAPPING);
-       }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
-                        PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
-                        quirk_msi_ht_cap_enable);
-
 /* The nVidia CK804 chipset may have 2 HT MSI mappings.
  * MSI are supported if the MSI capability set in any of these mappings.
  */
@@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
                        quirk_nvidia_ck804_msi_ht_cap);
 
-/*
- *  Force enable MSI mapping capability on HT bridges  */
-static inline void ht_enable_msi_mapping(struct pci_dev *dev)
+/* Force enable MSI mapping capability on HT bridges */
+static void __devinit ht_enable_msi_mapping(struct pci_dev *dev)
 {
        int pos, ttl = 48;
 
@@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev)
                                                  HT_CAPTYPE_MSI_MAPPING);
        }
 }
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
+                        PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
+                        ht_enable_msi_mapping);
 
 static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
 {
@@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
 
                if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
                                         &flags) == 0) {
-                       dev_info(&dev->dev, "Quirk disabling HT MSI mapping");
+                       dev_info(&dev->dev, "Disabling HT MSI mapping");
                        pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
                                              flags & ~HT_MSI_FLAGS_ENABLE);
                }
index a98b2470b9ea5f85b95a3ae3c9da79aeafb29363..bd5c0e031398e9b50404380076b69c0c25889c71 100644 (file)
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev)
 #endif  /*  0  */
 
 /**
- * pci_cleanup_rom - internal routine for freeing the ROM copy created
- * by pci_map_rom_copy called from remove.c
+ * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
  * @pdev: pointer to pci device struct
  *
  * Free the copied ROM if we allocated one.
index 5480119ff9d36d94af3dce527dbaacac4d5811d7..3ce9f3defc1287f688f6484f2f265b7cdb620125 100644 (file)
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev)
 }
 
 /**
- *  rio_device_probe - Tell if a RIO device structure has a matching RIO
- *                     device id structure
+ *  rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure
  *  @id: the RIO device id structure to match against
  *  @dev: the RIO device structure to match against
  *
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev)
  *  rio_register_driver - register a new RIO driver
  *  @rdrv: the RIO driver structure to register
  *
- *  Adds a &struct rio_driver to the list of registered drivers
+ *  Adds a &struct rio_driver to the list of registered drivers.
  *  Returns a negative value on error, otherwise 0. If no error
  *  occurred, the driver remains registered even if no device
  *  was claimed during registration.
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv)
 }
 
 /**
- *  rio_match_bus - Tell if a RIO device structure has a matching RIO
- *                  driver device id structure
+ *  rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
  *  @dev: the standard device structure to match against
  *  @drv: the standard driver structure containing the ids to match against
  *
index 36cfca31769c22a46570aa913479ca3b75482d7a..0cdeb11a9cb5e5eea0d14bf4ad63ba5fe310750b 100644 (file)
@@ -260,6 +260,15 @@ config RTC_DRV_TWL4030
          This driver can also be built as a module. If so, the module
          will be called rtc-twl4030.
 
+config RTC_DRV_S35390A
+       tristate "Seiko Instruments S-35390A"
+       help
+         If you say yes here you will get support for the Seiko
+         Instruments S-35390A.
+
+         This driver can also be built as a module. If so the module
+         will be called rtc-s35390a.
+
 endif # I2C
 
 comment "SPI RTC drivers"
index 935a1f8f36ad02975379a3be197191b918242c62..ac3a41985c28aafe4f6b2bc30c765864389dd86b 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701)   += rtc-r9701.o
 obj-$(CONFIG_RTC_DRV_RS5C313)  += rtc-rs5c313.o
 obj-$(CONFIG_RTC_DRV_RS5C348)  += rtc-rs5c348.o
 obj-$(CONFIG_RTC_DRV_RS5C372)  += rtc-rs5c372.o
+obj-$(CONFIG_RTC_DRV_S35390A)  += rtc-s35390a.o
 obj-$(CONFIG_RTC_DRV_S3C)      += rtc-s3c.o
 obj-$(CONFIG_RTC_DRV_SA1100)   += rtc-sa1100.o
 obj-$(CONFIG_RTC_DRV_SH)       += rtc-sh.o
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
new file mode 100644 (file)
index 0000000..e8abc90
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Seiko Instruments S-35390A RTC Driver
+ *
+ * Copyright (c) 2007 Byron Bradley
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/i2c.h>
+#include <linux/bitrev.h>
+#include <linux/bcd.h>
+#include <linux/slab.h>
+
+#define S35390A_CMD_STATUS1    0
+#define S35390A_CMD_STATUS2    1
+#define S35390A_CMD_TIME1      2
+
+#define S35390A_BYTE_YEAR      0
+#define S35390A_BYTE_MONTH     1
+#define S35390A_BYTE_DAY       2
+#define S35390A_BYTE_WDAY      3
+#define S35390A_BYTE_HOURS     4
+#define S35390A_BYTE_MINS      5
+#define S35390A_BYTE_SECS      6
+
+#define S35390A_FLAG_POC       0x01
+#define S35390A_FLAG_BLD       0x02
+#define S35390A_FLAG_24H       0x40
+#define S35390A_FLAG_RESET     0x80
+#define S35390A_FLAG_TEST      0x01
+
+struct s35390a {
+       struct i2c_client *client[8];
+       struct rtc_device *rtc;
+       int twentyfourhour;
+};
+
+static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len)
+{
+       struct i2c_client *client = s35390a->client[reg];
+       struct i2c_msg msg[] = {
+               { client->addr, 0, len, buf },
+       };
+
+       if ((i2c_transfer(client->adapter, msg, 1)) != 1)
+               return -EIO;
+
+       return 0;
+}
+
+static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
+{
+       struct i2c_client *client = s35390a->client[reg];
+       struct i2c_msg msg[] = {
+               { client->addr, I2C_M_RD, len, buf },
+       };
+
+       if ((i2c_transfer(client->adapter, msg, 1)) != 1)
+               return -EIO;
+
+       return 0;
+}
+
+static int s35390a_reset(struct s35390a *s35390a)
+{
+       char buf[1];
+
+       if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
+               return -EIO;
+
+       if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
+               return 0;
+
+       buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
+       buf[0] &= 0xf0;
+       return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+}
+
+static int s35390a_disable_test_mode(struct s35390a *s35390a)
+{
+       char buf[1];
+
+       if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0)
+               return -EIO;
+
+       if (!(buf[0] & S35390A_FLAG_TEST))
+               return 0;
+
+       buf[0] &= ~S35390A_FLAG_TEST;
+       return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf));
+}
+
+static char s35390a_hr2reg(struct s35390a *s35390a, int hour)
+{
+       if (s35390a->twentyfourhour)
+               return BIN2BCD(hour);
+
+       if (hour < 12)
+               return BIN2BCD(hour);
+
+       return 0x40 | BIN2BCD(hour - 12);
+}
+
+static int s35390a_reg2hr(struct s35390a *s35390a, char reg)
+{
+       unsigned hour;
+
+       if (s35390a->twentyfourhour)
+               return BCD2BIN(reg & 0x3f);
+
+       hour = BCD2BIN(reg & 0x3f);
+       if (reg & 0x40)
+               hour += 12;
+
+       return hour;
+}
+
+static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+       struct s35390a  *s35390a = i2c_get_clientdata(client);
+       int i, err;
+       char buf[7];
+
+       dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, "
+               "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec,
+               tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year,
+               tm->tm_wday);
+
+       buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100);
+       buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1);
+       buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday);
+       buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday);
+       buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour);
+       buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min);
+       buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec);
+
+       /* This chip expects the bits of each byte to be in reverse order */
+       for (i = 0; i < 7; ++i)
+               buf[i] = bitrev8(buf[i]);
+
+       err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf));
+
+       return err;
+}
+
+static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+       struct s35390a *s35390a = i2c_get_clientdata(client);
+       char buf[7];
+       int i, err;
+
+       err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf));
+       if (err < 0)
+               return err;
+
+       /* This chip returns the bits of each byte in reverse order */
+       for (i = 0; i < 7; ++i)
+               buf[i] = bitrev8(buf[i]);
+
+       tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]);
+       tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]);
+       tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]);
+       tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]);
+       tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]);
+       tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1;
+       tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100;
+
+       dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, "
+               "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec,
+               tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year,
+               tm->tm_wday);
+
+       return rtc_valid_tm(tm);
+}
+
+static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       return s35390a_get_datetime(to_i2c_client(dev), tm);
+}
+
+static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       return s35390a_set_datetime(to_i2c_client(dev), tm);
+}
+
+static const struct rtc_class_ops s35390a_rtc_ops = {
+       .read_time      = s35390a_rtc_read_time,
+       .set_time       = s35390a_rtc_set_time,
+};
+
+static struct i2c_driver s35390a_driver;
+
+static int s35390a_probe(struct i2c_client *client)
+{
+       int err;
+       unsigned int i;
+       struct s35390a *s35390a;
+       struct rtc_time tm;
+       char buf[1];
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               err = -ENODEV;
+               goto exit;
+       }
+
+       s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL);
+       if (!s35390a) {
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       s35390a->client[0] = client;
+       i2c_set_clientdata(client, s35390a);
+
+       /* This chip uses multiple addresses, use dummy devices for them */
+       for (i = 1; i < 8; ++i) {
+               s35390a->client[i] = i2c_new_dummy(client->adapter,
+                                       client->addr + i, "rtc-s35390a");
+               if (!s35390a->client[i]) {
+                       dev_err(&client->dev, "Address %02x unavailable\n",
+                                               client->addr + i);
+                       err = -EBUSY;
+                       goto exit_dummy;
+               }
+       }
+
+       err = s35390a_reset(s35390a);
+       if (err < 0) {
+               dev_err(&client->dev, "error resetting chip\n");
+               goto exit_dummy;
+       }
+
+       err = s35390a_disable_test_mode(s35390a);
+       if (err < 0) {
+               dev_err(&client->dev, "error disabling test mode\n");
+               goto exit_dummy;
+       }
+
+       err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+       if (err < 0) {
+               dev_err(&client->dev, "error checking 12/24 hour mode\n");
+               goto exit_dummy;
+       }
+       if (buf[0] & S35390A_FLAG_24H)
+               s35390a->twentyfourhour = 1;
+       else
+               s35390a->twentyfourhour = 0;
+
+       if (s35390a_get_datetime(client, &tm) < 0)
+               dev_warn(&client->dev, "clock needs to be set\n");
+
+       s35390a->rtc = rtc_device_register(s35390a_driver.driver.name,
+                               &client->dev, &s35390a_rtc_ops, THIS_MODULE);
+
+       if (IS_ERR(s35390a->rtc)) {
+               err = PTR_ERR(s35390a->rtc);
+               goto exit_dummy;
+       }
+       return 0;
+
+exit_dummy:
+       for (i = 1; i < 8; ++i)
+               if (s35390a->client[i])
+                       i2c_unregister_device(s35390a->client[i]);
+       kfree(s35390a);
+       i2c_set_clientdata(client, NULL);
+
+exit:
+       return err;
+}
+
+static int s35390a_remove(struct i2c_client *client)
+{
+       unsigned int i;
+
+       struct s35390a *s35390a = i2c_get_clientdata(client);
+       for (i = 1; i < 8; ++i)
+               if (s35390a->client[i])
+                       i2c_unregister_device(s35390a->client[i]);
+
+       rtc_device_unregister(s35390a->rtc);
+       kfree(s35390a);
+       i2c_set_clientdata(client, NULL);
+
+       return 0;
+}
+
+static struct i2c_driver s35390a_driver = {
+       .driver         = {
+               .name   = "rtc-s35390a",
+       },
+       .probe          = s35390a_probe,
+       .remove         = s35390a_remove,
+};
+
+static int __init s35390a_rtc_init(void)
+{
+       return i2c_add_driver(&s35390a_driver);
+}
+
+static void __exit s35390a_rtc_exit(void)
+{
+       i2c_del_driver(&s35390a_driver);
+}
+
+MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>");
+MODULE_DESCRIPTION("S35390A RTC driver");
+MODULE_LICENSE("GPL");
+
+module_init(s35390a_rtc_init);
+module_exit(s35390a_rtc_exit);
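Once this driver binds and rtc_device_register() succeeds, the clock is consumed through the generic RTC class interface rather than by raw I2C transfers; reads are routed to s35390a_rtc_read_time() above. A minimal user-space sketch of reading the time through that interface (the /dev/rtc0 node name is an assumption and depends on which RTC registers first):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;
	/* RTC_RD_TIME is dispatched via the rtc class to the driver's read_time op */
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		close(fd);
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}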
index 389346cda6c826fdecfb7d8663a7950ddd2fab43..07c7f31081bccbe629c7bcbd789bb7665e57edb3 100644 (file)
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = {
 };
 
 struct kbdiacruc accent_table[MAX_DIACR] = {
-       {'^', 'c', '\003'},     {'^', 'd', '\004'},
-       {'^', 'z', '\032'},     {'^', '\012', '\000'},
+       {'^', 'c', 0003},       {'^', 'd', 0004},
+       {'^', 'z', 0032},       {'^', 0012, 0000},
 };
 
 unsigned int accent_table_size = 4;
index c3076217871e7e25ab485a7a8f89311c2bd31679..d8a5c229c5a7adc71703417c4a6a84331f3976a4 100644 (file)
@@ -1851,8 +1851,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
                 }
         }
         /*      See how many write buffers are required to hold this data */
-        numBuffers= ( skb->len + privptr->p_env->write_size - 1) /
-                       ( privptr->p_env->write_size);
+       numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
 
         /*      If that number of buffers isn't available, give up for now */
         if (privptr->write_free_count < numBuffers ||
@@ -2114,8 +2113,7 @@ init_ccw_bk(struct net_device *dev)
         */
         ccw_blocks_perpage= PAGE_SIZE /  CCWBK_SIZE;
         ccw_pages_required=
-               (ccw_blocks_required+ccw_blocks_perpage -1) /
-                        ccw_blocks_perpage;
+               DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
 
 #ifdef DEBUGMSG
         printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
@@ -2131,30 +2129,29 @@ init_ccw_bk(struct net_device *dev)
         * provide good performance. With packing buffers support 32k
         * buffers are used.
          */
-        if (privptr->p_env->read_size < PAGE_SIZE) {
-            claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size;
-            claw_read_pages= (privptr->p_env->read_buffers +
-               claw_reads_perpage -1) / claw_reads_perpage;
+       if (privptr->p_env->read_size < PAGE_SIZE) {
+               claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
+               claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
+                                               claw_reads_perpage);
          }
          else {       /* > or equal  */
-            privptr->p_buff_pages_perread=
-               (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
-            claw_read_pages=
-               privptr->p_env->read_buffers * privptr->p_buff_pages_perread;
+               privptr->p_buff_pages_perread =
+                       DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
+               claw_read_pages = privptr->p_env->read_buffers *
+                                       privptr->p_buff_pages_perread;
          }
         if (privptr->p_env->write_size < PAGE_SIZE) {
-            claw_writes_perpage=
-               PAGE_SIZE / privptr->p_env->write_size;
-            claw_write_pages=
-               (privptr->p_env->write_buffers + claw_writes_perpage -1) /
-                       claw_writes_perpage;
+               claw_writes_perpage =
+                       PAGE_SIZE / privptr->p_env->write_size;
+               claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
+                                               claw_writes_perpage);
 
         }
         else {      /* >  or equal  */
-            privptr->p_buff_pages_perwrite=
-                (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
-            claw_write_pages=
-               privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite;
+               privptr->p_buff_pages_perwrite =
+                       DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
+               claw_write_pages = privptr->p_env->write_buffers *
+                                       privptr->p_buff_pages_perwrite;
         }
 #ifdef DEBUGMSG
         if (privptr->p_env->read_size < PAGE_SIZE) {
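The DIV_ROUND_UP() calls introduced above replace the open-coded ceiling divisions with the helper from include/linux/kernel.h, which expands to an expression along these lines (a sketch of the usual definition, shown only for context):

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Example: packing a 5000-byte request into 4096-byte write buffers */
/* DIV_ROUND_UP(5000, 4096) == (5000 + 4095) / 4096 == 2 buffers     */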
index fecba05b4e7783da1c22402080c7ab0c6338504b..e5c6f6af876558c0b92dea602c6d2f6eed1a23a5 100644 (file)
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));
 
-       good_bytes = scsi_bufflen(cmd);
+       good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len;
         if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
index 1dc165ad17fb20edae98e80b408fbd8cfd0c4543..e67c14e31babba5913f0ab1852f7952e191502bb 100644 (file)
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
 }
 
 /**
- * scsi_scan_target - scan a target id, possibly including all LUNs on the
- *     target.
+ * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
  * @parent:    host to scan
  * @channel:   channel to scan
  * @id:                target id to scan
index 6f09cbd7fc488ac53286c92ba1ce0698f11ba0bd..97c68d021d28bcb3ecd1cae57846cdac3fdaed7e 100644 (file)
@@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
        /* Archtek America Corp. */
        /* Archtek SmartLink Modem 3334BT Plug & Play */
        {       "GVC000F",              0       },
+       /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */
+       {       "GVC0303",              0       },
        /* Hayes */
        /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
        {       "HAY0001",              0       },
index b82595cf13e86ff759c92109fb096f706e68e7e5..cf627cd1b4c8f27e028649e97091a5b06e375e5c 100644 (file)
@@ -686,7 +686,7 @@ config UART0_RTS_PIN
 
 config SERIAL_BFIN_UART1
        bool "Enable UART1"
-       depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x)
+       depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561)
        help
          Enable UART1
 
@@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS
 
 config UART1_CTS_PIN
        int "UART1 CTS pin"
-       depends on BFIN_UART1_CTSRTS && (BF53x || BF561)
+       depends on BFIN_UART1_CTSRTS && !BF54x
        default -1
        help
          Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
 
 config UART1_RTS_PIN
        int "UART1 RTS pin"
-       depends on BFIN_UART1_CTSRTS && (BF53x || BF561)
+       depends on BFIN_UART1_CTSRTS && !BF54x
        default -1
        help
          Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
index ac2a3ef28d55435d3673a8909e01ee27c77ebd82..0aa345b9a38bd8d605cb2112153fc49f388f8ec8 100644 (file)
@@ -1,30 +1,11 @@
 /*
- * File:         drivers/serial/bfin_5xx.c
- * Based on:     Based on drivers/serial/sa1100.c
- * Author:       Aubrey Li <aubrey.li@analog.com>
+ * Blackfin On-Chip Serial Driver
  *
- * Created:
- * Description:  Driver for blackfin 5xx serial ports
+ * Copyright 2006-2007 Analog Devices Inc.
  *
- * Modified:
- *               Copyright 2006 Analog Devices Inc.
+ * Enter bugs at http://blackfin.uclinux.org/
  *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Licensed under the GPL-2 or later.
  */
 
 #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define DMA_RX_XCOUNT          512
 #define DMA_RX_YCOUNT          (PAGE_SIZE / DMA_RX_XCOUNT)
 
-#define DMA_RX_FLUSH_JIFFIES   5
+#define DMA_RX_FLUSH_JIFFIES   (HZ / 50)
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
 static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
 #else
-static void bfin_serial_do_work(struct work_struct *work);
 static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
-static void local_put_char(struct bfin_serial_port *uart, char ch);
 #endif
 
 static void bfin_serial_mctrl_check(struct bfin_serial_port *uart);
@@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart);
 static void bfin_serial_stop_tx(struct uart_port *port)
 {
        struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+       struct circ_buf *xmit = &uart->port.info->xmit;
+#if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA)
+       unsigned short ier;
+#endif
 
        while (!(UART_GET_LSR(uart) & TEMT))
-               continue;
+               cpu_relax();
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
        disable_dma(uart->tx_dma_channel);
+       xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
+       uart->port.icount.tx += uart->tx_count;
+       uart->tx_count = 0;
+       uart->tx_done = 1;
 #else
 #ifdef CONFIG_BF54x
-       /* Waiting for Transmission Finished */
-       while (!(UART_GET_LSR(uart) & TFI))
-               continue;
        /* Clear TFI bit */
        UART_PUT_LSR(uart, TFI);
        UART_CLEAR_IER(uart, ETBEI);
 #else
-       unsigned short ier;
-
        ier = UART_GET_IER(uart);
        ier &= ~ETBEI;
        UART_PUT_IER(uart, ier);
@@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port)
        struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
-       bfin_serial_dma_tx_chars(uart);
+       if (uart->tx_done)
+               bfin_serial_dma_tx_chars(uart);
 #else
 #ifdef CONFIG_BF54x
        UART_SET_IER(uart, ETBEI);
@@ -209,34 +192,27 @@ int kgdb_get_debug_char(void)
 }
 #endif
 
-#ifdef CONFIG_SERIAL_BFIN_PIO
-static void local_put_char(struct bfin_serial_port *uart, char ch)
-{
-       unsigned short status;
-       int flags = 0;
-
-       spin_lock_irqsave(&uart->port.lock, flags);
-
-       do {
-               status = UART_GET_LSR(uart);
-       } while (!(status & THRE));
-
-       UART_PUT_CHAR(uart, ch);
-       SSYNC();
-
-       spin_unlock_irqrestore(&uart->port.lock, flags);
-}
+#if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO)
+# define UART_GET_ANOMALY_THRESHOLD(uart)    ((uart)->anomaly_threshold)
+# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
+#else
+# define UART_GET_ANOMALY_THRESHOLD(uart)    0
+# define UART_SET_ANOMALY_THRESHOLD(uart, v)
+#endif
 
+#ifdef CONFIG_SERIAL_BFIN_PIO
 static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
 {
        struct tty_struct *tty = uart->port.info->tty;
        unsigned int status, ch, flg;
-       static int in_break = 0;
+       static struct timeval anomaly_start = { .tv_sec = 0 };
 #ifdef CONFIG_KGDB_UART
        struct pt_regs *regs = get_irq_regs();
 #endif
 
        status = UART_GET_LSR(uart);
+       UART_CLEAR_LSR(uart);
+
        ch = UART_GET_CHAR(uart);
        uart->port.icount.rx++;
 
@@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
 #endif
 
        if (ANOMALY_05000230) {
-               /* The BF533 family of processors have a nice misbehavior where
-                * they continuously generate characters for a "single" break.
+               /* The BF533 (and BF561) family of processors have a nice anomaly
+                * where they continuously generate characters for a "single" break.
                 * We have to basically ignore this flood until the "next" valid
-                * character comes across.  All other Blackfin families operate
-                * properly though.
+                * character comes across.  Due to the nature of the flood, it is
+                * not possible to reliably catch bytes that are sent too quickly
+                * after this break.  So application code talking to the Blackfin
+                * which sends a break signal must allow at least 1.5 character
+                * times after the end of the break for things to stabilize.  This
+                * timeout was picked as it must absolutely be larger than 1
+                * character time +/- some percent.  So 1.5 sounds good.  All other
+                * Blackfin families operate properly.  Woo.
                 * Note: While Anomaly 05000230 does not directly address this,
                 *       the changes that went in for it also fixed this issue.
+                *       That anomaly was fixed in 0.5+ silicon.  I like bunnies.
                 */
-               if (in_break) {
-                       if (ch != 0) {
-                               in_break = 0;
-                               ch = UART_GET_CHAR(uart);
-                               if (bfin_revid() < 5)
-                                       return;
-                       } else
-                               return;
+               if (anomaly_start.tv_sec) {
+                       struct timeval curr;
+                       suseconds_t usecs;
+
+                       if ((~ch & (~ch + 1)) & 0xff)
+                               goto known_good_char;
+
+                       do_gettimeofday(&curr);
+                       if (curr.tv_sec - anomaly_start.tv_sec > 1)
+                               goto known_good_char;
+
+                       usecs = 0;
+                       if (curr.tv_sec != anomaly_start.tv_sec)
+                               usecs += USEC_PER_SEC;
+                       usecs += curr.tv_usec - anomaly_start.tv_usec;
+
+                       if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
+                               goto known_good_char;
+
+                       if (ch)
+                               anomaly_start.tv_sec = 0;
+                       else
+                               anomaly_start = curr;
+
+                       return;
+
+ known_good_char:
+                       anomaly_start.tv_sec = 0;
                }
        }
 
        if (status & BI) {
                if (ANOMALY_05000230)
-                       in_break = 1;
+                       if (bfin_revid() < 5)
+                               do_gettimeofday(&anomaly_start);
                uart->port.icount.brk++;
                if (uart_handle_break(&uart->port))
                        goto ignore_char;
@@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
                UART_PUT_CHAR(uart, uart->port.x_char);
                uart->port.icount.tx++;
                uart->port.x_char = 0;
-               return;
        }
        /*
         * Check the modem control lines before
@@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
                return;
        }
 
-       local_put_char(uart, xmit->buf[xmit->tail]);
-       xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-       uart->port.icount.tx++;
+       while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
+               UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               uart->port.icount.tx++;
+               SSYNC();
+       }
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&uart->port);
@@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
 {
        struct bfin_serial_port *uart = dev_id;
 
-#ifdef CONFIG_BF54x
-       unsigned short status;
-       spin_lock(&uart->port.lock);
-       status = UART_GET_LSR(uart);
-       while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) {
-               bfin_serial_rx_chars(uart);
-               status = UART_GET_LSR(uart);
-       }
-       spin_unlock(&uart->port.lock);
-#else
        spin_lock(&uart->port.lock);
-       while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY)
+       while (UART_GET_LSR(uart) & DR)
                bfin_serial_rx_chars(uart);
        spin_unlock(&uart->port.lock);
-#endif
+
        return IRQ_HANDLED;
 }
 
@@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
 {
        struct bfin_serial_port *uart = dev_id;
 
-#ifdef CONFIG_BF54x
-       unsigned short status;
        spin_lock(&uart->port.lock);
-       status = UART_GET_LSR(uart);
-       while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) {
+       if (UART_GET_LSR(uart) & THRE)
                bfin_serial_tx_chars(uart);
-               status = UART_GET_LSR(uart);
-       }
        spin_unlock(&uart->port.lock);
-#else
-       spin_lock(&uart->port.lock);
-       while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY)
-               bfin_serial_tx_chars(uart);
-       spin_unlock(&uart->port.lock);
-#endif
+
        return IRQ_HANDLED;
 }
+#endif
 
-
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
 static void bfin_serial_do_work(struct work_struct *work)
 {
        struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue);
@@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
 {
        struct circ_buf *xmit = &uart->port.info->xmit;
        unsigned short ier;
-       int flags = 0;
-
-       if (!uart->tx_done)
-               return;
 
        uart->tx_done = 0;
 
+       if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
+               uart->tx_count = 0;
+               uart->tx_done = 1;
+               return;
+       }
+
        if (uart->port.x_char) {
                UART_PUT_CHAR(uart, uart->port.x_char);
                uart->port.icount.tx++;
                uart->port.x_char = 0;
-               uart->tx_done = 1;
-               return;
        }
+
        /*
         * Check the modem control lines before
         * transmitting anything.
         */
        bfin_serial_mctrl_check(uart);
 
-       if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
-               bfin_serial_stop_tx(&uart->port);
-               uart->tx_done = 1;
-               return;
-       }
-
-       spin_lock_irqsave(&uart->port.lock, flags);
        uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
        if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
                uart->tx_count = UART_XMIT_SIZE - xmit->tail;
@@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
        set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
        set_dma_x_modify(uart->tx_dma_channel, 1);
        enable_dma(uart->tx_dma_channel);
+
 #ifdef CONFIG_BF54x
        UART_SET_IER(uart, ETBEI);
 #else
@@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
        ier |= ETBEI;
        UART_PUT_IER(uart, ier);
 #endif
-       spin_unlock_irqrestore(&uart->port.lock, flags);
 }
 
 static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
@@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
        int i, flg, status;
 
        status = UART_GET_LSR(uart);
-       uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);;
+       UART_CLEAR_LSR(uart);
+
+       uart->port.icount.rx +=
+               CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
+               UART_XMIT_SIZE);
 
        if (status & BI) {
                uart->port.icount.brk++;
@@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
        else
                flg = TTY_NORMAL;
 
-       for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) {
-               if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
-                       goto dma_ignore_char;
-               uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg);
+       for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) {
+               if (i >= UART_XMIT_SIZE)
+                       i = 0;
+               if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
+                       uart_insert_char(&uart->port, status, OE,
+                               uart->rx_dma_buf.buf[i], flg);
        }
 
  dma_ignore_char:
@@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
 void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
 {
        int x_pos, pos;
-       int flags = 0;
-
-       bfin_serial_dma_tx_chars(uart);
 
-       spin_lock_irqsave(&uart->port.lock, flags);
-       x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel);
+       uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+       x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+       uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+       if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
+               uart->rx_dma_nrows = 0;
+       x_pos = DMA_RX_XCOUNT - x_pos;
        if (x_pos == DMA_RX_XCOUNT)
                x_pos = 0;
 
        pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
-
-       if (pos>uart->rx_dma_buf.tail) {
-               uart->rx_dma_buf.tail = pos;
+       if (pos != uart->rx_dma_buf.tail) {
+               uart->rx_dma_buf.head = pos;
                bfin_serial_dma_rx_chars(uart);
-               uart->rx_dma_buf.head = uart->rx_dma_buf.tail;
+               uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
        }
-       spin_unlock_irqrestore(&uart->port.lock, flags);
+
        uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
        add_timer(&(uart->rx_dma_timer));
 }
@@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
 
        spin_lock(&uart->port.lock);
        if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
-               clear_dma_irqstat(uart->tx_dma_channel);
                disable_dma(uart->tx_dma_channel);
+               clear_dma_irqstat(uart->tx_dma_channel);
 #ifdef CONFIG_BF54x
                UART_CLEAR_IER(uart, ETBEI);
 #else
@@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
                ier &= ~ETBEI;
                UART_PUT_IER(uart, ier);
 #endif
-               xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1);
-               uart->port.icount.tx+=uart->tx_count;
+               xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
+               uart->port.icount.tx += uart->tx_count;
 
                if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                        uart_write_wakeup(&uart->port);
 
-               if (uart_circ_empty(xmit))
-                       bfin_serial_stop_tx(&uart->port);
-               uart->tx_done = 1;
+               bfin_serial_dma_tx_chars(uart);
        }
 
        spin_unlock(&uart->port.lock);
@@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
        struct bfin_serial_port *uart = dev_id;
        unsigned short irqstat;
 
-       uart->rx_dma_nrows++;
-       if (uart->rx_dma_nrows == DMA_RX_YCOUNT) {
-               uart->rx_dma_nrows = 0;
-               uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT;
-               bfin_serial_dma_rx_chars(uart);
-               uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0;
-       }
        spin_lock(&uart->port.lock);
        irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
        clear_dma_irqstat(uart->rx_dma_channel);
-
        spin_unlock(&uart->port.lock);
+
+       del_timer(&(uart->rx_dma_timer));
+       uart->rx_dma_timer.expires = jiffies;
+       add_timer(&(uart->rx_dma_timer));
+
        return IRQ_HANDLED;
 }
 #endif
@@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
        if (uart->cts_pin < 0)
                return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
 
+# ifdef CONFIG_BF54x
+       if (UART_GET_MSR(uart) & CTS)
+# else
        if (gpio_get_value(uart->cts_pin))
+# endif
                return TIOCM_DSR | TIOCM_CAR;
        else
 #endif
@@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
                return;
 
        if (mctrl & TIOCM_RTS)
+# ifdef CONFIG_BF54x
+               UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS);
+# else
                gpio_set_value(uart->rts_pin, 0);
+# endif
        else
+# ifdef CONFIG_BF54x
+               UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS);
+# else
                gpio_set_value(uart->rts_pin, 1);
+# endif
 #endif
 }
 
@@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart)
 {
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
        unsigned int status;
-# ifdef CONFIG_SERIAL_BFIN_DMA
        struct uart_info *info = uart->port.info;
        struct tty_struct *tty = info->tty;
 
        status = bfin_serial_get_mctrl(&uart->port);
+       uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
        if (!(status & TIOCM_CTS)) {
                tty->hw_stopped = 1;
+               schedule_work(&uart->cts_workqueue);
        } else {
                tty->hw_stopped = 0;
        }
-# else
-       status = bfin_serial_get_mctrl(&uart->port);
-       uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
-       if (!(status & TIOCM_CTS))
-               schedule_work(&uart->cts_workqueue);
-# endif
 #endif
 }
 
@@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port)
        disable_dma(uart->rx_dma_channel);
        free_dma(uart->rx_dma_channel);
        del_timer(&(uart->rx_dma_timer));
+       dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
 #else
 #ifdef CONFIG_KGDB_UART
        if (uart->port.line != CONFIG_KGDB_UART_PORT)
@@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
        quot = uart_get_divisor(port, baud);
        spin_lock_irqsave(&uart->port.lock, flags);
 
+       UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
+
        do {
                lsr = UART_GET_LSR(uart);
        } while (!(lsr & TEMT));
@@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void)
                bfin_serial_ports[i].rx_dma_channel =
                        bfin_serial_resource[i].uart_rx_dma_channel;
                init_timer(&(bfin_serial_ports[i].rx_dma_timer));
-#else
-               INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work);
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
+               INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work);
                bfin_serial_ports[i].cts_pin        =
                        bfin_serial_resource[i].uart_cts_pin;
                bfin_serial_ports[i].rts_pin        =
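The UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15) call added in bfin_serial_set_termios() sizes the anomaly-05000230 quiet window at roughly 1.5 character times, matching the comment in bfin_serial_rx_chars(). A worked example at an assumed 57600 baud:

/* bit time:       1000000 / 57600 = 17 us (integer division)        */
/* threshold:      17 us * 15      = 255 us                          */
/* 8N1 character:  ~10 bit times   = ~170 us, so ~1.5 character times */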
index 348ee2c19b5830a32b92b2d93b5a6167f199a51b..c2bb11c02bdedebd73d6934f4f67702199e81a0d 100644 (file)
@@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up)
                up->port.icount.tx++;
                if (uart_circ_empty(xmit))
                        break;
-               while (!serial_in(up, UART_LSR) & UART_LSR_THRE);
+               while (!(serial_in(up, UART_LSR) & UART_LSR_THRE));
 
        } while (--count > 0);
 
index 9ce12cb2cebc0d6dfa74dc5bc8fdf77bbf1df6b8..a8c116b80bffc18d8fd0e5c8cf43813076581e67 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/delay.h>
 #include <linux/console.h>
 #include <linux/platform_device.h>
+#include <linux/serial_sci.h>
 
 #ifdef CONFIG_CPU_FREQ
 #include <linux/notifier.h>
@@ -54,7 +55,6 @@
 #include <asm/kgdb.h>
 #endif
 
-#include <asm/sci.h>
 #include "sh-sci.h"
 
 struct sci_port {
index 9cfcfd8dad5e5fb5dfd36bd651a70efc99c43c31..617efb1640b10375c9dc2362cff085beef85f596 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Core maple bus functionality
  *
- *  Copyright (C) 2007 Adrian McMenamin
+ *  Copyright (C) 2007, 2008 Adrian McMenamin
  *
  * Based on 2.4 code by:
  *
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/io.h>
@@ -54,7 +53,7 @@ static struct device maple_bus;
 static int subdevice_map[MAPLE_PORTS];
 static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
 static unsigned long maple_pnp_time;
-static int started, scanning, liststatus, realscan;
+static int started, scanning, liststatus, fullscan;
 static struct kmem_cache *maple_queue_cache;
 
 struct maple_device_specify {
@@ -62,6 +61,9 @@ struct maple_device_specify {
        int unit;
 };
 
+static bool checked[4];
+static struct maple_device *baseunits[4];
+
 /**
  *  maple_driver_register - register a device driver
  *  automatically makes the driver bus a maple bus
@@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev)
                else
                        break;
 
-       if (realscan) {
-               printk(KERN_INFO "Maple device detected: %s\n",
-                       mdev->product_name);
-               printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
-       }
+       printk(KERN_INFO "Maple device detected: %s\n",
+               mdev->product_name);
+       printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
 
        function = be32_to_cpu(mdev->devinfo.function);
 
@@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev)
                mdev->driver = &maple_dummy_driver;
                sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
        } else {
-               if (realscan)
-                       printk(KERN_INFO
-                               "Maple bus at (%d, %d): Function 0x%lX\n",
-                               mdev->port, mdev->unit, function);
+               printk(KERN_INFO
+                       "Maple bus at (%d, %d): Function 0x%lX\n",
+                       mdev->port, mdev->unit, function);
 
                matched =
                    bus_for_each_drv(&maple_bus_type, NULL, mdev,
@@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev)
 
                if (matched == 0) {
                        /* Driver does not exist yet */
-                       if (realscan)
-                               printk(KERN_INFO
-                                       "No maple driver found.\n");
+                       printk(KERN_INFO
+                               "No maple driver found.\n");
                        mdev->driver = &maple_dummy_driver;
                }
                sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
@@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev,
                maple_detach_driver(mdev);
                return;
        }
-       if (!started) {
-               printk(KERN_INFO "No maple devices attached to port %d\n",
-                      mdev->port);
+       if (!started || !fullscan) {
+               if (checked[mdev->port] == false) {
+                       checked[mdev->port] = true;
+                       printk(KERN_INFO "No maple devices attached"
+                               " to port %d\n", mdev->port);
+               }
                return;
        }
        maple_clean_submap(mdev);
@@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev,
                                   char *recvbuf)
 {
        char submask;
-       if ((!started) || (scanning == 2)) {
-               maple_attach_driver(mdev);
+       if (!started || (scanning == 2) || !fullscan) {
+               if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
+                       checked[mdev->port] = true;
+                       maple_attach_driver(mdev);
+               } else {
+                       if (mdev->unit != 0)
+                               maple_attach_driver(mdev);
+               }
                return;
        }
        if (mdev->unit == 0) {
@@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work)
        struct maple_device *dev;
        char *recvbuf;
        enum maple_code code;
+       int i;
 
        if (!maple_dma_done())
                return;
@@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work)
                } else
                        scanning = 0;
 
+               if (!fullscan) {
+                       fullscan = 1;
+                       for (i = 0; i < MAPLE_PORTS; i++) {
+                               if (checked[i] == false) {
+                                       fullscan = 0;
+                                       dev = baseunits[i];
+                                       dev->mq->command =
+                                               MAPLE_COMMAND_DEVINFO;
+                                       dev->mq->length = 0;
+                                       maple_add_packet(dev->mq);
+                               }
+                       }
+               }
                if (started == 0)
                        started = 1;
        }
@@ -694,7 +715,9 @@ static int __init maple_bus_init(void)
 
        /* setup maple ports */
        for (i = 0; i < MAPLE_PORTS; i++) {
+               checked[i] = false;
                mdev[i] = maple_alloc_dev(i, 0);
+               baseunits[i] = mdev[i];
                if (!mdev[i]) {
                        while (i-- > 0)
                                maple_free_dev(mdev[i]);
@@ -703,12 +726,9 @@ static int __init maple_bus_init(void)
                mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
                mdev[i]->mq->length = 0;
                maple_add_packet(mdev[i]->mq);
-               /* delay aids hardware detection */
-               mdelay(5);
                subdevice_map[i] = 0;
        }
 
-       realscan = 1;
        /* setup maplebus hardware */
        maplebus_dma_reset();
        /* initial detection */
index 253ed5682a6d6c354a761791b4103d2ec4f04a20..a86315a0c5b8e93c90c70edac02fd6734cf62bbb 100644 (file)
@@ -42,6 +42,7 @@ struct mpc52xx_psc_spi {
 
        /* driver internal data */
        struct mpc52xx_psc __iomem *psc;
+       struct mpc52xx_psc_fifo __iomem *fifo;
        unsigned int irq;
        u8 bits_per_word;
        u8 busy;
@@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
 {
        struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
        struct mpc52xx_psc __iomem *psc = mps->psc;
+       struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
        unsigned rb = 0;        /* number of bytes receieved */
        unsigned sb = 0;        /* number of bytes sent */
        unsigned char *rx_buf = (unsigned char *)t->rx_buf;
@@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
                        out_8(&psc->mode, 0);
                } else {
                        out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
-                       out_be16(&psc->rfalarm, rfalarm);
+                       out_be16(&fifo->rfalarm, rfalarm);
                }
                out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY);
                wait_for_completion(&mps->done);
-               recv_at_once = in_be16(&psc->rfnum);
+               recv_at_once = in_be16(&fifo->rfnum);
                dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once);
 
                send_at_once = recv_at_once;
@@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi)
 static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
 {
        struct mpc52xx_psc __iomem *psc = mps->psc;
+       struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
        u32 mclken_div;
        int ret = 0;
 
@@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
        /* Disable interrupts, interrupts are based on alarm level */
        out_be16(&psc->mpc52xx_psc_imr, 0);
        out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
-       out_8(&psc->rfcntl, 0);
+       out_8(&fifo->rfcntl, 0);
        out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
 
        /* Configure 8bit codec mode as a SPI master and use EOF flags */
@@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
                ret = -EFAULT;
                goto free_master;
        }
+       /* On the 5200, fifo regs are immediately adjacent to the psc regs */
+       mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc);
 
        ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi",
                                mps);
index 78fd33125e02af630bdcbe6e8a6eb352d0931f15..adea792fb6753266f8903f7b31fb8c50c374b303 100644 (file)
@@ -35,6 +35,11 @@ config SSB_PCIHOST
 
          If unsure, say Y
 
+config SSB_B43_PCI_BRIDGE
+       bool
+       depends on SSB_PCIHOST
+       default n
+
 config SSB_PCMCIAHOST_POSSIBLE
        bool
        depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL
index e235144add7c27d8ca26eccd95404523dd4bf3d0..de94c2eb7a37250eb4fd5a8c16acd023acc8f563 100644 (file)
@@ -14,6 +14,6 @@ ssb-$(CONFIG_SSB_DRIVER_PCICORE)      += driver_pcicore.o
 
 # b43 pci-ssb-bridge driver
 # Not strictly a part of SSB, but kept here for convenience
-ssb-$(CONFIG_SSB_PCIHOST)              += b43_pci_bridge.o
+ssb-$(CONFIG_SSB_B43_PCI_BRIDGE)       += b43_pci_bridge.o
 
 obj-$(CONFIG_SSB)                      += ssb.o
index 6d99a98800559f9daf1df56e96c01217b2b92b5c..74b9a8aea52b12bbbf721a9c6f623ac2d09655b3 100644 (file)
@@ -111,7 +111,10 @@ static void __init ssb_fixup_pcibridge(struct pci_dev *dev)
 
        /* Enable PCI bridge bus mastering and memory space */
        pci_set_master(dev);
-       pcibios_enable_device(dev, ~0);
+       if (pcibios_enable_device(dev, ~0) < 0) {
+               ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n");
+               return;
+       }
 
        /* Enable PCI bridge BAR1 prefetch and burst */
        pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3);
@@ -393,7 +396,7 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
            chipid_top != 0x5300)
                return 0;
 
-       if (bus->sprom.r1.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
+       if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
                return 0;
 
        /* The 200-pin BCM4712 package does not bond out PCI. Even when
index a789364264a6bd81db1220ac380bae94ef2a477f..21eca2b5118b3a68655b3728f1cd1ecdf609f3ff 100644 (file)
@@ -120,10 +120,10 @@ extern int ssb_devices_thaw(struct ssb_bus *bus);
 extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev);
 
 /* b43_pci_bridge.c */
-#ifdef CONFIG_SSB_PCIHOST
+#ifdef CONFIG_SSB_B43_PCI_BRIDGE
 extern int __init b43_pci_ssb_bridge_init(void);
 extern void __exit b43_pci_ssb_bridge_exit(void);
-#else /* CONFIG_SSB_PCIHOST */
+#else /* CONFIG_SSB_B43_PCI_BRIDGE */
 static inline int b43_pci_ssb_bridge_init(void)
 {
        return 0;
index 5c33cdb9cac7131a0e4d2715ecbcec538903406f..a2b0aa48b8eaba4d5be56c5d2bcbe3a4ed9fc2cd 100644 (file)
@@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS
          If you are unsure about this, say N here.
 
 config USB_SUSPEND
-       bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)"
-       depends on USB && PM && EXPERIMENTAL
+       bool "USB selective suspend/resume and wakeup"
+       depends on USB && PM
        help
          If you say Y here, you can use driver calls or the sysfs
-         "power/state" file to suspend or resume individual USB
-         peripherals.
+         "power/level" file to suspend or resume individual USB
+         peripherals and to enable or disable autosuspend (see
+         Documentation/usb/power-management.txt for more details).
 
          Also, USB "remote wakeup" signaling is supported, whereby some
          USB devices (like keyboards and network adapters) can wake up
index f90ab5e94c5842d5b5c61f55e5fd33fdf4b5ecb1..d9d1eb19f2a134ecde7a5f5bad21cb195da0bdb1 100644 (file)
  * devices is broken...
  */
 static const struct usb_device_id usb_quirk_list[] = {
-       /* Action Semiconductor flash disk */
-       { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255},
-
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* HP 5300/5370C scanner */
-       { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 },
+       { USB_DEVICE(0x03f0, 0x0701), .driver_info =
+                       USB_QUIRK_STRING_FETCH_255 },
 
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Philips PSC805 audio device */
+       { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Roland SC-8820 */
        { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
        /* Edirol SD-20 */
        { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* INTEL VALUE SSD */
-       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
-
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* Philips PSC805 audio device */
-       { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+       /* Action Semiconductor flash disk */
+       { USB_DEVICE(0x10d6, 0x2200), .driver_info =
+                       USB_QUIRK_STRING_FETCH_255 },
 
        /* SKYMEDI USB_DRIVE */
        { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* INTEL VALUE SSD */
+       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
        { }  /* terminating entry must be last */
 };
 
index 4e984060c984b4cbb715607f26ab13dfb05d357e..1f0db51190ccc79c121364de04a39a93c7e54aae 100644 (file)
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
 EXPORT_SYMBOL_GPL(usb_ifnum_to_if);
 
 /**
- * usb_altnum_to_altsetting - get the altsetting structure with a given
- *     alternate setting number.
+ * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number.
  * @intf: the interface containing the altsetting in question
  * @altnum: the desired alternate setting number
  *
@@ -234,7 +233,7 @@ static int ksuspend_usb_init(void)
         * singlethreaded.  Its job doesn't justify running on more
         * than one CPU.
         */
-       ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd");
+       ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
        if (!ksuspend_usb_wq)
                return -ENOMEM;
        return 0;
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf);
  */
 
 /**
- * usb_lock_device_for_reset - cautiously acquire the lock for a
- *     usb device structure
+ * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure
  * @udev: device that's being locked
  * @iface: interface bound to the driver making the request (optional)
  *
index 4f6bfa100f2ad009381190605cdb75c1ee4271ae..2c32bd08ee7db2c6051df00de37f709de9041a10 100644 (file)
@@ -92,7 +92,6 @@ struct printer_dev {
        u8                      *current_rx_buf;
        u8                      printer_status;
        u8                      reset_printer;
-       struct class_device     *printer_class_dev;
        struct cdev             printer_cdev;
        struct device           *pdev;
        u8                      printer_cdev_open;
index 4402d6f042d971bd42d6a1453bb3c0bef85172ee..096c41cc40d1bc8acd262fb75f5cf310d8189e6c 100644 (file)
@@ -103,6 +103,12 @@ static const char ep0name [] = "ep0";
 #error "Can't configure both IXP and PXA"
 #endif
 
+/* IXP doesn't yet support <linux/clk.h> */
+#define clk_get(dev,name)      NULL
+#define clk_enable(clk)                do { } while (0)
+#define clk_disable(clk)       do { } while (0)
+#define clk_put(clk)           do { } while (0)
+
 #endif
 
 #include "pxa2xx_udc.h"
@@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *);
 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not
  * in active use.
  */
-static int pullup(struct pxa2xx_udc *udc, int is_active)
+static int pullup(struct pxa2xx_udc *udc)
 {
-       is_active = is_active && udc->vbus && udc->pullup;
+       int is_active = udc->vbus && udc->pullup && !udc->suspended;
        DMSG("%s\n", is_active ? "active" : "inactive");
-       if (is_active)
-               udc_enable(udc);
-       else {
-               if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
-                       DMSG("disconnect %s\n", udc->driver
-                               ? udc->driver->driver.name
-                               : "(no driver)");
-                       stop_activity(udc, udc->driver);
+       if (is_active) {
+               if (!udc->active) {
+                       udc->active = 1;
+                       /* Enable clock for USB device */
+                       clk_enable(udc->clk);
+                       udc_enable(udc);
                }
-               udc_disable(udc);
+       } else {
+               if (udc->active) {
+                       if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+                               DMSG("disconnect %s\n", udc->driver
+                                       ? udc->driver->driver.name
+                                       : "(no driver)");
+                               stop_activity(udc, udc->driver);
+                       }
+                       udc_disable(udc);
+                       /* Disable clock for USB device */
+                       clk_disable(udc->clk);
+                       udc->active = 0;
+               }
+
        }
        return 0;
 }
@@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
        struct pxa2xx_udc       *udc;
 
        udc = container_of(_gadget, struct pxa2xx_udc, gadget);
-       udc->vbus = is_active = (is_active != 0);
+       udc->vbus = (is_active != 0);
        DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
-       pullup(udc, is_active);
+       pullup(udc);
        return 0;
 }
 
@@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
        if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
                return -EOPNOTSUPP;
 
-       is_active = (is_active != 0);
-       udc->pullup = is_active;
-       pullup(udc, is_active);
+       udc->pullup = (is_active != 0);
+       pullup(udc);
        return 0;
 }
 
@@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = {
 #ifdef CONFIG_USB_GADGET_DEBUG_FS
 
 static int
-udc_seq_show(struct seq_file *m, void *d)
+udc_seq_show(struct seq_file *m, void *_d)
 {
        struct pxa2xx_udc       *dev = m->private;
        unsigned long           flags;
@@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev)
 
        udc_clear_mask_UDCCR(UDCCR_UDE);
 
-#ifdef CONFIG_ARCH_PXA
-        /* Disable clock for USB device */
-       clk_disable(dev->clk);
-#endif
-
        ep0_idle (dev);
        dev->gadget.speed = USB_SPEED_UNKNOWN;
 }
@@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev)
 {
        udc_clear_mask_UDCCR(UDCCR_UDE);
 
-#ifdef CONFIG_ARCH_PXA
-        /* Enable clock for USB device */
-       clk_enable(dev->clk);
-#endif
-
        /* try to clear these bits before we enable the udc */
        udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
 
@@ -1286,7 +1292,7 @@ fail:
         * for set_configuration as well as eventual disconnect.
         */
        DMSG("registered gadget driver '%s'\n", driver->driver.name);
-       pullup(dev, 1);
+       pullup(dev);
        dump_state(dev);
        return 0;
 }
@@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
                return -EINVAL;
 
        local_irq_disable();
-       pullup(dev, 0);
+       dev->pullup = 0;
+       pullup(dev);
        stop_activity(dev, driver);
        local_irq_enable();
 
@@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
        if (irq < 0)
                return -ENODEV;
 
-#ifdef CONFIG_ARCH_PXA
        dev->clk = clk_get(&pdev->dev, "UDCCLK");
        if (IS_ERR(dev->clk)) {
                retval = PTR_ERR(dev->clk);
                goto err_clk;
        }
-#endif
 
        pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
                dev->has_cfr ? "" : " (!cfr)",
@@ -2250,10 +2255,8 @@ lubbock_fail0:
        if (dev->mach->gpio_vbus)
                gpio_free(dev->mach->gpio_vbus);
  err_gpio_vbus:
-#ifdef CONFIG_ARCH_PXA
        clk_put(dev->clk);
  err_clk:
-#endif
        return retval;
 }
 
@@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
        if (dev->driver)
                return -EBUSY;
 
-       udc_disable(dev);
+       dev->pullup = 0;
+       pullup(dev);
+
        remove_debug_files(dev);
 
        if (dev->got_irq) {
@@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
        if (dev->mach->gpio_pullup)
                gpio_free(dev->mach->gpio_pullup);
 
-#ifdef CONFIG_ARCH_PXA
        clk_put(dev->clk);
-#endif
 
        platform_set_drvdata(pdev, NULL);
        the_controller = NULL;
@@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
 static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct pxa2xx_udc       *udc = platform_get_drvdata(dev);
+       unsigned long flags;
 
        if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
                WARN("USB host won't detect disconnect!\n");
-       pullup(udc, 0);
+       udc->suspended = 1;
+
+       local_irq_save(flags);
+       pullup(udc);
+       local_irq_restore(flags);
 
        return 0;
 }
@@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
 static int pxa2xx_udc_resume(struct platform_device *dev)
 {
        struct pxa2xx_udc       *udc = platform_get_drvdata(dev);
+       unsigned long flags;
 
-       pullup(udc, 1);
+       udc->suspended = 0;
+       local_irq_save(flags);
+       pullup(udc);
+       local_irq_restore(flags);
 
        return 0;
 }
index b67e3ff5e4eb5baf648b0fcceb457e7e3456af69..e2c19e88c8753b2bd745fab07d6fb8698c855489 100644 (file)
@@ -119,7 +119,9 @@ struct pxa2xx_udc {
                                                has_cfr : 1,
                                                req_pending : 1,
                                                req_std : 1,
-                                               req_config : 1;
+                                               req_config : 1,
+                                               suspended : 1,
+                                               active : 1;
 
 #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
        struct timer_list                       timer;
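
Note: with the is_active argument removed, pullup() has to derive the desired D+ state from the flags kept in struct pxa2xx_udc shown above (pullup, vbus, suspended, active). A plausible sketch of that decision, assuming a small helper of roughly this shape exists in the full patch (the name should_enable and its exact form are assumptions, not confirmed by the hunks shown here):

static int should_enable(struct pxa2xx_udc *udc)
{
	/* drive the D+ pullup only when a gadget driver asked for it,
	 * VBUS is present, and the controller is not system-suspended */
	return udc->pullup && udc->vbus && !udc->suspended;
}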
index 776a97f33914e05511bc85dcdced2b4febe6a89b..2e49de820b1494b054dfc5c4cd92ad75c5aad2ef 100644 (file)
@@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
                        if (likely (last->urb != urb)) {
                                ehci_urb_done(ehci, last->urb, last_status);
                                count++;
+                               last_status = -EINPROGRESS;
                        }
                        ehci_qtd_free (ehci, last);
                        last = NULL;
-                       last_status = -EINPROGRESS;
                }
 
                /* ignore urbs submitted during completions we reported */
index 0130fd8571e4018549db9a5003bd432b72645878..d7071c85575876552841c4bff7397cd8c43bbcb1 100644 (file)
@@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf)
                buf[0] = 0;
 
        for (i = 0; i < ports; i++) {
-               u32 status = isp116x->rhport[i] =
-                   isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1);
+               u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1);
 
                if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
                              | RH_PS_OCIC | RH_PS_PRSC)) {
@@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
                DBG("GetPortStatus\n");
                if (!wIndex || wIndex > ports)
                        goto error;
-               tmp = isp116x->rhport[--wIndex];
+               spin_lock_irqsave(&isp116x->lock, flags);
+               tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1);
+               spin_unlock_irqrestore(&isp116x->lock, flags);
                *(__le32 *) buf = cpu_to_le32(tmp);
                DBG("GetPortStatus: port[%d]  %08x\n", wIndex + 1, tmp);
                break;
@@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
                spin_lock_irqsave(&isp116x->lock, flags);
                isp116x_write_reg32(isp116x, wIndex
                                    ? HCRHPORT2 : HCRHPORT1, tmp);
-               isp116x->rhport[wIndex] =
-                   isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1);
                spin_unlock_irqrestore(&isp116x->lock, flags);
                break;
        case SetPortFeature:
@@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
                        spin_lock_irqsave(&isp116x->lock, flags);
                        isp116x_write_reg32(isp116x, wIndex
                                            ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS);
+                       spin_unlock_irqrestore(&isp116x->lock, flags);
                        break;
                case USB_PORT_FEAT_POWER:
                        DBG("USB_PORT_FEAT_POWER\n");
                        spin_lock_irqsave(&isp116x->lock, flags);
                        isp116x_write_reg32(isp116x, wIndex
                                            ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS);
+                       spin_unlock_irqrestore(&isp116x->lock, flags);
                        break;
                case USB_PORT_FEAT_RESET:
                        DBG("USB_PORT_FEAT_RESET\n");
                        root_port_reset(isp116x, wIndex);
-                       spin_lock_irqsave(&isp116x->lock, flags);
                        break;
                default:
                        goto error;
                }
-               isp116x->rhport[wIndex] =
-                   isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1);
-               spin_unlock_irqrestore(&isp116x->lock, flags);
                break;
 
        default:
index b91e2edd9c5c1415ebecfe7aa056cddcb15c4fc6..595b90a9984880c819011ad4e181a223d8d6d50b 100644 (file)
@@ -270,7 +270,6 @@ struct isp116x {
        u32 rhdesca;
        u32 rhdescb;
        u32 rhstatus;
-       u32 rhport[2];
 
        /* async schedule: control, bulk */
        struct list_head async;
index 76db2fef4657d076b2e35b5285882308dd173659..91dc433dbcf14048488239bc42a788024eadc32a 100644 (file)
@@ -92,6 +92,7 @@ struct ftdi_sio_quirk {
 };
 
 static int   ftdi_jtag_probe           (struct usb_serial *serial);
+static int   ftdi_mtxorb_hack_setup    (struct usb_serial *serial);
 static void  ftdi_USB_UIRT_setup       (struct ftdi_private *priv);
 static void  ftdi_HE_TIRA1_setup       (struct ftdi_private *priv);
 
@@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = {
        .probe  = ftdi_jtag_probe,
 };
 
+static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = {
+       .probe  = ftdi_mtxorb_hack_setup,
+};
+
 static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
        .port_probe = ftdi_USB_UIRT_setup,
 };
@@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
+       { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
@@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
        { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
        { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
@@ -1088,6 +1096,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
        return 0;
 }
 
+/*
+ * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
+ * We have to correct it if we want to read from it.
+ */
+static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
+{
+       struct usb_host_endpoint *ep = serial->dev->ep_in[1];
+       struct usb_endpoint_descriptor *ep_desc = &ep->desc;
+
+       if (ep->enabled && ep_desc->wMaxPacketSize == 0) {
+               ep_desc->wMaxPacketSize = 0x40;
+               info("Fixing invalid wMaxPacketSize on read pipe");
+       }
+
+       return 0;
+}
+
 /* ftdi_shutdown is called from usbserial:usb_serial_disconnect
  *   it is called when the usb device is disconnected
  *
index 6eee2ab914eca092d1d3b53dc92b30ee294605d1..e1eb742abcd5d081f467b01263a5bf45f26f54b3 100644 (file)
  * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
 #define FTDI_OOCDLINK_PID      0xbaf8  /* Amontec JTAGkey */
 
+/*
+ * The following are the values for the Matrix Orbital VK204-25-USB
+ * display, which use the FT232RL.
+ */
+#define MTXORB_VK_VID          0x1b3d
+#define MTXORB_VK_PID          0x0158
+
 /* Interbiometrics USB I/O Board */
 /* Developed for Interbiometrics by Rudolf Gugler */
 #define INTERBIOMETRICS_VID              0x1209
index 869ecd374cb49e8161b765d23791604c3aff6256..aeeb9cb209994d5a2f0a17c925509605c84d1184 100644 (file)
 
 /* vendor id and device id defines */
 
+/* The native mos7840/7820 component */
 #define USB_VENDOR_ID_MOSCHIP           0x9710
 #define MOSCHIP_DEVICE_ID_7840          0x7840
 #define MOSCHIP_DEVICE_ID_7820          0x7820
+/* The native component can have its vendor/device id's overridden
+ * in vendor-specific implementations.  Such devices can be handled
+ * by making a change here, in moschip_port_id_table, and in
+ * moschip_id_table_combined
+ */
+#define USB_VENDOR_ID_BANDB             0x0856
+#define BANDB_DEVICE_ID_USOPTL4_4       0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_2       0xAC42
 
-/* Interrupt Rotinue Defines    */
+/* Interrupt Routine Defines    */
 
 #define SERIAL_IIR_RLS      0x06
 #define SERIAL_IIR_MS       0x00
 static struct usb_device_id moschip_port_id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
        {}                      /* terminating entry */
 };
 
 static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
        {}                      /* terminating entry */
 };
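
Note: the comment above spells out the three touch points needed to support a vendor-specific rebadge of the MosChip part. As a purely hypothetical illustration (the vendor and product IDs below are invented, not real devices), such an addition would look like:

/* 1. define the IDs next to USB_VENDOR_ID_BANDB (values are made up) */
#define USB_VENDOR_ID_EXAMPLE          0x1234
#define EXAMPLE_DEVICE_ID_USB4PORT     0xabcd

/* 2. and 3. add matching entries to both device tables */
	{USB_DEVICE(USB_VENDOR_ID_EXAMPLE, EXAMPLE_DEVICE_ID_USB4PORT)},  /* moschip_port_id_table */
	{USB_DEVICE(USB_VENDOR_ID_EXAMPLE, EXAMPLE_DEVICE_ID_USB4PORT)},  /* moschip_id_table_combined */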
 
index af2674c57414c0acde52258f97dcf795f8215cf8..828a4377ec6a1496c5898a6a12c6f3cd8c5f7497 100644 (file)
@@ -120,6 +120,9 @@ static int  option_send_setup(struct usb_serial_port *port);
 #define ANYDATA_PRODUCT_ADU_E100A              0x6501
 #define ANYDATA_PRODUCT_ADU_500A               0x6502
 
+#define AXESSTEL_VENDOR_ID                     0x1726
+#define AXESSTEL_PRODUCT_MV110H                        0x1000
+
 #define BANDRICH_VENDOR_ID                     0x1A8D
 #define BANDRICH_PRODUCT_C100_1                        0x1002
 #define BANDRICH_PRODUCT_C100_2                        0x1003
@@ -192,6 +195,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+       { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
        { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
        { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
index 958f5b17847c57748a16a344a833a5a7f8dfe3ae..b9b8ede61fb337bf4927ef2638640bc6723d8102 100644 (file)
@@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 
        if (!sg)
                sg = scsi_sglist(srb);
-       buflen = min(buflen, scsi_bufflen(srb));
 
        /* This loop handles a single s-g list entry, which may
         * include multiple pages.  Find the initial page structure
@@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer,
        unsigned int offset = 0;
        struct scatterlist *sg = NULL;
 
+       buflen = min(buflen, scsi_bufflen(srb));
        buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
                        TO_XFER_BUF);
        if (buflen < scsi_bufflen(srb))
index e83dfba7e6361af564390dcd553dcdd6a1790ee5..742b5c656d668d447b02984d1fe4c2ba237c6743 100644 (file)
@@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var,
 
        /* check we can fit these values into the registers */
 
-       if (var->hsync_len > 255 || var->vsync_len > 255)
+       if (var->hsync_len > 255 || var->vsync_len > 63)
                return -EINVAL;
 
-       if ((var->xres + var->right_margin) >= 4096)
+       /* hdisplay end and hsync start */
+       if ((var->xres + var->right_margin) > 4096)
                return -EINVAL;
 
+       /* vdisplay end and vsync start */
        if ((var->yres + var->lower_margin) > 2048)
                return -EINVAL;
 
@@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var,
                var->blue.length        = var->bits_per_pixel;
                var->blue.offset        = 0;
                var->transp.length      = 0;
+               var->transp.offset      = 0;
 
                break;
 
        case 16:
                if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) {
-                       var->red.offset         = 11;
-                       var->green.offset       = 5;
-                       var->blue.offset        = 0;
-               } else {
                        var->blue.offset        = 11;
                        var->green.offset       = 5;
                        var->red.offset         = 0;
+               } else {
+                       var->red.offset         = 11;
+                       var->green.offset       = 5;
+                       var->blue.offset        = 0;
                }
+               var->transp.offset      = 0;
 
                var->red.length         = 5;
                var->green.length       = 6;
@@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
                break;
 
        case 16:
-               info->fix.visual = FB_VISUAL_DIRECTCOLOR;
+               info->fix.visual = FB_VISUAL_TRUECOLOR;
                break;
 
        case 32:
@@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info)
 
        case 16:
                control |= SM501_DC_CRT_CONTROL_16BPP;
+               sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE);
                break;
 
        case 32:
@@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info)
 
        case 16:
                control |= SM501_DC_PANEL_CONTROL_16BPP;
+               sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE);
                break;
 
        case 32:
index 70fb4ee2b4215d978208f5edf7693991e4ebdd5c..919ce75db9e2ce1a0bcf6ce71d498eace0233c38 100644 (file)
@@ -564,19 +564,46 @@ static inline void write3CE(int reg, unsigned char val)
        t_outb(val, 0x3CF);
 }
 
-static inline void enable_mmio(void)
+static void enable_mmio(void)
 {
+       unsigned char tmp;
+
        /* Goto New Mode */
        outb(0x0B, 0x3C4);
        inb(0x3C5);
 
        /* Unprotect registers */
        outb(NewMode1, 0x3C4);
+       tmp = inb(0x3C5);
        outb(0x80, 0x3C5);
 
        /* Enable MMIO */
        outb(PCIReg, 0x3D4);
        outb(inb(0x3D5) | 0x01, 0x3D5);
+
+       t_outb(NewMode1, 0x3C4);
+       t_outb(tmp, 0x3C5);
+}
+
+static void disable_mmio(void)
+{
+       unsigned char tmp;
+
+       /* Goto New Mode */
+       t_outb(0x0B, 0x3C4);
+       t_inb(0x3C5);
+
+       /* Unprotect registers */
+       t_outb(NewMode1, 0x3C4);
+       tmp = t_inb(0x3C5);
+       t_outb(0x80, 0x3C5);
+
+       /* Disable MMIO */
+       t_outb(PCIReg, 0x3D4);
+       t_outb(t_inb(0x3D5) & ~0x01, 0x3D5);
+
+       outb(NewMode1, 0x3C4);
+       outb(tmp, 0x3C5);
 }
 
 #define crtc_unlock()  write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F)
@@ -1239,9 +1266,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
        default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
 
        if (!default_par.io_virt) {
-               release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
                debug("ioremap failed\n");
-               return -1;
+               err = -1;
+               goto out_unmap1;
        }
 
        enable_mmio();
@@ -1252,25 +1279,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
 
        if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) {
                debug("request_mem_region failed!\n");
+               disable_mmio();
                err = -1;
-               goto out_unmap;
+               goto out_unmap1;
        }
 
        fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start,
                                              tridentfb_fix.smem_len);
 
        if (!fb_info.screen_base) {
-               release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
                debug("ioremap failed\n");
                err = -1;
-               goto out_unmap;
+               goto out_unmap2;
        }
 
        output("%s board found\n", pci_name(dev));
-#if 0
-       output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n",
-               tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt);
-#endif
        displaytype = get_displaytype();
 
        if (flatpanel)
@@ -1288,9 +1311,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
 
        if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) {
                err = -EINVAL;
-               goto out_unmap;
+               goto out_unmap2;
        }
-       fb_alloc_cmap(&fb_info.cmap, 256, 0);
+       err = fb_alloc_cmap(&fb_info.cmap, 256, 0);
+       if (err < 0)
+               goto out_unmap2;
+
        if (defaultaccel && acc)
                default_var.accel_flags |= FB_ACCELF_TEXT;
        else
@@ -1300,19 +1326,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
        fb_info.device = &dev->dev;
        if (register_framebuffer(&fb_info) < 0) {
                printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n");
+               fb_dealloc_cmap(&fb_info.cmap);
                err = -EINVAL;
-               goto out_unmap;
+               goto out_unmap2;
        }
        output("fb%d: %s frame buffer device %dx%d-%dbpp\n",
           fb_info.node, fb_info.fix.id, default_var.xres,
           default_var.yres, default_var.bits_per_pixel);
        return 0;
 
-out_unmap:
-       if (default_par.io_virt)
-               iounmap(default_par.io_virt);
+out_unmap2:
        if (fb_info.screen_base)
                iounmap(fb_info.screen_base);
+       release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
+       disable_mmio();
+out_unmap1:
+       if (default_par.io_virt)
+               iounmap(default_par.io_virt);
+       release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
        return err;
 }
 
@@ -1323,7 +1354,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev)
        iounmap(par->io_virt);
        iounmap(fb_info.screen_base);
        release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
-       release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
+       release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
 }
 
 /* List of boards that we are trying to support */
index 688e435b4d9a60cb56097ee85c82ec3b662bd479..10211e493001582e0a5b27f549a4c9c00f3a6e5a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/ds1wm.h>
 
@@ -102,12 +103,12 @@ struct ds1wm_data {
 static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
                                        u8 val)
 {
-        __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift));
+       __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift));
 }
 
 static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
 {
-        return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift));
+       return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift));
 }
 
 
@@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
        timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT);
        ds1wm_data->reset_complete = NULL;
        if (!timeleft) {
-                dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n");
-                return 1;
+               dev_err(&ds1wm_data->pdev->dev, "reset failed\n");
+               return 1;
        }
 
        /* Wait for the end of the reset. According to the specs, the time
@@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
                (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0));
 
        if (!ds1wm_data->slave_present) {
-                dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
-                return 1;
-        }
+               dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
+               return 1;
+       }
 
-        return 0;
+       return 0;
 }
 
 static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data)
@@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev)
        if (!pdev)
                return -ENODEV;
 
-       ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL);
+       ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
        if (!ds1wm_data)
                return -ENOMEM;
 
@@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev)
                goto err1;
 
        ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm");
-       if (!ds1wm_data->clk) {
-               ret = -ENOENT;
+       if (IS_ERR(ds1wm_data->clk)) {
+               ret = PTR_ERR(ds1wm_data->clk);
                goto err2;
        }
 
index 41a958a7585e750cd800b2d229d5b3b2a04f0898..5e1a4fb5cacb2a3715ad3552bd8e412fdae9b00c 100644 (file)
@@ -1424,6 +1424,18 @@ struct elf_note_info {
        int thread_notes;
 };
 
+/*
+ * When a regset has a writeback hook, we call it on each thread before
+ * dumping user memory.  On register window machines, this makes sure the
+ * user memory backing the register data is up to date before we read it.
+ */
+static void do_thread_regset_writeback(struct task_struct *task,
+                                      const struct user_regset *regset)
+{
+       if (regset->writeback)
+               regset->writeback(task, regset, 1);
+}
+
 static int fill_thread_core_info(struct elf_thread_core_info *t,
                                 const struct user_regset_view *view,
                                 long signr, size_t *total)
@@ -1445,6 +1457,8 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
                  sizeof(t->prstatus), &t->prstatus);
        *total += notesize(&t->notes[0]);
 
+       do_thread_regset_writeback(t->task, &view->regsets[0]);
+
        /*
         * Each other regset might generate a note too.  For each regset
         * that has no core_note_type or is inactive, we leave t->notes[i]
@@ -1452,6 +1466,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
         */
        for (i = 1; i < view->n; ++i) {
                const struct user_regset *regset = &view->regsets[i];
+               do_thread_regset_writeback(t->task, regset);
                if (regset->core_note_type &&
                    (!regset->active || regset->active(t->task, regset))) {
                        int ret;
index 3ebccf4aa7e3280318b4ae8fbd2b28514d929749..ddfdd2c80bf9ba22c20901e5a74f9cd449d80ccd 100644 (file)
@@ -627,8 +627,7 @@ repeat:
 }
 
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- *                        buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
@@ -836,7 +835,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
-                                &bh->b_assoc_map->private_list);
+                                &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
index edd248367b36fe67166053731ab55d0edbcff796..dbd91461853c97fc1c5c4538c53c6514570f85d4 100644 (file)
@@ -6,7 +6,9 @@ and sync so that events like out of disk space get reported properly on
 cached files. Fix setxattr failure to certain Samba versions. Fix mount
 of second share to disconnected server session (autoreconnect on this).
 Add ability to modify cifs acls for handling chmod (when mounted with
-cifsacl flag).
+cifsacl flag). Fix prefixpath path separator so we can handle mounts
+with prefixpaths longer than one directory (one path component) when
+mounted to Windows servers.
 
 Version 1.51
 ------------
index c623e2f9c5dbab90e314c54133ea027ff6cbfe4d..50306229b0f9f822b4f4e6d53cca9b9a5986ec07 100644 (file)
@@ -461,7 +461,7 @@ A partial list of the supported mount options follows:
  cifsacl        Report mode bits (e.g. on stat) based on the Windows ACL for
                the file. (EXPERIMENTAL)
  servern        Specify the server 's netbios name (RFC1001 name) to use
-               when attempting to setup a session to the server.  This is
+               when attempting to setup a session to the server. 
                This is needed for mounting to some older servers (such
                as OS/2 or Windows 98 and Windows ME) since they do not
                support a default server name.  A server name can be up
index 73c4c419663c1dc1a3558d2df405fcb58bc07e66..0228ed06069e95c88b3b4a032b5b423f474ca190 100644 (file)
@@ -98,8 +98,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
                        if (mid_entry->resp_buf) {
                                cifs_dump_detail(mid_entry->resp_buf);
                                cifs_dump_mem("existing buf: ",
-                                       mid_entry->resp_buf,
-                                       62 /* fixme */);
+                                       mid_entry->resp_buf, 62);
                        }
                }
        }
@@ -439,7 +438,7 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
 
        return length;
 }
-#endif
+#endif /* STATS */
 
 static struct proc_dir_entry *proc_fs_cifs;
 read_proc_t cifs_txanchor_read;
@@ -482,7 +481,7 @@ cifs_proc_init(void)
                                cifs_stats_read, NULL);
        if (pde)
                pde->write_proc = cifs_stats_write;
-#endif
+#endif /* STATS */
        pde = create_proc_read_entry("cifsFYI", 0, proc_fs_cifs,
                                cifsFYI_read, NULL);
        if (pde)
@@ -918,4 +917,12 @@ security_flags_write(struct file *file, const char __user *buffer,
        /* BB should we turn on MAY flags for other MUST options? */
        return count;
 }
-#endif
+#else
+inline void cifs_proc_init(void)
+{
+}
+
+inline void cifs_proc_clean(void)
+{
+}
+#endif /* PROC_FS */
index c26cd0d2c6d525d64455dfba60e9fdd5b9a4243a..5eb3b83bbfa76b90992f8bfbc805758f6a2e71bf 100644 (file)
 
 void cifs_dump_mem(char *label, void *data, int length);
 #ifdef CONFIG_CIFS_DEBUG2
+#define DBG2 2
 void cifs_dump_detail(struct smb_hdr *);
 void cifs_dump_mids(struct TCP_Server_Info *);
+#else
+#define DBG2 0
 #endif
 extern int traceSMB;           /* flag which enables the function below */
 void dump_smb(struct smb_hdr *, int);
@@ -64,10 +67,10 @@ extern int cifsERROR;
  *     ---------
  */
 #else          /* _CIFS_DEBUG */
-#define cERROR(button,prspec)
-#define cEVENT(format,arg...)
+#define cERROR(button, prspec)
+#define cEVENT(format, arg...)
 #define cFYI(button, prspec)
-#define cifserror(format,arg...)
+#define cifserror(format, arg...)
 #endif         /* _CIFS_DEBUG */
 
 #endif                         /* _H_CIFS_DEBUG */
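
Note: the DBG2 constant introduced above (2 when CONFIG_CIFS_DEBUG2 is set, 0 otherwise) lets callers write cFYI(DBG2, ...) instead of wrapping each call in #ifdef CONFIG_CIFS_DEBUG2, as the later cifsacl.c and cifssmb.c hunks do. A self-contained sketch of the general pattern, using made-up names rather than the real cFYI macro:

#include <stdio.h>

#define EXAMPLE_DEBUG2 0			/* stands in for CONFIG_CIFS_DEBUG2 */
#if EXAMPLE_DEBUG2
#define DBG2 2
#else
#define DBG2 0
#endif

/* level 0 means "never print"; the dead branch is removed by the compiler */
#define example_fyi(level, fmt, ...)				\
	do {							\
		if (level)					\
			printf(fmt "\n", ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	example_fyi(1, "always printed");
	example_fyi(DBG2, "only printed when EXAMPLE_DEBUG2 is enabled");
	return 0;
}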
index 6ad447529961c0939c84b1eba822cb7076e3817d..7f8838253410572397725dffd3d92abba884c20c 100644 (file)
@@ -286,7 +286,7 @@ static void dump_referral(const struct dfs_info3_param *ref)
        cFYI(1, ("DFS: node path: %s", ref->node_name));
        cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type));
        cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
-                               ref->PathConsumed));
+                               ref->path_consumed));
 }
 
 
index d543accc10dd0be24a870ebaa634bd9c7cb199e7..6653e29637a7ec534c83d53c8a21ddda24130367 100644 (file)
@@ -125,7 +125,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
 #ifdef CONFIG_CIFS_DEBUG2
        if (cifsFYI && !IS_ERR(spnego_key)) {
                struct cifs_spnego_msg *msg = spnego_key->payload.data;
-               cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024,
+               cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U,
                                msg->secblob_len + msg->sesskey_len));
        }
 #endif /* CONFIG_CIFS_DEBUG2 */
index b5903b89250d412c93f31eb626252c44861463b4..7d75272a6b3f01499738ce1ba10f961429892855 100644 (file)
@@ -32,7 +32,7 @@
  *
  */
 int
-cifs_strfromUCS_le(char *to, const __le16 * from,
+cifs_strfromUCS_le(char *to, const __le16 *from,
                   int len, const struct nls_table *codepage)
 {
        int i;
@@ -61,7 +61,7 @@ cifs_strfromUCS_le(char *to, const __le16 * from,
  *
  */
 int
-cifs_strtoUCS(__le16 * to, const char *from, int len,
+cifs_strtoUCS(__le16 *to, const char *from, int len,
              const struct nls_table *codepage)
 {
        int charlen;
index 614c11fcdcb67d0f704badaf029991dddc3682c0..14eb9a2395d3cc3c4020e08c01d8b3b5dbc9faa0 100644 (file)
@@ -254,7 +254,8 @@ UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
        const wchar_t *anchor2 = ucs2;
 
        while (*ucs1) {
-               if (*ucs1 == *ucs2) {   /* Partial match found */
+               if (*ucs1 == *ucs2) {
+                       /* Partial match found */
                        ucs1++;
                        ucs2++;
                } else {
@@ -279,7 +280,8 @@ UniToupper(register wchar_t uc)
 {
        register const struct UniCaseRange *rp;
 
-       if (uc < sizeof (CifsUniUpperTable)) {  /* Latin characters */
+       if (uc < sizeof(CifsUniUpperTable)) {
+               /* Latin characters */
                return uc + CifsUniUpperTable[uc];      /* Use base tables */
        } else {
                rp = CifsUniUpperRange; /* Use range tables */
@@ -320,7 +322,8 @@ UniTolower(wchar_t uc)
 {
        register struct UniCaseRange *rp;
 
-       if (uc < sizeof (UniLowerTable)) {      /* Latin characters */
+       if (uc < sizeof(UniLowerTable)) {
+               /* Latin characters */
                return uc + UniLowerTable[uc];  /* Use base tables */
        } else {
                rp = UniLowerRange;     /* Use range tables */
index a7035bd18e4e4282ed4fcef9c22132c94c9e7f09..f93932c217728861d97ca4b1d4d2754a6f81fa1c 100644 (file)
@@ -46,8 +46,7 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
 static const struct cifs_sid sid_everyone = {
        1, 1, {0, 0, 0, 0, 0, 1}, {0} };
 /* group users */
-static const struct cifs_sid sid_user =
-               {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
+static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
 
 
 int match_sid(struct cifs_sid *ctsid)
@@ -195,9 +194,9 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
        /* For deny ACEs we change the mask so that subsequent allow access
           control entries do not turn on the bits we are denying */
        if (type == ACCESS_DENIED) {
-               if (flags & GENERIC_ALL) {
+               if (flags & GENERIC_ALL)
                        *pbits_to_set &= ~S_IRWXUGO;
-               }
+
                if ((flags & GENERIC_WRITE) ||
                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
                        *pbits_to_set &= ~S_IWUGO;
@@ -216,9 +215,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
 
        if (flags & GENERIC_ALL) {
                *pmode |= (S_IRWXUGO & (*pbits_to_set));
-#ifdef CONFIG_CIFS_DEBUG2
-               cFYI(1, ("all perms"));
-#endif
+               cFYI(DBG2, ("all perms"));
                return;
        }
        if ((flags & GENERIC_WRITE) ||
@@ -231,9 +228,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
                *pmode |= (S_IXUGO & (*pbits_to_set));
 
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("access flags 0x%x mode now 0x%x", flags, *pmode));
-#endif
+       cFYI(DBG2, ("access flags 0x%x mode now 0x%x", flags, *pmode));
        return;
 }
 
@@ -262,9 +257,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
        if (mode & S_IXUGO)
                *pace_flags |= SET_FILE_EXEC_RIGHTS;
 
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags));
-#endif
+       cFYI(DBG2, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags));
        return;
 }
 
@@ -358,11 +351,9 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
                return;
        }
 
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("DACL revision %d size %d num aces %d",
+       cFYI(DBG2, ("DACL revision %d size %d num aces %d",
                le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
                le32_to_cpu(pdacl->num_aces)));
-#endif
 
        /* reset rwx permissions for user/group/other.
           Also, if num_aces is 0 i.e. DACL has no ACEs,
@@ -381,10 +372,6 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
                ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
                                GFP_KERNEL);
 
-/*             cifscred->cecount = pdacl->num_aces;
-               cifscred->aces = kmalloc(num_aces *
-                       sizeof(struct cifs_ace *), GFP_KERNEL);*/
-
                for (i = 0; i < num_aces; ++i) {
                        ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
 #ifdef CONFIG_CIFS_DEBUG2
@@ -437,7 +424,7 @@ static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
                                         &sid_everyone, nmode, S_IRWXO);
 
        pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
-       pndacl->num_aces = 3;
+       pndacl->num_aces = cpu_to_le32(3);
 
        return (0);
 }
@@ -495,13 +482,11 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
                                le32_to_cpu(pntsd->gsidoffset));
        dacloffset = le32_to_cpu(pntsd->dacloffset);
        dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x "
+       cFYI(DBG2, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x "
                 "sacloffset 0x%x dacloffset 0x%x",
                 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
                 le32_to_cpu(pntsd->gsidoffset),
                 le32_to_cpu(pntsd->sacloffset), dacloffset));
-#endif
 /*     cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
        rc = parse_sid(owner_sid_ptr, end_of_acl);
        if (rc)
@@ -636,9 +621,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
        struct super_block *sb;
        struct cifs_sb_info *cifs_sb;
 
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
-#endif
+       cFYI(DBG2, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
 
        if (!inode)
                return (rc);
@@ -669,9 +652,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
        }
 
        rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("SetCIFSACL rc = %d", rc));
-#endif
+       cFYI(DBG2, ("SetCIFSACL rc = %d", rc));
        if (unlock_file == TRUE)
                atomic_dec(&open_file->wrtPending);
        else
@@ -689,9 +670,7 @@ void acl_to_uid_mode(struct inode *inode, const char *path)
        u32 acllen = 0;
        int rc = 0;
 
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("converting ACL to mode for %s", path));
-#endif
+       cFYI(DBG2, ("converting ACL to mode for %s", path));
        pntsd = get_cifs_acl(&acllen, inode, path);
 
        /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
@@ -712,9 +691,7 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
        struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
        struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
 
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("set ACL from mode for %s", path));
-#endif
+       cFYI(DBG2, ("set ACL from mode for %s", path));
 
        /* Get the security descriptor */
        pntsd = get_cifs_acl(&acllen, inode, path);
@@ -736,16 +713,12 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
 
                rc = build_sec_desc(pntsd, pnntsd, acllen, inode, nmode);
 
-#ifdef CONFIG_CIFS_DEBUG2
-               cFYI(1, ("build_sec_desc rc: %d", rc));
-#endif
+               cFYI(DBG2, ("build_sec_desc rc: %d", rc));
 
                if (!rc) {
                        /* Set the security descriptor */
                        rc = set_cifs_acl(pnntsd, acllen, inode, path);
-#ifdef CONFIG_CIFS_DEBUG2
-                       cFYI(1, ("set_cifs_acl rc: %d", rc));
-#endif
+                       cFYI(DBG2, ("set_cifs_acl rc: %d", rc));
                }
 
                kfree(pnntsd);
index fcc434227691fe4cbd57d2d74b7780c0767804fb..a04b17e5a9d01dbf5b8af64323b38b9ec45b20e8 100644 (file)
@@ -204,9 +204,8 @@ cifs_put_super(struct super_block *sb)
                return;
        }
        rc = cifs_umount(sb, cifs_sb);
-       if (rc) {
+       if (rc)
                cERROR(1, ("cifs_umount failed with return code %d", rc));
-       }
 #ifdef CONFIG_CIFS_DFS_UPCALL
        if (cifs_sb->mountdata) {
                kfree(cifs_sb->mountdata);
@@ -461,7 +460,7 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
 
 static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota     = cifs_xquota_set,
-       .get_xquota     = cifs_xquota_set,
+       .get_xquota     = cifs_xquota_get,
        .set_xstate     = cifs_xstate_set,
        .get_xstate     = cifs_xstate_get,
 };
@@ -472,9 +471,7 @@ static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *tcon;
 
-#ifdef CONFIG_CIFS_DFS_UPCALL
        dfs_shrink_umount_helper(vfsmnt);
-#endif /* CONFIG CIFS_DFS_UPCALL */
 
        if (!(flags & MNT_FORCE))
                return;
@@ -992,9 +989,7 @@ static int __init
 init_cifs(void)
 {
        int rc = 0;
-#ifdef CONFIG_PROC_FS
        cifs_proc_init();
-#endif
 /*     INIT_LIST_HEAD(&GlobalServerList);*/    /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
@@ -1095,19 +1090,15 @@ init_cifs(void)
  out_destroy_inodecache:
        cifs_destroy_inodecache();
  out_clean_proc:
-#ifdef CONFIG_PROC_FS
        cifs_proc_clean();
-#endif
        return rc;
 }
 
 static void __exit
 exit_cifs(void)
 {
-       cFYI(0, ("exit_cifs"));
-#ifdef CONFIG_PROC_FS
+       cFYI(DBG2, ("exit_cifs"));
        cifs_proc_clean();
-#endif
 #ifdef CONFIG_CIFS_DFS_UPCALL
        unregister_key_type(&key_type_dns_resolver);
 #endif
index 5d32d8ddc82eac91b80ebf552da45f0ea6c9fd22..69a2e1942542e1601e9e66078a1c052b86d51742 100644 (file)
@@ -454,7 +454,7 @@ struct dir_notify_req {
 
 struct dfs_info3_param {
        int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/
-       int PathConsumed;
+       int path_consumed;
        int server_type;
        int ref_flag;
        char *path_name;
index 2f09f565a3d9603c02811f4ce47cfb36df41de21..0af63e6b426be4044e5d921c0b2ae53da517e5e5 100644 (file)
@@ -53,11 +53,11 @@ extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
 extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */ , const int flags);
-extern int SendReceiveBlockingLock(const unsigned int /* xid */ ,
-                                       struct cifsTconInfo *,
-                               struct smb_hdr * /* input */ ,
-                               struct smb_hdr * /* out */ ,
-                               int * /* bytes returned */);
+extern int SendReceiveBlockingLock(const unsigned int xid,
+                       struct cifsTconInfo *ptcon,
+                       struct smb_hdr *in_buf ,
+                       struct smb_hdr *out_buf,
+                       int *bytes_returned);
 extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
 extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);
 extern int is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
@@ -84,7 +84,7 @@ extern __u16 GetNextMid(struct TCP_Server_Info *server);
 extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
                                                 struct cifsTconInfo *);
 extern void DeleteOplockQEntry(struct oplock_q_entry *);
-extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ );
+extern struct timespec cifs_NTtimeToUnix(u64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time);
 extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
@@ -104,7 +104,11 @@ extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
 extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
 #ifdef CONFIG_CIFS_DFS_UPCALL
 extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt);
-#endif
+#else
+static inline void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
+{
+}
+#endif /* DFS_UPCALL */
 void cifs_proc_init(void);
 void cifs_proc_clean(void);
 
@@ -175,11 +179,11 @@ extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
                        struct kstatfs *FSData);
 
 extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon,
-                       const char *fileName, const FILE_BASIC_INFO * data,
+                       const char *fileName, const FILE_BASIC_INFO *data,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
 extern int CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
-                       const FILE_BASIC_INFO * data, __u16 fid);
+                       const FILE_BASIC_INFO *data, __u16 fid);
 #if 0
 extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon,
                        char *fileName, __u16 dos_attributes,
index 9409524e4bf88e5634ebbc94c2a1b30e7df38aeb..30bbe448e260fa931a2a36098057bfdd8364fad3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifssmb.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2007
+ *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   Contains the routines for constructing the SMB PDUs themselves
@@ -102,10 +102,12 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
           to this tcon */
 }
 
-/* If the return code is zero, this function must fill in request_buf pointer */
+/* Allocate and return pointer to an SMB request buffer, and set basic
+   SMB information in the SMB header.  If the return code is zero, this
+   function must have filled in request_buf pointer */
 static int
 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
-        void **request_buf /* returned */)
+               void **request_buf)
 {
        int rc = 0;
 
@@ -363,7 +365,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
                *response_buf = *request_buf;
 
        header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
-                       wct /*wct */ );
+                       wct);
 
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
@@ -523,7 +525,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        if (remain >= (MIN_TZ_ADJ / 2))
                                result += MIN_TZ_ADJ;
                        if (val < 0)
-                               result = - result;
+                               result = -result;
                        server->timeAdj = result;
                } else {
                        server->timeAdj = (int)tmp;
@@ -600,7 +602,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
        server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
                        (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
        server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
-       cFYI(0, ("Max buf = %d", ses->server->maxBuf));
+       cFYI(DBG2, ("Max buf = %d", ses->server->maxBuf));
        GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
        server->capabilities = le32_to_cpu(pSMBr->Capabilities);
        server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
@@ -868,9 +870,8 @@ PsxDelete:
        pSMB->ByteCount = cpu_to_le16(byte_count);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Posix delete returned %d", rc));
-       }
        cifs_buf_release(pSMB);
 
        cifs_stats_inc(&tcon->num_deletes);
@@ -916,9 +917,8 @@ DelFileRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_deletes);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Error in RMFile = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
        if (rc == -EAGAIN)
@@ -961,9 +961,8 @@ RmDirRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_rmdirs);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Error in RMDir = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
        if (rc == -EAGAIN)
@@ -1005,9 +1004,8 @@ MkDirRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_mkdirs);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Error in Mkdir = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
        if (rc == -EAGAIN)
@@ -1017,7 +1015,7 @@ MkDirRetry:
 
 int
 CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
-               __u64 mode, __u16 * netfid, FILE_UNIX_BASIC_INFO *pRetData,
+               __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
                __u32 *pOplock, const char *name,
                const struct nls_table *nls_codepage, int remap)
 {
@@ -1027,8 +1025,8 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
        int rc = 0;
        int bytes_returned = 0;
        __u16 params, param_offset, offset, byte_count, count;
-       OPEN_PSX_REQ * pdata;
-       OPEN_PSX_RSP * psx_rsp;
+       OPEN_PSX_REQ *pdata;
+       OPEN_PSX_RSP *psx_rsp;
 
        cFYI(1, ("In POSIX Create"));
 PsxCreat:
@@ -1110,9 +1108,7 @@ PsxCreat:
        /* check to make sure response data is there */
        if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
                pRetData->Type = cpu_to_le32(-1); /* unknown */
-#ifdef CONFIG_CIFS_DEBUG2
-               cFYI(1, ("unknown type"));
-#endif
+               cFYI(DBG2, ("unknown type"));
        } else {
                if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
                                        + sizeof(FILE_UNIX_BASIC_INFO)) {
@@ -1169,8 +1165,8 @@ static __u16 convert_disposition(int disposition)
 int
 SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
            const char *fileName, const int openDisposition,
-           const int access_flags, const int create_options, __u16 * netfid,
-           int *pOplock, FILE_ALL_INFO * pfile_info,
+           const int access_flags, const int create_options, __u16 *netfid,
+           int *pOplock, FILE_ALL_INFO *pfile_info,
            const struct nls_table *nls_codepage, int remap)
 {
        int rc = -EACCES;
@@ -1221,8 +1217,8 @@ OldOpenRetry:
 
        if (create_options & CREATE_OPTION_SPECIAL)
                pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
-       else
-                pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); /* BB FIXME */
+       else /* BB FIXME BB */
+               pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);
 
        /* if ((omode & S_IWUGO) == 0)
                pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);*/
@@ -1284,8 +1280,8 @@ OldOpenRetry:
 int
 CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
            const char *fileName, const int openDisposition,
-           const int access_flags, const int create_options, __u16 * netfid,
-           int *pOplock, FILE_ALL_INFO * pfile_info,
+           const int access_flags, const int create_options, __u16 *netfid,
+           int *pOplock, FILE_ALL_INFO *pfile_info,
            const struct nls_table *nls_codepage, int remap)
 {
        int rc = -EACCES;
@@ -1556,9 +1552,9 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
        } /* else setting file size with write of zero bytes */
        if (wct == 14)
                byte_count = bytes_sent + 1; /* pad */
-       else /* wct == 12 */ {
+       else /* wct == 12 */
                byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
-       }
+
        pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
        pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
        pSMB->hdr.smb_buf_length += byte_count;
@@ -1663,7 +1659,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
                rc = -EIO;
                *nbytes = 0;
        } else {
-               WRITE_RSP * pSMBr = (WRITE_RSP *)iov[0].iov_base;
+               WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base;
                *nbytes = le16_to_cpu(pSMBr->CountHigh);
                *nbytes = (*nbytes) << 16;
                *nbytes += le16_to_cpu(pSMBr->Count);
@@ -1744,9 +1740,8 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
                /* SMB buffer freed by function above */
        }
        cifs_stats_inc(&tcon->num_locks);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in Lock = %d", rc));
-       }
 
        /* Note: On -EAGAIN error only caller can retry on handle based calls
        since file handle passed in no longer valid */
@@ -1791,7 +1786,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 
        count = sizeof(struct cifs_posix_lock);
        pSMB->MaxParameterCount = cpu_to_le16(2);
-       pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+       pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
        pSMB->SetupCount = 1;
        pSMB->Reserved3 = 0;
        if (get_flag)
@@ -1972,9 +1967,8 @@ renameRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_renames);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in rename = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
 
@@ -2016,7 +2010,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
        data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
        rename_info = (struct set_file_rename *) data_offset;
        pSMB->MaxParameterCount = cpu_to_le16(2);
-       pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+       pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
        pSMB->SetupCount = 1;
        pSMB->Reserved3 = 0;
        pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -2052,9 +2046,8 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
        rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&pTcon->num_t2renames);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in Rename (by file handle) = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
 
@@ -2211,9 +2204,8 @@ createSymLinkRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_symlinks);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
-       }
 
        if (pSMB)
                cifs_buf_release(pSMB);
@@ -2299,9 +2291,8 @@ createHardLinkRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_hardlinks);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
        if (rc == -EAGAIN)
@@ -2370,9 +2361,9 @@ winCreateHardLinkRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        cifs_stats_inc(&tcon->num_hardlinks);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in hard link (NT rename) = %d", rc));
-       }
+
        cifs_buf_release(pSMB);
        if (rc == -EAGAIN)
                goto winCreateHardLinkRetry;
@@ -2968,9 +2959,8 @@ setAclRetry:
        pSMB->ByteCount = cpu_to_le16(byte_count);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Set POSIX ACL returned %d", rc));
-       }
 
 setACLerrorExit:
        cifs_buf_release(pSMB);
@@ -2982,7 +2972,7 @@ setACLerrorExit:
 /* BB fix tabs in this function FIXME BB */
 int
 CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
-              const int netfid, __u64 * pExtAttrBits, __u64 *pMask)
+              const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
 {
        int rc = 0;
        struct smb_t2_qfi_req *pSMB = NULL;
@@ -3000,7 +2990,7 @@ GetExtAttrRetry:
        if (rc)
                return rc;
 
-       params = 2 /* level */ +2 /* fid */;
+       params = 2 /* level */ + 2 /* fid */;
        pSMB->t2.TotalDataCount = 0;
        pSMB->t2.MaxParameterCount = cpu_to_le16(4);
        /* BB find exact max data count below from sess structure BB */
@@ -3071,7 +3061,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
 {
        int rc = 0;
        int buf_type = 0;
-       QUERY_SEC_DESC_REQ * pSMB;
+       QUERY_SEC_DESC_REQ *pSMB;
        struct kvec iov[1];
 
        cFYI(1, ("GetCifsACL"));
@@ -3101,7 +3091,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
        if (rc) {
                cFYI(1, ("Send error in QuerySecDesc = %d", rc));
        } else {                /* decode response */
-               __le32 * parm;
+               __le32 *parm;
                __u32 parm_len;
                __u32 acl_len;
                struct smb_com_ntransact_rsp *pSMBr;
@@ -3230,8 +3220,8 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
                        FILE_ALL_INFO *pFinfo,
                        const struct nls_table *nls_codepage, int remap)
 {
-       QUERY_INFORMATION_REQ * pSMB;
-       QUERY_INFORMATION_RSP * pSMBr;
+       QUERY_INFORMATION_REQ *pSMB;
+       QUERY_INFORMATION_RSP *pSMBr;
        int rc = 0;
        int bytes_returned;
        int name_len;
@@ -3263,9 +3253,11 @@ QInfRetry:
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
                cFYI(1, ("Send error in QueryInfo = %d", rc));
-       } else if (pFinfo) {            /* decode response */
+       } else if (pFinfo) {
                struct timespec ts;
                __u32 time = le32_to_cpu(pSMBr->last_write_time);
+
+               /* decode response */
                /* BB FIXME - add time zone adjustment BB */
                memset(pFinfo, 0, sizeof(FILE_ALL_INFO));
                ts.tv_nsec = 0;
@@ -3296,7 +3288,7 @@ QInfRetry:
 int
 CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
                 const unsigned char *searchName,
-                FILE_ALL_INFO * pFindData,
+                FILE_ALL_INFO *pFindData,
                 int legacy /* old style infolevel */,
                 const struct nls_table *nls_codepage, int remap)
 {
@@ -3371,10 +3363,12 @@ QPathInfoRetry:
                else if (pFindData) {
                        int size;
                        __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
-                       if (legacy) /* we do not read the last field, EAsize,
-                                      fortunately since it varies by subdialect
-                                      and on Set vs. Get, is two bytes or 4
-                                      bytes depending but we don't care here */
+
+                       /* On legacy responses we do not read the last field,
+                       EAsize, fortunately since it varies by subdialect and
+                       also note it differs on Set vs. Get, ie two bytes or 4
+                       bytes depending but we don't care here */
+                       if (legacy)
                                size = sizeof(FILE_INFO_STANDARD);
                        else
                                size = sizeof(FILE_ALL_INFO);
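
The hunk above only moves the comment out of the condition; the behaviour stays the same: on a legacy info level the client copies just the short FILE_INFO_STANDARD prefix of the reply instead of the full FILE_ALL_INFO, so the trailing EAsize field that varies by subdialect is never read. A rough user-space sketch of that size selection, with illustrative struct layouts rather than the real wire formats:

#include <stdio.h>
#include <string.h>

/* stand-ins for the on-the-wire info levels; layouts are illustrative */
struct file_info_standard { unsigned int size; unsigned short attrs; };
struct file_all_info      { unsigned int size; unsigned short attrs;
                            unsigned long long eof; };

/* Copy only as many bytes as the negotiated info level actually carries,
 * so a short legacy reply is never over-read. */
static void copy_reply(struct file_all_info *out, const void *resp, int legacy)
{
        size_t size = legacy ? sizeof(struct file_info_standard)
                             : sizeof(struct file_all_info);

        memset(out, 0, sizeof(*out));
        memcpy(out, resp, size);
}

int main(void)
{
        struct file_all_info reply = { 4096, 0x20, 8192 };
        struct file_all_info out;

        copy_reply(&out, &reply, 1);    /* legacy: only the short prefix */
        printf("size %u eof %llu\n", out.size, out.eof);
        return 0;
}
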
@@ -3476,85 +3470,6 @@ UnixQPathInfoRetry:
        return rc;
 }
 
-#if 0  /* function unused at present */
-int CIFSFindSingle(const int xid, struct cifsTconInfo *tcon,
-              const char *searchName, FILE_ALL_INFO * findData,
-              const struct nls_table *nls_codepage)
-{
-/* level 257 SMB_ */
-       TRANSACTION2_FFIRST_REQ *pSMB = NULL;
-       TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
-       int rc = 0;
-       int bytes_returned;
-       int name_len;
-       __u16 params, byte_count;
-
-       cFYI(1, ("In FindUnique"));
-findUniqueRetry:
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
-       if (rc)
-               return rc;
-
-       if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-               name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                    PATH_MAX, nls_codepage);
-               name_len++;     /* trailing null */
-               name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(searchName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, searchName, name_len);
-       }
-
-       params = 12 + name_len /* includes null */ ;
-       pSMB->TotalDataCount = 0;       /* no EAs */
-       pSMB->MaxParameterCount = cpu_to_le16(2);
-       pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
-       pSMB->MaxSetupCount = 0;
-       pSMB->Reserved = 0;
-       pSMB->Flags = 0;
-       pSMB->Timeout = 0;
-       pSMB->Reserved2 = 0;
-       pSMB->ParameterOffset = cpu_to_le16(
-        offsetof(struct smb_com_transaction2_ffirst_req, InformationLevel)-4);
-       pSMB->DataCount = 0;
-       pSMB->DataOffset = 0;
-       pSMB->SetupCount = 1;   /* one byte, no need to le convert */
-       pSMB->Reserved3 = 0;
-       pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
-       byte_count = params + 1 /* pad */ ;
-       pSMB->TotalParameterCount = cpu_to_le16(params);
-       pSMB->ParameterCount = pSMB->TotalParameterCount;
-       pSMB->SearchAttributes =
-           cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
-                       ATTR_DIRECTORY);
-       pSMB->SearchCount = cpu_to_le16(16);    /* BB increase */
-       pSMB->SearchFlags = cpu_to_le16(1);
-       pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO);
-       pSMB->SearchStorageType = 0;    /* BB what should we set this to? BB */
-       pSMB->hdr.smb_buf_length += byte_count;
-       pSMB->ByteCount = cpu_to_le16(byte_count);
-
-       rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-                        (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-
-       if (rc) {
-               cFYI(1, ("Send error in FindFileDirInfo = %d", rc));
-       } else {                /* decode response */
-               cifs_stats_inc(&tcon->num_ffirst);
-               /* BB fill in */
-       }
-
-       cifs_buf_release(pSMB);
-       if (rc == -EAGAIN)
-               goto findUniqueRetry;
-
-       return rc;
-}
-#endif /* end unused (temporarily) function */
-
 /* xid, tcon, searchName and codepage are input parms, rest are returned */
 int
 CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
@@ -3566,7 +3481,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
 /* level 257 SMB_ */
        TRANSACTION2_FFIRST_REQ *pSMB = NULL;
        TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
-       T2_FFIRST_RSP_PARMS * parms;
+       T2_FFIRST_RSP_PARMS *parms;
        int rc = 0;
        int bytes_returned = 0;
        int name_len;
@@ -3697,7 +3612,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
 {
        TRANSACTION2_FNEXT_REQ *pSMB = NULL;
        TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
-       T2_FNEXT_RSP_PARMS * parms;
+       T2_FNEXT_RSP_PARMS *parms;
        char *response_data;
        int rc = 0;
        int bytes_returned, name_len;
@@ -3836,9 +3751,9 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
        pSMB->FileID = searchHandle;
        pSMB->ByteCount = 0;
        rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
-       if (rc) {
+       if (rc)
                cERROR(1, ("Send error in FindClose = %d", rc));
-       }
+
        cifs_stats_inc(&tcon->num_fclose);
 
        /* Since session is dead, search handle closed on server already */
@@ -3851,7 +3766,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
 int
 CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
                      const unsigned char *searchName,
-                     __u64 * inode_number,
+                     __u64 *inode_number,
                      const struct nls_table *nls_codepage, int remap)
 {
        int rc = 0;
@@ -4560,9 +4475,8 @@ SETFSUnixRetry:
                cERROR(1, ("Send error in SETFSUnixInfo = %d", rc));
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-               if (rc) {
+               if (rc)
                        rc = -EIO;      /* bad smb */
-               }
        }
        cifs_buf_release(pSMB);
 
@@ -4744,9 +4658,8 @@ SetEOFRetry:
        pSMB->ByteCount = cpu_to_le16(byte_count);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("SetPathInfo (file size) returned %d", rc));
-       }
 
        cifs_buf_release(pSMB);
 
@@ -4897,9 +4810,8 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
        pSMB->ByteCount = cpu_to_le16(byte_count);
        memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
        rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
-       }
 
        /* Note: On -EAGAIN error only caller can retry on handle based calls
                since file handle passed in no longer valid */
@@ -4975,9 +4887,8 @@ SetTimesRetry:
        pSMB->ByteCount = cpu_to_le16(byte_count);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("SetPathInfo (times) returned %d", rc));
-       }
 
        cifs_buf_release(pSMB);
 
@@ -5027,9 +4938,8 @@ SetAttrLgcyRetry:
        pSMB->ByteCount = cpu_to_le16(name_len + 1);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("Error in LegacySetAttr = %d", rc));
-       }
 
        cifs_buf_release(pSMB);
 
@@ -5138,9 +5048,8 @@ setPermsRetry:
        pSMB->ByteCount = cpu_to_le16(byte_count);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("SetPathInfo (perms) returned %d", rc));
-       }
 
        if (pSMB)
                cifs_buf_release(pSMB);
@@ -5615,9 +5524,8 @@ SetEARetry:
        pSMB->ByteCount = cpu_to_le16(byte_count);
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-       if (rc) {
+       if (rc)
                cFYI(1, ("SetPathInfo (EA) returned %d", rc));
-       }
 
        cifs_buf_release(pSMB);
 
index 65d0ba72e78f1e304af44941c74a4a1f9adb2a5d..8dbfa97cd18ca6ae60ba6621e8e9214cb7a1641d 100644 (file)
@@ -1722,8 +1722,15 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
                           originally at mount time */
                        if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
                                cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
-                       if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0)
+                       if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
+                               if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
+                                       cERROR(1, ("POSIXPATH support change"));
                                cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
+                       } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
+                               cERROR(1, ("possible reconnect error"));
+                               cERROR(1,
+                                       ("server disabled POSIX path support"));
+                       }
                }
 
                cap &= CIFS_UNIX_CAP_MASK;
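
This hunk makes reset_cifs_unix_caps() complain when the POSIX pathname capability the server advertises after a reconnect no longer matches what was negotiated at mount time. The comparison is a saved-versus-current bitmask check; a minimal user-space sketch of the idea (the capability constants below are illustrative placeholders, not the kernel's values):

#include <stdio.h>

/* illustrative capability bits, not the real CIFS_UNIX_* values */
#define POSIX_ACL_CAP        0x01u
#define POSIX_PATHNAMES_CAP  0x02u

/* Reconcile the capabilities saved at mount time with what the server
 * now advertises, logging any change in POSIX path support. */
static unsigned int recheck_caps(unsigned int saved_cap, unsigned int cap)
{
        if ((saved_cap & POSIX_PATHNAMES_CAP) == 0) {
                if (cap & POSIX_PATHNAMES_CAP)
                        printf("POSIXPATH support change\n");
                cap &= ~POSIX_PATHNAMES_CAP;    /* keep the mount-time view */
        } else if ((cap & POSIX_PATHNAMES_CAP) == 0) {
                printf("server disabled POSIX path support\n");
        }
        return cap;
}

int main(void)
{
        /* mount negotiated POSIX paths, reconnect no longer offers them */
        printf("caps now 0x%x\n",
               recheck_caps(POSIX_PATHNAMES_CAP, POSIX_ACL_CAP));
        return 0;
}
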
@@ -1753,9 +1760,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
                if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
                        if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
                                CIFS_SB(sb)->rsize = 127 * 1024;
-#ifdef CONFIG_CIFS_DEBUG2
-                               cFYI(1, ("larger reads not supported by srv"));
-#endif
+                               cFYI(DBG2,
+                                       ("larger reads not supported by srv"));
                        }
                }
 
@@ -1792,6 +1798,26 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
        }
 }
 
+static void
+convert_delimiter(char *path, char delim)
+{
+       int i;
+       char old_delim;
+
+       if (path == NULL)
+               return;
+
+       if (delim == '/') 
+               old_delim = '\\';
+       else
+               old_delim = '/';
+
+       for (i = 0; path[i] != '\0'; i++) {
+               if (path[i] == old_delim)
+                       path[i] = delim;
+       }
+}
+
 int
 cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
           char *mount_data, const char *devname)
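
The convert_delimiter() helper added above is plain in-place string rewriting with no kernel dependencies, so it can be exercised directly; a small user-space harness (main() here is purely illustrative):

#include <stdio.h>

/* same logic as the convert_delimiter() added in the hunk above */
static void convert_delimiter(char *path, char delim)
{
        int i;
        char old_delim;

        if (path == NULL)
                return;

        if (delim == '/')
                old_delim = '\\';
        else
                old_delim = '/';

        for (i = 0; path[i] != '\0'; i++) {
                if (path[i] == old_delim)
                        path[i] = delim;
        }
}

int main(void)
{
        char prepath[] = "/dir1/dir2/file";

        convert_delimiter(prepath, '\\');   /* non-POSIX server: backslashes */
        printf("%s\n", prepath);            /* prints \dir1\dir2\file */
        return 0;
}
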
@@ -2057,7 +2083,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                cifs_sb->prepath = volume_info.prepath;
                if (cifs_sb->prepath) {
                        cifs_sb->prepathlen = strlen(cifs_sb->prepath);
-                       cifs_sb->prepath[0] = CIFS_DIR_SEP(cifs_sb);
+                       /* we can not convert the / to \ in the path
+                       separators in the prefixpath yet because we do not
+                       know (until reset_cifs_unix_caps is called later)
+                       whether POSIX PATH CAP is available. We normalize
+                       the / to \ after reset_cifs_unix_caps is called */
                        volume_info.prepath = NULL;
                } else
                        cifs_sb->prepathlen = 0;
@@ -2225,11 +2255,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                else
                        tcon->unix_ext = 0; /* server does not support them */
 
+               /* convert forward to back slashes in prepath here if needed */
+               if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
+                       convert_delimiter(cifs_sb->prepath,
+                                         CIFS_DIR_SEP(cifs_sb));
+
                if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
                        cifs_sb->rsize = 1024 * 127;
-#ifdef CONFIG_CIFS_DEBUG2
-                       cFYI(1, ("no very large read support, rsize now 127K"));
-#endif
+                       cFYI(DBG2,
+                               ("no very large read support, rsize now 127K"));
                }
                if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
                        cifs_sb->wsize = min(cifs_sb->wsize,
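
Several hunks in this commit drop #ifdef CONFIG_CIFS_DEBUG2 blocks in favour of a single cFYI(DBG2, ...) call, pushing the verbosity decision into the macro. A rough sketch of how such a level-gated debug macro can be structured (the names and compile-time switch below are illustrative, not the actual cifs_debug.h definitions):

#include <stdio.h>

#define DBG2 2
#define DEBUG2_ENABLED 1        /* stand-in for CONFIG_CIFS_DEBUG2 */

/* Level 1 always prints; level DBG2 prints only when extra debugging
 * is compiled in, so callers never need their own #ifdef. */
#define cFYI(level, fmt_args)                                   \
        do {                                                    \
                if ((level) == 1 ||                             \
                    ((level) == DBG2 && DEBUG2_ENABLED)) {      \
                        printf fmt_args;                        \
                        printf("\n");                           \
                }                                               \
        } while (0)

int main(void)
{
        int rsize = 127 * 1024;

        cFYI(1, ("always printed"));
        cFYI(DBG2, ("no very large read support, rsize now %d", rsize));
        return 0;
}
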
index 699ec11984099bf432cfb44bd780235623321ff4..4e83b47c4b34328c35f9d49fc042440e35623fc6 100644 (file)
@@ -3,7 +3,7 @@
  *
  *   vfs operations that deal with dentries
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2007
+ *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -111,16 +111,6 @@ cifs_bp_rename_retry:
        return full_path;
 }
 
-/* char * build_wildcard_path_from_dentry(struct dentry *direntry)
-{
-       if(full_path == NULL)
-               return full_path;
-
-       full_path[namelen] = '\\';
-       full_path[namelen+1] = '*';
-       full_path[namelen+2] = 0;
-BB remove above eight lines BB */
-
 /* Inode operations in similar order to how they appear in Linux file fs.h */
 
 int
@@ -171,9 +161,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
                        disposition = FILE_OVERWRITE_IF;
                else if ((oflags & O_CREAT) == O_CREAT)
                        disposition = FILE_OPEN_IF;
-               else {
+               else
                        cFYI(1, ("Create flag not set in create function"));
-               }
        }
 
        /* BB add processing to set equivalent of mode - e.g. via CreateX with
@@ -367,7 +356,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
                        int oplock = 0;
                        u16 fileHandle;
-                       FILE_ALL_INFO * buf;
+                       FILE_ALL_INFO *buf;
 
                        cFYI(1, ("sfu compat create special file"));
 
@@ -534,9 +523,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
        int isValid = 1;
 
        if (direntry->d_inode) {
-               if (cifs_revalidate(direntry)) {
+               if (cifs_revalidate(direntry))
                        return 0;
-               }
        } else {
                cFYI(1, ("neg dentry 0x%p name = %s",
                         direntry, direntry->d_name.name));
index 073fdc3db41950ba15c022deda18bc00ac4718de..966e9288930be75bc9d952cde2960ef3191db212 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
  *                            Handles host name to IP address resolution
- * 
+ *
  *   Copyright (c) International Business Machines  Corp., 2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
index 995474c90885c6dab51e2d4e0b86be6fa23d87bb..7d1d5aa4c430b6c904c08a5c65c8be32ba0a0298 100644 (file)
@@ -35,9 +35,8 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags)
 
        /* No way on Linux VFS to ask to monitor xattr
        changes (and no stream support either */
-       if (fcntl_notify_flags & DN_ACCESS) {
+       if (fcntl_notify_flags & DN_ACCESS)
                cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS;
-       }
        if (fcntl_notify_flags & DN_MODIFY) {
                /* What does this mean on directories? */
                cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE |
@@ -47,9 +46,8 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags)
                cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION |
                        FILE_NOTIFY_CHANGE_LAST_WRITE;
        }
-       if (fcntl_notify_flags & DN_DELETE) {
+       if (fcntl_notify_flags & DN_DELETE)
                cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE;
-       }
        if (fcntl_notify_flags & DN_RENAME) {
                /* BB review this - checking various server behaviors */
                cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME |
index 5f7c374ae89c7b833f45f4a9cdeda5ef006bdc0c..fa849c91d323e605b7696a1592288054ae068ccb 100644 (file)
@@ -353,9 +353,9 @@ static int cifs_reopen_file(struct file *file, int can_flush)
        int disposition = FILE_OPEN;
        __u16 netfid;
 
-       if (file->private_data) {
+       if (file->private_data)
                pCifsFile = (struct cifsFileInfo *)file->private_data;
-       else
+       else
                return -EBADF;
 
        xid = GetXid();
@@ -499,9 +499,8 @@ int cifs_close(struct inode *inode, struct file *file)
                                        the struct would be in each open file,
                                        but this should give enough time to
                                        clear the socket */
-#ifdef CONFIG_CIFS_DEBUG2
-                                       cFYI(1, ("close delay, write pending"));
-#endif /* DEBUG2 */
+                                       cFYI(DBG2,
+                                               ("close delay, write pending"));
                                        msleep(timeout);
                                        timeout *= 4;
                                }
@@ -1423,9 +1422,8 @@ static int cifs_writepage(struct page *page, struct writeback_control *wbc)
        xid = GetXid();
 /* BB add check for wbc flags */
        page_cache_get(page);
-       if (!PageUptodate(page)) {
+       if (!PageUptodate(page))
                cFYI(1, ("ppw - page not up to date"));
-       }
 
        /*
         * Set the "writeback" flag, and clear "dirty" in the radix tree.
@@ -1460,9 +1458,9 @@ static int cifs_commit_write(struct file *file, struct page *page,
        cFYI(1, ("commit write for page %p up to position %lld for %d",
                 page, position, to));
        spin_lock(&inode->i_lock);
-       if (position > inode->i_size) {
+       if (position > inode->i_size)
                i_size_write(inode, position);
-       }
+
        spin_unlock(&inode->i_lock);
        if (!PageUptodate(page)) {
                position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
@@ -1596,9 +1594,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
        }
        open_file = (struct cifsFileInfo *)file->private_data;
 
-       if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+       if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, ("attempting read on write only file instance"));
-       }
+
        for (total_read = 0, current_offset = read_data;
             read_size > total_read;
             total_read += bytes_read, current_offset += bytes_read) {
@@ -1625,9 +1623,8 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
                                                smb_read_data +
                                                4 /* RFC1001 length field */ +
                                                le16_to_cpu(pSMBr->DataOffset),
-                                               bytes_read)) {
+                                               bytes_read))
                                        rc = -EFAULT;
-                               }
 
                                if (buf_type == CIFS_SMALL_BUFFER)
                                        cifs_small_buf_release(smb_read_data);
@@ -1814,9 +1811,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        pTcon = cifs_sb->tcon;
 
        pagevec_init(&lru_pvec, 0);
-#ifdef CONFIG_CIFS_DEBUG2
-               cFYI(1, ("rpages: num pages %d", num_pages));
-#endif
+               cFYI(DBG2, ("rpages: num pages %d", num_pages));
        for (i = 0; i < num_pages; ) {
                unsigned contig_pages;
                struct page *tmp_page;
@@ -1849,10 +1844,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                /* Read size needs to be in multiples of one page */
                read_size = min_t(const unsigned int, read_size,
                                  cifs_sb->rsize & PAGE_CACHE_MASK);
-#ifdef CONFIG_CIFS_DEBUG2
-               cFYI(1, ("rpages: read size 0x%x  contiguous pages %d",
+               cFYI(DBG2, ("rpages: read size 0x%x  contiguous pages %d",
                                read_size, contig_pages));
-#endif
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if ((open_file->invalidHandle) &&
@@ -2026,7 +2019,7 @@ int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
                struct cifs_sb_info *cifs_sb;
 
                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
-               if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since no page cache to corrupt on directio
                        we can change size safely */
                        return 1;
index b1a4a65eaa08e0b24f1c1daf83700113f38a742b..24eb4d392155b8d4482e7ff94f369c0a8c3a035d 100644 (file)
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
 
+
+static void cifs_set_ops(struct inode *inode)
+{
+       struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+
+       switch (inode->i_mode & S_IFMT) {
+       case S_IFREG:
+               inode->i_op = &cifs_file_inode_ops;
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+                       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                               inode->i_fop = &cifs_file_direct_nobrl_ops;
+                       else
+                               inode->i_fop = &cifs_file_direct_ops;
+               } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       inode->i_fop = &cifs_file_nobrl_ops;
+               else { /* not direct, send byte range locks */
+                       inode->i_fop = &cifs_file_ops;
+               }
+
+
+               /* check if server can support readpages */
+               if (cifs_sb->tcon->ses->server->maxBuf <
+                               PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+                       inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+               else
+                       inode->i_data.a_ops = &cifs_addr_ops;
+               break;
+       case S_IFDIR:
+               inode->i_op = &cifs_dir_inode_ops;
+               inode->i_fop = &cifs_dir_ops;
+               break;
+       case S_IFLNK:
+               inode->i_op = &cifs_symlink_inode_ops;
+               break;
+       default:
+               init_special_inode(inode, inode->i_mode, inode->i_rdev);
+               break;
+       }
+}
+
+static void cifs_unix_info_to_inode(struct inode *inode,
+               FILE_UNIX_BASIC_INFO *info, int force_uid_gid)
+{
+       struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+       struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
+       __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes);
+       __u64 end_of_file = le64_to_cpu(info->EndOfFile);
+
+       inode->i_atime = cifs_NTtimeToUnix(le64_to_cpu(info->LastAccessTime));
+       inode->i_mtime =
+               cifs_NTtimeToUnix(le64_to_cpu(info->LastModificationTime));
+       inode->i_ctime = cifs_NTtimeToUnix(le64_to_cpu(info->LastStatusChange));
+       inode->i_mode = le64_to_cpu(info->Permissions);
+
+       /*
+        * Since we set the inode type below we need to mask off
+        * to avoid strange results if bits set above.
+        */
+       inode->i_mode &= ~S_IFMT;
+       switch (le32_to_cpu(info->Type)) {
+       case UNIX_FILE:
+               inode->i_mode |= S_IFREG;
+               break;
+       case UNIX_SYMLINK:
+               inode->i_mode |= S_IFLNK;
+               break;
+       case UNIX_DIR:
+               inode->i_mode |= S_IFDIR;
+               break;
+       case UNIX_CHARDEV:
+               inode->i_mode |= S_IFCHR;
+               inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+                                     le64_to_cpu(info->DevMinor) & MINORMASK);
+               break;
+       case UNIX_BLOCKDEV:
+               inode->i_mode |= S_IFBLK;
+               inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+                                     le64_to_cpu(info->DevMinor) & MINORMASK);
+               break;
+       case UNIX_FIFO:
+               inode->i_mode |= S_IFIFO;
+               break;
+       case UNIX_SOCKET:
+               inode->i_mode |= S_IFSOCK;
+               break;
+       default:
+               /* safest to call it a file if we do not know */
+               inode->i_mode |= S_IFREG;
+               cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
+               break;
+       }
+
+       if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) &&
+           !force_uid_gid)
+               inode->i_uid = cifs_sb->mnt_uid;
+       else
+               inode->i_uid = le64_to_cpu(info->Uid);
+
+       if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) &&
+           !force_uid_gid)
+               inode->i_gid = cifs_sb->mnt_gid;
+       else
+               inode->i_gid = le64_to_cpu(info->Gid);
+
+       inode->i_nlink = le64_to_cpu(info->Nlinks);
+
+       spin_lock(&inode->i_lock);
+       if (is_size_safe_to_change(cifsInfo, end_of_file)) {
+               /*
+                * We can not safely change the file size here if the client
+                * is writing to it due to potential races.
+                */
+               i_size_write(inode, end_of_file);
+
+               /*
+                * i_blocks is not related to (i_size / i_blksize),
+                * but instead 512 byte (2**9) size is required for
+                * calculating num blocks.
+                */
+               inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
+       }
+       spin_unlock(&inode->i_lock);
+}
+
 int cifs_get_inode_info_unix(struct inode **pinode,
        const unsigned char *search_path, struct super_block *sb, int xid)
 {
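
The large hunk above introduces cifs_set_ops() and cifs_unix_info_to_inode() so the file/directory/symlink dispatch and the UNIX type decoding live in exactly one place. The core of the type decoding is clearing S_IFMT before ORing in the bit that matches the server's answer; a compact user-space sketch (the UNIX_* codes are illustrative placeholders):

#include <stdio.h>
#include <sys/stat.h>

/* illustrative wire type codes, not the real cifspdu.h values */
enum { UNIX_FILE, UNIX_DIR, UNIX_SYMLINK, UNIX_CHARDEV,
       UNIX_BLOCKDEV, UNIX_FIFO, UNIX_SOCKET };

/* Clear any stale type bits, then set the one matching the reported type;
 * unknown types fall back to a regular file, as in the hunk above. */
static mode_t apply_unix_type(mode_t mode, int type)
{
        mode &= ~S_IFMT;
        switch (type) {
        case UNIX_DIR:      return mode | S_IFDIR;
        case UNIX_SYMLINK:  return mode | S_IFLNK;
        case UNIX_CHARDEV:  return mode | S_IFCHR;
        case UNIX_BLOCKDEV: return mode | S_IFBLK;
        case UNIX_FIFO:     return mode | S_IFIFO;
        case UNIX_SOCKET:   return mode | S_IFSOCK;
        case UNIX_FILE:
        default:            return mode | S_IFREG;
        }
}

int main(void)
{
        mode_t m = apply_unix_type(0755, UNIX_DIR);

        printf("dir? %s, perms 0%o\n", S_ISDIR(m) ? "yes" : "no",
               (unsigned int)(m & 07777));
        return 0;
}
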
@@ -74,7 +198,6 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                }
        } else {
                struct cifsInodeInfo *cifsInfo;
-               __u32 type = le32_to_cpu(findData.Type);
                __u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes);
                __u64 end_of_file = le64_to_cpu(findData.EndOfFile);
 
@@ -105,112 +228,16 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* this is ok to set on every inode revalidate */
                atomic_set(&cifsInfo->inUse, 1);
 
-               inode->i_atime =
-                   cifs_NTtimeToUnix(le64_to_cpu(findData.LastAccessTime));
-               inode->i_mtime =
-                   cifs_NTtimeToUnix(le64_to_cpu
-                               (findData.LastModificationTime));
-               inode->i_ctime =
-                   cifs_NTtimeToUnix(le64_to_cpu(findData.LastStatusChange));
-               inode->i_mode = le64_to_cpu(findData.Permissions);
-               /* since we set the inode type below we need to mask off
-                  to avoid strange results if bits set above */
-               inode->i_mode &= ~S_IFMT;
-               if (type == UNIX_FILE) {
-                       inode->i_mode |= S_IFREG;
-               } else if (type == UNIX_SYMLINK) {
-                       inode->i_mode |= S_IFLNK;
-               } else if (type == UNIX_DIR) {
-                       inode->i_mode |= S_IFDIR;
-               } else if (type == UNIX_CHARDEV) {
-                       inode->i_mode |= S_IFCHR;
-                       inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor),
-                               le64_to_cpu(findData.DevMinor) & MINORMASK);
-               } else if (type == UNIX_BLOCKDEV) {
-                       inode->i_mode |= S_IFBLK;
-                       inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor),
-                               le64_to_cpu(findData.DevMinor) & MINORMASK);
-               } else if (type == UNIX_FIFO) {
-                       inode->i_mode |= S_IFIFO;
-               } else if (type == UNIX_SOCKET) {
-                       inode->i_mode |= S_IFSOCK;
-               } else {
-                       /* safest to call it a file if we do not know */
-                       inode->i_mode |= S_IFREG;
-                       cFYI(1, ("unknown type %d", type));
-               }
+               cifs_unix_info_to_inode(inode, &findData, 0);
 
-               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
-                       inode->i_uid = cifs_sb->mnt_uid;
-               else
-                       inode->i_uid = le64_to_cpu(findData.Uid);
-
-               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
-                       inode->i_gid = cifs_sb->mnt_gid;
-               else
-                       inode->i_gid = le64_to_cpu(findData.Gid);
-
-               inode->i_nlink = le64_to_cpu(findData.Nlinks);
-
-               spin_lock(&inode->i_lock);
-               if (is_size_safe_to_change(cifsInfo, end_of_file)) {
-               /* can not safely change the file size here if the
-                  client is writing to it due to potential races */
-                       i_size_write(inode, end_of_file);
-
-               /* blksize needs to be multiple of two. So safer to default to
-               blksize and blkbits set in superblock so 2**blkbits and blksize
-               will match rather than setting to:
-               (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
-
-               /* This seems incredibly stupid but it turns out that i_blocks
-                  is not related to (i_size / i_blksize), instead 512 byte size
-                  is required for calculating num blocks */
-
-               /* 512 bytes (2**9) is the fake blocksize that must be used */
-               /* for this calculation */
-                       inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
-               }
-               spin_unlock(&inode->i_lock);
 
                if (num_of_bytes < end_of_file)
                        cFYI(1, ("allocation size less than end of file"));
                cFYI(1, ("Size %ld and blocks %llu",
                        (unsigned long) inode->i_size,
                        (unsigned long long)inode->i_blocks));
-               if (S_ISREG(inode->i_mode)) {
-                       cFYI(1, ("File inode"));
-                       inode->i_op = &cifs_file_inode_ops;
-                       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
-                               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                                       inode->i_fop =
-                                               &cifs_file_direct_nobrl_ops;
-                               else
-                                       inode->i_fop = &cifs_file_direct_ops;
-                       } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                               inode->i_fop = &cifs_file_nobrl_ops;
-                       else /* not direct, send byte range locks */
-                               inode->i_fop = &cifs_file_ops;
-
-                       /* check if server can support readpages */
-                       if (pTcon->ses->server->maxBuf <
-                           PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
-                               inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
-                       else
-                               inode->i_data.a_ops = &cifs_addr_ops;
-               } else if (S_ISDIR(inode->i_mode)) {
-                       cFYI(1, ("Directory inode"));
-                       inode->i_op = &cifs_dir_inode_ops;
-                       inode->i_fop = &cifs_dir_ops;
-               } else if (S_ISLNK(inode->i_mode)) {
-                       cFYI(1, ("Symbolic Link inode"));
-                       inode->i_op = &cifs_symlink_inode_ops;
-               /* tmp_inode->i_fop = */ /* do not need to set to anything */
-               } else {
-                       cFYI(1, ("Init special inode"));
-                       init_special_inode(inode, inode->i_mode,
-                                          inode->i_rdev);
-               }
+
+               cifs_set_ops(inode);
        }
        return rc;
 }
@@ -490,9 +517,9 @@ int cifs_get_inode_info(struct inode **pinode,
                        if (decode_sfu_inode(inode,
                                         le64_to_cpu(pfindData->EndOfFile),
                                         search_path,
-                                        cifs_sb, xid)) {
+                                        cifs_sb, xid))
                                cFYI(1, ("Unrecognized sfu inode type"));
-                       }
+
                        cFYI(1, ("sfu mode 0%o", inode->i_mode));
                } else {
                        inode->i_mode |= S_IFREG;
@@ -546,36 +573,7 @@ int cifs_get_inode_info(struct inode **pinode,
                        atomic_set(&cifsInfo->inUse, 1);
                }
 
-               if (S_ISREG(inode->i_mode)) {
-                       cFYI(1, ("File inode"));
-                       inode->i_op = &cifs_file_inode_ops;
-                       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
-                               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                                       inode->i_fop =
-                                               &cifs_file_direct_nobrl_ops;
-                               else
-                                       inode->i_fop = &cifs_file_direct_ops;
-                       } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                               inode->i_fop = &cifs_file_nobrl_ops;
-                       else /* not direct, send byte range locks */
-                               inode->i_fop = &cifs_file_ops;
-
-                       if (pTcon->ses->server->maxBuf <
-                            PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
-                               inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
-                       else
-                               inode->i_data.a_ops = &cifs_addr_ops;
-               } else if (S_ISDIR(inode->i_mode)) {
-                       cFYI(1, ("Directory inode"));
-                       inode->i_op = &cifs_dir_inode_ops;
-                       inode->i_fop = &cifs_dir_ops;
-               } else if (S_ISLNK(inode->i_mode)) {
-                       cFYI(1, ("Symbolic Link inode"));
-                       inode->i_op = &cifs_symlink_inode_ops;
-               } else {
-                       init_special_inode(inode, inode->i_mode,
-                                          inode->i_rdev);
-               }
+               cifs_set_ops(inode);
        }
        kfree(buf);
        return rc;
@@ -792,17 +790,12 @@ psx_del_no_retry:
 }
 
 static void posix_fill_in_inode(struct inode *tmp_inode,
-       FILE_UNIX_BASIC_INFO *pData, int *pobject_type, int isNewInode)
+       FILE_UNIX_BASIC_INFO *pData, int isNewInode)
 {
+       struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
        loff_t local_size;
        struct timespec local_mtime;
 
-       struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
-       struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
-
-       __u32 type = le32_to_cpu(pData->Type);
-       __u64 num_of_bytes = le64_to_cpu(pData->NumOfBytes);
-       __u64 end_of_file = le64_to_cpu(pData->EndOfFile);
        cifsInfo->time = jiffies;
        atomic_inc(&cifsInfo->inUse);
 
@@ -810,115 +803,27 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
        local_mtime = tmp_inode->i_mtime;
        local_size  = tmp_inode->i_size;
 
-       tmp_inode->i_atime =
-           cifs_NTtimeToUnix(le64_to_cpu(pData->LastAccessTime));
-       tmp_inode->i_mtime =
-           cifs_NTtimeToUnix(le64_to_cpu(pData->LastModificationTime));
-       tmp_inode->i_ctime =
-           cifs_NTtimeToUnix(le64_to_cpu(pData->LastStatusChange));
-
-       tmp_inode->i_mode = le64_to_cpu(pData->Permissions);
-       /* since we set the inode type below we need to mask off type
-          to avoid strange results if bits above were corrupt */
-       tmp_inode->i_mode &= ~S_IFMT;
-       if (type == UNIX_FILE) {
-               *pobject_type = DT_REG;
-               tmp_inode->i_mode |= S_IFREG;
-       } else if (type == UNIX_SYMLINK) {
-               *pobject_type = DT_LNK;
-               tmp_inode->i_mode |= S_IFLNK;
-       } else if (type == UNIX_DIR) {
-               *pobject_type = DT_DIR;
-               tmp_inode->i_mode |= S_IFDIR;
-       } else if (type == UNIX_CHARDEV) {
-               *pobject_type = DT_CHR;
-               tmp_inode->i_mode |= S_IFCHR;
-               tmp_inode->i_rdev = MKDEV(le64_to_cpu(pData->DevMajor),
-                               le64_to_cpu(pData->DevMinor) & MINORMASK);
-       } else if (type == UNIX_BLOCKDEV) {
-               *pobject_type = DT_BLK;
-               tmp_inode->i_mode |= S_IFBLK;
-               tmp_inode->i_rdev = MKDEV(le64_to_cpu(pData->DevMajor),
-                               le64_to_cpu(pData->DevMinor) & MINORMASK);
-       } else if (type == UNIX_FIFO) {
-               *pobject_type = DT_FIFO;
-               tmp_inode->i_mode |= S_IFIFO;
-       } else if (type == UNIX_SOCKET) {
-               *pobject_type = DT_SOCK;
-               tmp_inode->i_mode |= S_IFSOCK;
-       } else {
-               /* safest to just call it a file */
-               *pobject_type = DT_REG;
-               tmp_inode->i_mode |= S_IFREG;
-               cFYI(1, ("unknown inode type %d", type));
-       }
-
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("object type: %d", type));
-#endif
-       tmp_inode->i_uid = le64_to_cpu(pData->Uid);
-       tmp_inode->i_gid = le64_to_cpu(pData->Gid);
-       tmp_inode->i_nlink = le64_to_cpu(pData->Nlinks);
-
-       spin_lock(&tmp_inode->i_lock);
-       if (is_size_safe_to_change(cifsInfo, end_of_file)) {
-               /* can not safely change the file size here if the
-               client is writing to it due to potential races */
-               i_size_write(tmp_inode, end_of_file);
+       cifs_unix_info_to_inode(tmp_inode, pData, 1);
+       cifs_set_ops(tmp_inode);
 
-       /* 512 bytes (2**9) is the fake blocksize that must be used */
-       /* for this calculation, not the real blocksize */
-               tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
-       }
-       spin_unlock(&tmp_inode->i_lock);
-
-       if (S_ISREG(tmp_inode->i_mode)) {
-               cFYI(1, ("File inode"));
-               tmp_inode->i_op = &cifs_file_inode_ops;
-
-               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
-                       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                               tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
-                       else
-                               tmp_inode->i_fop = &cifs_file_direct_ops;
+       if (!S_ISREG(tmp_inode->i_mode))
+               return;
 
-               } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                       tmp_inode->i_fop = &cifs_file_nobrl_ops;
-               else
-                       tmp_inode->i_fop = &cifs_file_ops;
-
-               if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
-                  (cifs_sb->tcon->ses->server->maxBuf <
-                       PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
-                       tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
-               else
-                       tmp_inode->i_data.a_ops = &cifs_addr_ops;
-
-               if (isNewInode)
-                       return; /* No sense invalidating pages for new inode
-                                  since we we have not started caching
-                                  readahead file data yet */
+       /*
+        * No sense invalidating pages for new inode
+        * since we we have not started caching
+        * readahead file data yet.
+        */
+       if (isNewInode)
+               return;
 
-               if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
-                       (local_size == tmp_inode->i_size)) {
-                       cFYI(1, ("inode exists but unchanged"));
-               } else {
-                       /* file may have changed on server */
-                       cFYI(1, ("invalidate inode, readdir detected change"));
-                       invalidate_remote_inode(tmp_inode);
-               }
-       } else if (S_ISDIR(tmp_inode->i_mode)) {
-               cFYI(1, ("Directory inode"));
-               tmp_inode->i_op = &cifs_dir_inode_ops;
-               tmp_inode->i_fop = &cifs_dir_ops;
-       } else if (S_ISLNK(tmp_inode->i_mode)) {
-               cFYI(1, ("Symbolic Link inode"));
-               tmp_inode->i_op = &cifs_symlink_inode_ops;
-/* tmp_inode->i_fop = *//* do not need to set to anything */
+       if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
+               (local_size == tmp_inode->i_size)) {
+               cFYI(1, ("inode exists but unchanged"));
        } else {
-               cFYI(1, ("Special inode"));
-               init_special_inode(tmp_inode, tmp_inode->i_mode,
-                                  tmp_inode->i_rdev);
+               /* file may have changed on server */
+               cFYI(1, ("invalidate inode, readdir detected change"));
+               invalidate_remote_inode(tmp_inode);
        }
 }
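
With the duplicated type handling gone, posix_fill_in_inode() keeps only the regular-file tail: compare the mtime and size cached before the update with the fresh values and invalidate cached pages if either changed. A tiny standalone sketch of that staleness test (the struct and numbers are illustrative):

#include <stdio.h>

struct cached_attr {
        long long mtime;        /* seconds */
        long long size;         /* bytes */
};

/* Mirror of the "inode exists but unchanged" check: only identical
 * mtime and size let cached pages survive revalidation. */
static int still_valid(const struct cached_attr *old,
                       const struct cached_attr *cur)
{
        return old->mtime == cur->mtime && old->size == cur->size;
}

int main(void)
{
        struct cached_attr before = { 1000, 4096 };
        struct cached_attr after  = { 1200, 4096 };

        if (still_valid(&before, &after))
                printf("inode exists but unchanged\n");
        else
                printf("invalidate inode, readdir detected change\n");
        return 0;
}
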
 
@@ -968,7 +873,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
                        cFYI(1, ("posix mkdir returned 0x%x", rc));
                        d_drop(direntry);
                } else {
-                       int obj_type;
                        if (pInfo->Type == cpu_to_le32(-1)) {
                                /* no return info, go query for it */
                                kfree(pInfo);
@@ -1004,7 +908,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
                        /* we already checked in POSIXCreate whether
                           frame was long enough */
                        posix_fill_in_inode(direntry->d_inode,
-                                       pInfo, &obj_type, 1 /* NewInode */);
+                                       pInfo, 1 /* NewInode */);
 #ifdef CONFIG_CIFS_DEBUG2
                        cFYI(1, ("instantiated dentry %p %s to inode %p",
                                direntry, direntry->d_name.name, newinode));
@@ -1214,9 +1118,8 @@ int cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
                } /* if we can not get memory just leave rc as EEXIST */
        }
 
-       if (rc) {
+       if (rc)
                cFYI(1, ("rename rc %d", rc));
-       }
 
        if ((rc == -EIO) || (rc == -EEXIST)) {
                int oplock = FALSE;
index d24fe6880a04e63f1ea370ef67ca92f0205f3fb5..5c792df13d62f518224378a0c9a795d9440040a7 100644 (file)
@@ -30,7 +30,7 @@
 
 #define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2)
 
-int cifs_ioctl (struct inode *inode, struct file *filep,
+int cifs_ioctl(struct inode *inode, struct file *filep,
                unsigned int command, unsigned long arg)
 {
        int rc = -ENOTTY; /* strange error - but the precedent */
index a2415c1a14dbbe297326ffd6c00ba2b9c0631361..a725c2609d672f36eab258996163cd476fca354b 100644 (file)
@@ -56,7 +56,7 @@ lshift(__u32 x, int s)
 
 /* this applies md4 to 64 byte chunks */
 static void
-mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D)
+mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D)
 {
        int j;
        __u32 AA, BB, CC, DD;
@@ -137,7 +137,7 @@ mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D)
 }
 
 static void
-copy64(__u32 * M, unsigned char *in)
+copy64(__u32 *M, unsigned char *in)
 {
        int i;
 
index f13f96d42fcf63ebdfe4c76a1914a3d0afd03738..462bbfefd4b671771a52658ec08fa0efd72e9e5c 100644 (file)
@@ -161,7 +161,7 @@ MD5Final(unsigned char digest[16], struct MD5Context *ctx)
 
 /* This is the central step in the MD5 algorithm. */
 #define MD5STEP(f, w, x, y, z, data, s) \
-       ( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
+       (w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x)
 
 /*
  * The core of the MD5 algorithm, this alters an existing MD5 hash to
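
The MD5STEP macro reformatted above is the standard MD5 round step: add the round function and the message word, rotate left, then add the previous word back in. Written out as a function for clarity (a sketch; F below is MD5's first-round auxiliary function in its common optimized form):

#include <stdio.h>
#include <stdint.h>

/* MD5 round-1 auxiliary function, (x & y) | (~x & z) in optimized form */
static uint32_t F(uint32_t x, uint32_t y, uint32_t z)
{
        return z ^ (x & (y ^ z));
}

/* Equivalent of MD5STEP(F, w, x, y, z, data, s):
 * add, rotate left by s bits, then add x back in. */
static uint32_t md5step_f(uint32_t w, uint32_t x, uint32_t y, uint32_t z,
                          uint32_t data, int s)
{
        w += F(x, y, z) + data;
        w = (w << s) | (w >> (32 - s));
        return w + x;
}

int main(void)
{
        /* first step of an MD5 compression: IV words and constant 0xd76aa478 */
        printf("0x%08x\n",
               (unsigned int)md5step_f(0x67452301u, 0xefcdab89u,
                                       0x98badcfeu, 0x10325476u,
                                       0xd76aa478u, 7));
        return 0;
}
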
@@ -302,9 +302,8 @@ hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
        int i;
 
        /* if key is longer than 64 bytes truncate it */
-       if (key_len > 64) {
+       if (key_len > 64)
                key_len = 64;
-       }
 
        /* start out by storing key in pads */
        memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
@@ -359,9 +358,9 @@ hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
 {
        struct HMACMD5Context ctx;
        hmac_md5_init_limK_to_64(key, 16, &ctx);
-       if (data_len != 0) {
+       if (data_len != 0)
                hmac_md5_update(data, data_len, &ctx);
-       }
+
        hmac_md5_final(digest, &ctx);
 }
 #endif
index 15546c2354c5800b7930860304a0d043273db7fb..2a42d9fedbb266751f7f9e96dd4fb4a079d75b7e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/misc.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2007
+ *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -320,9 +320,9 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                if (treeCon->ses) {
                        if (treeCon->ses->capabilities & CAP_UNICODE)
                                buffer->Flags2 |= SMBFLG2_UNICODE;
-                       if (treeCon->ses->capabilities & CAP_STATUS32) {
+                       if (treeCon->ses->capabilities & CAP_STATUS32)
                                buffer->Flags2 |= SMBFLG2_ERR_STATUS;
-                       }
+
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
                        buffer->Mid = GetNextMid(treeCon->ses->server);
@@ -610,7 +610,8 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
 
        buffer = (unsigned char *) smb_buf;
        for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
-               if (i % 8 == 0) {       /* have reached the beginning of line */
+               if (i % 8 == 0) {
+                       /* have reached the beginning of line */
                        printk(KERN_DEBUG "| ");
                        j = 0;
                }
@@ -621,7 +622,8 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
                else
                        debug_line[1 + (2 * j)] = '_';
 
-               if (i % 8 == 7) { /* reached end of line, time to print ascii */
+               if (i % 8 == 7) {
+                       /* reached end of line, time to print ascii */
                        debug_line[16] = 0;
                        printk(" | %s\n", debug_line);
                }
@@ -631,7 +633,7 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
                debug_line[2 * j] = ' ';
                debug_line[1 + (2 * j)] = ' ';
        }
-       printk( " | %s\n", debug_line);
+       printk(" | %s\n", debug_line);
        return;
 }
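
dump_smb(), lightly cleaned up above, prints a buffer eight bytes per line with hex on the left and an ASCII column (underscores for non-printable bytes) on the right. A simplified user-space sketch of the same layout:

#include <stdio.h>
#include <ctype.h>

/* Hex/ASCII dump, eight bytes per line, '_' for non-printable bytes. */
static void dump_buf(const unsigned char *buf, int len)
{
        char ascii[9];
        int i, j;

        for (i = 0, j = 0; i < len; i++, j++) {
                if (i % 8 == 0) {               /* beginning of a line */
                        printf("| ");
                        j = 0;
                }
                printf("%02x ", buf[i]);
                ascii[j] = isprint(buf[i]) ? buf[i] : '_';
                if (i % 8 == 7) {               /* end of line: print ASCII */
                        ascii[8] = '\0';
                        printf(" | %s\n", ascii);
                }
        }
        if (i % 8 != 0) {                       /* pad the final short line */
                for (; i % 8 != 0; i++, j++) {
                        printf("   ");
                        ascii[j] = ' ';
                }
                ascii[8] = '\0';
                printf(" | %s\n", ascii);
        }
}

int main(void)
{
        dump_buf((const unsigned char *)"\xffSMBs example", 13);
        return 0;
}
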
 
index 646e1f06941b5c8062e9c8e590c2e2790736ab7f..3b5a5ce882b63083f492c34a3e1b007fe54be87f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/netmisc.c
  *
- *   Copyright (c) International Business Machines  Corp., 2002
+ *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   Error mapping routines from Samba libsmb/errormap.c
@@ -150,9 +150,7 @@ static int canonicalize_unc(char *cp)
                if (cp[i] == '\\')
                        break;
                if (cp[i] == '/') {
-#ifdef CONFIG_CIFS_DEBUG2
-                       cFYI(1, ("change slash to backslash in malformed UNC"));
-#endif
+                       cFYI(DBG2, ("change slash to \\ in malformed UNC"));
                        cp[i] = '\\';
                        return 1;
                }
@@ -178,9 +176,7 @@ cifs_inet_pton(int address_family, char *cp, void *dst)
        } else if (address_family == AF_INET6) {
                ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
        }
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("address conversion returned %d for %s", ret, cp));
-#endif
+       cFYI(DBG2, ("address conversion returned %d for %s", ret, cp));
        if (ret > 0)
                ret = 1;
        return ret;
@@ -253,7 +249,8 @@ static const struct {
        ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, {
        ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, {
        ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, {
-       ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {   /* mapping changed since shell does lookup on * and expects file not found */
+        /* mapping changed since shell does lookup on * expects FileNotFound */
+       ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {
        ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, {
        ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, {
        ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, {
@@ -820,7 +817,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
        /* old style errors */
 
        /* DOS class smb error codes - map DOS */
-       if (smberrclass == ERRDOS) {  /* 1 byte field no need to byte reverse */
+       if (smberrclass == ERRDOS) {
+               /* 1 byte field no need to byte reverse */
                for (i = 0;
                     i <
                     sizeof(mapping_table_ERRDOS) /
@@ -834,7 +832,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
                        }
                        /* else try next error mapping one to see if match */
                }
-       } else if (smberrclass == ERRSRV) {   /* server class of error codes */
+       } else if (smberrclass == ERRSRV) {
+               /* server class of error codes */
                for (i = 0;
                     i <
                     sizeof(mapping_table_ERRSRV) /
@@ -922,8 +921,8 @@ struct timespec cnvrtDosUnixTm(__u16 date, __u16 time)
 {
        struct timespec ts;
        int sec, min, days, month, year;
-       SMB_TIME * st = (SMB_TIME *)&time;
-       SMB_DATE * sd = (SMB_DATE *)&date;
+       SMB_TIME *st = (SMB_TIME *)&time;
+       SMB_DATE *sd = (SMB_DATE *)&date;
 
        cFYI(1, ("date %d time %d", date, time));
 
index 0f22def4bdff47e30300dec9bcad5be12cd193d6..32b445edc88282d2ce55ab1bd5a8f7bc2b9d7b3d 100644 (file)
@@ -3,7 +3,7 @@
  *
  *   Directory search handling
  *
- *   Copyright (C) International Business Machines  Corp., 2004, 2007
+ *   Copyright (C) International Business Machines  Corp., 2004, 2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -42,17 +42,18 @@ static void dump_cifs_file_struct(struct file *file, char *label)
                        cFYI(1, ("empty cifs private file data"));
                        return;
                }
-               if (cf->invalidHandle) {
+               if (cf->invalidHandle)
                        cFYI(1, ("invalid handle"));
-               }
-               if (cf->srch_inf.endOfSearch) {
+               if (cf->srch_inf.endOfSearch)
                        cFYI(1, ("end of search"));
-               }
-               if (cf->srch_inf.emptyDir) {
+               if (cf->srch_inf.emptyDir)
                        cFYI(1, ("empty dir"));
-               }
        }
 }
+#else
+static inline void dump_cifs_file_struct(struct file *file, char *label)
+{
+}
 #endif /* DEBUG2 */
 
 /* Returns one if new inode created (which therefore needs to be hashed) */
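
The #else branch added above gives dump_cifs_file_struct() an empty static inline stub when CONFIG_CIFS_DEBUG2 is not set, so call sites stay unconditional and the compiler drops the call. The pattern in isolation (with a made-up DEBUG_DUMPS switch standing in for the config option):

#include <stdio.h>

#define DEBUG_DUMPS 1   /* flip to 0 to compile the dump away entirely */

#if DEBUG_DUMPS
static void dump_state(const char *label, int value)
{
        printf("%s: %d\n", label, value);
}
#else
/* empty inline stub: callers need no #ifdef of their own */
static inline void dump_state(const char *label, int value)
{
}
#endif

int main(void)
{
        dump_state("retry count", 3);
        return 0;
}
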
@@ -150,7 +151,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
                      cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
        } else { /* legacy, OS2 and DOS style */
 /*             struct timespec ts;*/
-               FIND_FILE_STANDARD_INFO * pfindData =
+               FIND_FILE_STANDARD_INFO *pfindData =
                        (FIND_FILE_STANDARD_INFO *)buf;
 
                tmp_inode->i_mtime = cnvrtDosUnixTm(
@@ -198,9 +199,8 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
        if (attr & ATTR_DIRECTORY) {
                *pobject_type = DT_DIR;
                /* override default perms since we do not lock dirs */
-               if (atomic_read(&cifsInfo->inUse) == 0) {
+               if (atomic_read(&cifsInfo->inUse) == 0)
                        tmp_inode->i_mode = cifs_sb->mnt_dir_mode;
-               }
                tmp_inode->i_mode |= S_IFDIR;
        } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
                   (attr & ATTR_SYSTEM)) {
@@ -231,9 +231,8 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
        } /* could add code here - to validate if device or weird share type? */
 
        /* can not fill in nlink here as in qpathinfo version and Unx search */
-       if (atomic_read(&cifsInfo->inUse) == 0) {
+       if (atomic_read(&cifsInfo->inUse) == 0)
                atomic_set(&cifsInfo->inUse, 1);
-       }
 
        spin_lock(&tmp_inode->i_lock);
        if (is_size_safe_to_change(cifsInfo, end_of_file)) {
@@ -461,9 +460,8 @@ static int initiate_cifs_search(const int xid, struct file *file)
 
        full_path = build_path_from_dentry(file->f_path.dentry);
 
-       if (full_path == NULL) {
+       if (full_path == NULL)
                return -ENOMEM;
-       }
 
        cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos));
 
@@ -471,9 +469,9 @@ ffirst_retry:
        /* test for Unix extensions */
        /* but now check for them on the share/mount not on the SMB session */
 /*     if (pTcon->ses->capabilities & CAP_UNIX) { */
-       if (pTcon->unix_ext) {
+       if (pTcon->unix_ext)
                cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
-       else if ((pTcon->ses->capabilities &
+       else if ((pTcon->ses->capabilities &
                        (CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
                cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
        } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
@@ -514,10 +512,10 @@ static int cifs_unicode_bytelen(char *str)
 static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
 {
        char *new_entry;
-       FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
+       FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
 
        if (level == SMB_FIND_FILE_INFO_STANDARD) {
-               FIND_FILE_STANDARD_INFO * pfData;
+               FIND_FILE_STANDARD_INFO *pfData;
                pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
 
                new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
@@ -553,7 +551,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
        int len = 0;
 
        if (cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
-               FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+               FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                if (cfile->srch_inf.unicode) {
                        len = cifs_unicode_bytelen(filename);
@@ -562,30 +560,30 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
                        len = strnlen(filename, 5);
                }
        } else if (cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
-               FILE_DIRECTORY_INFO * pFindData =
+               FILE_DIRECTORY_INFO *pFindData =
                        (FILE_DIRECTORY_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                len = le32_to_cpu(pFindData->FileNameLength);
        } else if (cfile->srch_inf.info_level ==
                        SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
-               FILE_FULL_DIRECTORY_INFO * pFindData =
+               FILE_FULL_DIRECTORY_INFO *pFindData =
                        (FILE_FULL_DIRECTORY_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                len = le32_to_cpu(pFindData->FileNameLength);
        } else if (cfile->srch_inf.info_level ==
                        SMB_FIND_FILE_ID_FULL_DIR_INFO) {
-               SEARCH_ID_FULL_DIR_INFO * pFindData =
+               SEARCH_ID_FULL_DIR_INFO *pFindData =
                        (SEARCH_ID_FULL_DIR_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                len = le32_to_cpu(pFindData->FileNameLength);
        } else if (cfile->srch_inf.info_level ==
                        SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
-               FILE_BOTH_DIRECTORY_INFO * pFindData =
+               FILE_BOTH_DIRECTORY_INFO *pFindData =
                        (FILE_BOTH_DIRECTORY_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                len = le32_to_cpu(pFindData->FileNameLength);
        } else if (cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) {
-               FIND_FILE_STANDARD_INFO * pFindData =
+               FIND_FILE_STANDARD_INFO *pFindData =
                        (FIND_FILE_STANDARD_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                len = pFindData->FileNameLength;
@@ -666,9 +664,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
        . and .. for the root of a drive and for those we need
        to start two entries earlier */
 
-#ifdef CONFIG_CIFS_DEBUG2
        dump_cifs_file_struct(file, "In fce ");
-#endif
        if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
             is_dir_changed(file)) ||
           (index_to_find < first_entry_in_buffer)) {
@@ -718,7 +714,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
                pos_in_buf = index_to_find - first_entry_in_buffer;
                cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
 
-               for (i=0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
+               for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
                        /* go entry by entry figuring out which is first */
                        current_entry = nxt_dir_entry(current_entry, end_of_smb,
                                                cifsFile->srch_inf.info_level);
@@ -793,7 +789,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
                filename = &pFindData->FileName[0];
                len = le32_to_cpu(pFindData->FileNameLength);
        } else if (level == SMB_FIND_FILE_INFO_STANDARD) {
-               FIND_FILE_STANDARD_INFO * pFindData =
+               FIND_FILE_STANDARD_INFO *pFindData =
                        (FIND_FILE_STANDARD_INFO *)current_entry;
                filename = &pFindData->FileName[0];
                /* one byte length, no name conversion */
@@ -928,7 +924,7 @@ static int cifs_save_resume_key(const char *current_entry,
        level = cifsFile->srch_inf.info_level;
 
        if (level == SMB_FIND_FILE_UNIX) {
-               FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+               FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry;
 
                filename = &pFindData->FileName[0];
                if (cifsFile->srch_inf.unicode) {
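
The readdir.c hunk above drops the #ifdef CONFIG_CIFS_DEBUG2 guard at the call site by pairing the real dump_cifs_file_struct() with an empty static inline stub in the #else branch, so the call compiles away in non-debug builds. A minimal standalone sketch of that pattern, using made-up names rather than the CIFS structures:

#include <stdio.h>

struct search_state { int invalid_handle; int end_of_search; };

#ifdef DEBUG2
static void dump_search_state(const struct search_state *s, const char *label)
{
	printf("%s: invalid=%d end-of-search=%d\n",
	       label, s->invalid_handle, s->end_of_search);
}
#else
/* empty stub: callers stay unconditional, the call compiles away */
static inline void dump_search_state(const struct search_state *s,
				     const char *label)
{
	(void)s;
	(void)label;
}
#endif

int main(void)
{
	struct search_state st = { .invalid_handle = 0, .end_of_search = 1 };

	dump_search_state(&st, "In fce");	/* no #ifdef needed here */
	return 0;
}
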
index d2153abcba6d7983678e8e08df39eee7bf4083a3..ed150efbe27c93fbec1834767262453c47619e14 100644 (file)
@@ -417,10 +417,6 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
 
                calc_lanman_hash(ses, lnm_session_key);
                ses->flags |= CIFS_SES_LANMAN;
-/* #ifdef CONFIG_CIFS_DEBUG2
-               cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
-                       CIFS_SESS_KEY_SIZE);
-#endif */
                memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
                bcc_ptr += CIFS_SESS_KEY_SIZE;
 
index cfa6d21fb4e871c4e09a94be0271d0eaf490a531..04943c976f98d1df3810d42d2a362cc940c27131 100644 (file)
@@ -114,42 +114,42 @@ static uchar sbox[8][4][16] = {
        {{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7},
         {0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8},
         {4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0},
-        {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13}},
+        {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13} },
 
        {{15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10},
         {3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5},
         {0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15},
-        {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9}},
+        {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9} },
 
        {{10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8},
         {13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1},
         {13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7},
-        {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12}},
+        {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12} },
 
        {{7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15},
         {13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9},
         {10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4},
-        {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14}},
+        {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14} },
 
        {{2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9},
         {14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6},
         {4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14},
-        {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3}},
+        {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3} },
 
        {{12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11},
         {10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8},
         {9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6},
-        {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13}},
+        {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13} },
 
        {{4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1},
         {13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6},
         {1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2},
-        {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12}},
+        {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12} },
 
        {{13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7},
         {1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2},
         {7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8},
-        {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11}}
+        {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11} }
 };
 
 static void
@@ -313,9 +313,8 @@ str_to_key(unsigned char *str, unsigned char *key)
        key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
        key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
        key[7] = str[6] & 0x7F;
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 8; i++)
                key[i] = (key[i] << 1);
-       }
 }
 
 static void
@@ -344,9 +343,8 @@ smbhash(unsigned char *out, unsigned char *in, unsigned char *key, int forw)
 
        dohash(outb, inb, keyb, forw);
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 8; i++)
                out[i] = 0;
-       }
 
        for (i = 0; i < 64; i++) {
                if (outb[i])
index 50b623ad93205700822888e00c1e684186aac05e..3612d6c0a0bbc1a4c9f64c2ce0f674517ce86d72 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/transport.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2007
+ *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *   Jeremy Allison (jra@samba.org) 2006.
  *
@@ -358,9 +358,9 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
        } else if (ses->status != CifsGood) {
                /* check if SMB session is bad because we are setting it up */
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
-                       (in_buf->Command != SMB_COM_NEGOTIATE)) {
+                       (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
-               /* else ok - we are setting up session */
+               /* else ok - we are setting up session */
        }
        *ppmidQ = AllocMidQEntry(in_buf, ses);
        if (*ppmidQ == NULL)
@@ -437,9 +437,8 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
        iov[0].iov_len = in_buf->smb_buf_length + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
-#ifdef CONFIG_CIFS_DEBUG2
-       cFYI(1, ("SendRcvNoR flags %d rc %d", flags, rc));
-#endif
+       cFYI(DBG2, ("SendRcvNoRsp flags %d rc %d", flags, rc));
+
        return rc;
 }
 
index 54e8ef96cb7930c07734aa9f0b808eaa2e215953..8cd6a445b017ae62c2028b083dce8593d5c894fc 100644 (file)
@@ -139,9 +139,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
        } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
                        goto set_ea_exit;
-               if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
+               if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
                        cFYI(1, ("attempt to set cifs inode metadata"));
-               }
+
                ea_name += 5; /* skip past user. prefix */
                rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
                        (__u16)value_size, cifs_sb->local_nls,
@@ -262,7 +262,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
                                cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
 #ifdef CONFIG_CIFS_EXPERIMENTAL
-               else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+               else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
                        __u16 fid;
                        int oplock = FALSE;
                        struct cifs_ntsd *pacl = NULL;
@@ -303,11 +303,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
        } else if (strncmp(ea_name,
                  CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
                cFYI(1, ("Security xattr namespace not supported yet"));
-       } else {
+       } else
                cFYI(1,
                    ("illegal xattr request %s (only user namespace supported)",
                        ea_name));
-       }
 
        /* We could add an additional check for streams ie
            if proc/fs/cifs/streamstoxattr is set then
index d26e2826ba5b8242ec3a4ad1bcf3b849d64b5a8a..e9602d85c11d0220d5cbccf7817c25b10a6d068e 100644 (file)
 
 #define DEBUGFS_MAGIC  0x64626720
 
-/* declared over in file.c */
-extern struct file_operations debugfs_file_operations;
-extern struct inode_operations debugfs_link_operations;
-
 static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
 
index dc74b186145d74db8a56470d5ef2e63ea5c0a3c0..6df1debdccce14aabbf0e63193b66ec0c8366d3e 100644 (file)
@@ -263,52 +263,102 @@ out:
        return 0;
 }
 
-/* This function must zero any hole we create */
+/**
+ * ecryptfs_prepare_write
+ * @file: The eCryptfs file
+ * @page: The eCryptfs page
+ * @from: The start byte from which we will write
+ * @to: The end byte to which we will write
+ *
+ * This function must zero any hole we create
+ *
+ * Returns zero on success; non-zero otherwise
+ */
 static int ecryptfs_prepare_write(struct file *file, struct page *page,
                                  unsigned from, unsigned to)
 {
-       int rc = 0;
        loff_t prev_page_end_size;
+       int rc = 0;
 
        if (!PageUptodate(page)) {
-               rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
-                                                     PAGE_CACHE_SIZE,
-                                                     page->mapping->host);
-               if (rc) {
-                       printk(KERN_ERR "%s: Error attemping to read lower "
-                              "page segment; rc = [%d]\n", __FUNCTION__, rc);
-                       ClearPageUptodate(page);
-                       goto out;
-               } else
+               struct ecryptfs_crypt_stat *crypt_stat =
+                       &ecryptfs_inode_to_private(
+                               file->f_path.dentry->d_inode)->crypt_stat;
+
+               if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
+                   || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
+                       rc = ecryptfs_read_lower_page_segment(
+                               page, page->index, 0, PAGE_CACHE_SIZE,
+                               page->mapping->host);
+                       if (rc) {
+                               printk(KERN_ERR "%s: Error attempting to read "
+                                      "lower page segment; rc = [%d]\n",
+                                      __FUNCTION__, rc);
+                               ClearPageUptodate(page);
+                               goto out;
+                       } else
+                               SetPageUptodate(page);
+               } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
+                       if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
+                               rc = ecryptfs_copy_up_encrypted_with_header(
+                                       page, crypt_stat);
+                               if (rc) {
+                                       printk(KERN_ERR "%s: Error attempting "
+                                              "to copy the encrypted content "
+                                              "from the lower file whilst "
+                                              "inserting the metadata from "
+                                              "the xattr into the header; rc "
+                                              "= [%d]\n", __FUNCTION__, rc);
+                                       ClearPageUptodate(page);
+                                       goto out;
+                               }
+                               SetPageUptodate(page);
+                       } else {
+                               rc = ecryptfs_read_lower_page_segment(
+                                       page, page->index, 0, PAGE_CACHE_SIZE,
+                                       page->mapping->host);
+                               if (rc) {
+                                       printk(KERN_ERR "%s: Error reading "
+                                              "page; rc = [%d]\n",
+                                              __FUNCTION__, rc);
+                                       ClearPageUptodate(page);
+                                       goto out;
+                               }
+                               SetPageUptodate(page);
+                       }
+               } else {
+                       rc = ecryptfs_decrypt_page(page);
+                       if (rc) {
+                               printk(KERN_ERR "%s: Error decrypting page "
+                                      "at index [%ld]; rc = [%d]\n",
+                                      __FUNCTION__, page->index, rc);
+                               ClearPageUptodate(page);
+                               goto out;
+                       }
                        SetPageUptodate(page);
+               }
        }
-
        prev_page_end_size = ((loff_t)page->index << PAGE_CACHE_SHIFT);
-
-       /*
-        * If creating a page or more of holes, zero them out via truncate.
-        * Note, this will increase i_size.
-        */
+       /* If creating a page or more of holes, zero them out via truncate.
+        * Note, this will increase i_size. */
        if (page->index != 0) {
                if (prev_page_end_size > i_size_read(page->mapping->host)) {
                        rc = ecryptfs_truncate(file->f_path.dentry,
                                               prev_page_end_size);
                        if (rc) {
-                               printk(KERN_ERR "Error on attempt to "
+                               printk(KERN_ERR "%s: Error on attempt to "
                                       "truncate to (higher) offset [%lld];"
-                                      " rc = [%d]\n", prev_page_end_size, rc);
+                                      " rc = [%d]\n", __FUNCTION__,
+                                      prev_page_end_size, rc);
                                goto out;
                        }
                }
        }
-       /*
-        * Writing to a new page, and creating a small hole from start of page?
-        * Zero it out.
-        */
-       if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
-           (from != 0)) {
+       /* Writing to a new page, and creating a small hole from start
+        * of page?  Zero it out. */
+       if ((i_size_read(page->mapping->host) == prev_page_end_size)
+           && (from != 0))
                zero_user(page, 0, PAGE_CACHE_SIZE);
-       }
 out:
        return rc;
 }
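
The reworked ecryptfs_prepare_write() above keeps the rule stated in its new kernel-doc: any hole the write creates must be zeroed, either by truncating up past whole-page holes (which extends i_size) or by zeroing a fresh page written right at the current end of file. A small standalone sketch of that decision logic, assuming 4 KiB pages and a hypothetical classify() helper:

#include <stdio.h>

#define PAGE_SHIFT 12			/* illustrative 4 KiB pages */

/* Rough shape of the hole handling in ecryptfs_prepare_write(): given the
 * page index, the write start offset and the current i_size, decide how a
 * hole must be zeroed. */
static void classify(unsigned long index, unsigned long from, long long i_size)
{
	long long prev_page_end = (long long)index << PAGE_SHIFT;

	if (index != 0 && prev_page_end > i_size)
		printf("truncate up to %lld (zeroes the whole-page hole)\n",
		       prev_page_end);
	else if (i_size == prev_page_end && from != 0)
		printf("zero the fresh page before writing at offset %lu\n",
		       from);
	else
		printf("no hole to zero\n");
}

int main(void)
{
	classify(3, 100, 4096);		/* page 3 while i_size is one page */
	classify(1, 512, 4096);		/* new page right at EOF */
	classify(0, 0, 0);		/* start of an empty file */
	return 0;
}
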
index a44b142fb4607baee2d832b1056103b5d157e5a7..54a0a557b6781ecf379015bbd60dc6f3fd37c176 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -173,8 +173,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                return NULL;
 
        if (write) {
-               struct rlimit *rlim = current->signal->rlim;
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+               struct rlimit *rlim;
+
+               /*
+                * We've historically supported up to 32 pages (ARG_MAX)
+                * of argument strings even with small stacks
+                */
+               if (size <= ARG_MAX)
+                       return page;
 
                /*
                 * Limit to 1/4-th the stack size for the argv+env strings.
@@ -183,6 +190,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                 *  - the program will have a reasonable amount of stack left
                 *    to work from.
                 */
+               rlim = current->signal->rlim;
                if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
                        put_page(page);
                        return NULL;
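
The get_arg_page() hunk above reorders the size checks: argument strings up to the historical 32-page ARG_MAX are always accepted, and only larger argv+env areas are capped at a quarter of RLIMIT_STACK. A standalone sketch of the same decision, with illustrative constants (4 KiB pages and an 8 MiB stack limit in the demo):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ARG_MAX   (32UL * PAGE_SIZE)	/* historical 32-page limit */

/* Mirror of the reordered check in get_arg_page(): always allow up to
 * ARG_MAX, otherwise cap the argv+env area at a quarter of the stack
 * rlimit so exec cannot eat the whole stack. */
static bool arg_size_ok(unsigned long size, unsigned long stack_rlim)
{
	if (size <= ARG_MAX)
		return true;
	return size <= stack_rlim / 4;
}

int main(void)
{
	unsigned long stack_rlim = 8UL << 20;	/* 8 MiB for the demo */

	printf("%d\n", arg_size_ok(128UL << 10, stack_rlim)); /* 1: within ARG_MAX */
	printf("%d\n", arg_size_ok(3UL << 20, stack_rlim));   /* 0: over rlimit/4 */
	return 0;
}
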
index 18769cc3237703e3426012378b31ca2e8c6024aa..ad536066408229da9955e41e27a0f5e72756cccd 100644 (file)
@@ -806,8 +806,8 @@ static match_table_t tokens = {
        {Opt_quota, "quota"},
        {Opt_usrquota, "usrquota"},
        {Opt_barrier, "barrier=%u"},
-       {Opt_err, NULL},
        {Opt_resize, "resize"},
+       {Opt_err, NULL},
 };
 
 static ext3_fsblk_t get_sb_block(void **data)
index 33888bb58144fc1660d4106dbcbd689b560d2aa0..2c23bade9aa676451972905449e8d8cc0bbcfec3 100644 (file)
@@ -46,7 +46,7 @@ const struct file_operations ext4_dir_operations = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
 #endif
-       .fsync          = ext4_sync_file,       /* BKL held */
+       .fsync          = ext4_sync_file,
        .release        = ext4_release_dir,
 };
 
index bc7081f1fbe80dd5add381c5746b1bbc74e4883a..9ae6e67090cdfad1bd52a7e7169ba725dea19c82 100644 (file)
@@ -148,6 +148,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_fsblk_t bg_start;
+       ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        int depth;
 
@@ -169,8 +170,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
-       colour = (current->pid % 16) *
+       last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
+
+       if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
+               colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+       else
+               colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour + block;
 }
 
@@ -349,7 +355,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
 #define ext4_ext_show_leaf(inode,path)
 #endif
 
-static void ext4_ext_drop_refs(struct ext4_ext_path *path)
+void ext4_ext_drop_refs(struct ext4_ext_path *path)
 {
        int depth = path->p_depth;
        int i;
@@ -2168,6 +2174,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        newblock = iblock - ee_block + ext_pblock(ex);
        ex2 = ex;
 
+       err = ext4_ext_get_access(handle, inode, path + depth);
+       if (err)
+               goto out;
+
        /* ex1: ee_block to iblock - 1 : uninitialized */
        if (iblock > ee_block) {
                ex1 = ex;
@@ -2200,16 +2210,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                newdepth = ext_depth(inode);
                if (newdepth != depth) {
                        depth = newdepth;
-                       path = ext4_ext_find_extent(inode, iblock, NULL);
+                       ext4_ext_drop_refs(path);
+                       path = ext4_ext_find_extent(inode, iblock, path);
                        if (IS_ERR(path)) {
                                err = PTR_ERR(path);
-                               path = NULL;
                                goto out;
                        }
                        eh = path[depth].p_hdr;
                        ex = path[depth].p_ext;
                        if (ex2 != &newex)
                                ex2 = ex;
+
+                       err = ext4_ext_get_access(handle, inode, path + depth);
+                       if (err)
+                               goto out;
                }
                allocated = max_blocks;
        }
@@ -2230,9 +2244,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        ex2->ee_len = cpu_to_le16(allocated);
        if (ex2 != ex)
                goto insert;
-       err = ext4_ext_get_access(handle, inode, path + depth);
-       if (err)
-               goto out;
        /*
         * New (initialized) extent starts from the first block
         * in the current extent. i.e., ex2 == ex
@@ -2276,9 +2287,22 @@ out:
 }
 
 /*
+ * Block allocation/map/preallocation routine for extents based files
+ *
+ *
  * Need to be called with
  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
+ *
+ * return > 0, number of blocks already mapped/allocated
+ *          if create == 0 and these are pre-allocated blocks
+ *             buffer head is unmapped
+ *          otherwise blocks are mapped
+ *
+ * return = 0, if plain look up failed (blocks have not been allocated)
+ *          buffer head is unmapped
+ *
+ * return < 0, error case.
  */
 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                        ext4_lblk_t iblock,
@@ -2623,7 +2647,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
         * modify 1 super block, 1 block bitmap and 1 group descriptor.
         */
        credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
-       down_write((&EXT4_I(inode)->i_data_sem));
+       mutex_lock(&inode->i_mutex);
 retry:
        while (ret >= 0 && ret < max_blocks) {
                block = block + ret;
@@ -2634,16 +2658,17 @@ retry:
                        break;
                }
 
-               ret = ext4_ext_get_blocks(handle, inode, block,
+               ret = ext4_get_blocks_wrap(handle, inode, block,
                                          max_blocks, &map_bh,
                                          EXT4_CREATE_UNINITIALIZED_EXT, 0);
-               WARN_ON(ret <= 0);
                if (ret <= 0) {
-                       ext4_error(inode->i_sb, "ext4_fallocate",
-                                   "ext4_ext_get_blocks returned error: "
-                                   "inode#%lu, block=%u, max_blocks=%lu",
+#ifdef EXT4FS_DEBUG
+                       WARN_ON(ret <= 0);
+                       printk(KERN_ERR "%s: ext4_ext_get_blocks "
+                                   "returned error inode#%lu, block=%u, "
+                                   "max_blocks=%lu", __func__,
                                    inode->i_ino, block, max_blocks);
-                       ret = -EIO;
+#endif
                        ext4_mark_inode_dirty(handle, inode);
                        ret2 = ext4_journal_stop(handle);
                        break;
@@ -2680,7 +2705,6 @@ retry:
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
 
-       up_write((&EXT4_I(inode)->i_data_sem));
        /*
         * Time to update the file size.
         * Update only when preallocation was requested beyond the file size.
@@ -2692,21 +2716,18 @@ retry:
                         * if no error, we assume preallocation succeeded
                         * completely
                         */
-                       mutex_lock(&inode->i_mutex);
                        i_size_write(inode, offset + len);
                        EXT4_I(inode)->i_disksize = i_size_read(inode);
-                       mutex_unlock(&inode->i_mutex);
                } else if (ret < 0 && nblocks) {
                        /* Handle partial allocation scenario */
                        loff_t newsize;
 
-                       mutex_lock(&inode->i_mutex);
                        newsize  = (nblocks << blkbits) + i_size_read(inode);
                        i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
                        EXT4_I(inode)->i_disksize = i_size_read(inode);
-                       mutex_unlock(&inode->i_mutex);
                }
        }
 
+       mutex_unlock(&inode->i_mutex);
        return ret > 0 ? ret2 : ret;
 }
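
The goal-block hunks in extents.c above (and the matching one in ext4_find_near() further down) guard the per-PID colour offset so that, in a short last block group, the offset is scaled by the blocks actually remaining instead of a full group. A standalone sketch of the arithmetic with made-up group sizes:

#include <stdio.h>

/* Illustration of the guarded "colour" spreading added to
 * ext4_ext_find_goal()/ext4_find_near(): keep the per-PID offset inside
 * the (possibly short) last block group. */
static unsigned long colour(unsigned long pid, unsigned long bg_start,
			    unsigned long blocks_per_group,
			    unsigned long last_block)
{
	if (bg_start + blocks_per_group <= last_block)
		return (pid % 16) * (blocks_per_group / 16);
	return (pid % 16) * ((last_block - bg_start) / 16);
}

int main(void)
{
	/* full group: offsets step in units of 32768 / 16 = 2048 blocks */
	printf("%lu\n", colour(7, 0, 32768, 1000000));		/* 14336 */
	/* short last group of 5000 blocks: steps shrink to 312 blocks */
	printf("%lu\n", colour(7, 995000, 32768, 1000000));	/* 2184 */
	return 0;
}
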
index da18a74b966a91da09ae9710ba8976a111f3946f..8036b9b5376bd8f7ac50e1dffe1da5aceebbc487 100644 (file)
@@ -702,7 +702,12 @@ got:
        ei->i_dir_start_lookup = 0;
        ei->i_disksize = 0;
 
-       ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL;
+       /*
+        * Don't inherit extent flag from directory. We set extent flag on
+        * newly created directory and file only if -o extent mount option is
+        * specified
+        */
+       ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
        if (S_ISLNK(mode))
                ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
        /* dirsync only applies to directories */
@@ -745,12 +750,15 @@ got:
                goto fail_free_drop;
        }
        if (test_opt(sb, EXTENTS)) {
-               EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
-               ext4_ext_tree_init(handle, inode);
-               err = ext4_update_incompat_feature(handle, sb,
-                                               EXT4_FEATURE_INCOMPAT_EXTENTS);
-               if (err)
-                       goto fail;
+               /* set extent flag only for directory and file */
+               if (S_ISDIR(mode) || S_ISREG(mode)) {
+                       EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
+                       ext4_ext_tree_init(handle, inode);
+                       err = ext4_update_incompat_feature(handle, sb,
+                                       EXT4_FEATURE_INCOMPAT_EXTENTS);
+                       if (err)
+                               goto fail;
+               }
        }
 
        ext4_debug("allocating inode %lu\n", inode->i_ino);
index 7dd9b50d5ebc7d10f2a0dc2f7e09560ee7784db8..945cbf6cb1fc6d686c5ef89ba2663ce753dab160 100644 (file)
@@ -403,6 +403,7 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
        __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
+       ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
 
        /* Try to find previous block */
@@ -420,8 +421,13 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
         * into the same cylinder group then.
         */
        bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
-       colour = (current->pid % 16) *
+       last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
+
+       if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
+               colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+       else
+               colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
 }
 
@@ -768,7 +774,6 @@ err_out:
  *
  * `handle' can be NULL if create == 0.
  *
- * The BKL may not be held on entry here.  Be sure to take it early.
  * return > 0, # of blocks mapped or allocated.
  * return = 0, if plain lookup failed.
  * return < 0, error case.
@@ -903,11 +908,38 @@ out:
  */
 #define DIO_CREDITS 25
 
+
+/*
+ *
+ *
+ * ext4 get_block() wrapper function
+ * It will do a look up first, and returns if the blocks already mapped.
+ * Otherwise it takes the write lock of the i_data_sem and allocate blocks
+ * and store the allocated blocks in the result buffer head and mark it
+ * mapped.
+ *
+ * If the file is extent based, it will call ext4_ext_get_blocks();
+ * otherwise, it calls ext4_get_blocks_handle() to handle indirect-mapping
+ * based files.
+ *
+ * On success, it returns the number of blocks being mapped or allocated.
+ * If create == 0 and the blocks are pre-allocated and uninitialized,
+ * the result buffer head is unmapped. If create == 1, it will make sure
+ * the buffer head is mapped.
+ *
+ * It returns 0 if plain look up failed (blocks have not been allocated); in
+ * that case, the buffer head is unmapped.
+ *
+ * It returns the error in case of allocation failure.
+ */
 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
                        unsigned long max_blocks, struct buffer_head *bh,
                        int create, int extend_disksize)
 {
        int retval;
+
+       clear_buffer_mapped(bh);
+
        /*
         * Try to see if we can get  the block without requesting
         * for new file system block.
@@ -921,12 +953,26 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
                                inode, block, max_blocks, bh, 0, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));
-       if (!create || (retval > 0))
+
+       /* If it is only a block(s) look up */
+       if (!create)
+               return retval;
+
+       /*
+        * Return if the blocks have already been allocated.
+        *
+        * Note that if blocks have been preallocated,
+        * ext4_ext_get_block() returns with create = 0
+        * and the buffer head unmapped.
+        */
+       if (retval > 0 && buffer_mapped(bh))
                return retval;
 
        /*
-        * We need to allocate new blocks which will result
-        * in i_data update
+        * New block allocation and/or writing to an uninitialized extent
+        * will possibly result in updating i_data, so we take
+        * the write lock of i_data_sem, and call get_blocks()
+        * with create == 1 flag.
         */
        down_write((&EXT4_I(inode)->i_data_sem));
        /*
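
The new ext4_get_blocks_wrap() above probes for an existing mapping under the read side of i_data_sem and only retakes the write side when blocks actually have to be allocated. A standalone userspace sketch of that read-then-write locking shape, using a pthread rwlock and hypothetical lookup_mapping()/allocate_mapping() helpers:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 16

static bool mapped_tbl[NBLOCKS];	/* stand-in for on-disk metadata */
static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;

static bool lookup_mapping(int block)   { return mapped_tbl[block]; }
static bool allocate_mapping(int block) { return mapped_tbl[block] = true; }

/* Shape of ext4_get_blocks_wrap(): cheap lookup under the shared lock,
 * exclusive lock only when we actually have to allocate. */
static bool get_block(int block, bool create)
{
	bool mapped;

	pthread_rwlock_rdlock(&map_sem);
	mapped = lookup_mapping(block);
	pthread_rwlock_unlock(&map_sem);

	if (!create || mapped)
		return mapped;

	pthread_rwlock_wrlock(&map_sem);
	if (!lookup_mapping(block))	/* recheck: we may have raced */
		mapped = allocate_mapping(block);
	else
		mapped = true;
	pthread_rwlock_unlock(&map_sem);
	return mapped;
}

int main(void)
{
	printf("%d\n", get_block(3, false));	/* 0: lookup only, unmapped */
	printf("%d\n", get_block(3, true));	/* 1: allocated under write lock */
	return 0;
}
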
index dd0fcfcb35ce85f9af3b81331a20b14348b48694..ef97f19c2f9d7a51d68ccb281ca982e79a53e823 100644 (file)
@@ -627,21 +627,19 @@ static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
        return block;
 }
 
+static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
+{
 #if BITS_PER_LONG == 64
-#define mb_correct_addr_and_bit(bit, addr)             \
-{                                                      \
-       bit += ((unsigned long) addr & 7UL) << 3;       \
-       addr = (void *) ((unsigned long) addr & ~7UL);  \
-}
+       *bit += ((unsigned long) addr & 7UL) << 3;
+       addr = (void *) ((unsigned long) addr & ~7UL);
 #elif BITS_PER_LONG == 32
-#define mb_correct_addr_and_bit(bit, addr)             \
-{                                                      \
-       bit += ((unsigned long) addr & 3UL) << 3;       \
-       addr = (void *) ((unsigned long) addr & ~3UL);  \
-}
+       *bit += ((unsigned long) addr & 3UL) << 3;
+       addr = (void *) ((unsigned long) addr & ~3UL);
 #else
 #error "how many bits you are?!"
 #endif
+       return addr;
+}
 
 static inline int mb_test_bit(int bit, void *addr)
 {
@@ -649,34 +647,54 @@ static inline int mb_test_bit(int bit, void *addr)
         * ext4_test_bit on architecture like powerpc
         * needs unsigned long aligned address
         */
-       mb_correct_addr_and_bit(bit, addr);
+       addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
 }
 
 static inline void mb_set_bit(int bit, void *addr)
 {
-       mb_correct_addr_and_bit(bit, addr);
+       addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
 }
 
 static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
 {
-       mb_correct_addr_and_bit(bit, addr);
+       addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit_atomic(lock, bit, addr);
 }
 
 static inline void mb_clear_bit(int bit, void *addr)
 {
-       mb_correct_addr_and_bit(bit, addr);
+       addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
 }
 
 static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
 {
-       mb_correct_addr_and_bit(bit, addr);
+       addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit_atomic(lock, bit, addr);
 }
 
+static inline int mb_find_next_zero_bit(void *addr, int max, int start)
+{
+       int fix = 0;
+       addr = mb_correct_addr_and_bit(&fix, addr);
+       max += fix;
+       start += fix;
+
+       return ext4_find_next_zero_bit(addr, max, start) - fix;
+}
+
+static inline int mb_find_next_bit(void *addr, int max, int start)
+{
+       int fix = 0;
+       addr = mb_correct_addr_and_bit(&fix, addr);
+       max += fix;
+       start += fix;
+
+       return ext4_find_next_bit(addr, max, start) - fix;
+}
+
 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
 {
        char *bb;
@@ -906,7 +924,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
        unsigned short chunk;
        unsigned short border;
 
-       BUG_ON(len >= EXT4_BLOCKS_PER_GROUP(sb));
+       BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
 
        border = 2 << sb->s_blocksize_bits;
 
@@ -946,12 +964,12 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
 
        /* initialize buddy from bitmap which is aggregation
         * of on-disk bitmap and preallocations */
-       i = ext4_find_next_zero_bit(bitmap, max, 0);
+       i = mb_find_next_zero_bit(bitmap, max, 0);
        grp->bb_first_free = i;
        while (i < max) {
                fragments++;
                first = i;
-               i = ext4_find_next_bit(bitmap, max, i);
+               i = mb_find_next_bit(bitmap, max, i);
                len = i - first;
                free += len;
                if (len > 1)
@@ -959,7 +977,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
                else
                        grp->bb_counters[0]++;
                if (i < max)
-                       i = ext4_find_next_zero_bit(bitmap, max, i);
+                       i = mb_find_next_zero_bit(bitmap, max, i);
        }
        grp->bb_fragments = fragments;
 
@@ -967,6 +985,10 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
                ext4_error(sb, __FUNCTION__,
                        "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
                        group, free, grp->bb_free);
+               /*
+                * If we intend to continue, we consider the group descriptor
+                * corrupt and update bb_free using the bitmap value
+                */
                grp->bb_free = free;
        }
 
@@ -1778,7 +1800,7 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
                buddy = mb_find_buddy(e4b, i, &max);
                BUG_ON(buddy == NULL);
 
-               k = ext4_find_next_zero_bit(buddy, max, 0);
+               k = mb_find_next_zero_bit(buddy, max, 0);
                BUG_ON(k >= max);
 
                ac->ac_found++;
@@ -1818,11 +1840,11 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
        i = e4b->bd_info->bb_first_free;
 
        while (free && ac->ac_status == AC_STATUS_CONTINUE) {
-               i = ext4_find_next_zero_bit(bitmap,
+               i = mb_find_next_zero_bit(bitmap,
                                                EXT4_BLOCKS_PER_GROUP(sb), i);
                if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
                        /*
-                        * IF we corrupt the bitmap  we won't find any
+                        * If we have a corrupt bitmap, we won't find any
                         * free blocks even though group info says we
                         * we have free blocks
                         */
@@ -1838,6 +1860,12 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
                        ext4_error(sb, __FUNCTION__, "%d free blocks as per "
                                        "group info. But got %d blocks\n",
                                        free, ex.fe_len);
+                       /*
+                        * The number of free blocks differs. This mostly
+                        * indicates that the bitmap is corrupt. So exit
+                        * without claiming the space.
+                        */
+                       break;
                }
 
                ext4_mb_measure_extent(ac, &ex, e4b);
@@ -3740,10 +3768,10 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
        }
 
        while (bit < end) {
-               bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit);
+               bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
                if (bit >= end)
                        break;
-               next = ext4_find_next_bit(bitmap_bh->b_data, end, bit);
+               next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
                if (next > end)
                        next = end;
                start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
@@ -3771,6 +3799,10 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
                        (unsigned long) pa->pa_len);
                ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n",
                                                free, pa->pa_free);
+               /*
+                * pa is already deleted so we use the value obtained
+                * from the bitmap and continue.
+                */
        }
        atomic_add(free, &sbi->s_mb_discarded);
        if (ac)
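
The mballoc.c changes above turn mb_correct_addr_and_bit() into a static inline that returns the long-aligned address, and add mb_find_next_bit()/mb_find_next_zero_bit() so the same fix-up is applied to max and start before calling the ext4 bitops (which need long-aligned addresses on architectures such as powerpc). A standalone sketch of the 64-bit fix-up arithmetic:

#include <stdint.h>
#include <stdio.h>

/* 64-bit shape of mb_correct_addr_and_bit(): fold the misalignment of
 * addr (relative to an unsigned long boundary) into the bit offset and
 * hand back the aligned address. */
static void *correct_addr_and_bit(int *bit, void *addr)
{
	*bit += ((uintptr_t)addr & 7) << 3;	/* 8 bits per misaligned byte */
	return (void *)((uintptr_t)addr & ~(uintptr_t)7);
}

int main(void)
{
	unsigned long buf[2] = { 0, 0 };	/* guaranteed long-aligned */
	int bit = 5;
	void *base = correct_addr_and_bit(&bit, (char *)buf + 3);

	/* three misaligned bytes fold into 24 extra bits: 5 becomes 29 */
	printf("bit=%d base_ok=%d\n", bit, base == (void *)buf);
	return 0;
}
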
index 8c6c685b9d22e1d399d34066e5e6a497150c81a9..5c1e27de7755b46552c3fd85dd11ad66d0ec1490 100644 (file)
@@ -43,6 +43,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
 
        if (IS_ERR(path)) {
                retval = PTR_ERR(path);
+               path = NULL;
                goto err_out;
        }
 
@@ -74,6 +75,10 @@ static int finish_range(handle_t *handle, struct inode *inode,
        }
        retval = ext4_ext_insert_extent(handle, inode, path, &newext);
 err_out:
+       if (path) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+       }
        lb->first_pblock = 0;
        return retval;
 }
index a9347fb43bcca34dde35173aa1eb1dcccbc50d27..28aa2ed4297ecb30aeedb1d04f3c5470410c7e9a 100644 (file)
@@ -1804,12 +1804,8 @@ retry:
        inode->i_fop = &ext4_dir_operations;
        inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
        dir_block = ext4_bread (handle, inode, 0, 1, &err);
-       if (!dir_block) {
-               ext4_dec_count(handle, inode); /* is this nlink == 0? */
-               ext4_mark_inode_dirty(handle, inode);
-               iput (inode);
-               goto out_stop;
-       }
+       if (!dir_block)
+               goto out_clear_inode;
        BUFFER_TRACE(dir_block, "get_write_access");
        ext4_journal_get_write_access(handle, dir_block);
        de = (struct ext4_dir_entry_2 *) dir_block->b_data;
@@ -1832,7 +1828,8 @@ retry:
        ext4_mark_inode_dirty(handle, inode);
        err = ext4_add_entry (handle, dentry, inode);
        if (err) {
-               inode->i_nlink = 0;
+out_clear_inode:
+               clear_nlink(inode);
                ext4_mark_inode_dirty(handle, inode);
                iput (inode);
                goto out_stop;
@@ -2164,7 +2161,7 @@ static int ext4_unlink(struct inode * dir, struct dentry *dentry)
        dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
        ext4_update_dx_flag(dir);
        ext4_mark_inode_dirty(handle, dir);
-       ext4_dec_count(handle, inode);
+       drop_nlink(inode);
        if (!inode->i_nlink)
                ext4_orphan_add(handle, inode);
        inode->i_ctime = ext4_current_time(inode);
@@ -2214,7 +2211,7 @@ retry:
                err = __page_symlink(inode, symname, l,
                                mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
                if (err) {
-                       ext4_dec_count(handle, inode);
+                       clear_nlink(inode);
                        ext4_mark_inode_dirty(handle, inode);
                        iput (inode);
                        goto out_stop;
@@ -2223,7 +2220,6 @@ retry:
                inode->i_op = &ext4_fast_symlink_inode_operations;
                memcpy((char*)&EXT4_I(inode)->i_data,symname,l);
                inode->i_size = l-1;
-               EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
        }
        EXT4_I(inode)->i_disksize = inode->i_size;
        err = ext4_add_nondir(handle, dentry, inode);
@@ -2407,7 +2403,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
                ext4_dec_count(handle, old_dir);
                if (new_inode) {
                        /* checked empty_dir above, can't have another parent,
-                        * ext3_dec_count() won't work for many-linked dirs */
+                        * ext4_dec_count() won't work for many-linked dirs */
                        new_inode->i_nlink = 0;
                } else {
                        ext4_inc_count(handle, new_dir);
index 9477a2bd6ff2ea0c2ae9180c89a0166d470df1b6..e29efa0f9d6298402bf35b588838d9737b08c551 100644 (file)
@@ -1037,6 +1037,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
                ext4_warning(sb, __FUNCTION__,
                             "multiple resizers run on filesystem!");
                unlock_super(sb);
+               ext4_journal_stop(handle);
                err = -EBUSY;
                goto exit_put;
        }
index 038ed743619911799dfff8e27623e75f593fc60c..c6cbb6cd59b25588ea68b49c69062362c65b3e0b 100644 (file)
@@ -369,7 +369,7 @@ out:
 
 
 /**
- * int journal_restart() - restart a handle .
+ * int journal_restart() - restart a handle.
  * @handle:  handle to restart
  * @nblocks: nr credits requested
  *
@@ -844,8 +844,7 @@ out:
 }
 
 /**
- * int journal_get_undo_access() -  Notify intent to modify metadata with
- *     non-rewindable consequences
+ * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
  * @handle: transaction
  * @bh: buffer to undo
  * @credits: store the number of taken credits here (if not NULL)
@@ -921,12 +920,14 @@ out:
 }
 
 /**
- * int journal_dirty_data() -  mark a buffer as containing dirty data which
- *                             needs to be flushed before we can commit the
- *                             current transaction.
+ * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
  * @handle: transaction
  * @bh: bufferhead to mark
  *
+ * Description:
+ * Mark a buffer as containing dirty data which needs to be flushed before
+ * we can commit the current transaction.
+ *
  * The buffer is placed on the transaction's data list and is marked as
  * belonging to the transaction.
  *
@@ -1098,11 +1099,11 @@ no_journal:
 }
 
 /**
- * int journal_dirty_metadata() -  mark a buffer as containing dirty metadata
+ * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
  * @handle: transaction to add buffer to.
  * @bh: buffer to mark
  *
- * mark dirty metadata which needs to be journaled as part of the current
+ * Mark dirty metadata which needs to be journaled as part of the current
  * transaction.
  *
  * The buffer is placed on the transaction's metadata list and is marked
index 5df564366f36b5ea287245b3b5fd060ab1012f31..235e4d3873a88d17837f1a9551f09664c8ffcb54 100644 (file)
@@ -325,16 +325,12 @@ confused:
 }
 
 /**
- * mpage_readpages - populate an address space with some pages, and
- *                       start reads against them.
- *
+ * mpage_readpages - populate an address space with some pages & start reads against them
  * @mapping: the address_space
  * @pages: The address of a list_head which contains the target pages.  These
  *   pages have their ->index populated and are otherwise uninitialised.
- *
  *   The page at @pages->prev has the lowest file offset, and reads should be
  *   issued in @pages->prev to @pages->next order.
- *
  * @nr_pages: The number of pages at *@pages
  * @get_block: The filesystem's block mapper function.
  *
@@ -360,6 +356,7 @@ confused:
  * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
  * submitted in the following order:
  *     12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
+ *
  * because the indirect block has to be read to get the mappings of blocks
  * 13,14,15,16.  Obviously, this impacts performance.
  *
@@ -656,9 +653,7 @@ out:
 }
 
 /**
- * mpage_writepages - walk the list of dirty pages of the given
- * address space and writepage() all of them.
- * 
+ * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
  * @mapping: address space structure to write
  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  * @get_block: the filesystem's block mapper function.
index 82243127eebf6b81d80acd239609bdc1fd26ae43..90383ed6100530d6d10e5edae9b437278138fa0e 100644 (file)
@@ -257,7 +257,7 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(!OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
+       BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
 
        ret = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &di_bh,
                               OCFS2_BH_CACHED, inode);
index e280833ceb9aa76bd8404ded84b6047948b0ca9d..8a18758480805504c453ed291908b36764b9f722 100644 (file)
@@ -390,9 +390,8 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
                                goto bail;
                        }
                        if (pde)
-                               pde->rec_len =
-                                       cpu_to_le16(le16_to_cpu(pde->rec_len) +
-                                                   le16_to_cpu(de->rec_len));
+                               le16_add_cpu(&pde->rec_len,
+                                               le16_to_cpu(de->rec_len));
                        else
                                de->inode = 0;
                        dir->i_version++;
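
This ocfs2 hunk (and the localalloc.c one below) replaces the open-coded cpu_to_le16(le16_to_cpu(x) + y) round-trip with le16_add_cpu(). A standalone sketch of what the helper amounts to, written against a byte array so it is endian-neutral; this is an illustration, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>

/* Endian-neutral illustration of le16_add_cpu(): read the little-endian
 * on-disk field into CPU order, add, write it back -- instead of the
 * open-coded cpu_to_le16(le16_to_cpu(x) + y). */
static uint16_t get_le16(const uint8_t b[2])
{
	return (uint16_t)(b[0] | (b[1] << 8));
}

static void put_le16(uint8_t b[2], uint16_t v)
{
	b[0] = v & 0xff;
	b[1] = v >> 8;
}

static void le16_add_cpu_demo(uint8_t field[2], uint16_t val)
{
	put_le16(field, (uint16_t)(get_le16(field) + val));
}

int main(void)
{
	uint8_t rec_len[2] = { 0x10, 0x00 };	/* 16, stored little-endian */

	le16_add_cpu_demo(rec_len, 24);
	printf("%u\n", get_le16(rec_len));	/* 40 */
	return 0;
}
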
index a54d33d95ada49f6a993c0518168870e1b58c1a2..c92d1b19fc0bbb51c0c98cbe27847f98ac6fce10 100644 (file)
@@ -1695,9 +1695,9 @@ send_response:
  * can periodically run all locks owned by this node
  * and re-assert across the cluster...
  */
-int dlm_do_assert_master(struct dlm_ctxt *dlm,
-                        struct dlm_lock_resource *res,
-                        void *nodemap, u32 flags)
+static int dlm_do_assert_master(struct dlm_ctxt *dlm,
+                               struct dlm_lock_resource *res,
+                               void *nodemap, u32 flags)
 {
        struct dlm_assert_master assert;
        int to, tmpret;
index 351130c9b7346c7d26faa5d4215720baa8e92f1a..f7794306b2bd20a0dc2e1d1c9aec72e559bd4b8c 100644 (file)
@@ -3042,7 +3042,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
                inode = ocfs2_lock_res_inode(lockres);
        mapping = inode->i_mapping;
 
-       if (S_ISREG(inode->i_mode))
+       if (!S_ISREG(inode->i_mode))
                goto out;
 
        /*
@@ -3219,8 +3219,8 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
        return UNBLOCK_CONTINUE_POST;
 }
 
-void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
-                               struct ocfs2_lock_res *lockres)
+static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
+                                      struct ocfs2_lock_res *lockres)
 {
        int status;
        struct ocfs2_unblock_ctl ctl = {0, 0,};
@@ -3356,7 +3356,7 @@ static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
        return should_wake;
 }
 
-int ocfs2_downconvert_thread(void *arg)
+static int ocfs2_downconvert_thread(void *arg)
 {
        int status = 0;
        struct ocfs2_super *osb = arg;
index 1d5b0699d0a9bb0adc67a85d4720778be9f81773..e3cf902404b45e3caaec642d450cf626d09ecb74 100644 (file)
@@ -109,8 +109,6 @@ void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
                               struct ocfs2_lock_res *lockres);
 
 /* for the downconvert thread */
-void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
-                               struct ocfs2_lock_res *lockres);
 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb);
 
 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
index c0efd9489fe8168f88d28c1abe6acdb2cedb3385..0758daf64da07c41fcf4f73a7b55d634589ab87d 100644 (file)
@@ -49,10 +49,15 @@ static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
 static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
                                              int bit);
 static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map);
-static void __ocfs2_node_map_dup(struct ocfs2_node_map *target,
-                                struct ocfs2_node_map *from);
-static void __ocfs2_node_map_set(struct ocfs2_node_map *target,
-                                struct ocfs2_node_map *from);
+
+/* special case -1 for now
+ * TODO: should *really* make sure the calling func never passes -1!!  */
+static void ocfs2_node_map_init(struct ocfs2_node_map *map)
+{
+       map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
+       memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) *
+              sizeof(unsigned long));
+}
 
 void ocfs2_init_node_maps(struct ocfs2_super *osb)
 {
@@ -136,15 +141,6 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
                mlog_errno(ret);
 }
 
-/* special case -1 for now
- * TODO: should *really* make sure the calling func never passes -1!!  */
-void ocfs2_node_map_init(struct ocfs2_node_map *map)
-{
-       map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
-       memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) *
-              sizeof(unsigned long));
-}
-
 static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
                                            int bit)
 {
@@ -216,6 +212,8 @@ int ocfs2_node_map_is_empty(struct ocfs2_super *osb,
        return ret;
 }
 
+#if 0
+
 static void __ocfs2_node_map_dup(struct ocfs2_node_map *target,
                                 struct ocfs2_node_map *from)
 {
@@ -254,6 +252,8 @@ static void __ocfs2_node_map_set(struct ocfs2_node_map *target,
                target->map[i] = from->map[i];
 }
 
+#endif  /*  0  */
+
 /* Returns whether the recovery bit was actually set - it may not be
  * if a node is still marked as needing recovery */
 int ocfs2_recovery_map_set(struct ocfs2_super *osb,
index 56859211888a18cd6f22ed9a9fecf20eaba040ce..eac63aed7611c2105d5026c58affea7f2b562ed4 100644 (file)
@@ -33,7 +33,6 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb);
 
 /* node map functions - used to keep track of mounted and in-recovery
  * nodes. */
-void ocfs2_node_map_init(struct ocfs2_node_map *map);
 int ocfs2_node_map_is_empty(struct ocfs2_super *osb,
                            struct ocfs2_node_map *map);
 void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
@@ -57,9 +56,5 @@ int ocfs2_recovery_map_set(struct ocfs2_super *osb,
                           int num);
 void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
                              int num);
-/* returns 1 if bit is the only bit set in target, 0 otherwise */
-int ocfs2_node_map_is_only(struct ocfs2_super *osb,
-                          struct ocfs2_node_map *target,
-                          int bit);
 
 #endif /* OCFS2_HEARTBEAT_H */
index add1ffdc5c6c75bb2accdb65972466f2651e80c8..ab83fd5624294561541dba9663f128336f296116 100644 (file)
@@ -120,9 +120,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
 
        mlog_entry_void();
 
-       if (ocfs2_mount_local(osb))
-               goto bail;
-
        if (osb->local_alloc_size == 0)
                goto bail;
 
@@ -588,8 +585,7 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
        while(bits_wanted--)
                ocfs2_set_bit(start++, bitmap);
 
-       alloc->id1.bitmap1.i_used = cpu_to_le32(*num_bits +
-                               le32_to_cpu(alloc->id1.bitmap1.i_used));
+       le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
 
        status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
        if (status < 0) {
index 96ee899d65020e76e5c0c8d49b0683c74214ebed..91a1bd67ac1d6dd58da1d3db5c0691cdb7ea7525 100644 (file)
@@ -314,9 +314,12 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 static int lstats_show_proc(struct seq_file *m, void *v)
 {
        int i;
-       struct task_struct *task = m->private;
-       seq_puts(m, "Latency Top version : v0.1\n");
+       struct inode *inode = m->private;
+       struct task_struct *task = get_proc_task(inode);
 
+       if (!task)
+               return -ESRCH;
+       seq_puts(m, "Latency Top version : v0.1\n");
        for (i = 0; i < 32; i++) {
                if (task->latency_record[i].backtrace[0]) {
                        int q;
@@ -341,32 +344,24 @@ static int lstats_show_proc(struct seq_file *m, void *v)
                }
 
        }
+       put_task_struct(task);
        return 0;
 }
 
 static int lstats_open(struct inode *inode, struct file *file)
 {
-       int ret;
-       struct seq_file *m;
-       struct task_struct *task = get_proc_task(inode);
-
-       ret = single_open(file, lstats_show_proc, NULL);
-       if (!ret) {
-               m = file->private_data;
-               m->private = task;
-       }
-       return ret;
+       return single_open(file, lstats_show_proc, inode);
 }
 
 static ssize_t lstats_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *offs)
 {
-       struct seq_file *m;
-       struct task_struct *task;
+       struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 
-       m = file->private_data;
-       task = m->private;
+       if (!task)
+               return -ESRCH;
        clear_all_latency_tracing(task);
+       put_task_struct(task);
 
        return count;
 }
index 468805d40e2bdb44daeb43bfccf4a1b5919c7add..2d563979cb025412dce7847bd79979c34a8456eb 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
+#include <linux/genhd.h>
 #include <linux/smp.h>
 #include <linux/signal.h>
 #include <linux/module.h>
@@ -377,7 +378,6 @@ static int stram_read_proc(char *page, char **start, off_t off,
 #endif
 
 #ifdef CONFIG_BLOCK
-extern const struct seq_operations partitions_op;
 static int partitions_open(struct inode *inode, struct file *file)
 {
        return seq_open(file, &partitions_op);
@@ -389,7 +389,6 @@ static const struct file_operations proc_partitions_operations = {
        .release        = seq_release,
 };
 
-extern const struct seq_operations diskstats_op;
 static int diskstats_open(struct inode *inode, struct file *file)
 {
        return seq_open(file, &diskstats_op);
index 6841452e0dea00620cd2053f9cd02356f94f6169..393cc22c1717d582dc9e03d401ec514fd8ac4c99 100644 (file)
@@ -2031,7 +2031,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
                return -EXDEV;
        }
        /* We must not pack tails for quota files on reiserfs for quota IO to work */
-       if (!REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask) {
+       if (!(REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask)) {
                reiserfs_warning(sb,
                                 "reiserfs: Quota file must have tail packing disabled.");
                path_put(&nd.path);
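
The reiserfs hunk above is a pure operator-precedence fix: ! binds tighter than &, so the old condition evaluated (!i_flags) & i_nopack_mask and never tested the intended bit. A minimal stand-alone C sketch of the same pitfall, using made-up flag values rather than the real reiserfs masks:

#include <stdio.h>

/* '!' binds tighter than '&', so "!flags & mask" means "(!flags) & mask",
 * which is almost never what was intended. */
int main(void)
{
        unsigned int flags = 0x10;      /* hypothetical i_flags value       */
        unsigned int mask  = 0x10;      /* hypothetical i_nopack_mask value */

        /* buggy form: !flags is 0 here, so the whole test is always false */
        if (!flags & mask)
                printf("buggy test fired\n");
        else
                printf("buggy test: bit looks unset even though it is set\n");

        /* fixed form: parentheses make the bit test explicit */
        if (!(flags & mask))
                printf("fixed test: bit unset\n");
        else
                printf("fixed test: bit set, as expected\n");

        return 0;
}
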
index 9b559ee711a8e59f8e12fb0029b4d5a0356b7df4..0670c915cd35c5e8b653494d4d5b1efe76867e2e 100644 (file)
@@ -1669,6 +1669,13 @@ static int link_pipe(struct pipe_inode_info *ipipe,
                i++;
        } while (len);
 
+       /*
+        * return EAGAIN if we have the potential of some data in the
+        * future, otherwise just return 0
+        */
+       if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
+               ret = -EAGAIN;
+
        inode_double_unlock(ipipe->inode, opipe->inode);
 
        /*
@@ -1709,11 +1716,8 @@ static long do_tee(struct file *in, struct file *out, size_t len,
                ret = link_ipipe_prep(ipipe, flags);
                if (!ret) {
                        ret = link_opipe_prep(opipe, flags);
-                       if (!ret) {
+                       if (!ret)
                                ret = link_pipe(ipipe, opipe, len, flags);
-                               if (!ret && (flags & SPLICE_F_NONBLOCK))
-                                       ret = -EAGAIN;
-                       }
                }
        }
 
index 21dfc9da235e623ef77a330076edb6269f375f48..8831d95187904f0ecf4a1aa1ac5b77cc767aec77 100644 (file)
@@ -171,7 +171,7 @@ xfs_parseargs(
        char                    *this_char, *value, *eov;
        int                     dsunit, dswidth, vol_dsunit, vol_dswidth;
        int                     iosize;
-       int                     ikeep = 0;
+       int                     dmapi_implies_ikeep = 1;
 
        args->flags |= XFSMNT_BARRIER;
        args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
@@ -302,10 +302,10 @@ xfs_parseargs(
                } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
                        args->flags &= ~XFSMNT_BARRIER;
                } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
-                       ikeep = 1;
-                       args->flags &= ~XFSMNT_IDELETE;
+                       args->flags |= XFSMNT_IKEEP;
                } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
-                       args->flags |= XFSMNT_IDELETE;
+                       dmapi_implies_ikeep = 0;
+                       args->flags &= ~XFSMNT_IKEEP;
                } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
                        args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE;
                } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
@@ -410,8 +410,8 @@ xfs_parseargs(
         * Note that if "ikeep" or "noikeep" mount options are
         * supplied, then they are honored.
         */
-       if (!(args->flags & XFSMNT_DMAPI) && !ikeep)
-               args->flags |= XFSMNT_IDELETE;
+       if ((args->flags & XFSMNT_DMAPI) && dmapi_implies_ikeep)
+               args->flags |= XFSMNT_IKEEP;
 
        if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
                if (dsunit) {
@@ -446,6 +446,7 @@ xfs_showargs(
 {
        static struct proc_xfs_info xfs_info_set[] = {
                /* the few simple ones we can get from the mount struct */
+               { XFS_MOUNT_IKEEP,              "," MNTOPT_IKEEP },
                { XFS_MOUNT_WSYNC,              "," MNTOPT_WSYNC },
                { XFS_MOUNT_INO64,              "," MNTOPT_INO64 },
                { XFS_MOUNT_NOALIGN,            "," MNTOPT_NOALIGN },
@@ -461,7 +462,6 @@ xfs_showargs(
        };
        static struct proc_xfs_info xfs_info_unset[] = {
                /* the few simple ones we can get from the mount struct */
-               { XFS_MOUNT_IDELETE,            "," MNTOPT_IKEEP },
                { XFS_MOUNT_COMPAT_IOSIZE,      "," MNTOPT_LARGEIO },
                { XFS_MOUNT_BARRIER,            "," MNTOPT_NOBARRIER },
                { XFS_MOUNT_SMALL_INUMS,        "," MNTOPT_64BITINODE },
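
The xfs_parseargs()/xfs_showargs() hunks above invert the polarity of the inode-cluster option: instead of setting XFSMNT_IDELETE by default and clearing it for "ikeep", the code now sets XFSMNT_IKEEP only when requested, and dmapi implies ikeep unless "noikeep" was given explicitly. A stand-alone sketch of just that decision logic, with hypothetical DEMO_* bits standing in for the real XFSMNT_* values:

#include <stdio.h>

#define DEMO_XFSMNT_DMAPI  0x1
#define DEMO_XFSMNT_IKEEP  0x2

/* Mirrors the option handling above: "ikeep"/"noikeep" are honored,
 * and dmapi turns ikeep on unless "noikeep" was given explicitly. */
static unsigned int demo_parse(int ikeep_opt, int noikeep_opt, int dmapi_opt)
{
        unsigned int flags = 0;
        int dmapi_implies_ikeep = 1;

        if (dmapi_opt)
                flags |= DEMO_XFSMNT_DMAPI;
        if (ikeep_opt)
                flags |= DEMO_XFSMNT_IKEEP;
        if (noikeep_opt) {
                dmapi_implies_ikeep = 0;
                flags &= ~DEMO_XFSMNT_IKEEP;
        }

        if ((flags & DEMO_XFSMNT_DMAPI) && dmapi_implies_ikeep)
                flags |= DEMO_XFSMNT_IKEEP;

        return flags;
}

int main(void)
{
        printf("defaults        -> ikeep=%d\n", !!(demo_parse(0, 0, 0) & DEMO_XFSMNT_IKEEP));
        printf("dmapi only      -> ikeep=%d\n", !!(demo_parse(0, 0, 1) & DEMO_XFSMNT_IKEEP));
        printf("dmapi + noikeep -> ikeep=%d\n", !!(demo_parse(0, 1, 1) & DEMO_XFSMNT_IKEEP));
        return 0;
}
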
index 48228848f5ae3e2e6f52900b6d0ea7215274a36d..fab0b6d5a41be3916ac8893a2f82f142cd4443f4 100644 (file)
  * XFS bit manipulation routines, used in non-realtime code.
  */
 
+#ifndef HAVE_ARCH_HIGHBIT
+/*
+ * Index of high bit number in byte, -1 for none set, 0..7 otherwise.
+ */
+static const char xfs_highbit[256] = {
+       -1, 0, 1, 1, 2, 2, 2, 2,                        /* 00 .. 07 */
+       3, 3, 3, 3, 3, 3, 3, 3,                 /* 08 .. 0f */
+       4, 4, 4, 4, 4, 4, 4, 4,                 /* 10 .. 17 */
+       4, 4, 4, 4, 4, 4, 4, 4,                 /* 18 .. 1f */
+       5, 5, 5, 5, 5, 5, 5, 5,                 /* 20 .. 27 */
+       5, 5, 5, 5, 5, 5, 5, 5,                 /* 28 .. 2f */
+       5, 5, 5, 5, 5, 5, 5, 5,                 /* 30 .. 37 */
+       5, 5, 5, 5, 5, 5, 5, 5,                 /* 38 .. 3f */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 40 .. 47 */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 48 .. 4f */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 50 .. 57 */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 58 .. 5f */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 60 .. 67 */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 68 .. 6f */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 70 .. 77 */
+       6, 6, 6, 6, 6, 6, 6, 6,                 /* 78 .. 7f */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* 80 .. 87 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* 88 .. 8f */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* 90 .. 97 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* 98 .. 9f */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* a0 .. a7 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* a8 .. af */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* b0 .. b7 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* b8 .. bf */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* c0 .. c7 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* c8 .. cf */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* d0 .. d7 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* d8 .. df */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* e0 .. e7 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* e8 .. ef */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* f0 .. f7 */
+       7, 7, 7, 7, 7, 7, 7, 7,                 /* f8 .. ff */
+};
+#endif
+
+/*
+ * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
+ */
+inline int
+xfs_highbit32(
+       __uint32_t      v)
+{
+#ifdef HAVE_ARCH_HIGHBIT
+       return highbit32(v);
+#else
+       int             i;
+
+       if (v & 0xffff0000)
+               if (v & 0xff000000)
+                       i = 24;
+               else
+                       i = 16;
+       else if (v & 0x0000ffff)
+               if (v & 0x0000ff00)
+                       i = 8;
+               else
+                       i = 0;
+       else
+               return -1;
+       return i + xfs_highbit[(v >> i) & 0xff];
+#endif
+}
+
+/*
+ * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set.
+ */
+int
+xfs_lowbit64(
+       __uint64_t      v)
+{
+       __uint32_t      w = (__uint32_t)v;
+       int             n = 0;
+
+       if (w) {        /* lower bits */
+               n = ffs(w);
+       } else {        /* upper bits */
+               w = (__uint32_t)(v >> 32);
+               if (w && (n = ffs(w)))
+                       n += 32;
+       }
+       return n - 1;
+}
+
+/*
+ * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set.
+ */
+int
+xfs_highbit64(
+       __uint64_t      v)
+{
+       __uint32_t      h = (__uint32_t)(v >> 32);
+
+       if (h)
+               return xfs_highbit32(h) + 32;
+       return xfs_highbit32((__uint32_t)v);
+}
+
+
 /*
  * Return whether bitmap is empty.
  * Size is number of words in the bitmap, which is padded to word boundary
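
The out-of-line xfs_highbit32(), xfs_lowbit64() and xfs_highbit64() added above share one contract: return the 0-based index of the highest (or lowest) set bit, or -1 when the argument is zero. A stand-alone check of that contract, using naive loops instead of the lookup-table and ffs() tricks above (illustration only, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

static int demo_highbit32(uint32_t v)
{
        int i;

        for (i = 31; i >= 0; i--)
                if (v & (UINT32_C(1) << i))
                        return i;
        return -1;
}

static int demo_lowbit64(uint64_t v)
{
        int i;

        for (i = 0; i < 64; i++)
                if (v & (UINT64_C(1) << i))
                        return i;
        return -1;
}

int main(void)
{
        printf("highbit32(0)          = %d\n", demo_highbit32(0));          /* -1 */
        printf("highbit32(1)          = %d\n", demo_highbit32(1));          /*  0 */
        printf("highbit32(0x80004000) = %d\n", demo_highbit32(0x80004000)); /* 31 */
        printf("lowbit64(1 << 63)     = %d\n", demo_lowbit64(UINT64_C(1) << 63)); /* 63 */
        printf("lowbit64(0)           = %d\n", demo_lowbit64(0));           /* -1 */
        return 0;
}
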
index 325a007dec91de7942df66db18afc0be2199a61c..082641a9782c073e832fab360d88d98020f87310 100644 (file)
@@ -47,30 +47,13 @@ static inline __uint64_t xfs_mask64lo(int n)
 }
 
 /* Get high bit set out of 32-bit argument, -1 if none set */
-static inline int xfs_highbit32(__uint32_t v)
-{
-       return fls(v) - 1;
-}
-
-/* Get high bit set out of 64-bit argument, -1 if none set */
-static inline int xfs_highbit64(__uint64_t v)
-{
-       return fls64(v) - 1;
-}
-
-/* Get low bit set out of 32-bit argument, -1 if none set */
-static inline int xfs_lowbit32(__uint32_t v)
-{
-       __uint32_t t = v;
-       return (t) ? find_first_bit((unsigned long *)&t, 32) : -1;
-}
+extern int xfs_highbit32(__uint32_t v);
 
 /* Get low bit set out of 64-bit argument, -1 if none set */
-static inline int xfs_lowbit64(__uint64_t v)
-{
-       __uint64_t t = v;
-       return (t) ? find_first_bit((unsigned long *)&t, 64) : -1;
-}
+extern int xfs_lowbit64(__uint64_t v);
+
+/* Get high bit set out of 64-bit argument, -1 if none set */
+extern int xfs_highbit64(__uint64_t);
 
 /* Return whether bitmap is empty (1 == empty) */
 extern int xfs_bitmap_empty(uint *map, uint size);
index d16c1b971074ba0cb95403b522f1efe083771070..d5d1e60ee2247ba7281954888a0412c51a0bbd3f 100644 (file)
@@ -86,7 +86,7 @@ struct xfs_mount_args {
 #define XFSMNT_NOUUID          0x01000000      /* Ignore fs uuid */
 #define XFSMNT_DMAPI           0x02000000      /* enable dmapi/xdsm */
 #define XFSMNT_BARRIER         0x04000000      /* use write barriers */
-#define XFSMNT_IDELETE         0x08000000      /* inode cluster delete */
+#define XFSMNT_IKEEP           0x08000000      /* inode cluster delete */
 #define XFSMNT_SWALLOC         0x10000000      /* turn on stripe width
                                                 * allocation */
 #define XFSMNT_DIRSYNC         0x40000000      /* sync creat,link,unlink,rename
index c5836b951d0c1a80ef9f71a2e0eeba511a437af1..db9d5fa600af04ea5a1a0b31e3111c8a38347af7 100644 (file)
@@ -1053,7 +1053,7 @@ xfs_difree(
        /*
         * When an inode cluster is free, it becomes eligible for removal
         */
-       if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
+       if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
            (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
 
                *delete = 1;
index f7c620ec6e695eb338b4da9a21692bf5d0c59608..1d8a4728d847c104403cbaa8117b6b2c6a8089a3 100644 (file)
@@ -366,7 +366,7 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_SMALL_INUMS  (1ULL << 15)    /* users wants 32bit inodes */
 #define XFS_MOUNT_NOUUID       (1ULL << 16)    /* ignore uuid during mount */
 #define XFS_MOUNT_BARRIER      (1ULL << 17)
-#define XFS_MOUNT_IDELETE      (1ULL << 18)    /* delete empty inode clusters*/
+#define XFS_MOUNT_IKEEP                (1ULL << 18)    /* keep empty inode clusters*/
 #define XFS_MOUNT_SWALLOC      (1ULL << 19)    /* turn on stripe width
                                                 * allocation */
 #define XFS_MOUNT_RDONLY       (1ULL << 20)    /* read-only fs */
index ca83ddf72af4569ab8cfa1b3cdfcbbf16c80ffb6..47082c01872d0621f0c8499862e16c6ef33d2aee 100644 (file)
@@ -72,6 +72,18 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
  * Internal functions.
  */
 
+/*
+ * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set.
+ */
+STATIC int
+xfs_lowbit32(
+       __uint32_t      v)
+{
+       if (v)
+               return ffs(v) - 1;
+       return -1;
+}
+
 /*
  * Allocate space to the bitmap or summary file, and zero it, for growfs.
  */
@@ -432,7 +444,6 @@ xfs_rtallocate_extent_near(
        }
        bbno = XFS_BITTOBLOCK(mp, bno);
        i = 0;
-       ASSERT(minlen != 0);
        log2len = xfs_highbit32(minlen);
        /*
         * Loop over all bitmap blocks (bbno + i is current block).
@@ -601,8 +612,6 @@ xfs_rtallocate_extent_size(
        xfs_suminfo_t   sum;            /* summary information for extents */
 
        ASSERT(minlen % prod == 0 && maxlen % prod == 0);
-       ASSERT(maxlen != 0);
-
        /*
         * Loop over all the levels starting with maxlen.
         * At each level, look at all the bitmap blocks, to see if there
@@ -660,9 +669,6 @@ xfs_rtallocate_extent_size(
                *rtblock = NULLRTBLOCK;
                return 0;
        }
-       ASSERT(minlen != 0);
-       ASSERT(maxlen != 0);
-
        /*
         * Loop over sizes, from maxlen down to minlen.
         * This time, when we do the allocations, allow smaller ones
@@ -1948,7 +1954,6 @@ xfs_growfs_rt(
                                  nsbp->sb_blocksize * nsbp->sb_rextsize);
                nsbp->sb_rextents = nsbp->sb_rblocks;
                do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
-               ASSERT(nsbp->sb_rextents != 0);
                nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
                nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
                nrsumsize =
index 413587f0215578760d0109dc3825ceb296ce0fab..7321304a69ccc129452c674dc298cdef25d5480a 100644 (file)
@@ -281,8 +281,8 @@ xfs_start_flags(
                mp->m_readio_log = mp->m_writeio_log = ap->iosizelog;
        }
 
-       if (ap->flags & XFSMNT_IDELETE)
-               mp->m_flags |= XFS_MOUNT_IDELETE;
+       if (ap->flags & XFSMNT_IKEEP)
+               mp->m_flags |= XFS_MOUNT_IKEEP;
        if (ap->flags & XFSMNT_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
        if (ap->flags & XFSMNT_ATTR2)
index b7e730851461d635f38eff192b9ab6011053f98f..c145bb01bc8fa573a6a1884fc37c9b0aae5bbf6d 100644 (file)
@@ -35,7 +35,7 @@
 1004:
                mrc     p6, 0, \irqstat, c6, c0, 0      @ ICIP2
                mrc     p6, 0, \irqnr, c7, c0, 0        @ ICMR2
-               ands    \irqstat, \irqstat, \irqnr
+               ands    \irqnr, \irqstat, \irqnr
                beq     1003f
                rsb     \irqstat, \irqnr, #0
                and     \irqstat, \irqstat, \irqnr
index ac175b4d10cbfa859880be062163be2526eaab4c..2357a73340d4200c39743268fe930826d341a601 100644 (file)
 #define MCCR_FSRIE     (1 << 1)        /* FIFO Service Request Interrupt Enable */
 
 #define GCR            __REG(0x4050000C)  /* Global Control Register */
+#ifdef CONFIG_PXA3xx
+#define GCR_CLKBPB     (1 << 31)       /* Internal clock enable */
+#endif
 #define GCR_nDMAEN     (1 << 24)       /* non DMA Enable */
 #define GCR_CDONE_IE   (1 << 19)       /* Command Done Interrupt Enable */
 #define GCR_SDONE_IE   (1 << 18)       /* Status Done Interrupt Enable */
index 1ee17b6951d02f9057867594dbb5f3d268c4e038..47fe34d692dab57555a0b8e6af72b09660eca39f 100644 (file)
@@ -8,7 +8,7 @@
 /* Maximum address we can reach in physical address mode */
 #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
 /* Maximum address we can use for the control code buffer */
-#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
 
 #define KEXEC_CONTROL_CODE_SIZE        4096
 
index 4e7bd32288ae6196a2fa5082173d5510cacd0b7a..c042194d3ab55fc19be196d9db37640d260f224b 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
-#define ARCH_SUPPORTS_KRETPROBES
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE                  2
 #define MAX_STACK_SIZE                 64      /* 32 would probably be OK */
index 8431f6eed5c6b6a5ebee2c067032e32f40302952..5db03cf3b90525d2164175288bd0f5a28f3efe7a 100644 (file)
@@ -40,16 +40,16 @@ extern int __bug_unaligned_x(const void *ptr);
  */
 
 #define __get_unaligned_2_le(__p)                                      \
-       (__p[0] | __p[1] << 8)
+       (unsigned int)(__p[0] | __p[1] << 8)
 
 #define __get_unaligned_2_be(__p)                                      \
-       (__p[0] << 8 | __p[1])
+       (unsigned int)(__p[0] << 8 | __p[1])
 
 #define __get_unaligned_4_le(__p)                                      \
-       (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
+       (unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
 
 #define __get_unaligned_4_be(__p)                                      \
-       (__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])
+       (unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])
 
 #define __get_unaligned_8_le(__p)                                      \
        ((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 |      \
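
The casts added above pin the result type of the unaligned-access macros to unsigned int; the underlying idiom is to assemble the value byte by byte so the CPU never has to perform an unaligned load. A stand-alone version of the 32-bit little- and big-endian variants (demo bytes chosen so every shift stays inside the positive int range):

#include <stdio.h>

static unsigned int demo_get_unaligned_4_le(const unsigned char *p)
{
        return (unsigned int)(p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24);
}

static unsigned int demo_get_unaligned_4_be(const unsigned char *p)
{
        return (unsigned int)(p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]);
}

int main(void)
{
        /* deliberately "misaligned" view into a byte buffer */
        const unsigned char buf[] = { 0x00, 0x78, 0x56, 0x34, 0x12 };

        printf("LE: 0x%08x\n", demo_get_unaligned_4_le(buf + 1)); /* 0x12345678 */
        printf("BE: 0x%08x\n", demo_get_unaligned_4_be(buf + 1)); /* 0x78563412 */
        return 0;
}
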
index 018f6e2a02425d4acc7372fc0111c701332cff68..3ae7b548fce764bc04dd2e00a93e9b2f330dce9a 100644 (file)
@@ -157,6 +157,7 @@ extern struct page *empty_zero_page;
 #define _PAGE_S(x)     _PAGE_NORMAL(x)
 
 #define PAGE_COPY      _PAGE_P(PAGE_WRITE | PAGE_READ)
+#define PAGE_SHARED    _PAGE_S(PAGE_WRITE | PAGE_READ)
 
 #ifndef __ASSEMBLY__
 /*
index 8265ea473d5bc566cd2f7acc087b74d8f511f617..4f318f1fd2d901214b5771c53d3e19854db971c6 100644 (file)
@@ -1,12 +1,11 @@
 /*
- * include/asm/bf5xx_timers.h
- *
- * This file contains the major Data structures and constants
- * used for General Purpose Timer Implementation in BF5xx
+ * gptimers.h - Blackfin General Purpose Timer structs/defines/prototypes
  *
+ * Copyright (c) 2005-2008 Analog Devices Inc.
  * Copyright (C) 2005 John DeHority
  * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de)
  *
+ * Licensed under the GPL-2.
  */
 
 #ifndef _BLACKFIN_TIMERS_H_
index 65480dab244ef72dc9c338ff65c053e7a676a22d..86b67834354da4a86603eb4cf15f3485ab7d78c0 100644 (file)
@@ -67,4 +67,6 @@ static __inline__ int irq_canonicalize(int irq)
 #define NO_IRQ ((unsigned int)(-1))
 #endif
 
+#define SIC_SYSIRQ(irq)        (irq - (IRQ_CORETMR + 1))
+
 #endif                         /* _BFIN_IRQ_H_ */
index 15dbc21eed8bd30c34da9f747ccf88bc1f92b347..c0694ecd2ecde47ecb91b99c43e26bed588d28f2 100644 (file)
@@ -23,7 +23,6 @@
 #define UART_GET_DLH(uart)     bfin_read16(((uart)->port.membase + OFFSET_DLH))
 #define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
 #define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
 #define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
 
 #define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
@@ -58,6 +57,7 @@
 struct bfin_serial_port {
        struct uart_port port;
        unsigned int old_status;
+       unsigned int lsr;
 #ifdef CONFIG_SERIAL_BFIN_DMA
        int tx_done;
        int tx_count;
@@ -67,15 +67,31 @@ struct bfin_serial_port {
        unsigned int tx_dma_channel;
        unsigned int rx_dma_channel;
        struct work_struct tx_dma_workqueue;
-#else
-       struct work_struct cts_workqueue;
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       struct work_struct cts_workqueue;
        int cts_pin;
        int rts_pin;
 #endif
 };
 
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
+{
+       unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
+       uart->lsr |= (lsr & (BI|FE|PE|OE));
+       return lsr | uart->lsr;
+}
+
+static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
+{
+       uart->lsr = 0;
+       bfin_write16(uart->port.membase + OFFSET_LSR, -1);
+}
+
 struct bfin_serial_port bfin_serial_ports[NR_PORTS];
 struct bfin_serial_res {
        unsigned long uart_base_addr;
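
The UART_GET_LSR()/UART_CLEAR_LSR() helpers introduced above (and repeated for the other Blackfin UART headers below) handle a status register whose error bits are cleared by the very act of reading it: a software shadow keeps the sticky error bits until they are cleared explicitly. A toy stand-alone model of that pattern, with made-up register bits rather than the real Blackfin definitions:

#include <stdio.h>

#define BI 0x1          /* break          */
#define FE 0x2          /* framing error  */
#define PE 0x4          /* parity error   */
#define OE 0x8          /* overrun        */
#define DR 0x10         /* data ready     */

static unsigned int hw_lsr = FE | DR;   /* pretend: framing error + data ready */
static unsigned int shadow_lsr;

/* the "hardware": error bits vanish as soon as the register is read */
static unsigned int read_hw_lsr(void)
{
        unsigned int v = hw_lsr;

        hw_lsr &= ~(BI | FE | PE | OE);
        return v;
}

/* the software side: latch error bits into a shadow so later readers
 * (e.g. the TX path) still see them */
static unsigned int demo_get_lsr(void)
{
        unsigned int lsr = read_hw_lsr();

        shadow_lsr |= lsr & (BI | FE | PE | OE);
        return lsr | shadow_lsr;
}

int main(void)
{
        printf("first read : 0x%02x\n", demo_get_lsr()); /* FE|DR            */
        printf("second read: 0x%02x\n", demo_get_lsr()); /* FE kept alive    */
        shadow_lsr = 0;                                   /* UART_CLEAR_LSR   */
        printf("after clear: 0x%02x\n", demo_get_lsr()); /* FE gone, DR left */
        return 0;
}
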
index 7871d4313f49b3d7751c4cbe779d5fa321ea789c..b6f513bee56ece582d69bdba072e722b557099f0 100644 (file)
@@ -23,7 +23,6 @@
 #define UART_GET_DLH(uart)     bfin_read16(((uart)->port.membase + OFFSET_DLH))
 #define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
 #define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
 #define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
 
 #define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
@@ -46,6 +45,7 @@
 struct bfin_serial_port {
         struct uart_port        port;
         unsigned int            old_status;
+       unsigned int lsr;
 #ifdef CONFIG_SERIAL_BFIN_DMA
        int                     tx_done;
        int                     tx_count;
@@ -56,14 +56,34 @@ struct bfin_serial_port {
        unsigned int            rx_dma_channel;
        struct work_struct      tx_dma_workqueue;
 #else
-       struct work_struct      cts_workqueue;
+# if ANOMALY_05000230
+       unsigned int anomaly_threshold;
+# endif
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       struct work_struct      cts_workqueue;
        int                     cts_pin;
        int                     rts_pin;
 #endif
 };
 
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
+{
+       unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
+       uart->lsr |= (lsr & (BI|FE|PE|OE));
+       return lsr | uart->lsr;
+}
+
+static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
+{
+       uart->lsr = 0;
+       bfin_write16(uart->port.membase + OFFSET_LSR, -1);
+}
+
 struct bfin_serial_port bfin_serial_ports[NR_PORTS];
 struct bfin_serial_res {
        unsigned long   uart_base_addr;
index 86e45c3798389270f6feb33cfb40205ced3afc52..8fc672d3105747a75efa3cb86676f6877c176a0c 100644 (file)
@@ -23,7 +23,6 @@
 #define UART_GET_DLH(uart)     bfin_read16(((uart)->port.membase + OFFSET_DLH))
 #define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
 #define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
 #define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
 
 #define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
@@ -58,6 +57,7 @@
 struct bfin_serial_port {
         struct uart_port        port;
         unsigned int            old_status;
+       unsigned int lsr;
 #ifdef CONFIG_SERIAL_BFIN_DMA
        int                     tx_done;
        int                     tx_count;
@@ -67,15 +67,31 @@ struct bfin_serial_port {
        unsigned int            tx_dma_channel;
        unsigned int            rx_dma_channel;
        struct work_struct      tx_dma_workqueue;
-#else
-       struct work_struct      cts_workqueue;
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       struct work_struct      cts_workqueue;
        int             cts_pin;
        int             rts_pin;
 #endif
 };
 
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
+{
+       unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
+       uart->lsr |= (lsr & (BI|FE|PE|OE));
+       return lsr | uart->lsr;
+}
+
+static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
+{
+       uart->lsr = 0;
+       bfin_write16(uart->port.membase + OFFSET_LSR, -1);
+}
+
 struct bfin_serial_port bfin_serial_ports[NR_PORTS];
 struct bfin_serial_res {
        unsigned long   uart_base_addr;
index 3770aa38ee9f9f6b039316ca1714997e431933d3..7e6339f62a5058ae5e4226fef033c9985968f96e 100644 (file)
@@ -24,6 +24,8 @@
 #define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
 #define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
 #define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
+#define UART_GET_MSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_MSR))
+#define UART_GET_MCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_MCR))
 
 #define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
 #define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
@@ -32,7 +34,9 @@
 #define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
 #define UART_PUT_LSR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_LSR),v)
 #define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
+#define UART_CLEAR_LSR(uart)    bfin_write16(((uart)->port.membase + OFFSET_LSR), -1)
 #define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
+#define UART_PUT_MCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_MCR),v)
 
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
@@ -68,10 +72,9 @@ struct bfin_serial_port {
        unsigned int            tx_dma_channel;
        unsigned int            rx_dma_channel;
        struct work_struct      tx_dma_workqueue;
-#else
-       struct work_struct      cts_workqueue;
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       struct work_struct      cts_workqueue;
        int             cts_pin;
        int             rts_pin;
 #endif
index 7871d4313f49b3d7751c4cbe779d5fa321ea789c..b6f513bee56ece582d69bdba072e722b557099f0 100644 (file)
@@ -23,7 +23,6 @@
 #define UART_GET_DLH(uart)     bfin_read16(((uart)->port.membase + OFFSET_DLH))
 #define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
 #define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
 #define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
 
 #define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
@@ -46,6 +45,7 @@
 struct bfin_serial_port {
         struct uart_port        port;
         unsigned int            old_status;
+       unsigned int lsr;
 #ifdef CONFIG_SERIAL_BFIN_DMA
        int                     tx_done;
        int                     tx_count;
@@ -56,14 +56,34 @@ struct bfin_serial_port {
        unsigned int            rx_dma_channel;
        struct work_struct      tx_dma_workqueue;
 #else
-       struct work_struct      cts_workqueue;
+# if ANOMALY_05000230
+       unsigned int anomaly_threshold;
+# endif
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
+       struct work_struct      cts_workqueue;
        int                     cts_pin;
        int                     rts_pin;
 #endif
 };
 
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
+{
+       unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
+       uart->lsr |= (lsr & (BI|FE|PE|OE));
+       return lsr | uart->lsr;
+}
+
+static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
+{
+       uart->lsr = 0;
+       bfin_write16(uart->port.membase + OFFSET_LSR, -1);
+}
+
 struct bfin_serial_port bfin_serial_ports[NR_PORTS];
 struct bfin_serial_res {
        unsigned long   uart_base_addr;
index 362617f9384574b36457c0e56bbb4dcad0b0c511..3a16df2c86d857fe5df7f0768919fba6158e4594 100644 (file)
@@ -49,7 +49,8 @@
 #define bfin_read_FIO_INEN() bfin_read_FIO0_INEN()
 #define bfin_write_FIO_INEN(val) bfin_write_FIO0_INEN(val)
 
-
+#define SIC_IWR0 SICA_IWR0
+#define SIC_IWR1 SICA_IWR1
 #define SIC_IAR0 SICA_IAR0
 #define bfin_write_SIC_IMASK0 bfin_write_SICA_IMASK0
 #define bfin_write_SIC_IMASK1 bfin_write_SICA_IMASK1
index d667816486c095cfe162b143e4c7ff68449a5a21..1bc8d2f89cccf7ae69baaa1a29477f20406b7b2d 100644 (file)
@@ -559,6 +559,7 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val)
 #define bfin_write_PPI0_CONTROL(val)         bfin_write16(PPI0_CONTROL,val)
 #define bfin_read_PPI0_STATUS()              bfin_read16(PPI0_STATUS)
 #define bfin_write_PPI0_STATUS(val)          bfin_write16(PPI0_STATUS,val)
+#define bfin_clear_PPI0_STATUS()             bfin_read_PPI0_STATUS()
 #define bfin_read_PPI0_COUNT()               bfin_read16(PPI0_COUNT)
 #define bfin_write_PPI0_COUNT(val)           bfin_write16(PPI0_COUNT,val)
 #define bfin_read_PPI0_DELAY()               bfin_read16(PPI0_DELAY)
@@ -570,6 +571,7 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val)
 #define bfin_write_PPI1_CONTROL(val)         bfin_write16(PPI1_CONTROL,val)
 #define bfin_read_PPI1_STATUS()              bfin_read16(PPI1_STATUS)
 #define bfin_write_PPI1_STATUS(val)          bfin_write16(PPI1_STATUS,val)
+#define bfin_clear_PPI1_STATUS()             bfin_read_PPI1_STATUS()
 #define bfin_read_PPI1_COUNT()               bfin_read16(PPI1_COUNT)
 #define bfin_write_PPI1_COUNT(val)           bfin_write16(PPI1_COUNT,val)
 #define bfin_read_PPI1_DELAY()               bfin_read16(PPI1_DELAY)
index 69d48a2dc8e13ee15fa7c493fbe89eadc9bbaac9..ea11eaf0e922dfef1018ddae25fbee4625595e26 100644 (file)
@@ -1,43 +1,6 @@
 /* 
  * Authors:    Bjorn Wesen (bjornw@axis.com)
  *            Hans-Peter Nilsson (hp@axis.com)
- *
- * $Log: uaccess.h,v $
- * Revision 1.8  2001/10/29 13:01:48  bjornw
- * Removed unused variable tmp2 in strnlen_user
- *
- * Revision 1.7  2001/10/02 12:44:52  hp
- * Add support for 64-bit put_user/get_user
- *
- * Revision 1.6  2001/10/01 14:51:17  bjornw
- * Added register prefixes and removed underscores
- *
- * Revision 1.5  2000/10/25 03:33:21  hp
- * - Provide implementation for everything else but get_user and put_user;
- *   copying inline to/from user for constant length 0..16, 20, 24, and
- *   clearing for 0..4, 8, 12, 16, 20, 24, strncpy_from_user and strnlen_user
- *   always inline.
- * - Constraints for destination addr in get_user cannot be memory, only reg.
- * - Correct labels for PC at expected fault points.
- * - Nits with assembly code.
- * - Don't use statement expressions without value; use "do {} while (0)".
- * - Return correct values from __generic_... functions.
- *
- * Revision 1.4  2000/09/12 16:28:25  bjornw
- * * Removed comments from the get/put user asm code
- * * Constrains for destination addr in put_user cannot be memory, only reg
- *
- * Revision 1.3  2000/09/12 14:30:20  bjornw
- * MAX_ADDR_USER does not exist anymore
- *
- * Revision 1.2  2000/07/13 15:52:48  bjornw
- * New user-access functions
- *
- * Revision 1.1.1.1  2000/07/10 16:32:31  bjornw
- * CRIS architecture, working draft
- *
- *
- *
  */
 
 /* Asm:s have been tweaked (within the domain of correctness) to give
@@ -209,9 +172,9 @@ extern long __get_user_bad(void);
 /* More complex functions.  Most are inline, but some call functions that
    live in lib/usercopy.c  */
 
-extern unsigned long __copy_user(void *to, const void *from, unsigned long n);
-extern unsigned long __copy_user_zeroing(void *to, const void *from, unsigned long n);
-extern unsigned long __do_clear_user(void *to, unsigned long n);
+extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
 static inline unsigned long
 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
@@ -253,7 +216,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
 }
 
 
-/* Note that if these expand awfully if made into switch constructs, so
+/* Note that these expand awfully if made into switch constructs, so
    don't do that.  */
 
 static inline unsigned long
@@ -407,19 +370,21 @@ __constant_clear_user(void __user *to, unsigned long n)
  */
 
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+__generic_copy_from_user_nocheck(void *to, const void __user *from,
+                                unsigned long n)
 {
        return __copy_user_zeroing(to,from,n);
 }
 
 static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+__generic_copy_to_user_nocheck(void __user *to, const void *from,
+                              unsigned long n)
 {
        return __copy_user(to,from,n);
 }
 
 static inline unsigned long
-__generic_clear_user_nocheck(void *to, unsigned long n)
+__generic_clear_user_nocheck(void __user *to, unsigned long n)
 {
        return __do_clear_user(to,n);
 }
index 007cb16a6b5b7cc4b59606485ecf24aaf28a9858..76398ef87e9bcb730c864a624ed77b88be08068a 100644 (file)
 #define __NR_timerfd_create    322
 #define __NR_eventfd           323
 #define __NR_fallocate         324
-#define __NR_timerfd_settime    315
-#define __NR_timerfd_gettime    316
+#define __NR_timerfd_settime   325
+#define __NR_timerfd_gettime   326
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 325
+#define NR_syscalls 327
 
 #include <asm/arch/unistd.h>
 
index 4a1e48b9f4031b2808d973d2679e7a52cdbbab11..eb24a3f47caa212b94b7d2c1736ea7be2b59e992 100644 (file)
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
 header-y += break.h
 header-y += fpu.h
 header-y += fpswa.h
-header-y += gcc_intrin.h
 header-y += ia64regs.h
 header-y += intel_intrin.h
 header-y += intrinsics.h
@@ -12,5 +11,6 @@ header-y += ptrace_offsets.h
 header-y += rse.h
 header-y += ucontext.h
 
+unifdef-y += gcc_intrin.h
 unifdef-y += perfmon.h
 unifdef-y += ustack.h
index 7e6e3779670a3c82690a92d357d0d992247fba12..76366dc9c1a0473ead7124ddce204252537d8ebc 100644 (file)
@@ -93,6 +93,9 @@ extern __u8 isa_irq_to_vector_map[16];
 struct irq_cfg {
        ia64_vector vector;
        cpumask_t domain;
+       cpumask_t old_domain;
+       unsigned move_cleanup_count;
+       u8 move_in_progress : 1;
 };
 extern spinlock_t vector_lock;
 extern struct irq_cfg irq_cfg[NR_IRQS];
@@ -106,12 +109,19 @@ extern int assign_irq_vector (int irq);   /* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
 extern void __setup_vector_irq(int cpu);
-extern int reassign_irq_vector(int irq, int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
 extern int check_irq_used (int irq);
 extern void destroy_and_reserve_irq (unsigned int irq);
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
 static inline void ia64_resend_irq(unsigned int vector)
 {
        platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
index a93ce9ef07ff7a5497a4806d99df460030b080aa..8233b3a964c6eb590b9fbcc20fd7c23fd45fc653 100644 (file)
@@ -82,7 +82,6 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
 };
 
-#define ARCH_SUPPORTS_KRETPROBES
 #define kretprobe_blacklist_size 0
 
 #define SLOT0_OPCODE_SHIFT     (37)
@@ -122,10 +121,6 @@ extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_exceptions_notify(struct notifier_block *self,
                                    unsigned long val, void *data);
 
-/* ia64 does not need this */
-static inline void jprobe_return(void)
-{
-}
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
 extern void arch_remove_kprobe(struct kprobe *p);
index 2251118894ae0005f193a2befa8c5a11b963f4e0..f4904db3b0573c486d7bfe303076245d8d76a8f1 100644 (file)
@@ -807,6 +807,10 @@ static inline s64
 ia64_sal_physical_id_info(u16 *splid)
 {
        struct ia64_sal_retval isrv;
+
+       if (sal_revision < SAL_VERSION_CODE(3,2))
+               return -1;
+
        SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
        if (splid)
                *splid = isrv.v0;
index 87f77b11931772245001d68e10ea863d3de446eb..e72ba563f10229389eb429417e39815e370662e4 100644 (file)
 #define __NR_epoll_pwait       315
 #define __NR_utimensat         316
 #define __NR_signalfd          317
-#define __NR_timerfd           318
+#define __NR_timerfd_create    318
 #define __NR_eventfd           319
 #define __NR_fallocate         320
+#define __NR_timerfd_settime   321
+#define __NR_timerfd_gettime   322
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            321
+#define NR_syscalls            323
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 1cf26d240d839922d97e2aceb8cbcc5618b477c7..de9f47a51cc215e04d19f1a40fb413045d9d572b 100644 (file)
@@ -21,4 +21,6 @@ extern void (*mach_power_off)( void );
 
 extern void config_BSP(char *command, int len);
 
+extern void do_IRQ(int irq, struct pt_regs *fp);
+
 #endif /* _M68KNOMMU_MACHDEP_H */
index 27c2f9bb4dbdd7a6528203ccd3a98e95b17486ab..4ba98b9c5d799abc7b6df69f1477bd57fd1d3eaf 100644 (file)
 #define __NR_epoll_pwait       315
 #define __NR_utimensat         316
 #define __NR_signalfd          317
-#define __NR_timerfd           318
+#define __NR_timerfd_create    318
 #define __NR_eventfd           319
 #define __NR_fallocate         320
+#define __NR_timerfd_settime   321
+#define __NR_timerfd_gettime   322
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            321
+#define NR_syscalls            323
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index afabad230dbb4f212d057449f03f3fec3b79d171..d0e7701fa1f6cbff19370ef0a2dc6e4856e5a27f 100644 (file)
@@ -80,7 +80,6 @@ typedef unsigned int kprobe_opcode_t;
 #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
 #endif
 
-#define ARCH_SUPPORTS_KRETPROBES
 #define flush_insn_slot(p)     do { } while (0)
 #define kretprobe_blacklist_size 0
 
index 0d6238987df864f9b7f2d4f1cff8a6b7fade4fda..edc0cfd7f6e28b205d8b17df52000be78bbe1aa2 100644 (file)
 #define   CTRL_RUNLATCH        0x1
 #define SPRN_DABR      0x3F5   /* Data Address Breakpoint Register */
 #define   DABR_TRANSLATION     (1UL << 2)
+#define SPRN_DABRX     0x3F7   /* Data Address Breakpoint Register Extension */
+#define   DABRX_USER   (1UL << 0)
+#define   DABRX_KERNEL (1UL << 1)
 #define SPRN_DAR       0x013   /* Data Address Register */
 #define SPRN_DSISR     0x012   /* Data Storage Interrupt Status Register */
 #define   DSISR_NOHPTE         0x40000000      /* no translation found */
index 948db3d0d05c194d4715706fe4c932a781d23e4e..330f68caffe4b167eb70ed5d79db31cb48078919 100644 (file)
@@ -46,7 +46,6 @@ typedef u16 kprobe_opcode_t;
        ? (MAX_STACK_SIZE) \
        : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
-#define ARCH_SUPPORTS_KRETPROBES
 #define kretprobe_blacklist_size 0
 
 #define KPROBE_SWAP_INST       0x10
index 56bd838b7db4b692ccc3acc580745d048b5a2f66..bee2d81c56bfbde6392866f1d4bb05b58d1aa4b8 100644 (file)
@@ -35,7 +35,7 @@
     defined(CONFIG_CPU_SUBTYPE_SH7710) || \
     defined(CONFIG_CPU_SUBTYPE_SH7720) || \
     defined(CONFIG_CPU_SUBTYPE_SH7721)
-#define CCR3   0xa40000b4
+#define CCR3_REG       0xa40000b4
 #define CCR_CACHE_16KB  0x00010000
 #define CCR_CACHE_32KB 0x00020000
 #endif
index 500030eae7aa8d1352c5ac6dbba0385ba57b1da2..2dab0b8d9454f8d8b67afa46016b2924b6b9b28f 100644 (file)
@@ -12,7 +12,7 @@
        not     r11, r11
        stc     sr, r10
        and     r11, r10
-#ifdef CONFIG_HAS_SR_RB
+#ifdef CONFIG_CPU_HAS_SR_RB
        stc     k_g_imask, r11
        or      r11, r10
 #endif
@@ -20,7 +20,7 @@
        .endm
 
        .macro  get_current_thread_info, ti, tmp
-#ifdef CONFIG_HAS_SR_RB
+#ifdef CONFIG_CPU_HAS_SR_RB
        stc     r7_bank, \ti
 #else
        mov     #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
index 45e47c159a6e55316e74a35cdd2303b8fb0586a0..4e08210cd4c2a33b22eceb46c1788071deae9cdc 100644 (file)
@@ -44,6 +44,8 @@ extern enum sparc_cpu sparc_cpu_model;
 
 #define SUN4M_NCPUS            4              /* Architectural limit of sun4m. */
 
+extern char reboot_command[];
+
 extern struct thread_info *current_set[NR_CPUS];
 
 extern unsigned long empty_bad_page;
index 7237dd87663ec4857f9ae957c54acad25831745e..5879d71afdaa799b2bd18f038ee390e680390c4c 100644 (file)
@@ -14,8 +14,6 @@ typedef u32 kprobe_opcode_t;
 
 #define arch_remove_kprobe(p)  do {} while (0)
 
-#define ARCH_SUPPORTS_KRETPROBES
-
 #define flush_insn_slot(p)             \
 do {   flushi(&(p)->ainsn.insn[0]);    \
        flushi(&(p)->ainsn.insn[1]);    \
index ed91a5d8d4f05dce3c5ebe2f54d5e3f0ce510907..53eae091a171920d90dbc083d5fb96d0a8f67b3e 100644 (file)
@@ -30,6 +30,8 @@ enum sparc_cpu {
 #define ARCH_SUN4C_SUN4 0
 #define ARCH_SUN4 0
 
+extern char reboot_command[];
+
 /* These are here in an effort to more fully work around Spitfire Errata
  * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
  * branch, the chip can stop executing instructions until a trap occurs.
index cd9f894dd2d7c778fe4d85fdeb0f83168a16804d..c9952ea9f6980cdb3620470ff285c59e28f8acd2 100644 (file)
@@ -102,6 +102,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 static inline int
 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
+
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
+       /* Real i386 machines have no cmpxchg instruction */
+       if (boot_cpu_data.x86 == 3)
+               return -ENOSYS;
+#endif
+
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
index 143476a3cb52c8328dc653c5b60b535ea5248f30..61ad7b5d142e1d4cc1baae2d18e186369c7907c9 100644 (file)
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t;
        : (((unsigned long)current_thread_info()) + THREAD_SIZE \
           - (unsigned long)(ADDR)))
 
-#define ARCH_SUPPORTS_KRETPROBES
 #define flush_insn_slot(p)     do { } while (0)
 
 extern const int kretprobe_blacklist_size;
index 4d9367b72976a1601897af8ce3f448ad58245c11..9b17571e9bc35f3aaf68c4eb451ae4fa384578d7 100644 (file)
 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
 
+/* Declarations for definitions in lguest_guest.S */
+extern char lguest_noirq_start[], lguest_noirq_end[];
+extern const char lgstart_cli[], lgend_cli[];
+extern const char lgstart_sti[], lgend_sti[];
+extern const char lgstart_popf[], lgend_popf[];
+extern const char lgstart_pushf[], lgend_pushf[];
+extern const char lgstart_iret[], lgend_iret[];
+
+extern void lguest_iret(void);
+extern void lguest_init(void);
+
 struct lguest_regs
 {
        /* Manually saved part. */
index fec025c7f58cf0b375f94dc4335ca39fdf113125..e3b2bce0aff8e741305068fd41602e06cbf26ba4 100644 (file)
@@ -3,17 +3,29 @@
 
 /* Define nops for use with alternative() */
 
-/* generic versions from gas */
-#define GENERIC_NOP1   ".byte 0x90\n"
-#define GENERIC_NOP2           ".byte 0x89,0xf6\n"
-#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
-#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
-#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
-#define GENERIC_NOP6   ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP7   ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP8   GENERIC_NOP1 GENERIC_NOP7
+/* generic versions from gas
+   1: nop
+   2: movl %esi,%esi
+   3: leal 0x00(%esi),%esi
+   4: leal 0x00(,%esi,1),%esi
+   6: leal 0x00000000(%esi),%esi
+   7: leal 0x00000000(,%esi,1),%esi
+*/
+#define GENERIC_NOP1 ".byte 0x90\n"
+#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
+#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
+#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
+#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
+#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
 
-/* Opteron 64bit nops */
+/* Opteron 64bit nops
+   1: nop
+   2: osp nop
+   3: osp osp nop
+   4: osp osp osp nop
+*/
 #define K8_NOP1 GENERIC_NOP1
 #define K8_NOP2        ".byte 0x66,0x90\n"
 #define K8_NOP3        ".byte 0x66,0x66,0x90\n"
 #define K8_NOP7        K8_NOP4 K8_NOP3
 #define K8_NOP8        K8_NOP4 K8_NOP4
 
-/* K7 nops */
-/* uses eax dependencies (arbitary choice) */
-#define K7_NOP1  GENERIC_NOP1
+/* K7 nops
+   uses eax dependencies (arbitrary choice)
+   1: nop
+   2: movl %eax,%eax
+   3: leal (,%eax,1),%eax
+   4: leal 0x00(,%eax,1),%eax
+   6: leal 0x00000000(%eax),%eax
+   7: leal 0x00000000(,%eax,1),%eax
+*/
+#define K7_NOP1        GENERIC_NOP1
 #define K7_NOP2        ".byte 0x8b,0xc0\n"
 #define K7_NOP3        ".byte 0x8d,0x04,0x20\n"
 #define K7_NOP4        ".byte 0x8d,0x44,0x20,0x00\n"
 #define K7_NOP5        K7_NOP4 ASM_NOP1
 #define K7_NOP6        ".byte 0x8d,0x80,0,0,0,0\n"
-#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-#define K7_NOP8        K7_NOP7 ASM_NOP1
+#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
+#define K7_NOP8        K7_NOP7 ASM_NOP1
 
-/* P6 nops */
-/* uses eax dependencies (Intel-recommended choice) */
+/* P6 nops
+   uses eax dependencies (Intel-recommended choice)
+   1: nop
+   2: osp nop
+   3: nopl (%eax)
+   4: nopl 0x00(%eax)
+   5: nopl 0x00(%eax,%eax,1)
+   6: osp nopl 0x00(%eax,%eax,1)
+   7: nopl 0x00000000(%eax)
+   8: nopl 0x00000000(%eax,%eax,1)
+*/
 #define P6_NOP1        GENERIC_NOP1
 #define P6_NOP2        ".byte 0x66,0x90\n"
 #define P6_NOP3        ".byte 0x0f,0x1f,0x00\n"
@@ -63,9 +91,7 @@
 #define ASM_NOP6 K7_NOP6
 #define ASM_NOP7 K7_NOP7
 #define ASM_NOP8 K7_NOP8
-#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
-      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
-      defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)
+#elif defined(CONFIG_X86_P6_NOP)
 #define ASM_NOP1 P6_NOP1
 #define ASM_NOP2 P6_NOP2
 #define ASM_NOP3 P6_NOP3
index f7393bc516eff1c1d25ad799f0441f734a151280..143546073b958097eef072e4d8849b820995232b 100644 (file)
 #define __PHYSICAL_MASK_SHIFT  46
 #define __VIRTUAL_MASK_SHIFT   48
 
-#define KERNEL_TEXT_SIZE  (40*1024*1024)
-#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
+/*
+ * Kernel image size is limited to 128 MB (see level2_kernel_pgt in
+ * arch/x86/kernel/head_64.S), and it is mapped here:
+ */
+#define KERNEL_IMAGE_SIZE      (128*1024*1024)
+#define KERNEL_IMAGE_START     _AC(0xffffffff80000000, UL)
 
 #ifndef __ASSEMBLY__
 void clear_page(void *page);
index 81a8ee4c55fc50a941373d5c16de01da455bdee7..f224eb3c3157591ac69a53466609ea0841beefbc 100644 (file)
 */
 struct ptrace_bts_config {
        /* requested or actual size of BTS buffer in bytes */
-       u32 size;
+       __u32 size;
        /* bitmask of below flags */
-       u32 flags;
+       __u32 flags;
        /* buffer overflow signal */
-       u32 signal;
+       __u32 signal;
        /* actual size of bts_struct in bytes */
-       u32 bts_size;
+       __u32 bts_size;
 };
 #endif
 
index aada32fffec2b18fa0ae39516ea0622fcfcba631..994df3780007c7da7ea42f773cb67b50ffd9e81b 100644 (file)
@@ -61,6 +61,7 @@ header-y += efs_fs_sb.h
 header-y += elf-fdpic.h
 header-y += elf-em.h
 header-y += fadvise.h
+header-y += falloc.h
 header-y += fd.h
 header-y += fdreg.h
 header-y += fib_rules.h
index 6fe67d1939c27919c0f53396da0718d58ef71b42..6f79d40dd3c01f25c105b9bdae4f5e70a7c88e1e 100644 (file)
@@ -216,8 +216,8 @@ struct request {
        unsigned int cmd_len;
        unsigned char cmd[BLK_MAX_CDB];
 
-       unsigned int raw_data_len;
        unsigned int data_len;
+       unsigned int extra_len; /* length of alignment and padding */
        unsigned int sense_len;
        void *data;
        void *sense;
@@ -362,6 +362,7 @@ struct request_queue
        unsigned long           seg_boundary_mask;
        void                    *dma_drain_buffer;
        unsigned int            dma_drain_size;
+       unsigned int            dma_pad_mask;
        unsigned int            dma_alignment;
 
        struct blk_queue_tag    *queue_tags;
@@ -701,6 +702,7 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size);
index ac6aad98b6073ef7cd3a16b088b02ffccaadae32..1ddebfc52565f3eb4894c422c1fc5dd831ae523c 100644 (file)
@@ -37,7 +37,7 @@ SUBSYS(cpuacct)
 
 /* */
 
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 SUBSYS(mem_cgroup)
 #endif
 
index d0e17e1657dca11b86f151084a10bc87204c80a1..dcae0c8d97e6e2d04a8db04494e899eb403e1c18 100644 (file)
@@ -138,6 +138,12 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 #define noinline
 #endif
 
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead.  For documentation reasons.
+ */
+#define noinline_for_stack noinline
+
 #ifndef __always_inline
 #define __always_inline inline
 #endif
index 70817a9968be20bba98670e3d1332141217901a0..fd722f3cb6bd362320b12f191e2e81a19380a602 100644 (file)
@@ -172,7 +172,5 @@ int cn_cb_equal(struct cb_id *, struct cb_id *);
 
 void cn_queue_wrapper(struct work_struct *work);
 
-extern int cn_already_initialized;
-
 #endif                         /* __KERNEL__ */
 #endif                         /* __CONNECTOR_H */
index f592d6de3b971592a2b289e57175d776c212f3b1..7266124361b44f24259db727c7dc23d0694fb8de 100644 (file)
@@ -27,6 +27,11 @@ struct debugfs_blob_wrapper {
 };
 
 #if defined(CONFIG_DEBUG_FS)
+
+/* declared over in file.c */
+extern const struct file_operations debugfs_file_operations;
+extern const struct inode_operations debugfs_link_operations;
+
 struct dentry *debugfs_create_file(const char *name, mode_t mode,
                                   struct dentry *parent, void *data,
                                   const struct file_operations *fops);
index 17ddb55430ae19c28704a3da24f230b7a2e2ff6a..54552d21296efe9037de1d034ef45cafa4199067 100644 (file)
@@ -7,6 +7,8 @@
  * Delay routines, using a pre-computed "loops_per_jiffy" value.
  */
 
+#include <linux/kernel.h>
+
 extern unsigned long loops_per_jiffy;
 
 #include <asm/delay.h>
@@ -32,7 +34,11 @@ extern unsigned long loops_per_jiffy;
 #endif
 
 #ifndef ndelay
-#define ndelay(x)      udelay(((x)+999)/1000)
+static inline void ndelay(unsigned long x)
+{
+       udelay(DIV_ROUND_UP(x, 1000));
+}
+#define ndelay(x) ndelay(x)
 #endif
 
 void calibrate_delay(void);
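
The delay.h hunk above turns ndelay() from a macro into a static inline, pulling in linux/kernel.h for DIV_ROUND_UP and fixing the argument type to unsigned long. A stand-alone sketch of the rounding behaviour, with DEMO_DIV_ROUND_UP mirroring the kernel helper and a printf standing in for the real udelay() call:

#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void demo_ndelay(unsigned long nsecs)
{
        unsigned long usecs = DEMO_DIV_ROUND_UP(nsecs, 1000UL);

        printf("%4lu ns -> delay %lu us\n", nsecs, usecs);
}

int main(void)
{
        demo_ndelay(1);         /* rounds up to 1 us */
        demo_ndelay(1000);      /* exactly 1 us      */
        demo_ndelay(1001);      /* rounds up to 2 us */
        return 0;
}
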
index acbb364674ff1f8516f50747d0d48375f15df49a..261e43a4c873042c687138db87e9e1a242e6ffd8 100644 (file)
@@ -366,7 +366,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
  */
 static inline void dma_async_issue_pending(struct dma_chan *chan)
 {
-       return chan->device->device_issue_pending(chan);
+       chan->device->device_issue_pending(chan);
 }
 
 #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
index 532d13adabc4f5648dafee76f6edb319063f7a02..0a90e1c3a42207c9d961ff29315dc48c95e0ddec 100644 (file)
@@ -45,8 +45,8 @@ struct compat_elf_prpsinfo
        char                            pr_zomb;
        char                            pr_nice;
        compat_ulong_t                  pr_flag;
-       compat_uid_t                    pr_uid;
-       compat_gid_t                    pr_gid;
+       __compat_uid_t                  pr_uid;
+       __compat_gid_t                  pr_gid;
        compat_pid_t                    pr_pid, pr_ppid, pr_pgrp, pr_sid;
        char                            pr_fname[16];
        char                            pr_psargs[ELF_PRARGSZ];
index 697da4bce6c513b003e9118d95e8bda34eeeb81a..1285c583b2d868421366fc71bb05cc12c507225f 100644 (file)
@@ -227,5 +227,6 @@ extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *,
                                                ext4_lblk_t *, ext4_fsblk_t *);
 extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
                                                ext4_lblk_t *, ext4_fsblk_t *);
+extern void ext4_ext_drop_refs(struct ext4_ext_path *);
 #endif /* _LINUX_EXT4_EXTENTS */
 
index 09a3b18918c70d89c9ab795ce40620d4c0067803..32c2ac49a07071c67264c854707aebe8ae03850f 100644 (file)
 #define dev_to_disk(device) container_of(device, struct gendisk, dev)
 #define dev_to_part(device) container_of(device, struct hd_struct, dev)
 
-extern struct device_type disk_type;
 extern struct device_type part_type;
 extern struct kobject *block_depr;
 extern struct class block_class;
 
+extern const struct seq_operations partitions_op;
+extern const struct seq_operations diskstats_op;
+
 enum {
 /* These three have identical behaviour; use the second one if DOS FDISK gets
    confused about extended/logical partitions starting past cylinder 1023. */
@@ -556,7 +558,6 @@ extern struct gendisk *alloc_disk_node(int minors, int node_id);
 extern struct gendisk *alloc_disk(int minors);
 extern struct kobject *get_disk(struct gendisk *disk);
 extern void put_disk(struct gendisk *disk);
-extern void genhd_media_change_notify(struct gendisk *disk);
 extern void blk_register_region(dev_t devt, unsigned long range,
                        struct module *module,
                        struct kobject *(*probe)(dev_t, int *, void *),
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
new file mode 100644 (file)
index 0000000..4987a84
--- /dev/null
@@ -0,0 +1,95 @@
+#ifndef __LINUX_GPIO_H
+#define __LINUX_GPIO_H
+
+/* see Documentation/gpio.txt */
+
+#ifdef CONFIG_GENERIC_GPIO
+#include <asm/gpio.h>
+
+#else
+
+/*
+ * Some platforms don't support the GPIO programming interface.
+ *
+ * In case some driver uses it anyway (it should normally have
+ * depended on GENERIC_GPIO), these routines help the compiler
+ * optimize out much GPIO-related code ... or trigger a runtime
+ * warning when something is wrongly called.
+ */
+
+static inline int gpio_is_valid(int number)
+{
+       return 0;
+}
+
+static inline int gpio_request(unsigned gpio, const char *label)
+{
+       return -ENOSYS;
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+       /* GPIO can never have been requested */
+       WARN_ON(1);
+}
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+       return -ENOSYS;
+}
+
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+       return -ENOSYS;
+}
+
+static inline int gpio_get_value(unsigned gpio)
+{
+       /* GPIO can never have been requested or set as {in,out}put */
+       WARN_ON(1);
+       return 0;
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+       /* GPIO can never have been requested or set as output */
+       WARN_ON(1);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+       /* GPIO can never have been requested or set as {in,out}put */
+       WARN_ON(1);
+       return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+       /* GPIO can never have been requested or set as {in,out}put */
+       WARN_ON(1);
+       return 0;
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+       /* GPIO can never have been requested or set as output */
+       WARN_ON(1);
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+       /* GPIO can never have been requested or set as input */
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+static inline int irq_to_gpio(unsigned irq)
+{
+       /* irq can never have been returned from gpio_to_irq() */
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+#endif
+
+#endif /* __LINUX_GPIO_H */
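
[Editor's note] For reference, a minimal driver-side sketch of the interface stubbed out above; the GPIO number, label, and function name are hypothetical. On a platform without GENERIC_GPIO, gpio_request() returns -ENOSYS and the compiler can discard the rest of the path.

#include <linux/errno.h>
#include <linux/gpio.h>

static int example_led_setup(void)
{
	int gpio = 42;			/* hypothetical GPIO number */
	int err;

	if (!gpio_is_valid(gpio))
		return -ENODEV;

	err = gpio_request(gpio, "example-led");
	if (err)
		return err;

	err = gpio_direction_output(gpio, 0);
	if (err) {
		gpio_free(gpio);
		return err;
	}

	gpio_set_value(gpio, 1);	/* drive the (hypothetical) LED on */
	return 0;
}
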
index 2961ec788046627c823feb291d7d522f15982fbf..49829988bfa02cb85a02e38db87f81d077261060 100644 (file)
@@ -109,6 +109,14 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
+#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
+#else
+# define rcu_irq_enter() do { } while (0)
+# define rcu_irq_exit() do { } while (0)
+#endif /* CONFIG_PREEMPT_RCU */
+
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
  * because NMI handlers may not preempt and the ops are
@@ -117,6 +125,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
  */
 #define __irq_enter()                                  \
        do {                                            \
+               rcu_irq_enter();                        \
                account_system_vtime(current);          \
                add_preempt_count(HARDIRQ_OFFSET);      \
                trace_hardirq_enter();                  \
@@ -135,6 +144,7 @@ extern void irq_enter(void);
                trace_hardirq_exit();                   \
                account_system_vtime(current);          \
                sub_preempt_count(HARDIRQ_OFFSET);      \
+               rcu_irq_exit();                         \
        } while (0)
 
 /*
index 4dd4c04ff2f468a1fddbbeb47cbf7c69bea1ed1b..c975caf75385d2e634cdb151dae416e2487533a0 100644 (file)
@@ -1,3 +1,6 @@
+extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+                                 unsigned long shift,
+                                 unsigned long boundary_size);
 extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
                                      unsigned long start, unsigned int nr,
                                      unsigned long shift,
index 4a6ce82ba03971f832a8f325eb150907090213bc..0f28486f636067677cf9b80c1279c7da88ffc4c9 100644 (file)
@@ -125,11 +125,11 @@ struct jprobe {
 DECLARE_PER_CPU(struct kprobe *, current_kprobe);
 DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
-#ifdef ARCH_SUPPORTS_KRETPROBES
+#ifdef CONFIG_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                   struct pt_regs *regs);
 extern int arch_trampoline_kprobe(struct kprobe *p);
-#else /* ARCH_SUPPORTS_KRETPROBES */
+#else /* CONFIG_KRETPROBES */
 static inline void arch_prepare_kretprobe(struct kretprobe *rp,
                                        struct pt_regs *regs)
 {
@@ -138,7 +138,7 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
 {
        return 0;
 }
-#endif /* ARCH_SUPPORTS_KRETPROBES */
+#endif /* CONFIG_KRETPROBES */
 /*
  * Function-return probe -
  * Note:
index 4de4fd2d8607d19df5f9eb4cb3a88325fd3d60f8..c1ec04fd000d42d27d310ce79c92944f09d63c5f 100644 (file)
@@ -221,6 +221,7 @@ struct kvm_vapic_addr {
  * Get size for mmap(vcpu_fd)
  */
 #define KVM_GET_VCPU_MMAP_SIZE    _IO(KVMIO,   0x04) /* in bytes */
+#define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
 
 /*
  * Extension capability list.
@@ -230,8 +231,8 @@ struct kvm_vapic_addr {
 #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
 #define KVM_CAP_USER_MEMORY 3
 #define KVM_CAP_SET_TSS_ADDR 4
-#define KVM_CAP_EXT_CPUID 5
 #define KVM_CAP_VAPIC 6
+#define KVM_CAP_EXT_CPUID 7
 
 /*
  * ioctls for VM fds
@@ -249,7 +250,6 @@ struct kvm_vapic_addr {
 #define KVM_CREATE_VCPU           _IO(KVMIO,  0x41)
 #define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 0x42, struct kvm_dirty_log)
 #define KVM_SET_MEMORY_ALIAS      _IOW(KVMIO, 0x43, struct kvm_memory_alias)
-#define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x48, struct kvm_cpuid2)
 /* Device model IOC */
 #define KVM_CREATE_IRQCHIP       _IO(KVMIO,  0x60)
 #define KVM_IRQ_LINE             _IOW(KVMIO, 0x61, struct kvm_irq_level)
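
[Editor's note] A hedged user-space sketch of calling the renumbered KVM_GET_SUPPORTED_CPUID ioctl; it assumes a kernel that exposes this as a /dev/kvm system ioctl (as its new placement suggests) and that 64 entries are enough for the query.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int nent = 64;	/* assumption: large enough for this sketch */
	struct kvm_cpuid2 *cpuid;

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
	cpuid->nent = nent;

	if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
		perror("KVM_GET_SUPPORTED_CPUID");
		return 1;
	}
	printf("supported cpuid entries: %u\n", cpuid->nent);
	free(cpuid);
	return 0;
}
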
index ea4764b0a2f49dc40772be7e0ad98053a83d3084..928b0d59e9ba07d64686975f5db6022ad608ef8d 100644 (file)
@@ -107,6 +107,7 @@ struct kvm_memory_slot {
 struct kvm {
        struct mutex lock; /* protects the vcpus array and APIC accesses */
        spinlock_t mmu_lock;
+       struct rw_semaphore slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
index 3f01e2bae1a1dcb22303b7f1edd1399c20d47f6c..d31e36ebb436fd25ec087ac135b5fdbef68ff815 100644 (file)
@@ -64,7 +64,6 @@ struct maple_driver {
        int (*connect) (struct maple_device * dev);
        void (*disconnect) (struct maple_device * dev);
        struct device_driver drv;
-       int registered;
 };
 
 void maple_getcond_callback(struct maple_device *dev,
index 5df879dc3776c732dc62e017850f0eb4739429e3..430f6adf9762d175096246d92dfe999f6b6bba9c 100644 (file)
@@ -104,10 +104,16 @@ static inline void marker_update_probe_range(struct marker *begin,
 #define MARK_NOARGS " "
 
 /* To be used for string format validity checking with gcc */
-static inline void __printf(1, 2) __mark_check_format(const char *fmt, ...)
+static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...)
 {
 }
 
+#define __mark_check_format(format, args...)                           \
+       do {                                                            \
+               if (0)                                                  \
+                       ___mark_check_format(format, ## args);          \
+       } while (0)
+
 extern marker_probe_func __mark_empty_function;
 
 extern void marker_probe_cb(const struct marker *mdata,
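
[Editor's note] The "if (0)" wrapper added above exists purely so gcc still applies printf-style format checking to the marker arguments without the dummy function ever being called. A self-contained user-space sketch of the same trick (all names here are illustrative, not the kernel's):

#include <stdio.h>

static inline void __attribute__((format(printf, 1, 2)))
check_format(const char *fmt, ...)
{
}

#define trace_point(fmt, args...)			\
	do {						\
		if (0)					\
			check_format(fmt, ## args);	\
		printf(fmt, ## args);	/* stand-in for the real probe call */ \
	} while (0)

int main(void)
{
	trace_point("value=%d\n", 42);		/* arguments type-checked at compile time */
	/* trace_point("value=%d\n", "x"); would trigger a -Wformat warning */
	return 0;
}
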
index 04075628cb9a315d31acc90bf2bc165606d80e15..8b1c4295848b77b6808c1f451434e02e599fffb2 100644 (file)
@@ -25,18 +25,20 @@ struct page_cgroup;
 struct page;
 struct mm_struct;
 
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
 extern void mm_free_cgroup(struct mm_struct *mm);
-extern void page_assign_page_cgroup(struct page *page,
-                                       struct page_cgroup *pc);
+
+#define page_reset_bad_cgroup(page)    ((page)->page_cgroup = 0)
+
 extern struct page_cgroup *page_get_page_cgroup(struct page *page);
 extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
-extern void mem_cgroup_uncharge(struct page_cgroup *pc);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+                                       gfp_t gfp_mask);
 extern void mem_cgroup_uncharge_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
+extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
@@ -44,11 +46,9 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct mem_cgroup *mem_cont,
                                        int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
-                                       gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
-#define vm_match_cgroup(mm, cgroup)    \
+#define mm_match_cgroup(mm, cgroup)    \
        ((cgroup) == rcu_dereference((mm)->mem_cgroup))
 
 extern int mem_cgroup_prepare_migration(struct page *page);
@@ -72,7 +72,7 @@ extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
 extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
                                struct zone *zone, int priority);
 
-#else /* CONFIG_CGROUP_MEM_CONT */
+#else /* CONFIG_CGROUP_MEM_RES_CTLR */
 static inline void mm_init_cgroup(struct mm_struct *mm,
                                        struct task_struct *p)
 {
@@ -82,8 +82,7 @@ static inline void mm_free_cgroup(struct mm_struct *mm)
 {
 }
 
-static inline void page_assign_page_cgroup(struct page *page,
-                                               struct page_cgroup *pc)
+static inline void page_reset_bad_cgroup(struct page *page)
 {
 }
 
@@ -92,33 +91,27 @@ static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
        return NULL;
 }
 
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-                                       gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct page *page,
+                                       struct mm_struct *mm, gfp_t gfp_mask)
 {
        return 0;
 }
 
-static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
+static inline int mem_cgroup_cache_charge(struct page *page,
+                                       struct mm_struct *mm, gfp_t gfp_mask)
 {
+       return 0;
 }
 
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
-                                               bool active)
-{
-}
-
-static inline int mem_cgroup_cache_charge(struct page *page,
-                                               struct mm_struct *mm,
-                                               gfp_t gfp_mask)
+static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
-       return 0;
 }
 
-static inline int vm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
+static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
 {
        return 1;
 }
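
[Editor's note] A hedged sketch of the kind of caller the vm_match_cgroup -> mm_match_cgroup rename touches: checking whether a task's mm belongs to a given memory cgroup. Everything except mm_match_cgroup() itself is hypothetical here.

#include <linux/memcontrol.h>
#include <linux/sched.h>

static int example_task_in_cgroup(struct task_struct *task,
				  struct mem_cgroup *mem)
{
	int match;

	task_lock(task);
	match = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return match;
}
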
index bfee0bd1d43545742504225e0ab0754a315e7aa9..af190ceab9719dc68b148e3b22b05b28a54c64d2 100644 (file)
@@ -64,10 +64,7 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
            spinlock_t ptl;
 #endif
-           struct {
-                  struct kmem_cache *slab;     /* SLUB: Pointer to slab */
-                  void *end;                   /* SLUB: end marker */
-           };
+           struct kmem_cache *slab;    /* SLUB: Pointer to slab */
            struct page *first_page;    /* Compound tail pages */
        };
        union {
@@ -91,7 +88,7 @@ struct page {
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        unsigned long page_cgroup;
 #endif
 };
@@ -225,7 +222,7 @@ struct mm_struct {
        /* aio bits */
        rwlock_t                ioctx_list_lock;
        struct kioctx           *ioctx_list;
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct mem_cgroup *mem_cgroup;
 #endif
 };
index b74b615492e8117c47d37f348b13668ef4ffbce5..f0680c2bee73b16cc8b642f1d7de2e1f9133d43c 100644 (file)
@@ -31,7 +31,7 @@
 #define NF_VERDICT_QMASK 0xffff0000
 #define NF_VERDICT_QBITS 16
 
-#define NF_QUEUE_NR(x) (((x << NF_VERDICT_QBITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
 
 /* only for userspace compatibility */
 #ifndef __KERNEL__
index a0525a1f4715d7cce2e1e883070b75a655b12359..e3d79593fb3a53186523071b4e22d6fbee47e79a 100644 (file)
@@ -25,6 +25,7 @@ struct netpoll {
 
 struct netpoll_info {
        atomic_t refcnt;
+       int rx_flags;
        spinlock_t rx_lock;
        struct netpoll *rx_np; /* netpoll that registered an rx_hook */
        struct sk_buff_head arp_tx; /* list of arp requests to reply to */
@@ -50,12 +51,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
        unsigned long flags;
        int ret = 0;
 
-       if (!npinfo || !npinfo->rx_np)
+       if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
                return 0;
 
        spin_lock_irqsave(&npinfo->rx_lock, flags);
-       /* check rx_np again with the lock held */
-       if (npinfo->rx_np && __netpoll_rx(skb))
+       /* check rx_flags again with the lock held */
+       if (npinfo->rx_flags && __netpoll_rx(skb))
                ret = 1;
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
index 87195b62de5261cf0bc6dd31e171386de769ccdf..f3165e7ac4312529b29b81db910855338500f62e 100644 (file)
@@ -388,6 +388,16 @@ struct pci_driver {
 
 #define        to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
 
+/**
+ * DECLARE_PCI_DEVICE_TABLE - macro used to describe a pci device table
+ * @_table: device table name
+ *
+ * This macro is used to create a struct pci_device_id array (a device table)
+ * in a generic manner.
+ */
+#define DECLARE_PCI_DEVICE_TABLE(_table) \
+       const struct pci_device_id _table[] __devinitconst
+
 /**
  * PCI_DEVICE - macro used to describe a specific pci device
  * @vend: the 16 bit PCI Vendor ID
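
[Editor's note] A hedged usage sketch for the new DECLARE_PCI_DEVICE_TABLE() helper; the table name and vendor/device IDs are hypothetical, and <linux/pci.h> plus <linux/module.h> are assumed.

static DECLARE_PCI_DEVICE_TABLE(example_pci_ids) = {
	{ PCI_DEVICE(0x10ec, 0x8139) },	/* hypothetical vendor/device pair */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);
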
index e51b531cd0b2d970631bc6097019869b74c55830..47fbcba118506c04f628e7a0337067f9ea94fa86 100644 (file)
@@ -235,6 +235,8 @@ struct bitmap {
 
        unsigned long flags;
 
+       int allclean;
+
        unsigned long max_write_behind; /* write-behind mode */
        atomic_t behind_writes;
 
index 85a068bab625a85d9a62e34859eed5e580a31b16..7bb6d1abf71e85944bc20e82243cc88df8353e71 100644 (file)
@@ -83,6 +83,7 @@ struct mdk_rdev_s
 #define        BarriersNotsupp 5               /* BIO_RW_BARRIER is not supported */
 #define        AllReserved     6               /* If whole device is reserved for
                                         * one array */
+#define        AutoDetected    7               /* added by auto-detect */
 
        int desc_nr;                    /* descriptor index in the superblock */
        int raid_disk;                  /* role of device in array */
index 4d6624260b4c241e7f6299ea6396642e26b400ca..b3dccd68629e1c0481dd41471798ec25884fafd8 100644 (file)
@@ -160,5 +160,8 @@ extern void rcu_restart_cpu(int cpu);
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 
+#define rcu_enter_nohz()       do { } while (0)
+#define rcu_exit_nohz()                do { } while (0)
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUCLASSIC_H */
index 60c2a033b19e0fa7333b29d591a5f00d497a838a..01152ed532c8025decbf57fbf6859273a2e3994d 100644 (file)
@@ -82,5 +82,27 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 
 struct softirq_action;
 
+#ifdef CONFIG_NO_HZ
+DECLARE_PER_CPU(long, dynticks_progress_counter);
+
+static inline void rcu_enter_nohz(void)
+{
+       __get_cpu_var(dynticks_progress_counter)++;
+       WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
+       mb();
+}
+
+static inline void rcu_exit_nohz(void)
+{
+       mb();
+       __get_cpu_var(dynticks_progress_counter)++;
+       WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
+}
+
+#else /* CONFIG_NO_HZ */
+#define rcu_enter_nohz()       do { } while (0)
+#define rcu_exit_nohz()                do { } while (0)
+#endif /* CONFIG_NO_HZ */
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPREEMPT_H */
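
[Editor's note] A minimal, self-contained user-space sketch of the even/odd protocol that rcu_enter_nohz()/rcu_exit_nohz() implement: the per-CPU counter is even while ticks are stopped (idle) and odd while the CPU is active, so another CPU can classify the state and detect a pass through idle since a snapshot. The names below are illustrative only.

#include <assert.h>
#include <stdio.h>

static long dynticks_counter = 1;	/* odd: CPU active */

static void enter_nohz(void) { dynticks_counter++; }	/* becomes even: idle */
static void exit_nohz(void)  { dynticks_counter++; }	/* becomes odd: active */

int main(void)
{
	long snap = dynticks_counter;

	enter_nohz();
	assert((dynticks_counter & 0x1) == 0);	/* even: no RCU readers possible */
	exit_nohz();
	assert((dynticks_counter & 0x1) == 1);	/* odd: active again */

	/* the counter moved past the snapshot, so the CPU went through idle */
	printf("passed through idle: %s\n",
	       dynticks_counter != snap ? "yes" : "no");
	return 0;
}
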
index e217d188a102bec8b15693f6a871945f823431be..9ae4030067a934b23293a72fe975ab72eda19d72 100644 (file)
@@ -242,6 +242,7 @@ struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
@@ -1189,7 +1190,7 @@ struct task_struct {
        int softirq_context;
 #endif
 #ifdef CONFIG_LOCKDEP
-# define MAX_LOCK_DEPTH 30UL
+# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
@@ -1541,10 +1542,6 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
-extern unsigned int sysctl_sched_min_bal_int_shares;
-extern unsigned int sysctl_sched_max_bal_int_shares;
-#endif
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
                struct file *file, void __user *buffer, size_t *length,
similarity index 73%
rename from include/asm-sh/sci.h
rename to include/linux/serial_sci.h
index 52e73660c1298ed6f2378cb84ade382c0903fb8b..893cc53486bc676a99ed2712fd220c66623f0814 100644 (file)
@@ -1,12 +1,10 @@
-#ifndef __ASM_SH_SCI_H
-#define __ASM_SH_SCI_H
+#ifndef __LINUX_SERIAL_SCI_H
+#define __LINUX_SERIAL_SCI_H
 
 #include <linux/serial_core.h>
 
 /*
- * Generic header for SuperH SCI(F)
- *
- * Do not place SH-specific parts in here, sh64 and h8300 depend on this too.
+ * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
  */
 
 /* Offsets into the sci_port->irqs array */
@@ -31,4 +29,4 @@ struct plat_sci_port {
 
 int early_sci_setup(struct uart_port *port);
 
-#endif /* __ASM_SH_SCI_H */
+#endif /* __LINUX_SERIAL_SCI_H */
index 57deecc79d52787800aa98b72d0cefe9e556e495..b00c1c73eb0a7a3108f2fcc76d4c6fd4ee54fbb8 100644 (file)
@@ -61,7 +61,7 @@ struct kmem_cache {
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
-       int order;
+       int order;              /* Current preferred allocation order */
 
        /*
         * Avoid an extra cache line for UP, SMP and for the node local to
@@ -138,11 +138,11 @@ static __always_inline int kmalloc_index(size_t size)
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
+       if (size <=   4 * 1024) return 12;
 /*
  * The following is only needed to support architectures with a larger page
  * size than 4k.
  */
-       if (size <=   4 * 1024) return 12;
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
index 64236b73c724e2bb4a8b357d8f8c50539377a9ca..d53642d2d8992d33e234d4f24f7dbe366d86cab7 100644 (file)
 
 #define SM501_DEVICEID_SM501           (0x05010000)
 #define SM501_DEVICEID_IDMASK          (0xffff0000)
+#define SM501_DEVICEID_REVMASK         (0x000000ff)
 
 #define SM501_PLLCLOCK_COUNT           (0x000064)
 #define SM501_MISC_TIMING              (0x000068)
 #define SM501_CURRENT_SDRAM_CLOCK      (0x00006C)
 
+#define SM501_PROGRAMMABLE_PLL_CONTROL (0x000074)
+
 /* GPIO base */
 #define SM501_GPIO                     (0x010000)
 #define SM501_GPIO_DATA_LOW            (0x00)
index 932a9efee8a5c7c9508b6c6a4f1caf565069825e..bca13454470025684a946cc63a59026dced33967 100644 (file)
@@ -24,7 +24,8 @@ extern int sm501_unit_power(struct device *dev,
 extern unsigned long sm501_set_clock(struct device *dev,
                                     int clksrc, unsigned long freq);
 
-extern unsigned long sm501_find_clock(int clksrc, unsigned long req_freq);
+extern unsigned long sm501_find_clock(struct device *dev,
+                                     int clksrc, unsigned long req_freq);
 
 /* sm501_misc_control
  *
index 2372e2e6b5271addb820c2c30f0f2549fe340134..583e0481dfa028cfe137156d74dd342dc5a03f9c 100644 (file)
@@ -94,10 +94,9 @@ enum usb_interface_condition {
  * @altsetting: array of interface structures, one for each alternate
  *     setting that may be selected.  Each one includes a set of
  *     endpoint configurations.  They will be in no particular order.
- * @num_altsetting: number of altsettings defined.
  * @cur_altsetting: the current altsetting.
+ * @num_altsetting: number of altsettings defined.
  * @intf_assoc: interface association descriptor
- * @driver: the USB driver that is bound to this interface.
  * @minor: the minor number assigned to this interface, if this
  *     interface is bound to a driver that uses the USB major number.
  *     If this interface does not use the USB major, this field should
@@ -781,8 +780,7 @@ static inline int usb_endpoint_is_isoc_out(
        .idVendor = (vend), \
        .idProduct = (prod)
 /**
- * USB_DEVICE_VER - macro used to describe a specific usb device with a
- *             version range
+ * USB_DEVICE_VER - describe a specific usb device with a version range
  * @vend: the 16 bit USB Vendor ID
  * @prod: the 16 bit USB Product ID
  * @lo: the bcdDevice_lo value
@@ -799,8 +797,7 @@ static inline int usb_endpoint_is_isoc_out(
        .bcdDevice_hi = (hi)
 
 /**
- * USB_DEVICE_INTERFACE_PROTOCOL - macro used to describe a usb
- *             device with a specific interface protocol
+ * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol
  * @vend: the 16 bit USB Vendor ID
  * @prod: the 16 bit USB Product ID
  * @pr: bInterfaceProtocol value
@@ -846,8 +843,7 @@ static inline int usb_endpoint_is_isoc_out(
        .bInterfaceProtocol = (pr)
 
 /**
- * USB_DEVICE_AND_INTERFACE_INFO - macro used to describe a specific usb device
- *             with a class of usb interfaces
+ * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
  * @vend: the 16 bit USB Vendor ID
  * @prod: the 16 bit USB Product ID
  * @cl: bInterfaceClass value
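
[Editor's note] A hedged sketch of a device table built from the match macros documented above; the IDs and interface class/subclass/protocol values are hypothetical, and <linux/usb.h> plus <linux/module.h> are assumed.

static const struct usb_device_id example_usb_ids[] = {
	{ USB_DEVICE(0x0525, 0xa4a2) },		/* hypothetical VID/PID */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x0525, 0xa4a2,
					USB_CLASS_COMM, 2, 0xff) },
	{ }
};
MODULE_DEVICE_TABLE(usb, example_usb_ids);
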
index 75370ec0923e40760e5616c9864e34e272703832..9f1b4b46151ebbebe40413a3368c18afa850704c 100644 (file)
@@ -246,8 +246,7 @@ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
 {
-       atomic_long_dec(&page_zone(page)->vm_stat[item]);
-       atomic_long_dec(&vm_stat[item]);
+       __dec_zone_state(page_zone(page), item);
 }
 
 /*
index 70013c5f4e59aba0d83f7e99df4da8898d94b05f..89cd011edb998402912543b4486309055f267b8f 100644 (file)
@@ -175,7 +175,8 @@ extern void build_ehash_secret(void);
 static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport,
                                        const __be32 faddr, const __be16 fport)
 {
-       return jhash_2words((__force __u32) laddr ^ (__force __u32) faddr,
+       return jhash_3words((__force __u32) laddr,
+                           (__force __u32) faddr,
                            ((__u32) lport) << 16 | (__force __u32)fport,
                            inet_ehash_secret);
 }
index 9462d6ae2f372170594a1e7923fadb9a173f0094..9619b9d35c9e7d581db9b35448af14c1b9598b8c 100644 (file)
@@ -411,6 +411,7 @@ struct sctp_event_subscribe {
        __u8 sctp_shutdown_event;
        __u8 sctp_partial_delivery_event;
        __u8 sctp_adaptation_layer_event;
+       __u8 sctp_authentication_event;
 };
 
 /*
@@ -587,7 +588,7 @@ struct sctp_authchunk {
  * endpoint requires the peer to use.
 */
 struct sctp_hmacalgo {
-       __u16           shmac_num_idents;
+       __u32           shmac_num_idents;
        __u16           shmac_idents[];
 };
 
@@ -600,7 +601,7 @@ struct sctp_hmacalgo {
 struct sctp_authkey {
        sctp_assoc_t    sca_assoc_id;
        __u16           sca_keynumber;
-       __u16           sca_keylen;
+       __u16           sca_keylength;
        __u8            sca_key[];
 };
 
@@ -693,8 +694,9 @@ struct sctp_status {
  * the peer requires to be received authenticated only.
  */
 struct sctp_authchunks {
-       sctp_assoc_t            gauth_assoc_id;
-       uint8_t                 gauth_chunks[];
+       sctp_assoc_t    gauth_assoc_id;
+       __u32           gauth_number_of_chunks;
+       uint8_t         gauth_chunks[];
 };
 
 /*
index f698a5af500791ae7158457f16c9f00df503cbbb..074ac97f55e32a505ea3479ba85d0c3c619ac9e6 100644 (file)
@@ -366,10 +366,29 @@ config RESOURCE_COUNTERS
           infrastructure that works with cgroups
        depends on CGROUPS
 
+config CGROUP_MEM_RES_CTLR
+       bool "Memory Resource Controller for Control Groups"
+       depends on CGROUPS && RESOURCE_COUNTERS
+       help
+         Provides a memory resource controller that manages both page cache and
+         RSS memory.
+
+         Note that setting this option increases fixed memory overhead
+         associated with each page of memory in the system by 4/8 bytes
+         and also increases cache misses because struct page on many 64bit
+         systems will not fit into a single cache line anymore.
+
+         Only enable when you're ok with these trade offs and really
+         sure you need the memory resource controller.
+
 config SYSFS_DEPRECATED
+       bool
+
+config SYSFS_DEPRECATED_V2
        bool "Create deprecated sysfs files"
        depends on SYSFS
        default y
+       select SYSFS_DEPRECATED
        help
          This option creates deprecated symlinks such as the
          "device"-link, the <subsystem>:<name>-link, and the
@@ -382,25 +401,11 @@ config SYSFS_DEPRECATED
 
          If enabled, this option will also move any device structures
          that belong to a class, back into the /sys/class hierarchy, in
-         order to support older versions of udev.
-
-         If you are using a distro that was released in 2006 or later,
-         it should be safe to say N here.
-
-config CGROUP_MEM_CONT
-       bool "Memory controller for cgroups"
-       depends on CGROUPS && RESOURCE_COUNTERS
-       help
-         Provides a memory controller that manages both page cache and
-         RSS memory.
+         order to support older versions of udev and some userspace
+         programs.
 
-         Note that setting this option increases fixed memory overhead
-         associated with each page of memory in the system by 4/8 bytes
-         and also increases cache misses because struct page on many 64bit
-         systems will not fit into a single cache line anymore.
-
-         Only enable when you're ok with these trade offs and really
-         sure you need the memory controller.
+         If you are using a distro with the most recent userspace
+         packages, it should be safe to say N here.
 
 config PROC_PID_CPUSET
        bool "Include legacy /proc/<pid>/cpuset file"
index 8b1982082ad8ada65bbd1ff4a1c565c2a77feba6..fbb0167c6b8a3532baad7bfeefa5194cc0bb6e40 100644 (file)
@@ -254,7 +254,7 @@ early_param("quiet", quiet_kernel);
 static int __init loglevel(char *str)
 {
        get_option(&str, &console_loglevel);
-       return 1;
+       return 0;
 }
 
 early_param("loglevel", loglevel);
index 2eeea9a142408a156f0897a75c81343a5619c07c..10c4930c2bbfbe0e98a5b56ff61a953fd4017367 100644 (file)
@@ -170,7 +170,9 @@ void audit_panic(const char *message)
                        printk(KERN_ERR "audit: %s\n", message);
                break;
        case AUDIT_FAIL_PANIC:
-               panic("audit: %s\n", message);
+               /* test audit_pid since printk is always lossy, why bother? */
+               if (audit_pid)
+                       panic("audit: %s\n", message);
                break;
        }
 }
@@ -352,6 +354,7 @@ static int kauditd_thread(void *dummy)
                                if (err < 0) {
                                        BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
                                        printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
+                                       audit_log_lost("auditd disappeared\n");
                                        audit_pid = 0;
                                }
                        } else {
@@ -1350,17 +1353,19 @@ void audit_log_end(struct audit_buffer *ab)
        if (!audit_rate_check()) {
                audit_log_lost("rate limit exceeded");
        } else {
+               struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
                if (audit_pid) {
-                       struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
                        nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0);
                        skb_queue_tail(&audit_skb_queue, ab->skb);
                        ab->skb = NULL;
                        wake_up_interruptible(&kauditd_wait);
-               } else if (printk_ratelimit()) {
-                       struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
-                       printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, ab->skb->data + NLMSG_SPACE(0));
-               } else {
-                       audit_log_lost("printk limit exceeded\n");
+               } else if (nlh->nlmsg_type != AUDIT_EOE) {
+                       if (printk_ratelimit()) {
+                               printk(KERN_NOTICE "type=%d %s\n",
+                                       nlh->nlmsg_type,
+                                       ab->skb->data + NLMSG_SPACE(0));
+                       } else
+                               audit_log_lost("printk limit exceeded\n");
                }
        }
        audit_buffer_free(ab);
index 2087d6de67ea4b6ce90b347d0f43a56dbdbad9d3..782262e4107d42822b1a8cf756016ce47720456d 100644 (file)
@@ -1070,7 +1070,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
                 * so we can be sure nothing was lost.
                 */
                if ((i == 0) && (too_long))
-                       audit_log_format(*ab, "a%d_len=%ld ", arg_num,
+                       audit_log_format(*ab, "a%d_len=%zu ", arg_num,
                                         has_cntl ? 2*len : len);
 
                /*
index d8abe996e009702663d9769714f6428f88aac641..e9c2fb01e89bf9e0943c3e7a1230098af3df6243 100644 (file)
@@ -2232,7 +2232,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
        mutex_lock(&cgroup_mutex);
 
-       cgrp->flags = 0;
        INIT_LIST_HEAD(&cgrp->sibling);
        INIT_LIST_HEAD(&cgrp->children);
        INIT_LIST_HEAD(&cgrp->css_sets);
@@ -2242,6 +2241,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        cgrp->root = parent->root;
        cgrp->top_cgroup = parent->top_cgroup;
 
+       if (notify_on_release(parent))
+               set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+
        for_each_subsys(root, ss) {
                struct cgroup_subsys_state *css = ss->create(ss, cgrp);
                if (IS_ERR(css)) {
index 506a957b665a69bbf84794e233b8032f2d98b280..cd20bf07e9e3b7bb2260c4a5939642c9e4bcb0a5 100644 (file)
@@ -214,20 +214,19 @@ struct pid *session_of_pgrp(struct pid *pgrp)
 static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
 {
        struct task_struct *p;
-       int ret = 1;
 
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-               if (p == ignored_task
-                               || p->exit_state
-                               || is_global_init(p->real_parent))
+               if ((p == ignored_task) ||
+                   (p->exit_state && thread_group_empty(p)) ||
+                   is_global_init(p->real_parent))
                        continue;
+
                if (task_pgrp(p->real_parent) != pgrp &&
-                   task_session(p->real_parent) == task_session(p)) {
-                       ret = 0;
-                       break;
-               }
+                   task_session(p->real_parent) == task_session(p))
+                       return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-       return ret;     /* (sighing) "Often!" */
+
+       return 1;
 }
 
 int is_current_pgrp_orphaned(void)
@@ -255,6 +254,37 @@ static int has_stopped_jobs(struct pid *pgrp)
        return retval;
 }
 
+/*
+ * Check to see if any process groups have become orphaned as
+ * a result of our exiting, and if they have any stopped jobs,
+ * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
+ */
+static void
+kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
+{
+       struct pid *pgrp = task_pgrp(tsk);
+       struct task_struct *ignored_task = tsk;
+
+       if (!parent)
+                /* exit: our father is in a different pgrp than
+                 * we are and we were the only connection outside.
+                 */
+               parent = tsk->real_parent;
+       else
+               /* reparent: our child is in a different pgrp than
+                * we are, and it was the only connection outside.
+                */
+               ignored_task = NULL;
+
+       if (task_pgrp(parent) != pgrp &&
+           task_session(parent) == task_session(tsk) &&
+           will_become_orphaned_pgrp(pgrp, ignored_task) &&
+           has_stopped_jobs(pgrp)) {
+               __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
+               __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
+       }
+}
+
 /**
  * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
  *
@@ -635,22 +665,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
            p->exit_signal != -1 && thread_group_empty(p))
                do_notify_parent(p, p->exit_signal);
 
-       /*
-        * process group orphan check
-        * Case ii: Our child is in a different pgrp
-        * than we are, and it was the only connection
-        * outside, so the child pgrp is now orphaned.
-        */
-       if ((task_pgrp(p) != task_pgrp(father)) &&
-           (task_session(p) == task_session(father))) {
-               struct pid *pgrp = task_pgrp(p);
-
-               if (will_become_orphaned_pgrp(pgrp, NULL) &&
-                   has_stopped_jobs(pgrp)) {
-                       __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
-                       __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
-               }
-       }
+       kill_orphaned_pgrp(p, father);
 }
 
 /*
@@ -735,11 +750,9 @@ static void forget_original_parent(struct task_struct *father)
  * Send signals to all our closest relatives so that they know
  * to properly mourn us..
  */
-static void exit_notify(struct task_struct *tsk)
+static void exit_notify(struct task_struct *tsk, int group_dead)
 {
        int state;
-       struct task_struct *t;
-       struct pid *pgrp;
 
        /*
         * This does two things:
@@ -753,25 +766,8 @@ static void exit_notify(struct task_struct *tsk)
        exit_task_namespaces(tsk);
 
        write_lock_irq(&tasklist_lock);
-       /*
-        * Check to see if any process groups have become orphaned
-        * as a result of our exiting, and if they have any stopped
-        * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
-        *
-        * Case i: Our father is in a different pgrp than we are
-        * and we were the only connection outside, so our pgrp
-        * is about to become orphaned.
-        */
-       t = tsk->real_parent;
-
-       pgrp = task_pgrp(tsk);
-       if ((task_pgrp(t) != pgrp) &&
-           (task_session(t) == task_session(tsk)) &&
-           will_become_orphaned_pgrp(pgrp, tsk) &&
-           has_stopped_jobs(pgrp)) {
-               __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
-               __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
-       }
+       if (group_dead)
+               kill_orphaned_pgrp(tsk->group_leader, NULL);
 
        /* Let father know we died
         *
@@ -788,8 +784,8 @@ static void exit_notify(struct task_struct *tsk)
         * the same after a fork.
         */
        if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
-           ( tsk->parent_exec_id != t->self_exec_id  ||
-             tsk->self_exec_id != tsk->parent_exec_id)
+           (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
+            tsk->self_exec_id != tsk->parent_exec_id)
            && !capable(CAP_KILL))
                tsk->exit_signal = SIGCHLD;
 
@@ -986,7 +982,7 @@ NORET_TYPE void do_exit(long code)
                module_put(tsk->binfmt->module);
 
        proc_exit_connector(tsk);
-       exit_notify(tsk);
+       exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
        mpol_free(tsk->mempolicy);
        tsk->mempolicy = NULL;
index 7a86e64323385785bda6e838f01c9637e192844a..fcfb580c3afc847e60fc9fa4f682b33c25650963 100644 (file)
@@ -498,27 +498,36 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
        return 0;
 }
 
+/*
+ * If we have a symbol_name argument, look it up and add the offset field
+ * to it. This way, we can specify a relative address to a symbol.
+ */
+static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
+{
+       kprobe_opcode_t *addr = p->addr;
+       if (p->symbol_name) {
+               if (addr)
+                       return NULL;
+               kprobe_lookup_name(p->symbol_name, addr);
+       }
+
+       if (!addr)
+               return NULL;
+       return (kprobe_opcode_t *)(((char *)addr) + p->offset);
+}
+
 static int __kprobes __register_kprobe(struct kprobe *p,
        unsigned long called_from)
 {
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;
+       kprobe_opcode_t *addr;
 
-       /*
-        * If we have a symbol_name argument look it up,
-        * and add it to the address.  That way the addr
-        * field can either be global or relative to a symbol.
-        */
-       if (p->symbol_name) {
-               if (p->addr)
-                       return -EINVAL;
-               kprobe_lookup_name(p->symbol_name, p->addr);
-       }
-
-       if (!p->addr)
+       addr = kprobe_addr(p);
+       if (!addr)
                return -EINVAL;
-       p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset);
+       p->addr = addr;
 
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr))
@@ -678,8 +687,7 @@ void __kprobes unregister_jprobe(struct jprobe *jp)
        unregister_kprobe(&jp->kp);
 }
 
-#ifdef ARCH_SUPPORTS_KRETPROBES
-
+#ifdef CONFIG_KRETPROBES
 /*
  * This kprobe pre_handler is registered with every kretprobe. When probe
  * hits it will set up the return probe.
@@ -722,12 +730,12 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
-       void *addr = rp->kp.addr;
+       void *addr;
 
        if (kretprobe_blacklist_size) {
-               if (addr == NULL)
-                       kprobe_lookup_name(rp->kp.symbol_name, addr);
-               addr += rp->kp.offset;
+               addr = kprobe_addr(&rp->kp);
+               if (!addr)
+                       return -EINVAL;
 
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
@@ -769,8 +777,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
        return ret;
 }
 
-#else /* ARCH_SUPPORTS_KRETPROBES */
-
+#else /* CONFIG_KRETPROBES */
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
        return -ENOSYS;
@@ -781,8 +788,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 {
        return 0;
 }
-
-#endif /* ARCH_SUPPORTS_KRETPROBES */
+#endif /* CONFIG_KRETPROBES */
 
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
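
[Editor's note] A hedged module-side sketch of the symbol_name/offset interface that the new kprobe_addr() helper resolves; the probed symbol is an assumption and may differ per kernel.

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "hit %s+0x%x\n", p->symbol_name, p->offset);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* assumption: present and probe-able here */
	.offset      = 0,		/* kprobe_addr() resolves symbol + offset */
	.pre_handler = handler_pre,
};

static int __init kp_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");
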
index 3574379f4d62d2d04c9431c0c2172dc044d0ca66..81a4e4a3f087adfc650eb6baf2c05147f209e062 100644 (file)
@@ -779,6 +779,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         * parallel walking of the hash-list safe:
         */
        list_add_tail_rcu(&class->hash_entry, hash_head);
+       /*
+        * Add it to the global list of classes:
+        */
+       list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
 
        if (verbose(class)) {
                graph_unlock();
@@ -2282,10 +2286,6 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
                        return 0;
                break;
        case LOCK_USED:
-               /*
-                * Add it to the global list of classes:
-                */
-               list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
                debug_atomic_dec(&nr_unused_locks);
                break;
        default:
index 50effc01d9a2e094217c0c920eb0f64677424fcc..48a4ea5afffde0b758a627fb63046f8a65976ff5 100644 (file)
@@ -698,14 +698,12 @@ int marker_probe_unregister(const char *name,
 {
        struct marker_entry *entry;
        struct marker_probe_closure *old;
-       int ret = 0;
+       int ret = -ENOENT;
 
        mutex_lock(&markers_mutex);
        entry = get_marker(name);
-       if (!entry) {
-               ret = -ENOENT;
+       if (!entry)
                goto end;
-       }
        if (entry->rcu_pending)
                rcu_barrier();
        old = marker_entry_remove_probe(entry, probe, probe_private);
@@ -713,12 +711,15 @@ int marker_probe_unregister(const char *name,
        marker_update_probes();         /* may update entry */
        mutex_lock(&markers_mutex);
        entry = get_marker(name);
+       if (!entry)
+               goto end;
        entry->oldptr = old;
        entry->rcu_pending = 1;
        /* write rcu_pending before calling the RCU callback */
        smp_wmb();
        call_rcu(&entry->rcu, free_old_closure);
        remove_marker(name);    /* Ignore busy error message */
+       ret = 0;
 end:
        mutex_unlock(&markers_mutex);
        return ret;
index 901cd6ac2f11d9d4b54d3bff34e10495e86f01a2..be4807fb90e48afd4ec74c460aaf8a214c4b0a4a 100644 (file)
@@ -1933,8 +1933,15 @@ static struct module *load_module(void __user *umod,
        /* Set up license info based on the info section */
        set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
+       /*
+        * ndiswrapper is under GPL by itself, but loads proprietary modules.
+        * Don't use add_taint_module(), as it would prevent ndiswrapper from
+        * using GPL-only symbols it needs.
+        */
        if (strcmp(mod->name, "ndiswrapper") == 0)
-               add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+               add_taint(TAINT_PROPRIETARY_MODULE);
+
+       /* driverloader was caught wrongly pretending to be under GPL */
        if (strcmp(mod->name, "driverloader") == 0)
                add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
index 7c2118f9597f3139cfef277233a739d5d717b159..f1d0b345c9ba86a24ac48ee29cc4cb6f800b1c94 100644 (file)
@@ -75,22 +75,15 @@ void refrigerator(void)
        __set_current_state(save);
 }
 
-static void fake_signal_wake_up(struct task_struct *p, int resume)
+static void fake_signal_wake_up(struct task_struct *p)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&p->sighand->siglock, flags);
-       signal_wake_up(p, resume);
+       signal_wake_up(p, 0);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
 }
 
-static void send_fake_signal(struct task_struct *p)
-{
-       if (task_is_stopped(p))
-               force_sig_specific(SIGSTOP, p);
-       fake_signal_wake_up(p, task_is_stopped(p));
-}
-
 static int has_mm(struct task_struct *p)
 {
        return (p->mm && !(p->flags & PF_BORROWED_MM));
@@ -121,7 +114,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only)
        if (freezing(p)) {
                if (has_mm(p)) {
                        if (!signal_pending(p))
-                               fake_signal_wake_up(p, 0);
+                               fake_signal_wake_up(p);
                } else {
                        if (with_mm_only)
                                ret = 0;
@@ -135,7 +128,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only)
                } else {
                        if (has_mm(p)) {
                                set_freeze_flag(p);
-                               send_fake_signal(p);
+                               fake_signal_wake_up(p);
                        } else {
                                if (with_mm_only) {
                                        ret = 0;
@@ -182,15 +175,17 @@ static int try_to_freeze_tasks(int freeze_user_space)
                        if (frozen(p) || !freezeable(p))
                                continue;
 
-                       if (task_is_traced(p) && frozen(p->parent)) {
-                               cancel_freezing(p);
-                               continue;
-                       }
-
                        if (!freeze_task(p, freeze_user_space))
                                continue;
 
-                       if (!freezer_should_skip(p))
+                       /*
+                        * Now that we've done set_freeze_flag, don't
+                        * perturb a task in TASK_STOPPED or TASK_TRACED.
+                        * It is "frozen enough".  If the task does wake
+                        * up, it will immediately call try_to_freeze.
+                        */
+                       if (!task_is_stopped_or_traced(p) &&
+                           !freezer_should_skip(p))
                                todo++;
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
index bee36100f110ab75617ad4dce16beac9e1434f76..9adc2a473e6e0f59bbc41350422d415e8f17a7ec 100644 (file)
@@ -666,7 +666,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
        }
        /* Emit the output into the temporary buffer */
        printed_len += vscnprintf(printk_buf + printed_len,
-                                 sizeof(printk_buf), fmt, args);
+                                 sizeof(printk_buf) - printed_len, fmt, args);
 
        /*
         * Copy the output into log_buf.  If the caller didn't provide
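
[Editor's note] The hunk above fixes a classic append bug: when writing at an offset into a fixed buffer, the size argument must be the space remaining, not the total size. A self-contained user-space sketch of the same pattern (the kernel uses vscnprintf, which additionally returns the truncated length):

#include <stdio.h>

int main(void)
{
	char buf[16];
	int printed = 0;

	printed += snprintf(buf + printed, sizeof(buf) - printed, "<6>");
	/* correct: bounded by what is left, as the patched vprintk now does */
	printed += snprintf(buf + printed, sizeof(buf) - printed, "hello, world");

	printf("%s (used %d of %zu bytes)\n", buf, printed, sizeof(buf));
	return 0;
}
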
index 987cfb7ade8977225baf5a88695f2c9ba110b549..e9517014b57c100af5926165d2131992f0589401 100644 (file)
  *             to Suparna Bhattacharya for pushing me completely away
  *             from atomic instructions on the read side.
  *
+ *  - Added handling of Dynamic Ticks
+ *      Copyright 2007 - Paul E. McKenney <paulmck@us.ibm.com>
+ *                     - Steven Rostedt <srostedt@redhat.com>
+ *
  * Papers:  http://www.rdrop.com/users/paulmck/RCU
  *
  * Design Document: http://lwn.net/Articles/253651/
@@ -409,6 +413,212 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
        }
 }
 
+#ifdef CONFIG_NO_HZ
+
+DEFINE_PER_CPU(long, dynticks_progress_counter) = 1;
+static DEFINE_PER_CPU(long, rcu_dyntick_snapshot);
+static DEFINE_PER_CPU(int, rcu_update_flag);
+
+/**
+ * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI.
+ *
+ * If the CPU was idle with dynamic ticks active, this updates the
+ * dynticks_progress_counter to let the RCU handling know that the
+ * CPU is active.
+ */
+void rcu_irq_enter(void)
+{
+       int cpu = smp_processor_id();
+
+       if (per_cpu(rcu_update_flag, cpu))
+               per_cpu(rcu_update_flag, cpu)++;
+
+       /*
+        * Only update if we are coming from a stopped ticks mode
+        * (dynticks_progress_counter is even).
+        */
+       if (!in_interrupt() &&
+           (per_cpu(dynticks_progress_counter, cpu) & 0x1) == 0) {
+               /*
+                * The following might seem like we could have a race
+                * with NMI/SMIs. But this really isn't a problem.
+                * Here we do a read/modify/write, and the race happens
+                * when an NMI/SMI comes in after the read and before
+                * the write. But NMI/SMIs will increment this counter
+                * twice before returning, so the zero bit will not
+                * be corrupted by the NMI/SMI which is the most important
+                * part.
+                *
+                * The only thing is that we would bring back the counter
+                * to a position that it was in during the NMI/SMI.
+                * But the zero bit would be set, so the rest of the
+                * counter would again be ignored.
+                *
+                * On return from the IRQ, the counter may have the zero
+                * bit be 0 and the counter the same as the return from
+                * the NMI/SMI. If the state machine was so unlucky to
+                * see that, it still doesn't matter, since all
+                * RCU read-side critical sections on this CPU would
+                * have already completed.
+                */
+               per_cpu(dynticks_progress_counter, cpu)++;
+               /*
+                * The following memory barrier ensures that any
+                * rcu_read_lock() primitives in the irq handler
+                * are seen by other CPUs to follow the above
+                * increment to dynticks_progress_counter. This is
+                * required in order for other CPUs to correctly
+                * determine when it is safe to advance the RCU
+                * grace-period state machine.
+                */
+               smp_mb(); /* see above block comment. */
+               /*
+                * Since we can't determine the dynamic tick mode from
+                * the dynticks_progress_counter after this routine,
+                * we use a second flag to acknowledge that we came
+                * from an idle state with ticks stopped.
+                */
+               per_cpu(rcu_update_flag, cpu)++;
+               /*
+                * If we take an NMI/SMI now, they will also increment
+                * the rcu_update_flag, and will not update the
+                * dynticks_progress_counter on exit. That is for
+                * this IRQ to do.
+                */
+       }
+}
+
+/**
+ * rcu_irq_exit - Called from exiting Hard irq context.
+ *
+ * If the CPU was idle with dynamic ticks active, update the
+ * dynticks_progress_counter to put let the RCU handling be
+ * aware that the CPU is going back to idle with no ticks.
+ */
+void rcu_irq_exit(void)
+{
+       int cpu = smp_processor_id();
+
+       /*
+        * rcu_update_flag is set if we interrupted the CPU
+        * when it was idle with ticks stopped.
+        * Once this occurs, we keep track of interrupt nesting
+        * because an NMI/SMI could also come in, and we still
+        * only want the IRQ that started the increment of the
+        * dynticks_progress_counter to be the one that modifies
+        * it on exit.
+        */
+       if (per_cpu(rcu_update_flag, cpu)) {
+               if (--per_cpu(rcu_update_flag, cpu))
+                       return;
+
+               /* This must match the interrupt nesting */
+               WARN_ON(in_interrupt());
+
+               /*
+                * If an NMI/SMI happens now we are still
+                * protected by the dynticks_progress_counter being odd.
+                */
+
+               /*
+                * The following memory barrier ensures that any
+                * rcu_read_unlock() primitives in the irq handler
+                * are seen by other CPUs to precede the following
+                * increment to dynticks_progress_counter. This
+                * is required in order for other CPUs to determine
+                * when it is safe to advance the RCU grace-period
+                * state machine.
+                */
+               smp_mb(); /* see above block comment. */
+               per_cpu(dynticks_progress_counter, cpu)++;
+               WARN_ON(per_cpu(dynticks_progress_counter, cpu) & 0x1);
+       }
+}
+
+static void dyntick_save_progress_counter(int cpu)
+{
+       per_cpu(rcu_dyntick_snapshot, cpu) =
+               per_cpu(dynticks_progress_counter, cpu);
+}
+
+static inline int
+rcu_try_flip_waitack_needed(int cpu)
+{
+       long curr;
+       long snap;
+
+       curr = per_cpu(dynticks_progress_counter, cpu);
+       snap = per_cpu(rcu_dyntick_snapshot, cpu);
+       smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+       /*
+        * If the CPU remained in dynticks mode for the entire time
+        * and didn't take any interrupts, NMIs, SMIs, or whatever,
+        * then it cannot be in the middle of an rcu_read_lock(), so
+        * the next rcu_read_lock() it executes must use the new value
+        * of the counter.  So we can safely pretend that this CPU
+        * already acknowledged the counter.
+        */
+
+       if ((curr == snap) && ((curr & 0x1) == 0))
+               return 0;
+
+       /*
+        * If the CPU passed through or entered a dynticks idle phase with
+        * no active irq handlers, then, as above, we can safely pretend
+        * that this CPU already acknowledged the counter.
+        */
+
+       if ((curr - snap) > 2 || (snap & 0x1) == 0)
+               return 0;
+
+       /* We need this CPU to explicitly acknowledge the counter flip. */
+
+       return 1;
+}
+
+static inline int
+rcu_try_flip_waitmb_needed(int cpu)
+{
+       long curr;
+       long snap;
+
+       curr = per_cpu(dynticks_progress_counter, cpu);
+       snap = per_cpu(rcu_dyntick_snapshot, cpu);
+       smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+       /*
+        * If the CPU remained in dynticks mode for the entire time
+        * and didn't take any interrupts, NMIs, SMIs, or whatever,
+        * then it cannot have executed an RCU read-side critical section
+        * during that time, so there is no need for it to execute a
+        * memory barrier.
+        */
+
+       if ((curr == snap) && ((curr & 0x1) == 0))
+               return 0;
+
+       /*
+        * If the CPU either entered or exited an outermost interrupt,
+        * SMI, NMI, or whatever handler, then we know that it executed
+        * a memory barrier when doing so.  So we don't need another one.
+        */
+       if (curr != snap)
+               return 0;
+
+       /* We need the CPU to execute a memory barrier. */
+
+       return 1;
+}
+
+#else /* !CONFIG_NO_HZ */
+
+# define dyntick_save_progress_counter(cpu)    do { } while (0)
+# define rcu_try_flip_waitack_needed(cpu)      (1)
+# define rcu_try_flip_waitmb_needed(cpu)       (1)
+
+#endif /* CONFIG_NO_HZ */
+
 /*
  * Get here when RCU is idle.  Decide whether we need to
  * move out of idle state, and return non-zero if so.
@@ -447,8 +657,10 @@ rcu_try_flip_idle(void)
 
        /* Now ask each CPU for acknowledgement of the flip. */
 
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
+               dyntick_save_progress_counter(cpu);
+       }
 
        return 1;
 }
@@ -464,7 +676,8 @@ rcu_try_flip_waitack(void)
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
        for_each_cpu_mask(cpu, rcu_cpu_online_map)
-               if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
+               if (rcu_try_flip_waitack_needed(cpu) &&
+                   per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
                        return 0;
                }
@@ -509,8 +722,10 @@ rcu_try_flip_waitzero(void)
        smp_mb();  /*  ^^^^^^^^^^^^ */
 
        /* Call for a memory barrier from each CPU. */
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
+               dyntick_save_progress_counter(cpu);
+       }
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
        return 1;
@@ -528,7 +743,8 @@ rcu_try_flip_waitmb(void)
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
        for_each_cpu_mask(cpu, rcu_cpu_online_map)
-               if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
+               if (rcu_try_flip_waitmb_needed(cpu) &&
+                   per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
                        return 0;
                }
@@ -702,8 +918,9 @@ void rcu_offline_cpu(int cpu)
         * fix.
         */
 
+       local_irq_save(flags);
        rdp = RCU_DATA_ME();
-       spin_lock_irqsave(&rdp->lock, flags);
+       spin_lock(&rdp->lock);
        *rdp->nexttail = list;
        if (list)
                rdp->nexttail = tail;
@@ -735,9 +952,11 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 {
        unsigned long flags;
        struct rcu_head *next, *list;
-       struct rcu_data *rdp = RCU_DATA_ME();
+       struct rcu_data *rdp;
 
-       spin_lock_irqsave(&rdp->lock, flags);
+       local_irq_save(flags);
+       rdp = RCU_DATA_ME();
+       spin_lock(&rdp->lock);
        list = rdp->donelist;
        if (list == NULL) {
                spin_unlock_irqrestore(&rdp->lock, flags);
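
[Editor's note] The dynticks-aware checks added above all rely on a per-CPU progress counter whose parity says whether the CPU is currently in nohz idle (even) or active (odd). A minimal sketch of the two tests, with made-up helper names and the convention taken from the hunks above:

        /* Sketch only; helper names are hypothetical. */
        static int no_ack_needed(long curr, long snap)
        {
                /* Snapshot taken while idle, or the CPU passed through a
                 * full dynticks-idle phase since the snapshot. */
                return (snap & 0x1) == 0 || (curr - snap) > 2;
        }

        static int no_memory_barrier_needed(long curr, long snap)
        {
                /* Idle the whole time (counter unchanged and even), or it
                 * took an interrupt/NMI, which already implied a barrier. */
                return (curr == snap && (curr & 0x1) == 0) || curr != snap;
        }
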
index 16cbec2d5d60514e67c14907e6d11f91cfecafd1..efbfc0fc232f33c49b5b918704b4e13ae5d0f2cc 100644 (file)
@@ -113,6 +113,7 @@ ssize_t res_counter_write(struct res_counter *counter, int member,
 
        ret = -EINVAL;
 
+       strstrip(buf);
        if (write_strategy) {
                if (write_strategy(buf, &tmp)) {
                        goto out_free;
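
[Editor's note] The strstrip() call added above matters because writes coming from the shell usually carry a trailing newline, which a strict write_strategy parser would reject. strstrip() trims trailing whitespace in place (it also returns a pointer past leading whitespace, which this hunk ignores). A tiny illustration, not from the kernel:

        char buf[16] = "4096\n";

        strstrip(buf);          /* trailing '\n' removed in place: "4096" */
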
index b387a8de26a5f9ca8b6e2e9d3ad33f66fbf65882..dcd553cc4ee89b52ec511f70361c4f7fd586d976 100644 (file)
@@ -174,41 +174,6 @@ struct task_group {
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
-
-       /*
-        * shares assigned to a task group governs how much of cpu bandwidth
-        * is allocated to the group. The more shares a group has, the more is
-        * the cpu bandwidth allocated to it.
-        *
-        * For ex, lets say that there are three task groups, A, B and C which
-        * have been assigned shares 1000, 2000 and 3000 respectively. Then,
-        * cpu bandwidth allocated by the scheduler to task groups A, B and C
-        * should be:
-        *
-        *      Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
-        *      Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
-        *      Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
-        *
-        * The weight assigned to a task group's schedulable entities on every
-        * cpu (task_group.se[a_cpu]->load.weight) is derived from the task
-        * group's shares. For ex: lets say that task group A has been
-        * assigned shares of 1000 and there are two CPUs in a system. Then,
-        *
-        *  tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
-        *
-        * Note: It's not necessary that each of a task's group schedulable
-        *       entity have the same weight on all CPUs. If the group
-        *       has 2 of its tasks on CPU0 and 1 task on CPU1, then a
-        *       better distribution of weight could be:
-        *
-        *      tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
-        *      tg_A->se[1]->load.weight = 1/2 * 2000 =  667
-        *
-        * rebalance_shares() is responsible for distributing the shares of a
-        * task groups like this among the group's schedulable entities across
-        * cpus.
-        *
-        */
        unsigned long shares;
 #endif
 
@@ -250,22 +215,12 @@ static DEFINE_SPINLOCK(task_group_lock);
 static DEFINE_MUTEX(doms_cur_mutex);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_SMP
-/* kernel thread that runs rebalance_shares() periodically */
-static struct task_struct *lb_monitor_task;
-static int load_balance_monitor(void *unused);
-#endif
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares);
-
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD  (2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD  NICE_0_LOAD
 #endif
 
-#define MIN_GROUP_SHARES       2
-
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
 
@@ -668,6 +623,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +646,16 @@ unsigned long long cpu_clock(int cpu)
        unsigned long flags;
        struct rq *rq;
 
-       local_irq_save(flags);
-       rq = cpu_rq(cpu);
        /*
         * Only call sched_clock() if the scheduler has already been
         * initialized (some code might call cpu_clock() very early):
         */
-       if (rq->idle)
-               update_rq_clock(rq);
+       if (unlikely(!scheduler_running))
+               return 0;
+
+       local_irq_save(flags);
+       rq = cpu_rq(cpu);
+       update_rq_clock(rq);
        now = rq->clock;
        local_irq_restore(flags);
 
@@ -1241,16 +1200,6 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
-static inline void inc_cpu_load(struct rq *rq, unsigned long load)
-{
-       update_load_add(&rq->load, load);
-}
-
-static inline void dec_cpu_load(struct rq *rq, unsigned long load)
-{
-       update_load_sub(&rq->load, load);
-}
-
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
@@ -1268,14 +1217,26 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct rq *rq)
+static inline void inc_load(struct rq *rq, const struct task_struct *p)
+{
+       update_load_add(&rq->load, p->se.load.weight);
+}
+
+static inline void dec_load(struct rq *rq, const struct task_struct *p)
+{
+       update_load_sub(&rq->load, p->se.load.weight);
+}
+
+static void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
        rq->nr_running++;
+       inc_load(rq, p);
 }
 
-static void dec_nr_running(struct rq *rq)
+static void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
        rq->nr_running--;
+       dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -1367,7 +1328,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
                rq->nr_uninterruptible--;
 
        enqueue_task(rq, p, wakeup);
-       inc_nr_running(rq);
+       inc_nr_running(p, rq);
 }
 
 /*
@@ -1379,7 +1340,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
                rq->nr_uninterruptible++;
 
        dequeue_task(rq, p, sleep);
-       dec_nr_running(rq);
+       dec_nr_running(p, rq);
 }
 
 /**
@@ -2019,7 +1980,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                 * management (if any):
                 */
                p->sched_class->task_new(rq, p);
-               inc_nr_running(rq);
+               inc_nr_running(p, rq);
        }
        check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
@@ -3885,7 +3846,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 asmlinkage void __sched schedule(void)
 {
        struct task_struct *prev, *next;
-       long *switch_count;
+       unsigned long *switch_count;
        struct rq *rq;
        int cpu;
 
@@ -4358,8 +4319,10 @@ void set_user_nice(struct task_struct *p, long nice)
                goto out_unlock;
        }
        on_rq = p->se.on_rq;
-       if (on_rq)
+       if (on_rq) {
                dequeue_task(rq, p, 0);
+               dec_load(rq, p);
+       }
 
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
@@ -4369,6 +4332,7 @@ void set_user_nice(struct task_struct *p, long nice)
 
        if (on_rq) {
                enqueue_task(rq, p, 0);
+               inc_load(rq, p);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -7083,21 +7047,6 @@ void __init sched_init_smp(void)
        if (set_cpus_allowed(current, non_isolated_cpus) < 0)
                BUG();
        sched_init_granularity();
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-       if (nr_cpu_ids == 1)
-               return;
-
-       lb_monitor_task = kthread_create(load_balance_monitor, NULL,
-                                        "group_balance");
-       if (!IS_ERR(lb_monitor_task)) {
-               lb_monitor_task->flags |= PF_NOFREEZE;
-               wake_up_process(lb_monitor_task);
-       } else {
-               printk(KERN_ERR "Could not create load balance monitor thread"
-                       "(error = %ld) \n", PTR_ERR(lb_monitor_task));
-       }
-#endif
 }
 #else
 void __init sched_init_smp(void)
@@ -7284,6 +7233,8 @@ void __init sched_init(void)
         * During early bootup we pretend to be a normal task:
         */
        current->sched_class = &fair_sched_class;
+
+       scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -7418,157 +7369,6 @@ void set_curr_task(int cpu, struct task_struct *p)
 
 #ifdef CONFIG_GROUP_SCHED
 
-#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
-/*
- * distribute shares of all task groups among their schedulable entities,
- * to reflect load distribution across cpus.
- */
-static int rebalance_shares(struct sched_domain *sd, int this_cpu)
-{
-       struct cfs_rq *cfs_rq;
-       struct rq *rq = cpu_rq(this_cpu);
-       cpumask_t sdspan = sd->span;
-       int balanced = 1;
-
-       /* Walk thr' all the task groups that we have */
-       for_each_leaf_cfs_rq(rq, cfs_rq) {
-               int i;
-               unsigned long total_load = 0, total_shares;
-               struct task_group *tg = cfs_rq->tg;
-
-               /* Gather total task load of this group across cpus */
-               for_each_cpu_mask(i, sdspan)
-                       total_load += tg->cfs_rq[i]->load.weight;
-
-               /* Nothing to do if this group has no load */
-               if (!total_load)
-                       continue;
-
-               /*
-                * tg->shares represents the number of cpu shares the task group
-                * is eligible to hold on a single cpu. On N cpus, it is
-                * eligible to hold (N * tg->shares) number of cpu shares.
-                */
-               total_shares = tg->shares * cpus_weight(sdspan);
-
-               /*
-                * redistribute total_shares across cpus as per the task load
-                * distribution.
-                */
-               for_each_cpu_mask(i, sdspan) {
-                       unsigned long local_load, local_shares;
-
-                       local_load = tg->cfs_rq[i]->load.weight;
-                       local_shares = (local_load * total_shares) / total_load;
-                       if (!local_shares)
-                               local_shares = MIN_GROUP_SHARES;
-                       if (local_shares == tg->se[i]->load.weight)
-                               continue;
-
-                       spin_lock_irq(&cpu_rq(i)->lock);
-                       set_se_shares(tg->se[i], local_shares);
-                       spin_unlock_irq(&cpu_rq(i)->lock);
-                       balanced = 0;
-               }
-       }
-
-       return balanced;
-}
-
-/*
- * How frequently should we rebalance_shares() across cpus?
- *
- * The more frequently we rebalance shares, the more accurate is the fairness
- * of cpu bandwidth distribution between task groups. However higher frequency
- * also implies increased scheduling overhead.
- *
- * sysctl_sched_min_bal_int_shares represents the minimum interval between
- * consecutive calls to rebalance_shares() in the same sched domain.
- *
- * sysctl_sched_max_bal_int_shares represents the maximum interval between
- * consecutive calls to rebalance_shares() in the same sched domain.
- *
- * These settings allows for the appropriate trade-off between accuracy of
- * fairness and the associated overhead.
- *
- */
-
-/* default: 8ms, units: milliseconds */
-const_debug unsigned int sysctl_sched_min_bal_int_shares = 8;
-
-/* default: 128ms, units: milliseconds */
-const_debug unsigned int sysctl_sched_max_bal_int_shares = 128;
-
-/* kernel thread that runs rebalance_shares() periodically */
-static int load_balance_monitor(void *unused)
-{
-       unsigned int timeout = sysctl_sched_min_bal_int_shares;
-       struct sched_param schedparm;
-       int ret;
-
-       /*
-        * We don't want this thread's execution to be limited by the shares
-        * assigned to default group (init_task_group). Hence make it run
-        * as a SCHED_RR RT task at the lowest priority.
-        */
-       schedparm.sched_priority = 1;
-       ret = sched_setscheduler(current, SCHED_RR, &schedparm);
-       if (ret)
-               printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance"
-                               " monitor thread (error = %d) \n", ret);
-
-       while (!kthread_should_stop()) {
-               int i, cpu, balanced = 1;
-
-               /* Prevent cpus going down or coming up */
-               get_online_cpus();
-               /* lockout changes to doms_cur[] array */
-               lock_doms_cur();
-               /*
-                * Enter a rcu read-side critical section to safely walk rq->sd
-                * chain on various cpus and to walk task group list
-                * (rq->leaf_cfs_rq_list) in rebalance_shares().
-                */
-               rcu_read_lock();
-
-               for (i = 0; i < ndoms_cur; i++) {
-                       cpumask_t cpumap = doms_cur[i];
-                       struct sched_domain *sd = NULL, *sd_prev = NULL;
-
-                       cpu = first_cpu(cpumap);
-
-                       /* Find the highest domain at which to balance shares */
-                       for_each_domain(cpu, sd) {
-                               if (!(sd->flags & SD_LOAD_BALANCE))
-                                       continue;
-                               sd_prev = sd;
-                       }
-
-                       sd = sd_prev;
-                       /* sd == NULL? No load balance reqd in this domain */
-                       if (!sd)
-                               continue;
-
-                       balanced &= rebalance_shares(sd, cpu);
-               }
-
-               rcu_read_unlock();
-
-               unlock_doms_cur();
-               put_online_cpus();
-
-               if (!balanced)
-                       timeout = sysctl_sched_min_bal_int_shares;
-               else if (timeout < sysctl_sched_max_bal_int_shares)
-                       timeout *= 2;
-
-               msleep_interruptible(timeout);
-       }
-
-       return 0;
-}
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void free_fair_sched_group(struct task_group *tg)
 {
@@ -7835,29 +7635,25 @@ void sched_move_task(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-/* rq->lock to be locked by caller */
 static void set_se_shares(struct sched_entity *se, unsigned long shares)
 {
        struct cfs_rq *cfs_rq = se->cfs_rq;
        struct rq *rq = cfs_rq->rq;
        int on_rq;
 
-       if (!shares)
-               shares = MIN_GROUP_SHARES;
+       spin_lock_irq(&rq->lock);
 
        on_rq = se->on_rq;
-       if (on_rq) {
+       if (on_rq)
                dequeue_entity(cfs_rq, se, 0);
-               dec_cpu_load(rq, se->load.weight);
-       }
 
        se->load.weight = shares;
        se->load.inv_weight = div64_64((1ULL<<32), shares);
 
-       if (on_rq) {
+       if (on_rq)
                enqueue_entity(cfs_rq, se, 0);
-               inc_cpu_load(rq, se->load.weight);
-       }
+
+       spin_unlock_irq(&rq->lock);
 }
 
 static DEFINE_MUTEX(shares_mutex);
@@ -7867,18 +7663,18 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        int i;
        unsigned long flags;
 
+       /*
+        * A weight of 0 or 1 can cause arithmetics problems.
+        * (The default weight is 1024 - so there's no practical
+        *  limitation from this.)
+        */
+       if (shares < 2)
+               shares = 2;
+
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
                goto done;
 
-       if (shares < MIN_GROUP_SHARES)
-               shares = MIN_GROUP_SHARES;
-
-       /*
-        * Prevent any load balance activity (rebalance_shares,
-        * load_balance_fair) from referring to this group first,
-        * by taking it off the rq->leaf_cfs_rq_list on each cpu.
-        */
        spin_lock_irqsave(&task_group_lock, flags);
        for_each_possible_cpu(i)
                unregister_fair_sched_group(tg, i);
@@ -7892,11 +7688,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
         * w/o tripping rebalance_share or load_balance_fair.
         */
        tg->shares = shares;
-       for_each_possible_cpu(i) {
-               spin_lock_irq(&cpu_rq(i)->lock);
+       for_each_possible_cpu(i)
                set_se_shares(tg->se[i], shares);
-               spin_unlock_irq(&cpu_rq(i)->lock);
-       }
 
        /*
         * Enable load balance activity on this group, by inserting it back on
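
[Editor's note] Several of the sched.c hunks above serve one idea: rq->load is now maintained alongside rq->nr_running, so a CPU's load always equals the summed weight of the tasks queued there (including across a set_user_nice() reweight) instead of being adjusted separately by the removed group-balancing code. A simplified model of that invariant, not the kernel code itself:

        struct rq_model {
                unsigned long nr_running;
                unsigned long load;             /* sum of queued tasks' weights */
        };

        static void enqueue_model(struct rq_model *rq, unsigned long weight)
        {
                rq->nr_running++;
                rq->load += weight;             /* inc_nr_running() -> inc_load() */
        }

        static void dequeue_model(struct rq_model *rq, unsigned long weight)
        {
                rq->nr_running--;
                rq->load -= weight;             /* dec_nr_running() -> dec_load() */
        }

        static void renice_model(struct rq_model *rq, unsigned long old_w,
                                 unsigned long new_w)
        {
                dequeue_model(rq, old_w);       /* set_user_nice(): dec_load() */
                enqueue_model(rq, new_w);       /* reweight, then inc_load()   */
        }
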
index 6c091d6e159d01fb23c0dd786495c533cb005c5b..3df4d46994ca80ddc861ba44fe8f7ac24b3c50ed 100644 (file)
@@ -202,17 +202,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 
 static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-       struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-       struct sched_entity *se = NULL;
-       struct rb_node *parent;
+       struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
-       while (*link) {
-               parent = *link;
-               se = rb_entry(parent, struct sched_entity, run_node);
-               link = &parent->rb_right;
-       }
+       if (!last)
+               return NULL;
 
-       return se;
+       return rb_entry(last, struct sched_entity, run_node);
 }
 
 /**************************************************************
@@ -732,8 +727,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
        return se->parent;
 }
 
-#define GROUP_IMBALANCE_PCT    20
-
 #else  /* CONFIG_FAIR_GROUP_SCHED */
 
 #define for_each_sched_entity(se) \
@@ -824,26 +817,15 @@ hrtick_start_fair(struct rq *rq, struct task_struct *p)
 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se = &p->se,
-                           *topse = NULL;      /* Highest schedulable entity */
-       int incload = 1;
+       struct sched_entity *se = &p->se;
 
        for_each_sched_entity(se) {
-               topse = se;
-               if (se->on_rq) {
-                       incload = 0;
+               if (se->on_rq)
                        break;
-               }
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
                wakeup = 1;
        }
-       /* Increment cpu load if we just enqueued the first task of a group on
-        * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
-        * at the highest grouping level.
-        */
-       if (incload)
-               inc_cpu_load(rq, topse->load.weight);
 
        hrtick_start_fair(rq, rq->curr);
 }
@@ -856,28 +838,16 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se = &p->se,
-                           *topse = NULL;      /* Highest schedulable entity */
-       int decload = 1;
+       struct sched_entity *se = &p->se;
 
        for_each_sched_entity(se) {
-               topse = se;
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
-               if (cfs_rq->load.weight) {
-                       if (parent_entity(se))
-                               decload = 0;
+               if (cfs_rq->load.weight)
                        break;
-               }
                sleep = 1;
        }
-       /* Decrement cpu load if we just dequeued the last task of a group on
-        * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
-        * at the highest grouping level.
-        */
-       if (decload)
-               dec_cpu_load(rq, topse->load.weight);
 
        hrtick_start_fair(rq, rq->curr);
 }
@@ -1191,6 +1161,25 @@ static struct task_struct *load_balance_next_fair(void *arg)
        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
+{
+       struct sched_entity *curr;
+       struct task_struct *p;
+
+       if (!cfs_rq->nr_running || !first_fair(cfs_rq))
+               return MAX_PRIO;
+
+       curr = cfs_rq->curr;
+       if (!curr)
+               curr = __pick_next_entity(cfs_rq);
+
+       p = task_of(curr);
+
+       return p->prio;
+}
+#endif
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
@@ -1200,45 +1189,28 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
        struct cfs_rq *busy_cfs_rq;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;
-       unsigned long load_moved;
 
        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;
 
        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
-               struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
-               unsigned long maxload, task_load, group_weight;
-               unsigned long thisload, per_task_load;
-               struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];
-
-               task_load = busy_cfs_rq->load.weight;
-               group_weight = se->load.weight;
+               struct cfs_rq *this_cfs_rq;
+               long imbalance;
+               unsigned long maxload;
 
-               /*
-                * 'group_weight' is contributed by tasks of total weight
-                * 'task_load'. To move 'rem_load_move' worth of weight only,
-                * we need to move a maximum task load of:
-                *
-                *      maxload = (remload / group_weight) * task_load;
-                */
-               maxload = (rem_load_move * task_load) / group_weight;
+               this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-               if (!maxload || !task_load)
+               imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
+               /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+               if (imbalance <= 0)
                        continue;
 
-               per_task_load = task_load / busy_cfs_rq->nr_running;
-               /*
-                * balance_tasks will try to forcibly move atleast one task if
-                * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if
-                * maxload is less than GROUP_IMBALANCE_FUZZ% the per_task_load.
-                */
-                if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
-                       continue;
+               /* Don't pull more than imbalance/2 */
+               imbalance /= 2;
+               maxload = min(rem_load_move, imbalance);
 
-               /* Disable priority-based load balance */
-               *this_best_prio = 0;
-               thisload = this_cfs_rq->load.weight;
+               *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
 # define maxload rem_load_move
 #endif
@@ -1247,33 +1219,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
-               load_moved = balance_tasks(this_rq, this_cpu, busiest,
+               rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
                                               maxload, sd, idle, all_pinned,
                                               this_best_prio,
                                               &cfs_rq_iterator);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-               /*
-                * load_moved holds the task load that was moved. The
-                * effective (group) weight moved would be:
-                *      load_moved_eff = load_moved/task_load * group_weight;
-                */
-               load_moved = (group_weight * load_moved) / task_load;
-
-               /* Adjust shares on both cpus to reflect load_moved */
-               group_weight -= load_moved;
-               set_se_shares(se, group_weight);
-
-               se = busy_cfs_rq->tg->se[this_cpu];
-               if (!thisload)
-                       group_weight = load_moved;
-               else
-                       group_weight = se->load.weight + load_moved;
-               set_se_shares(se, group_weight);
-#endif
-
-               rem_load_move -= load_moved;
-
                if (rem_load_move <= 0)
                        break;
        }
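
[Editor's note] The rewritten load_balance_fair() loop above replaces the shares-based maxload computation with a per-group imbalance: for each task group it compares the group's load on the busiest CPU with its load on this CPU, pulls nothing if this CPU already carries as much, and otherwise pulls at most half the difference, capped by what the caller still wants moved. A hedged sketch of that limit, with made-up names:

        static unsigned long group_pull_limit(unsigned long busiest_load,
                                              unsigned long this_load,
                                              unsigned long rem_load_move)
        {
                long imbalance = (long)busiest_load - (long)this_load;

                if (imbalance <= 0)
                        return 0;               /* this CPU is not lighter: skip */

                imbalance /= 2;                 /* aim for the midpoint */
                if ((unsigned long)imbalance > rem_load_move)
                        return rem_load_move;
                return (unsigned long)imbalance;
        }
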
index f54792b175b26a331df3e133a8b7f854122eefdf..76e828517541b742c960de0b48bd503ea88f99d6 100644 (file)
@@ -393,8 +393,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);
-
-       inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -414,8 +412,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }
-
-       dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
index 84917fe507f77b8ff949af7f958e11ac2ed423b7..6af1210092c39a45db3552ecc9199728f8938410 100644 (file)
@@ -1623,7 +1623,6 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
        /* Let the debugger run.  */
        __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
-       try_to_freeze();
        read_lock(&tasklist_lock);
        if (!unlikely(killed) && may_ptrace_stop()) {
                do_notify_parent_cldstop(current, CLD_TRAPPED);
@@ -1640,6 +1639,13 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
                read_unlock(&tasklist_lock);
        }
 
+       /*
+        * While in TASK_TRACED, we were considered "frozen enough".
+        * Now that we woke up, it's crucial if we're supposed to be
+        * frozen that we freeze now before running anything substantial.
+        */
+       try_to_freeze();
+
        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
@@ -1757,9 +1763,15 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
        sigset_t *mask = &current->blocked;
        int signr = 0;
 
+relock:
+       /*
+        * We'll jump back here after any time we were stopped in TASK_STOPPED.
+        * While in TASK_STOPPED, we were considered "frozen enough".
+        * Now that we woke up, it's crucial if we're supposed to be
+        * frozen that we freeze now before running anything substantial.
+        */
        try_to_freeze();
 
-relock:
        spin_lock_irq(&current->sighand->siglock);
        for (;;) {
                struct k_sigaction *ka;
index 5b3aea5f471e06a2bac64ad232dce64e60650ea4..31e9f2a4792847388b524d313bc389bd8cd4cf20 100644 (file)
@@ -313,6 +313,7 @@ void irq_exit(void)
        /* Make sure that timer wheel updates are propagated */
        if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
                tick_nohz_stop_sched_tick();
+       rcu_irq_exit();
 #endif
        preempt_enable_no_resched();
 }
index 7c2da88db4eddf6539673357b01d94b543aac52c..01b6522fd92bc0b28f7df00eb66876a389b34908 100644 (file)
@@ -216,26 +216,27 @@ static int watchdog(void *__bind_cpu)
        /* initialize timestamp */
        touch_softlockup_watchdog();
 
+       set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly once per second to reset the softlockup timestamp.
         * If this gets delayed for more than 60 seconds then the
         * debug-printout triggers in softlockup_tick().
         */
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
                touch_softlockup_watchdog();
                schedule();
 
                if (kthread_should_stop())
                        break;
 
-               if (this_cpu != check_cpu)
-                       continue;
-
-               if (sysctl_hung_task_timeout_secs)
-                       check_hung_uninterruptible_tasks(this_cpu);
+               if (this_cpu == check_cpu) {
+                       if (sysctl_hung_task_timeout_secs)
+                               check_hung_uninterruptible_tasks(this_cpu);
+               }
 
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
 
        return 0;
 }
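
[Editor's note] The watchdog loop restructuring above follows the standard kthread sleep pattern: mark the task TASK_INTERRUPTIBLE before testing the stop/wake condition, so a wakeup racing with the test leaves the task runnable instead of sleeping indefinitely, and restore TASK_RUNNING on exit. In schematic form (generic loop body, not the watchdog specifics):

        set_current_state(TASK_INTERRUPTIBLE);          /* arm before testing */
        while (!kthread_should_stop()) {
                schedule();                             /* sleep until woken */
                if (kthread_should_stop())
                        break;
                /* ... do the periodic work ... */
                set_current_state(TASK_INTERRUPTIBLE);  /* re-arm before re-test */
        }
        __set_current_state(TASK_RUNNING);
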
index 8b7e95411795793724f036594e1446e32f1e444b..b2a2d6889babc898794e7c7fb7907ef59a3b487b 100644 (file)
@@ -311,24 +311,6 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_min_bal_int_shares",
-               .data           = &sysctl_sched_min_bal_int_shares,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_max_bal_int_shares",
-               .data           = &sysctl_sched_max_bal_int_shares,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-#endif
 #endif
        {
                .ctl_name       = CTL_UNNUMBERED,
index fa9bb73dbdb41c4678ca0ad1da01ebcaf5e46cc1..2968298f8f364923010c440fe68f6e02d2f32e2a 100644 (file)
@@ -282,6 +282,7 @@ void tick_nohz_stop_sched_tick(void)
                        ts->idle_tick = ts->sched_timer.expires;
                        ts->tick_stopped = 1;
                        ts->idle_jiffies = last_jiffies;
+                       rcu_enter_nohz();
                }
 
                /*
@@ -375,6 +376,8 @@ void tick_nohz_restart_sched_tick(void)
                return;
        }
 
+       rcu_exit_nohz();
+
        /* Update jiffies first */
        select_nohz_load_balancer(0);
        now = ktime_get();
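
[Editor's note] These two hunks pair with the rcu_irq_exit() added to irq_exit() earlier: entering dynticks idle and leaving it (by restarting the tick or by handling an interrupt from idle) both notify RCU, which is what keeps the per-CPU progress counter's parity meaningful for the rcupreempt checks. A sketch of the assumed counter updates (simplified, not the actual rcupreempt code):

        /* Assumed convention: odd = CPU active, even = CPU in dynticks idle. */
        static DEFINE_PER_CPU(long, dynticks_progress_counter) = 1;

        static void rcu_enter_nohz_sketch(void)
        {
                smp_mb();                                       /* order prior accesses */
                __get_cpu_var(dynticks_progress_counter)++;     /* now even: idle */
        }

        static void rcu_exit_nohz_sketch(void)
        {
                __get_cpu_var(dynticks_progress_counter)++;     /* now odd: active */
                smp_mb();                                       /* order later accesses */
        }
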
index 495575a59ca643f752a2314b44e5e0e77a2963f5..a3b8d4c3f77a5e7e466bd8f5e5591f3dcaa2c3bd 100644 (file)
@@ -40,10 +40,12 @@ static inline void set_bit_area(unsigned long *map, unsigned long i,
        }
 }
 
-static inline int is_span_boundary(unsigned int index, unsigned int nr,
-                                  unsigned long shift,
-                                  unsigned long boundary_size)
+int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+                          unsigned long shift,
+                          unsigned long boundary_size)
 {
+       BUG_ON(!is_power_of_2(boundary_size));
+
        shift = (shift + index) & (boundary_size - 1);
        return shift + nr > boundary_size;
 }
@@ -57,7 +59,7 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 again:
        index = find_next_zero_area(map, size, start, nr, align_mask);
        if (index != -1) {
-               if (is_span_boundary(index, nr, shift, boundary_size)) {
+               if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
                        /* we could do more effectively */
                        start = index + 1;
                        goto again;
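
[Editor's note] Exporting the check as iommu_is_span_boundary() lets arch IOMMU code reuse the segment-boundary test, and the new BUG_ON documents that the mask trick only works for power-of-two boundary sizes. The arithmetic, with an illustrative worked example:

        static int crosses_segment(unsigned int index, unsigned int nr,
                                   unsigned long shift, unsigned long boundary_size)
        {
                unsigned long offset = (shift + index) & (boundary_size - 1);

                return offset + nr > boundary_size;
        }

        /* Example: boundary_size = 16, shift + index = 30, nr = 4
         *   offset = 30 & 15 = 14, and 14 + 4 = 18 > 16,
         *   so the mapping would straddle a segment boundary and is rejected. */
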
index d784daeb8571692e0fed288f17df60651a04366c..0d03252f87a8535db2a9fe00632560c6fa337f24 100644 (file)
@@ -153,6 +153,10 @@ static void kobject_init_internal(struct kobject *kobj)
                return;
        kref_init(&kobj->kref);
        INIT_LIST_HEAD(&kobj->entry);
+       kobj->state_in_sysfs = 0;
+       kobj->state_add_uevent_sent = 0;
+       kobj->state_remove_uevent_sent = 0;
+       kobj->state_initialized = 1;
 }
 
 
@@ -289,13 +293,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
                dump_stack();
        }
 
-       kref_init(&kobj->kref);
-       INIT_LIST_HEAD(&kobj->entry);
+       kobject_init_internal(kobj);
        kobj->ktype = ktype;
-       kobj->state_in_sysfs = 0;
-       kobj->state_add_uevent_sent = 0;
-       kobj->state_remove_uevent_sent = 0;
-       kobj->state_initialized = 1;
        return;
 
 error:
index 9f117bab5322e59b49cba45146b0e54176d206ca..a5b0dd93427a7266d7a26dcc7fd1266c602725fb 100644 (file)
@@ -32,5 +32,5 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
-obj-$(CONFIG_CGROUP_MEM_CONT) += memcontrol.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o
 
index 7e58322b7134ff1d04e40269e030bdd53e1c87f6..b0012e27fea8796da01cfb2172d1d5930c60d22f 100644 (file)
@@ -6,6 +6,10 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 
+#ifndef cache_line_size
+#define cache_line_size()      L1_CACHE_BYTES
+#endif
+
 /**
  * percpu_depopulate - depopulate per-cpu data for given cpu
  * @__pdata: per-cpu data to depopulate
@@ -52,6 +56,11 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
        struct percpu_data *pdata = __percpu_disguise(__pdata);
        int node = cpu_to_node(cpu);
 
+       /*
+        * We should make sure each CPU gets private memory.
+        */
+       size = roundup(size, cache_line_size());
+
        BUG_ON(pdata->ptrs[cpu]);
        if (node_online(node))
                pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
@@ -98,7 +107,11 @@ EXPORT_SYMBOL_GPL(__percpu_populate_mask);
  */
 void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
 {
-       void *pdata = kzalloc(nr_cpu_ids * sizeof(void *), gfp);
+       /*
+        * We allocate whole cache lines to avoid false sharing
+        */
+       size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
+       void *pdata = kzalloc(sz, gfp);
        void *__pdata = __percpu_disguise(pdata);
 
        if (unlikely(!pdata))
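
[Editor's note] Both allocpercpu.c hunks above pad per-CPU allocations out to whole cache lines so that two CPUs' private objects never share a line (false sharing). A small sketch of the rounding, assuming a power-of-two line size as on typical hardware:

        /* e.g. cache_line = 64: a 20-byte object is padded to 64 bytes. */
        static size_t percpu_padded_size(size_t size, size_t cache_line)
        {
                return (size + cache_line - 1) & ~(cache_line - 1);
        }
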
index 89e6286a7f57823e0427b7eeb84e6e39340764e1..dcacc811e70ede9cda49d4d2cf56a6d5d5404a56 100644 (file)
@@ -71,7 +71,25 @@ static void enqueue_huge_page(struct page *page)
        free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+       int nid;
+       struct page *page = NULL;
+
+       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+               if (!list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
+                       break;
+               }
+       }
+       return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                                unsigned long address)
 {
        int nid;
@@ -296,8 +314,10 @@ static int gather_surplus_pages(int delta)
        int needed, allocated;
 
        needed = (resv_huge_pages + delta) - free_huge_pages;
-       if (needed <= 0)
+       if (needed <= 0) {
+               resv_huge_pages += delta;
                return 0;
+       }
 
        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);
@@ -335,9 +355,12 @@ retry:
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accomodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
-        * allocator.
+        * allocator.  Commit the entire reservation here to prevent another
+        * process from stealing the pages as they are added to the pool but
+        * before they are reserved.
         */
        needed += allocated;
+       resv_huge_pages += delta;
        ret = 0;
 free:
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
@@ -371,6 +394,9 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
        struct page *page;
        unsigned long nr_pages;
 
+       /* Uncommit the reservation */
+       resv_huge_pages -= unused_resv_pages;
+
        nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
        while (nr_pages) {
@@ -402,7 +428,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
        struct page *page;
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page(vma, addr);
+       page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
@@ -417,7 +443,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 
        spin_lock(&hugetlb_lock);
        if (free_huge_pages > resv_huge_pages)
-               page = dequeue_huge_page(vma, addr);
+               page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        if (!page) {
                page = alloc_buddy_huge_page(vma, addr);
@@ -570,7 +596,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
        min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
-               struct page *page = dequeue_huge_page(NULL, 0);
+               struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);
@@ -1205,12 +1231,13 @@ static int hugetlb_acct_memory(long delta)
                if (gather_surplus_pages(delta) < 0)
                        goto out;
 
-               if (delta > cpuset_mems_nr(free_huge_pages_node))
+               if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+                       return_unused_surplus_pages(delta);
                        goto out;
+               }
        }
 
        ret = 0;
-       resv_huge_pages += delta;
        if (delta < 0)
                return_unused_surplus_pages((unsigned long) -delta);
 
index 631002d085d1374dfdfe734f761aaf3e6a65dde3..8b9f6cae938e98090a8850ba31a8aaf24cc1a1c7 100644 (file)
@@ -137,14 +137,21 @@ struct mem_cgroup {
         */
        struct mem_cgroup_stat stat;
 };
+static struct mem_cgroup init_mem_cgroup;
 
 /*
  * We use the lower bit of the page->page_cgroup pointer as a bit spin
- * lock. We need to ensure that page->page_cgroup is atleast two
- * byte aligned (based on comments from Nick Piggin)
+ * lock.  We need to ensure that page->page_cgroup is at least two
+ * byte aligned (based on comments from Nick Piggin).  But since
+ * bit_spin_lock doesn't actually set that lock bit in a non-debug
+ * uniprocessor kernel, we should avoid setting it here too.
  */
 #define PAGE_CGROUP_LOCK_BIT   0x0
-#define PAGE_CGROUP_LOCK               (1 << PAGE_CGROUP_LOCK_BIT)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#define PAGE_CGROUP_LOCK       (1 << PAGE_CGROUP_LOCK_BIT)
+#else
+#define PAGE_CGROUP_LOCK       0x0
+#endif
 
 /*
  * A page_cgroup page is associated with every page descriptor. The
@@ -154,37 +161,27 @@ struct page_cgroup {
        struct list_head lru;           /* per cgroup LRU list */
        struct page *page;
        struct mem_cgroup *mem_cgroup;
-       atomic_t ref_cnt;               /* Helpful when pages move b/w  */
-                                       /* mapped and cached states     */
-       int      flags;
+       int ref_cnt;                    /* cached, mapped, migrating */
+       int flags;
 };
 #define PAGE_CGROUP_FLAG_CACHE (0x1)   /* charged as cache */
 #define PAGE_CGROUP_FLAG_ACTIVE (0x2)  /* page is active in this cgroup */
 
-static inline int page_cgroup_nid(struct page_cgroup *pc)
+static int page_cgroup_nid(struct page_cgroup *pc)
 {
        return page_to_nid(pc->page);
 }
 
-static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
+static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 {
        return page_zonenum(pc->page);
 }
 
-enum {
-       MEM_CGROUP_TYPE_UNSPEC = 0,
-       MEM_CGROUP_TYPE_MAPPED,
-       MEM_CGROUP_TYPE_CACHED,
-       MEM_CGROUP_TYPE_ALL,
-       MEM_CGROUP_TYPE_MAX,
-};
-
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
 };
 
-
 /*
  * Always modified under lru lock. Then, not necessary to preempt_disable()
  */
@@ -193,23 +190,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
 {
        int val = (charge)? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;
-       VM_BUG_ON(!irqs_disabled());
 
+       VM_BUG_ON(!irqs_disabled());
        if (flags & PAGE_CGROUP_FLAG_CACHE)
-               __mem_cgroup_stat_add_safe(stat,
-                                       MEM_CGROUP_STAT_CACHE, val);
+               __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
        else
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
 }
 
-static inline struct mem_cgroup_per_zone *
+static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
 {
-       BUG_ON(!mem->info.nodeinfo[nid]);
        return &mem->info.nodeinfo[nid]->zoneinfo[zid];
 }
 
-static inline struct mem_cgroup_per_zone *
+static struct mem_cgroup_per_zone *
 page_cgroup_zoneinfo(struct page_cgroup *pc)
 {
        struct mem_cgroup *mem = pc->mem_cgroup;
@@ -234,18 +229,14 @@ static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
        return total;
 }
 
-static struct mem_cgroup init_mem_cgroup;
-
-static inline
-struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
 }
 
-static inline
-struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
@@ -267,81 +258,33 @@ void mm_free_cgroup(struct mm_struct *mm)
 
 static inline int page_cgroup_locked(struct page *page)
 {
-       return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
-                                       &page->page_cgroup);
+       return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
+static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
 {
-       int locked;
-
-       /*
-        * While resetting the page_cgroup we might not hold the
-        * page_cgroup lock. free_hot_cold_page() is an example
-        * of such a scenario
-        */
-       if (pc)
-               VM_BUG_ON(!page_cgroup_locked(page));
-       locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
-       page->page_cgroup = ((unsigned long)pc | locked);
+       VM_BUG_ON(!page_cgroup_locked(page));
+       page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
 }
 
 struct page_cgroup *page_get_page_cgroup(struct page *page)
 {
-       return (struct page_cgroup *)
-               (page->page_cgroup & ~PAGE_CGROUP_LOCK);
+       return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
 }
 
-static void __always_inline lock_page_cgroup(struct page *page)
+static void lock_page_cgroup(struct page *page)
 {
        bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-       VM_BUG_ON(!page_cgroup_locked(page));
-}
-
-static void __always_inline unlock_page_cgroup(struct page *page)
-{
-       bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-/*
- * Tie new page_cgroup to struct page under lock_page_cgroup()
- * This can fail if the page has been tied to a page_cgroup.
- * If success, returns 0.
- */
-static int page_cgroup_assign_new_page_cgroup(struct page *page,
-                                               struct page_cgroup *pc)
+static int try_lock_page_cgroup(struct page *page)
 {
-       int ret = 0;
-
-       lock_page_cgroup(page);
-       if (!page_get_page_cgroup(page))
-               page_assign_page_cgroup(page, pc);
-       else /* A page is tied to other pc. */
-               ret = 1;
-       unlock_page_cgroup(page);
-       return ret;
+       return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-/*
- * Clear page->page_cgroup member under lock_page_cgroup().
- * If given "pc" value is different from one page->page_cgroup,
- * page->cgroup is not cleared.
- * Returns a value of page->page_cgroup at lock taken.
- * A can can detect failure of clearing by following
- *  clear_page_cgroup(page, pc) == pc
- */
-
-static struct page_cgroup *clear_page_cgroup(struct page *page,
-                                               struct page_cgroup *pc)
+static void unlock_page_cgroup(struct page *page)
 {
-       struct page_cgroup *ret;
-       /* lock and clear */
-       lock_page_cgroup(page);
-       ret = page_get_page_cgroup(page);
-       if (likely(ret == pc))
-               page_assign_page_cgroup(page, NULL);
-       unlock_page_cgroup(page);
-       return ret;
+       bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
 static void __mem_cgroup_remove_list(struct page_cgroup *pc)
@@ -399,7 +342,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
        int ret;
 
        task_lock(task);
-       ret = task->mm && vm_match_cgroup(task->mm, mem);
+       ret = task->mm && mm_match_cgroup(task->mm, mem);
        task_unlock(task);
        return ret;
 }
@@ -407,18 +350,30 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+void mem_cgroup_move_lists(struct page *page, bool active)
 {
+       struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
-       if (!pc)
+       /*
+        * We cannot lock_page_cgroup while holding zone's lru_lock,
+        * because other holders of lock_page_cgroup can be interrupted
+        * with an attempt to rotate_reclaimable_page.  But we cannot
+        * safely get to page_cgroup without it, so just try_lock it:
+        * mem_cgroup_isolate_pages allows for page left on wrong list.
+        */
+       if (!try_lock_page_cgroup(page))
                return;
 
-       mz = page_cgroup_zoneinfo(pc);
-       spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_move_lists(pc, active);
-       spin_unlock_irqrestore(&mz->lru_lock, flags);
+       pc = page_get_page_cgroup(page);
+       if (pc) {
+               mz = page_cgroup_zoneinfo(pc);
+               spin_lock_irqsave(&mz->lru_lock, flags);
+               __mem_cgroup_move_lists(pc, active);
+               spin_unlock_irqrestore(&mz->lru_lock, flags);
+       }
+       unlock_page_cgroup(page);
 }
 
 /*
@@ -437,6 +392,7 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
        rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        return (int)((rss * 100L) / total);
 }
+
 /*
  * This function is called from vmscan.c. In page reclaiming loop. balance
  * between active and inactive list is calculated. For memory controller
@@ -500,7 +456,6 @@ long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
        nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-
        return (nr_inactive >> priority);
 }
 
@@ -586,26 +541,21 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         * with it
         */
 retry:
-       if (page) {
-               lock_page_cgroup(page);
-               pc = page_get_page_cgroup(page);
-               /*
-                * The page_cgroup exists and
-                * the page has already been accounted.
-                */
-               if (pc) {
-                       if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-                               /* this page is under being uncharged ? */
-                               unlock_page_cgroup(page);
-                               cpu_relax();
-                               goto retry;
-                       } else {
-                               unlock_page_cgroup(page);
-                               goto done;
-                       }
-               }
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       /*
+        * The page_cgroup exists and
+        * the page has already been accounted.
+        */
+       if (pc) {
+               VM_BUG_ON(pc->page != page);
+               VM_BUG_ON(pc->ref_cnt <= 0);
+
+               pc->ref_cnt++;
                unlock_page_cgroup(page);
+               goto done;
        }
+       unlock_page_cgroup(page);
 
        pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
@@ -623,16 +573,11 @@ retry:
        rcu_read_lock();
        mem = rcu_dereference(mm->mem_cgroup);
        /*
-        * For every charge from the cgroup, increment reference
-        * count
+        * For every charge from the cgroup, increment reference count
         */
        css_get(&mem->css);
        rcu_read_unlock();
 
-       /*
-        * If we created the page_cgroup, we should free it on exceeding
-        * the cgroup limit.
-        */
        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
                if (!(gfp_mask & __GFP_WAIT))
                        goto out;
@@ -641,12 +586,12 @@ retry:
                        continue;
 
                /*
-                * try_to_free_mem_cgroup_pages() might not give us a full
-                * picture of reclaim. Some pages are reclaimed and might be
-                * moved to swap cache or just unmapped from the cgroup.
-                * Check the limit again to see if the reclaim reduced the
-                * current usage of the cgroup before giving up
-                */
+                * try_to_free_mem_cgroup_pages() might not give us a full
+                * picture of reclaim. Some pages are reclaimed and might be
+                * moved to swap cache or just unmapped from the cgroup.
+                * Check the limit again to see if the reclaim reduced the
+                * current usage of the cgroup before giving up
+                */
                if (res_counter_check_under_limit(&mem->res))
                        continue;
 
@@ -657,14 +602,16 @@ retry:
                congestion_wait(WRITE, HZ/10);
        }
 
-       atomic_set(&pc->ref_cnt, 1);
+       pc->ref_cnt = 1;
        pc->mem_cgroup = mem;
        pc->page = page;
        pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
                pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-       if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+       lock_page_cgroup(page);
+       if (page_get_page_cgroup(page)) {
+               unlock_page_cgroup(page);
                /*
                 * Another charge has been added to this page already.
                 * We take lock_page_cgroup(page) again and read
@@ -673,17 +620,16 @@ retry:
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kfree(pc);
-               if (!page)
-                       goto done;
                goto retry;
        }
+       page_assign_page_cgroup(page, pc);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       /* Update statistics vector */
        __mem_cgroup_add_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+       unlock_page_cgroup(page);
 done:
        return 0;
 out:
@@ -693,70 +639,61 @@ err:
        return -ENOMEM;
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-                       gfp_t gfp_mask)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
        return mem_cgroup_charge_common(page, mm, gfp_mask,
-                       MEM_CGROUP_CHARGE_TYPE_MAPPED);
+                               MEM_CGROUP_CHARGE_TYPE_MAPPED);
 }
 
-/*
- * See if the cached pages should be charged at all?
- */
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
-       int ret = 0;
        if (!mm)
                mm = &init_mm;
-
-       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+       return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE);
-       return ret;
 }
 
 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge. This routine should be called with lock_page_cgroup held
+ * uncharge.
  */
-void mem_cgroup_uncharge(struct page_cgroup *pc)
+void mem_cgroup_uncharge_page(struct page *page)
 {
+       struct page_cgroup *pc;
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
-       struct page *page;
        unsigned long flags;
 
        /*
         * Check if our page_cgroup is valid
         */
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
        if (!pc)
-               return;
+               goto unlock;
 
-       if (atomic_dec_and_test(&pc->ref_cnt)) {
-               page = pc->page;
+       VM_BUG_ON(pc->page != page);
+       VM_BUG_ON(pc->ref_cnt <= 0);
+
+       if (--(pc->ref_cnt) == 0) {
                mz = page_cgroup_zoneinfo(pc);
-               /*
-                * get page->cgroup and clear it under lock.
-                * force_empty can drop page->cgroup without checking refcnt.
-                */
+               spin_lock_irqsave(&mz->lru_lock, flags);
+               __mem_cgroup_remove_list(pc);
+               spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+               page_assign_page_cgroup(page, NULL);
                unlock_page_cgroup(page);
-               if (clear_page_cgroup(page, pc) == pc) {
-                       mem = pc->mem_cgroup;
-                       css_put(&mem->css);
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       spin_lock_irqsave(&mz->lru_lock, flags);
-                       __mem_cgroup_remove_list(pc);
-                       spin_unlock_irqrestore(&mz->lru_lock, flags);
-                       kfree(pc);
-               }
-               lock_page_cgroup(page);
+
+               mem = pc->mem_cgroup;
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
+               css_put(&mem->css);
+
+               kfree(pc);
+               return;
        }
-}
 
-void mem_cgroup_uncharge_page(struct page *page)
-{
-       lock_page_cgroup(page);
-       mem_cgroup_uncharge(page_get_page_cgroup(page));
+unlock:
        unlock_page_cgroup(page);
 }
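
[Editor's note] A theme running through the memcontrol hunks above: page_cgroup's ref_cnt drops from an atomic_t to a plain int because every path that touches it (charge, uncharge, migration) now does so while holding the page's page_cgroup bit spinlock, which already serializes them; clear_page_cgroup() and page_cgroup_assign_new_page_cgroup() disappear for the same reason. The access pattern, reduced to its core (simplified, not the exact kernel code):

        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc)
                pc->ref_cnt++;          /* plain increment, safe under the bit lock */
        unlock_page_cgroup(page);

        /* The uncharge path decrements under the same lock and, on reaching
         * zero, unlinks the page_cgroup from the LRU and frees it. */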
 
@@ -764,63 +701,59 @@ void mem_cgroup_uncharge_page(struct page *page)
  * Returns non-zero if a page (under migration) has valid page_cgroup member.
  * Refcnt of page_cgroup is incremented.
  */
-
 int mem_cgroup_prepare_migration(struct page *page)
 {
        struct page_cgroup *pc;
-       int ret = 0;
+
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
-       if (pc && atomic_inc_not_zero(&pc->ref_cnt))
-               ret = 1;
+       if (pc)
+               pc->ref_cnt++;
        unlock_page_cgroup(page);
-       return ret;
+       return pc != NULL;
 }
 
 void mem_cgroup_end_migration(struct page *page)
 {
-       struct page_cgroup *pc;
-
-       lock_page_cgroup(page);
-       pc = page_get_page_cgroup(page);
-       mem_cgroup_uncharge(pc);
-       unlock_page_cgroup(page);
+       mem_cgroup_uncharge_page(page);
 }
+
 /*
- * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
+ * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
  * And no race with uncharge() routines because page_cgroup for *page*
  * has extra one reference by mem_cgroup_prepare_migration.
  */
-
 void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
        struct page_cgroup *pc;
-       struct mem_cgroup *mem;
-       unsigned long flags;
        struct mem_cgroup_per_zone *mz;
-retry:
+       unsigned long flags;
+
+       lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
-       if (!pc)
+       if (!pc) {
+               unlock_page_cgroup(page);
                return;
-       mem = pc->mem_cgroup;
+       }
+
        mz = page_cgroup_zoneinfo(pc);
-       if (clear_page_cgroup(page, pc) != pc)
-               goto retry;
        spin_lock_irqsave(&mz->lru_lock, flags);
-
        __mem_cgroup_remove_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+       page_assign_page_cgroup(page, NULL);
+       unlock_page_cgroup(page);
+
        pc->page = newpage;
        lock_page_cgroup(newpage);
        page_assign_page_cgroup(newpage, pc);
-       unlock_page_cgroup(newpage);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_add_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
-       return;
+
+       unlock_page_cgroup(newpage);
 }
 
 /*
@@ -829,14 +762,13 @@ retry:
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
  */
 #define FORCE_UNCHARGE_BATCH   (128)
-static void
-mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                            struct mem_cgroup_per_zone *mz,
                            int active)
 {
        struct page_cgroup *pc;
        struct page *page;
-       int count;
+       int count = FORCE_UNCHARGE_BATCH;
        unsigned long flags;
        struct list_head *list;
 
@@ -845,46 +777,36 @@ mem_cgroup_force_empty_list(struct mem_cgroup *mem,
        else
                list = &mz->inactive_list;
 
-       if (list_empty(list))
-               return;
-retry:
-       count = FORCE_UNCHARGE_BATCH;
        spin_lock_irqsave(&mz->lru_lock, flags);
-
-       while (--count && !list_empty(list)) {
+       while (!list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
                page = pc->page;
-               /* Avoid race with charge */
-               atomic_set(&pc->ref_cnt, 0);
-               if (clear_page_cgroup(page, pc) == pc) {
-                       css_put(&mem->css);
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       __mem_cgroup_remove_list(pc);
-                       kfree(pc);
-               } else  /* being uncharged ? ...do relax */
-                       break;
+               get_page(page);
+               spin_unlock_irqrestore(&mz->lru_lock, flags);
+               mem_cgroup_uncharge_page(page);
+               put_page(page);
+               if (--count <= 0) {
+                       count = FORCE_UNCHARGE_BATCH;
+                       cond_resched();
+               }
+               spin_lock_irqsave(&mz->lru_lock, flags);
        }
        spin_unlock_irqrestore(&mz->lru_lock, flags);
-       if (!list_empty(list)) {
-               cond_resched();
-               goto retry;
-       }
-       return;
 }
 
 /*
  * make mem_cgroup's charge to be 0 if there is no task.
  * This enables deleting this mem_cgroup.
  */
-
-int mem_cgroup_force_empty(struct mem_cgroup *mem)
+static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 {
        int ret = -EBUSY;
        int node, zid;
+
        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
-`       * active_list <-> inactive_list while we don't take a lock.
+        * active_list <-> inactive_list while we don't take a lock.
         * So, we have to do loop here until all lists are empty.
         */
        while (mem->res.usage > 0) {
@@ -906,9 +828,7 @@ out:
        return ret;
 }
 
-
-
-int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
+static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
        *tmp = memparse(buf, &buf);
        if (*buf != '\0')
@@ -945,8 +865,7 @@ static ssize_t mem_force_empty_write(struct cgroup *cont,
                                size_t nbytes, loff_t *ppos)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       int ret;
-       ret = mem_cgroup_force_empty(mem);
+       int ret = mem_cgroup_force_empty(mem);
        if (!ret)
                ret = nbytes;
        return ret;
@@ -955,7 +874,6 @@ static ssize_t mem_force_empty_write(struct cgroup *cont,
 /*
  * Note: This should be removed if cgroup supports write-only file.
  */
-
 static ssize_t mem_force_empty_read(struct cgroup *cont,
                                struct cftype *cft,
                                struct file *file, char __user *userbuf,
@@ -964,7 +882,6 @@ static ssize_t mem_force_empty_read(struct cgroup *cont,
        return -EINVAL;
 }
 
-
 static const struct mem_cgroup_stat_desc {
        const char *msg;
        u64 unit;
@@ -1017,8 +934,6 @@ static int mem_control_stat_open(struct inode *unused, struct file *file)
        return single_open(file, mem_control_stat_show, cont);
 }
 
-
-
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -1084,9 +999,6 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
        kfree(mem->info.nodeinfo[node]);
 }
 
-
-static struct mem_cgroup init_mem_cgroup;
-
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -1176,7 +1088,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 
 out:
        mmput(mm);
-       return;
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {
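A note on the memcontrol rework above: the atomic ref_cnt and the clear_page_cgroup() retry loops are gone because every access to pc->ref_cnt and to a page's page_cgroup pointer now happens with lock_page_cgroup() held. Under that rule a plain integer is sufficient, as in this minimal sketch of the take-a-reference path (the helper names are the ones used in the hunks; the wrapper itself is only illustrative):

        /* Illustrative only: take one reference on a page's page_cgroup. */
        static int pc_try_get(struct page *page)
        {
                struct page_cgroup *pc;

                lock_page_cgroup(page);
                pc = page_get_page_cgroup(page);
                if (pc)
                        pc->ref_cnt++;          /* plain int, serialized by the lock */
                unlock_page_cgroup(page);

                return pc != NULL;              /* non-zero if a reference was taken */
        }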
index ce3c9e4492d803b011f50ea8641e477056116ddd..0d14d1e58a5fa78e6b6a01369cc1e0869c77dfe9 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1711,7 +1711,7 @@ unlock:
        }
        return ret;
 oom_free_new:
-       __free_page(new_page);
+       page_cache_release(new_page);
 oom:
        if (old_page)
                page_cache_release(old_page);
@@ -2093,12 +2093,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unlock_page(page);
 
        if (write_access) {
-               /* XXX: We could OR the do_wp_page code with this one? */
-               if (do_wp_page(mm, vma, address,
-                               page_table, pmd, ptl, pte) & VM_FAULT_OOM) {
-                       mem_cgroup_uncharge_page(page);
-                       ret = VM_FAULT_OOM;
-               }
+               ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
+               if (ret & VM_FAULT_ERROR)
+                       ret &= VM_FAULT_ERROR;
                goto out;
        }
 
@@ -2163,7 +2160,7 @@ release:
        page_cache_release(page);
        goto unlock;
 oom_free_page:
-       __free_page(page);
+       page_cache_release(page);
 oom:
        return VM_FAULT_OOM;
 }
index a73504ff5ab982007b2e0355e49f0dedcac65899..4e0eccca5e265ac19bc507a171a2f720d27f8c21 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -153,11 +153,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
                return;
        }
 
-       if (mem_cgroup_charge(new, mm, GFP_KERNEL)) {
-               pte_unmap(ptep);
-               return;
-       }
-
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
@@ -169,6 +164,20 @@ static void remove_migration_pte(struct vm_area_struct *vma,
        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;
 
+       /*
+        * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
+        * Failure is not an option here: we're now expected to remove every
+        * migration pte, and will cause crashes otherwise.  Normally this
+        * is not an issue: mem_cgroup_prepare_migration bumped up the old
+        * page_cgroup count for safety, that's now attached to the new page,
+        * so this charge should just be another incrementation of the count,
+        * to keep in balance with rmap.c's mem_cgroup_uncharging.  But if
+        * there's been a force_empty, those reference counts may no longer
+        * be reliable, and this charge can actually fail: oh well, we don't
+        * make the situation any worse by proceeding as if it had succeeded.
+        */
+       mem_cgroup_charge(new, mm, GFP_ATOMIC);
+
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
index 4194b9db0104d54dea5ea871e0e111695fa49cba..44b2da11bf43f75a3714761ab09217339579977d 100644 (file)
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -412,7 +412,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        return oom_kill_task(p);
 }
 
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 {
        unsigned long points = 0;
index 8896e874a67dae2c558034dbbf27491b231c2b2a..402a504f12283f23cb510ec9ebfce4d0d52eea29 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/interrupt.h>
 #include <linux/pagemap.h>
+#include <linux/jiffies.h>
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
@@ -221,13 +222,19 @@ static inline int bad_range(struct zone *zone, struct page *page)
 
 static void bad_page(struct page *page)
 {
-       printk(KERN_EMERG "Bad page state in process '%s'\n"
-               KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
-               KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
-               KERN_EMERG "Backtrace:\n",
+       void *pc = page_get_page_cgroup(page);
+
+       printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
+               "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
                current->comm, page, (int)(2*sizeof(unsigned long)),
                (unsigned long)page->flags, page->mapping,
                page_mapcount(page), page_count(page));
+       if (pc) {
+               printk(KERN_EMERG "cgroup:%p\n", pc);
+               page_reset_bad_cgroup(page);
+       }
+       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
+               KERN_EMERG "Backtrace:\n");
        dump_stack();
        page->flags &= ~(1 << PG_lru    |
                        1 << PG_private |
@@ -453,6 +460,7 @@ static inline int free_pages_check(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
+               (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
@@ -602,6 +610,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
+               (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
@@ -988,7 +997,6 @@ static void free_hot_cold_page(struct page *page, int cold)
 
        if (!PageHighMem(page))
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-       VM_BUG_ON(page_get_page_cgroup(page));
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
@@ -1276,7 +1284,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
        if (!zlc)
                return NULL;
 
-       if (jiffies - zlc->last_full_zap > 1 * HZ) {
+       if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }
@@ -2527,7 +2535,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
                reset_page_mapcount(page);
-               page_assign_page_cgroup(page, NULL);
                SetPageReserved(page);
 
                /*
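One detail in the page_alloc hunks worth spelling out: zlc_setup() moves from the open-coded test jiffies - zlc->last_full_zap > 1 * HZ to time_after(), the jiffies helper that keeps working when the counter wraps around. Stripped of its type checks the helper is just a signed comparison of the difference; a minimal sketch, with the usage lines mirroring the hunk above:

        /* include/linux/jiffies.h, simplified: true when a is later than b */
        #define time_after(a, b)        ((long)((b) - (a)) < 0)

        /* usage as in zlc_setup() */
        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }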
index 8fd527c4e2bff8e45c929c42f01c60834c573ef9..0c9a2df06c39cc3733f0e5b6975d82d4b82807ae 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -321,7 +321,7 @@ static int page_referenced_anon(struct page *page,
                 * counting on behalf of references from different
                 * cgroups
                 */
-               if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont))
+               if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
@@ -382,7 +382,7 @@ static int page_referenced_file(struct page *page,
                 * counting on behalf of references from different
                 * cgroups
                 */
-               if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont))
+               if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
index 90b576cbc06e40fb0f43d06f37f5ef24deee9f13..3372bc579e896f474ef24cffa929b69228ff9445 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1370,14 +1370,17 @@ repeat:
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
-                       page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* allow reclaim from this memory cgroup */
-                               error = mem_cgroup_cache_charge(NULL,
+                               error = mem_cgroup_cache_charge(swappage,
                                        current->mm, gfp & ~__GFP_HIGHMEM);
-                               if (error)
+                               if (error) {
+                                       page_cache_release(swappage);
                                        goto failed;
+                               }
+                               mem_cgroup_uncharge_page(swappage);
                        }
+                       page_cache_release(swappage);
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
index 74c65af0a54f4c112e6f2bc223018deb74771d25..0863fd38a5ce06a87392f5ba753606a516a5d262 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -291,32 +291,16 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 #endif
 }
 
-/*
- * The end pointer in a slab is special. It points to the first object in the
- * slab but has bit 0 set to mark it.
- *
- * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
- * in the mapping set.
- */
-static inline int is_end(void *addr)
-{
-       return (unsigned long)addr & PAGE_MAPPING_ANON;
-}
-
-static void *slab_address(struct page *page)
-{
-       return page->end - PAGE_MAPPING_ANON;
-}
-
+/* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, const void *object)
 {
        void *base;
 
-       if (object == page->end)
+       if (!object)
                return 1;
 
-       base = slab_address(page);
+       base = page_address(page);
        if (object < base || object >= base + s->objects * s->size ||
                (object - base) % s->size) {
                return 0;
@@ -349,8 +333,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-       for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
-               __p))
+       for (__p = (__free); __p; __p = get_freepointer((__s), __p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -502,7 +485,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
        unsigned int off;       /* Offset of last byte */
-       u8 *addr = slab_address(page);
+       u8 *addr = page_address(page);
 
        print_tracking(s, p);
 
@@ -637,7 +620,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  *     A. Free pointer (if we cannot overwrite object on free)
  *     B. Tracking data for SLAB_STORE_USER
  *     C. Padding to reach required alignment boundary or at mininum
- *             one word if debuggin is on to be able to detect writes
+ *             one word if debugging is on to be able to detect writes
  *             before the word boundary.
  *
  *     Padding is done using 0x5a (POISON_INUSE)
@@ -680,7 +663,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
        if (!(s->flags & SLAB_POISON))
                return 1;
 
-       start = slab_address(page);
+       start = page_address(page);
        end = start + (PAGE_SIZE << s->order);
        length = s->objects * s->size;
        remainder = end - (start + length);
@@ -748,7 +731,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                 * of the free objects in this slab. May cause
                 * another error because the object count is now wrong.
                 */
-               set_freepointer(s, p, page->end);
+               set_freepointer(s, p, NULL);
                return 0;
        }
        return 1;
@@ -782,18 +765,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
        void *fp = page->freelist;
        void *object = NULL;
 
-       while (fp != page->end && nr <= s->objects) {
+       while (fp && nr <= s->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        if (object) {
                                object_err(s, page, object,
                                        "Freechain corrupt");
-                               set_freepointer(s, object, page->end);
+                               set_freepointer(s, object, NULL);
                                break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
-                               page->freelist = page->end;
+                               page->freelist = NULL;
                                page->inuse = s->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
@@ -870,7 +853,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
        if (!check_slab(s, page))
                goto bad;
 
-       if (object && !on_freelist(s, page, object)) {
+       if (!on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already allocated");
                goto bad;
        }
@@ -880,7 +863,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
                goto bad;
        }
 
-       if (object && !check_object(s, page, object, 0))
+       if (!check_object(s, page, object, 0))
                goto bad;
 
        /* Success perform special debug activities for allocs */
@@ -899,7 +882,7 @@ bad:
                 */
                slab_fix(s, "Marking all objects used");
                page->inuse = s->objects;
-               page->freelist = page->end;
+               page->freelist = NULL;
        }
        return 0;
 }
@@ -939,7 +922,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
        }
 
        /* Special debug activities for freeing objects */
-       if (!SlabFrozen(page) && page->freelist == page->end)
+       if (!SlabFrozen(page) && !page->freelist)
                remove_full(s, page);
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
@@ -1015,30 +998,11 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
        void (*ctor)(struct kmem_cache *, void *))
 {
        /*
-        * The page->offset field is only 16 bit wide. This is an offset
-        * in units of words from the beginning of an object. If the slab
-        * size is bigger then we cannot move the free pointer behind the
-        * object anymore.
-        *
-        * On 32 bit platforms the limit is 256k. On 64bit platforms
-        * the limit is 512k.
-        *
-        * Debugging or ctor may create a need to move the free
-        * pointer. Fail if this happens.
+        * Enable debugging if selected on the kernel commandline.
         */
-       if (objsize >= 65535 * sizeof(void *)) {
-               BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
-                               SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-               BUG_ON(ctor);
-       } else {
-               /*
-                * Enable debugging if selected on the kernel commandline.
-                */
-               if (slub_debug && (!slub_debug_slabs ||
-                   strncmp(slub_debug_slabs, name,
-                       strlen(slub_debug_slabs)) == 0))
-                               flags |= slub_debug;
-       }
+       if (slub_debug && (!slub_debug_slabs ||
+           strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
+                       flags |= slub_debug;
 
        return flags;
 }
@@ -1124,7 +1088,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
                SetSlabDebug(page);
 
        start = page_address(page);
-       page->end = start + 1;
 
        if (unlikely(s->flags & SLAB_POISON))
                memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1136,7 +1099,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
                last = p;
        }
        setup_object(s, page, last);
-       set_freepointer(s, last, page->end);
+       set_freepointer(s, last, NULL);
 
        page->freelist = start;
        page->inuse = 0;
@@ -1152,7 +1115,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                void *p;
 
                slab_pad_check(s, page);
-               for_each_object(p, s, slab_address(page))
+               for_each_object(p, s, page_address(page))
                        check_object(s, page, p, 0);
                ClearSlabDebug(page);
        }
@@ -1162,7 +1125,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                -pages);
 
-       page->mapping = NULL;
        __free_pages(page, s->order);
 }
 
@@ -1307,7 +1269,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
         * may return off node objects because partial slabs are obtained
         * from other nodes and filled up.
         *
-        * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
+        * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
         * defrag_ratio = 1000) then every (well almost) allocation will
         * first attempt to defrag slab caches on other nodes. This means
         * scanning over all nodes to look for partial slabs which may be
@@ -1366,7 +1328,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
        ClearSlabFrozen(page);
        if (page->inuse) {
 
-               if (page->freelist != page->end) {
+               if (page->freelist) {
                        add_partial(n, page, tail);
                        stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                } else {
@@ -1382,9 +1344,11 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
                         * Adding an empty slab to the partial slabs in order
                         * to avoid page allocator overhead. This slab needs
                         * to come after the other slabs with objects in
-                        * order to fill them up. That way the size of the
-                        * partial list stays small. kmem_cache_shrink can
-                        * reclaim empty slabs from the partial list.
+                        * so that the others get filled first. That way the
+                        * size of the partial list stays small.
+                        *
+                        * kmem_cache_shrink can reclaim any empty slabs from the
+                        * partial list.
                         */
                        add_partial(n, page, 1);
                        slab_unlock(page);
@@ -1407,15 +1371,11 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
        if (c->freelist)
                stat(c, DEACTIVATE_REMOTE_FREES);
        /*
-        * Merge cpu freelist into freelist. Typically we get here
+        * Merge cpu freelist into slab freelist. Typically we get here
         * because both freelists are empty. So this is unlikely
         * to occur.
-        *
-        * We need to use _is_end here because deactivate slab may
-        * be called for a debug slab. Then c->freelist may contain
-        * a dummy pointer.
         */
-       while (unlikely(!is_end(c->freelist))) {
+       while (unlikely(c->freelist)) {
                void **object;
 
                tail = 0;       /* Hot objects. Put the slab first */
@@ -1442,6 +1402,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 /*
  * Flush cpu slab.
+ *
  * Called from IPI handler with interrupts disabled.
  */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
@@ -1500,7 +1461,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * rest of the freelist to the lockless freelist.
  *
  * And if we were unable to get a new slab from the partial slab lists then
- * we need to allocate a new slab. This is slowest path since we may sleep.
+ * we need to allocate a new slab. This is the slowest path since it involves
+ * a call to the page allocator and the setup of a new slab.
  */
 static void *__slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
@@ -1514,18 +1476,19 @@ static void *__slab_alloc(struct kmem_cache *s,
        slab_lock(c->page);
        if (unlikely(!node_match(c, node)))
                goto another_slab;
+
        stat(c, ALLOC_REFILL);
+
 load_freelist:
        object = c->page->freelist;
-       if (unlikely(object == c->page->end))
+       if (unlikely(!object))
                goto another_slab;
        if (unlikely(SlabDebug(c->page)))
                goto debug;
 
-       object = c->page->freelist;
        c->freelist = object[c->offset];
        c->page->inuse = s->objects;
-       c->page->freelist = c->page->end;
+       c->page->freelist = NULL;
        c->node = page_to_nid(c->page);
 unlock_out:
        slab_unlock(c->page);
@@ -1578,7 +1541,6 @@ new_slab:
 
        return NULL;
 debug:
-       object = c->page->freelist;
        if (!alloc_debug_processing(s, c->page, object, addr))
                goto another_slab;
 
@@ -1607,7 +1569,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
-       if (unlikely(is_end(c->freelist) || !node_match(c, node)))
+       if (unlikely(!c->freelist || !node_match(c, node)))
 
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1659,6 +1621,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
        if (unlikely(SlabDebug(page)))
                goto debug;
+
 checks_ok:
        prior = object[offset] = page->freelist;
        page->freelist = object;
@@ -1673,11 +1636,10 @@ checks_ok:
                goto slab_empty;
 
        /*
-        * Objects left in the slab. If it
-        * was not on the partial list before
+        * Objects left in the slab. If it was not on the partial list before
         * then add it.
         */
-       if (unlikely(prior == page->end)) {
+       if (unlikely(!prior)) {
                add_partial(get_node(s, page_to_nid(page)), page, 1);
                stat(c, FREE_ADD_PARTIAL);
        }
@@ -1687,7 +1649,7 @@ out_unlock:
        return;
 
 slab_empty:
-       if (prior != page->end) {
+       if (prior) {
                /*
                 * Slab still on the partial list.
                 */
@@ -1724,8 +1686,8 @@ static __always_inline void slab_free(struct kmem_cache *s,
        unsigned long flags;
 
        local_irq_save(flags);
-       debug_check_no_locks_freed(object, s->objsize);
        c = get_cpu_slab(s, smp_processor_id());
+       debug_check_no_locks_freed(object, c->objsize);
        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
@@ -1888,13 +1850,11 @@ static unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size)
 {
        /*
-        * If the user wants hardware cache aligned objects then
-        * follow that suggestion if the object is sufficiently
-        * large.
+        * If the user wants hardware cache aligned objects then follow that
+        * suggestion if the object is sufficiently large.
         *
-        * The hardware cache alignment cannot override the
-        * specified alignment though. If that is greater
-        * then use it.
+        * The hardware cache alignment cannot override the specified
+        * alignment though. If that is greater then use it.
         */
        if ((flags & SLAB_HWCACHE_ALIGN) &&
                        size > cache_line_size() / 2)
@@ -1910,7 +1870,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
                        struct kmem_cache_cpu *c)
 {
        c->page = NULL;
-       c->freelist = (void *)PAGE_MAPPING_ANON;
+       c->freelist = NULL;
        c->node = 0;
        c->offset = s->offset / sizeof(void *);
        c->objsize = s->objsize;
@@ -2092,6 +2052,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
        init_kmem_cache_node(n);
        atomic_long_inc(&n->nr_slabs);
+
        /*
         * lockdep requires consistent irq usage for each lock
         * so even though there cannot be a race this early in
@@ -2172,6 +2133,14 @@ static int calculate_sizes(struct kmem_cache *s)
        unsigned long size = s->objsize;
        unsigned long align = s->align;
 
+       /*
+        * Round up object size to the next word boundary. We can only
+        * place the free pointer at word boundaries and this determines
+        * the possible location of the free pointer.
+        */
+       size = ALIGN(size, sizeof(void *));
+
+#ifdef CONFIG_SLUB_DEBUG
        /*
         * Determine if we can poison the object itself. If the user of
         * the slab may touch the object after free or before allocation
@@ -2183,14 +2152,7 @@ static int calculate_sizes(struct kmem_cache *s)
        else
                s->flags &= ~__OBJECT_POISON;
 
-       /*
-        * Round up object size to the next word boundary. We can only
-        * place the free pointer at word boundaries and this determines
-        * the possible location of the free pointer.
-        */
-       size = ALIGN(size, sizeof(void *));
 
-#ifdef CONFIG_SLUB_DEBUG
        /*
         * If we are Redzoning then check if there is some space between the
         * end of the object and the free pointer. If not then add an
@@ -2343,7 +2305,7 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object)
        /*
         * We could also check if the object is on the slabs freelist.
         * But this would be too expensive and it seems that the main
-        * purpose of kmem_ptr_valid is to check if the object belongs
+        * purpose of kmem_ptr_valid() is to check if the object belongs
         * to a certain slab.
         */
        return 1;
@@ -2630,13 +2592,24 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+       struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
+                                               get_order(size));
+
+       if (page)
+               return page_address(page);
+       else
+               return NULL;
+}
+
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE))
-               return kmalloc_large(size, flags);
+               return kmalloc_large_node(size, flags, node);
 
        s = get_slab(size, flags);
 
@@ -2653,19 +2626,17 @@ size_t ksize(const void *object)
        struct page *page;
        struct kmem_cache *s;
 
-       BUG_ON(!object);
        if (unlikely(object == ZERO_SIZE_PTR))
                return 0;
 
        page = virt_to_head_page(object);
-       BUG_ON(!page);
 
        if (unlikely(!PageSlab(page)))
                return PAGE_SIZE << compound_order(page);
 
        s = page->slab;
-       BUG_ON(!s);
 
+#ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
@@ -2673,6 +2644,7 @@ size_t ksize(const void *object)
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->objsize;
 
+#endif
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
@@ -2680,7 +2652,6 @@ size_t ksize(const void *object)
         */
        if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
-
        /*
         * Else we can use all the padding etc for the allocation
         */
@@ -2957,7 +2928,7 @@ void __init kmem_cache_init(void)
        /*
         * Patch up the size_index table if we have strange large alignment
         * requirements for the kmalloc array. This is only the case for
-        * mips it seems. The standard arches will not generate any code here.
+        * MIPS it seems. The standard arches will not generate any code here.
         *
         * Largest permitted alignment is 256 bytes due to the way we
         * handle the index determination for the smaller caches.
@@ -2986,7 +2957,6 @@ void __init kmem_cache_init(void)
        kmem_size = sizeof(struct kmem_cache);
 #endif
 
-
        printk(KERN_INFO
                "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                " CPUs=%d, Nodes=%d\n",
@@ -3083,12 +3053,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                 */
                for_each_online_cpu(cpu)
                        get_cpu_slab(s, cpu)->objsize = s->objsize;
+
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                up_write(&slub_lock);
+
                if (sysfs_slab_alias(s, name))
                        goto err;
                return s;
        }
+
        s = kmalloc(kmem_size, GFP_KERNEL);
        if (s) {
                if (kmem_cache_open(s, GFP_KERNEL, name,
@@ -3184,7 +3157,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        struct kmem_cache *s;
 
        if (unlikely(size > PAGE_SIZE))
-               return kmalloc_large(size, gfpflags);
+               return kmalloc_large_node(size, gfpflags, node);
 
        s = get_slab(size, gfpflags);
 
@@ -3199,7 +3172,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
                                                unsigned long *map)
 {
        void *p;
-       void *addr = slab_address(page);
+       void *addr = page_address(page);
 
        if (!check_slab(s, page) ||
                        !on_freelist(s, page, NULL))
@@ -3482,7 +3455,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
                struct page *page, enum track_item alloc)
 {
-       void *addr = slab_address(page);
+       void *addr = page_address(page);
        DECLARE_BITMAP(map, s->objects);
        void *p;
 
@@ -3591,8 +3564,8 @@ enum slab_stat_type {
 #define SO_CPU         (1 << SL_CPU)
 #define SO_OBJECTS     (1 << SL_OBJECTS)
 
-static unsigned long slab_objects(struct kmem_cache *s,
-                       char *buf, unsigned long flags)
+static ssize_t show_slab_objects(struct kmem_cache *s,
+                           char *buf, unsigned long flags)
 {
        unsigned long total = 0;
        int cpu;
@@ -3602,6 +3575,8 @@ static unsigned long slab_objects(struct kmem_cache *s,
        unsigned long *per_cpu;
 
        nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+       if (!nodes)
+               return -ENOMEM;
        per_cpu = nodes + nr_node_ids;
 
        for_each_possible_cpu(cpu) {
@@ -3754,25 +3729,25 @@ SLAB_ATTR_RO(aliases);
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-       return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+       return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
 }
 SLAB_ATTR_RO(slabs);
 
 static ssize_t partial_show(struct kmem_cache *s, char *buf)
 {
-       return slab_objects(s, buf, SO_PARTIAL);
+       return show_slab_objects(s, buf, SO_PARTIAL);
 }
 SLAB_ATTR_RO(partial);
 
 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
 {
-       return slab_objects(s, buf, SO_CPU);
+       return show_slab_objects(s, buf, SO_CPU);
 }
 SLAB_ATTR_RO(cpu_slabs);
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-       return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+       return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
@@ -3971,7 +3946,6 @@ SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
 #ifdef CONFIG_SLUB_STATS
-
 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 {
        unsigned long sum  = 0;
@@ -4155,8 +4129,8 @@ static struct kset *slab_kset;
 #define ID_STR_LENGTH 64
 
 /* Create a unique string id for a slab cache:
- * format
- * :[flags-]size:[memory address of kmemcache]
+ *
+ * Format      :[flags-]size
  */
 static char *create_unique_id(struct kmem_cache *s)
 {
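Most of the slub hunks above follow from one decision: a slab freelist now ends in a plain NULL instead of the tagged page->end sentinel, so is_end() and slab_address() disappear and page_address() is used directly. Walking a freelist therefore needs no special end test; a minimal sketch built on get_freepointer() from slub.c (the counting helper itself is only illustrative):

        /* Illustrative only: count the free objects left in one slab page. */
        static unsigned long count_free_objects(struct kmem_cache *s, struct page *page)
        {
                void *p;
                unsigned long nr = 0;

                for (p = page->freelist; p; p = get_freepointer(s, p))
                        nr++;                   /* chain terminates at NULL */

                return nr;
        }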
index 710a20bb9749cc9c9397e85bb59643dc38c27698..d4ec59aa5c4632962f8adf1aad5cef68afbe5701 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -176,7 +176,7 @@ void activate_page(struct page *page)
                SetPageActive(page);
                add_page_to_active_list(zone, page);
                __count_vm_event(PGACTIVATE);
-               mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+               mem_cgroup_move_lists(page, true);
        }
        spin_unlock_irq(&zone->lru_lock);
 }
index c35c49e54fb6527eae91cb02177fc2b05a00abad..7d20ce41ecf52c2cd4027207558734f835124992 100644 (file)
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -134,8 +134,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 }
 
 /**
- * truncate_inode_pages - truncate range of pages specified by start and
- * end byte offsets
+ * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  * @lend: offset to which to truncate
index a26dabd62fed40c8ec7832dc4656feaf7d3909f9..45711585684ec74c01f7b93ae28a8d8b4f62bfb9 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -126,7 +126,7 @@ long vm_total_pages;        /* The total number of pages which the VM controls */
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 #define scan_global_lru(sc)    (!(sc)->mem_cgroup)
 #else
 #define scan_global_lru(sc)    (1)
@@ -1128,7 +1128,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->inactive_list);
-               mem_cgroup_move_lists(page_get_page_cgroup(page), false);
+               mem_cgroup_move_lists(page, false);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
@@ -1156,8 +1156,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
                VM_BUG_ON(!PageActive(page));
+
                list_move(&page->lru, &zone->active_list);
-               mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+               mem_cgroup_move_lists(page, true);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
@@ -1427,7 +1428,7 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
        return do_try_to_free_pages(zones, gfp_mask, &sc);
 }
 
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                                gfp_t gfp_mask)
index a0ec4792559713a0afe6fb1051be7d0b3cec31f1..146cfb0e98829eac8dc16263ac5912b0f6eedab4 100644 (file)
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -161,11 +161,10 @@ int __init vlan_proc_init(void)
        if (!proc_vlan_dir)
                goto err;
 
-       proc_vlan_conf = create_proc_entry(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
-                                          proc_vlan_dir);
+       proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
+                                    proc_vlan_dir, &vlan_fops);
        if (!proc_vlan_conf)
                goto err;
-       proc_vlan_conf->proc_fops = &vlan_fops;
        return 0;
 
 err:
@@ -182,13 +181,11 @@ int vlan_proc_add_dev(struct net_device *vlandev)
 {
        struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
 
-       dev_info->dent = create_proc_entry(vlandev->name,
-                                          S_IFREG|S_IRUSR|S_IWUSR,
-                                          proc_vlan_dir);
+       dev_info->dent = proc_create(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
+                                    proc_vlan_dir, &vlandev_fops);
        if (!dev_info->dent)
                return -ENOBUFS;
 
-       dev_info->dent->proc_fops = &vlandev_fops;
        dev_info->dent->data = vlandev;
        return 0;
 }
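The vlanproc hunks above are the first of a series (appletalk, atm, net/core and net/ipv4 below) applying the same conversion: create_proc_entry() followed by a separate proc_fops assignment can briefly expose a /proc entry that has no file operations yet, while proc_create() registers the entry with its file_operations already attached. The pattern in general form (the entry name and fops symbol here are placeholders):

        /* old, racy pattern: the entry is live before proc_fops is set */
        pde = create_proc_entry("example", S_IRUGO, parent);
        if (!pde)
                return -ENOMEM;
        pde->proc_fops = &example_fops;

        /* new pattern: one call, fops attached from the start */
        pde = proc_create("example", S_IRUGO, parent, &example_fops);
        if (!pde)
                return -ENOMEM;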
index 8e8dcfd532dbba7ca73edae10ec88a52879ef08f..162199a2d74f18e6b41ee7322c2ed8d66dfc4539 100644 (file)
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -283,25 +283,24 @@ int __init atalk_proc_init(void)
                goto out;
        atalk_proc_dir->owner = THIS_MODULE;
 
-       p = create_proc_entry("interface", S_IRUGO, atalk_proc_dir);
+       p = proc_create("interface", S_IRUGO, atalk_proc_dir,
+                       &atalk_seq_interface_fops);
        if (!p)
                goto out_interface;
-       p->proc_fops = &atalk_seq_interface_fops;
 
-       p = create_proc_entry("route", S_IRUGO, atalk_proc_dir);
+       p = proc_create("route", S_IRUGO, atalk_proc_dir,
+                       &atalk_seq_route_fops);
        if (!p)
                goto out_route;
-       p->proc_fops = &atalk_seq_route_fops;
 
-       p = create_proc_entry("socket", S_IRUGO, atalk_proc_dir);
+       p = proc_create("socket", S_IRUGO, atalk_proc_dir,
+                       &atalk_seq_socket_fops);
        if (!p)
                goto out_socket;
-       p->proc_fops = &atalk_seq_socket_fops;
 
-       p = create_proc_entry("arp", S_IRUGO, atalk_proc_dir);
+       p = proc_create("arp", S_IRUGO, atalk_proc_dir, &atalk_seq_arp_fops);
        if (!p)
                goto out_arp;
-       p->proc_fops = &atalk_seq_arp_fops;
 
        rc = 0;
 out:
index 574d9a9641764f0a0d354a5b871ac33572f3d89f..1b228065e745c812ee8db3d6ca0b3bb68586b796 100644 (file)
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -742,9 +742,9 @@ static int __init br2684_init(void)
 {
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry *p;
-       if ((p = create_proc_entry("br2684", 0, atm_proc_root)) == NULL)
+       p = proc_create("br2684", 0, atm_proc_root, &br2684_proc_ops);
+       if (p == NULL)
                return -ENOMEM;
-       p->proc_fops = &br2684_proc_ops;
 #endif
        register_atm_ioctl(&br2684_ioctl_ops);
        return 0;
index 86b885ec1cbd4b6cf992f2332bab5a0bd4c445a3..d30167c0b48eb980ed337237ddc50b797d8c5a1c 100644 (file)
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -962,9 +962,7 @@ static int __init atm_clip_init(void)
        {
                struct proc_dir_entry *p;
 
-               p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
-               if (p)
-                       p->proc_fops = &arp_seq_fops;
+               p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
        }
 #endif
 
index 1a8c4c6c0cd054d928fd974ec33094b8586ec305..0e450d12f0358651d9fd5bb0ecada2461c7960f8 100644 (file)
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1249,9 +1249,7 @@ static int __init lane_module_init(void)
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry *p;
 
-       p = create_proc_entry("lec", S_IRUGO, atm_proc_root);
-       if (p)
-               p->proc_fops = &lec_seq_fops;
+       p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
 #endif
 
        register_atm_ioctl(&lane_ioctl_ops);
index 91f3ffc90dbdc938a211b46aaccb565a51b5be40..4990541ef5da4ca7d0b2325bd39ebb4ffbf3c290 100644 (file)
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -276,12 +276,11 @@ int mpc_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = create_proc_entry(STAT_FILE_NAME, 0, atm_proc_root);
+       p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
        if (!p) {
                printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
                return -ENOMEM;
        }
-       p->proc_fops = &mpc_file_operations;
        p->owner = THIS_MODULE;
        return 0;
 }
index 49125110bb8b0910c777dcb2f8402f9271f294c2..e9693aed7ef8f1cce3f87bc21ea2ba94ec582b38 100644 (file)
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -435,11 +435,11 @@ int atm_proc_dev_register(struct atm_dev *dev)
                goto err_out;
        sprintf(dev->proc_name,"%s:%d",dev->type, dev->number);
 
-       dev->proc_entry = create_proc_entry(dev->proc_name, 0, atm_proc_root);
+       dev->proc_entry = proc_create(dev->proc_name, 0, atm_proc_root,
+                                     &proc_atm_dev_ops);
        if (!dev->proc_entry)
                goto err_free_name;
        dev->proc_entry->data = dev;
-       dev->proc_entry->proc_fops = &proc_atm_dev_ops;
        dev->proc_entry->owner = THIS_MODULE;
        return 0;
 err_free_name:
@@ -492,10 +492,10 @@ int __init atm_proc_init(void)
        for (e = atm_proc_ents; e->name; e++) {
                struct proc_dir_entry *dirent;
 
-               dirent = create_proc_entry(e->name, S_IRUGO, atm_proc_root);
+               dirent = proc_create(e->name, S_IRUGO,
+                                    atm_proc_root, e->proc_fops);
                if (!dirent)
                        goto err_out_remove;
-               dirent->proc_fops = e->proc_fops;
                dirent->owner = THIS_MODULE;
                e->dirent = dirent;
        }
index a8811c0a0ceaf436f74002397041765e5757f7a3..34f8bf98bc0529b4d69f7c60ff6f739ed63077a3 100644 (file)
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -417,6 +417,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
                l2cap_sock_kill(sk);
        }
 
+       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+               del_timer_sync(&conn->info_timer);
+
        hcon->l2cap_data = NULL;
        kfree(conn);
 }
index 2328acbd16cdf1c87dd84c8e151cfc466126b256..d9a02b2cc28940d1dde4f5f7ee6064f9581b046b 100644 (file)
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -839,7 +839,7 @@ static void neigh_timer_handler(unsigned long arg)
                struct sk_buff *skb = skb_peek(&neigh->arp_queue);
                /* keep skb alive even if arp_queue overflows */
                if (skb)
-                       skb_get(skb);
+                       skb = skb_copy(skb, GFP_ATOMIC);
                write_unlock(&neigh->lock);
                neigh->ops->solicit(neigh, skb);
                atomic_inc(&neigh->probes);
@@ -1389,10 +1389,10 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
                panic("cannot create neighbour cache statistics");
 
 #ifdef CONFIG_PROC_FS
-       tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
+       tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat,
+                              &neigh_stat_seq_fops);
        if (!tbl->pde)
                panic("cannot create neighbour proc dir entry");
-       tbl->pde->proc_fops = &neigh_stat_seq_fops;
        tbl->pde->data = tbl;
 #endif
 
index 6faa128a4c8ef22d110c22b2add2dc8a4ffbbfd3..4b7e756181c9ec96eea6b0f5397aec3c87008a5a 100644 (file)
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL  50
+#define NETPOLL_RX_ENABLED  1
+#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;
 
+       npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
 
        work = napi->poll(napi, budget);
 
        atomic_dec(&trapped);
+       npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
        return budget - work;
 }
@@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb)
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;
 
-       /* if receive ARP during middle of NAPI poll, then queue */
+       /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
@@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb)
        return 1;
 
 out:
-       /* If packet received while already in poll then just
-        * silently drop.
-        */
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
@@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np)
                        goto release;
                }
 
+               npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;
 
                spin_lock_init(&npinfo->rx_lock);
@@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
 
        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
+               npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }
@@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np)
                        if (npinfo->rx_np == np) {
                                spin_lock_irqsave(&npinfo->rx_lock, flags);
                                npinfo->rx_np = NULL;
+                               npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                        }
 
index bfcdfaebca5c809b79b9076b56748e9b342cd776..20e63b302ba613ea7112365376d0773f4a7a8fbf 100644 (file)
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3570,14 +3570,14 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
        if (err)
                goto out1;
 
-       pkt_dev->entry = create_proc_entry(ifname, 0600, pg_proc_dir);
+       pkt_dev->entry = proc_create(ifname, 0600,
+                                    pg_proc_dir, &pktgen_if_fops);
        if (!pkt_dev->entry) {
                printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
                       PG_PROC_DIR, ifname);
                err = -EINVAL;
                goto out2;
        }
-       pkt_dev->entry->proc_fops = &pktgen_if_fops;
        pkt_dev->entry->data = pkt_dev;
 #ifdef CONFIG_XFRM
        pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
@@ -3628,7 +3628,7 @@ static int __init pktgen_create_thread(int cpu)
        kthread_bind(p, cpu);
        t->tsk = p;
 
-       pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir);
+       pe = proc_create(t->tsk->comm, 0600, pg_proc_dir, &pktgen_thread_fops);
        if (!pe) {
                printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
                       PG_PROC_DIR, t->tsk->comm);
@@ -3638,7 +3638,6 @@ static int __init pktgen_create_thread(int cpu)
                return -EINVAL;
        }
 
-       pe->proc_fops = &pktgen_thread_fops;
        pe->data = t;
 
        wake_up_process(p);
@@ -3709,7 +3708,7 @@ static int __init pg_init(void)
                return -ENODEV;
        pg_proc_dir->owner = THIS_MODULE;
 
-       pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
+       pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
        if (pe == NULL) {
                printk(KERN_ERR "pktgen: ERROR: cannot create %s "
                       "procfs entry.\n", PGCTRL);
@@ -3717,7 +3716,6 @@ static int __init pg_init(void)
                return -EINVAL;
        }
 
-       pe->proc_fops = &pktgen_fops;
        pe->data = NULL;
 
        /* Register us to receive netdevice events */
index 19880b086e712f4e29932debfdc0b06714fef6e7..9c7e5ffb223dfcc10f3873a48ce99d6f87fe5386 100644 (file)
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -343,7 +343,7 @@ config INET_ESP
        tristate "IP: ESP transformation"
        select XFRM
        select CRYPTO
-       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
        select CRYPTO_HMAC
        select CRYPTO_MD5
        select CRYPTO_CBC
index f282b26f63eb7c20d0b456e104b1a8772b009bb8..87490f7bb0f72a47db2a3e8a28ce3816cd236032 100644 (file)
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -752,6 +752,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
                        inet_del_ifa(in_dev, ifap, 0);
                        ifa->ifa_broadcast = 0;
                        ifa->ifa_anycast = 0;
+                       ifa->ifa_scope = 0;
                }
 
                ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
index 906cb1ada4c35db7ef7281c7dd69907854b68169..e7821ba7a9a05886d49f7078244371ab321461bd 100644 (file)
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -266,20 +266,24 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
        if (!dev)
          return NULL;
 
+       if (strchr(name, '%')) {
+               if (dev_alloc_name(dev, name) < 0)
+                       goto failed_free;
+       }
+
        dev->init = ipgre_tunnel_init;
        nt = netdev_priv(dev);
        nt->parms = *parms;
 
-       if (register_netdevice(dev) < 0) {
-               free_netdev(dev);
-               goto failed;
-       }
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
 
        dev_hold(dev);
        ipgre_tunnel_link(nt);
        return nt;
 
-failed:
+failed_free:
+       free_netdev(dev);
        return NULL;
 }
 
index ae1f45fc23b966869eb7a3b0bf1dbb06712cd7c8..58b60b2fb01175fb4c02fb72598619dfb38df716 100644 (file)
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -108,8 +108,11 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
        const int cpu = get_cpu();
        u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
        struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-       int err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
+       int err;
 
+       local_bh_disable();
+       err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
+       local_bh_enable();
        if (err)
                goto out;
 
index 10013ccee8dd92c0939b2ef1e06678ffc272efe1..5dd938579eeb62187ddee9ebce6f6eb2e9d7602a 100644 (file)
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -753,9 +753,9 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
                printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name);
                b->htype = dev->type; /* can cause undefined behavior */
        }
+
+       /* server_ip and your_ip address are both already zero per RFC2131 */
        b->hlen = dev->addr_len;
-       b->your_ip = NONE;
-       b->server_ip = NONE;
        memcpy(b->hw_addr, dev->dev_addr, dev->addr_len);
        b->secs = htons(jiffies_diff / HZ);
        b->xid = d->xid;
index e77e3b8558340d9f60f2f42a6a9eb7bc8e8720bb..dbaed69de06a8f1a52b4034c8531118bd9824d74 100644 (file)
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -228,20 +228,24 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
        if (dev == NULL)
                return NULL;
 
+       if (strchr(name, '%')) {
+               if (dev_alloc_name(dev, name) < 0)
+                       goto failed_free;
+       }
+
        nt = netdev_priv(dev);
        dev->init = ipip_tunnel_init;
        nt->parms = *parms;
 
-       if (register_netdevice(dev) < 0) {
-               free_netdev(dev);
-               goto failed;
-       }
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
 
        dev_hold(dev);
        ipip_tunnel_link(nt);
        return nt;
 
-failed:
+failed_free:
+       free_netdev(dev);
        return NULL;
 }
 
index 525787b52b72f03f87e77c0f785623083fca8b2b..7b5e8e1d94be2eb19764880c99597854f34657f3 100644 (file)
@@ -542,12 +542,11 @@ static __init int ip_rt_proc_init(struct net *net)
        if (!pde)
                goto err1;
 
-       pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat);
+       pde = proc_create("rt_cache", S_IRUGO,
+                         net->proc_net_stat, &rt_cpu_seq_fops);
        if (!pde)
                goto err2;
 
-       pde->proc_fops = &rt_cpu_seq_fops;
-
 #ifdef CONFIG_NET_CLS_ROUTE
        pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
                        ip_rt_acct_read, NULL);
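
The ip_rt_proc_init() change above is one instance of a conversion repeated
throughout this merge (pktgen, snmp6, ipx, irda, llc, sctp, sunrpc, wanrouter,
x25, and pfkey via the proc_net_fops_create() wrapper): the old two-step of
create_proc_entry() followed by a ->proc_fops assignment becomes a single
proc_create() call, which closes the window in which the entry is visible with
no file operations attached. A small module sketch of old versus new, using a
made-up /proc/example entry:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "hello from /proc/example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_fops = {
            .owner   = THIS_MODULE,
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init example_init(void)
    {
            /* Old style, with a window where the entry has no fops:
             *     pe = create_proc_entry("example", S_IRUGO, NULL);
             *     if (!pe)
             *             return -ENOMEM;
             *     pe->proc_fops = &example_fops;
             * New style creates the entry with the fops already in place. */
            if (!proc_create("example", S_IRUGO, NULL, &example_fops))
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            remove_proc_entry("example", NULL);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
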
index 5212ed9b0c98d701169074f83898cd143e9315c6..7eb7636db0d0e0f0b0675d17de4b4a17db32d4b3 100644 (file)
@@ -1,12 +1,13 @@
 /*
  * Binary Increase Congestion control for TCP
- *
+ * Home page:
+ *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
  * This is from the implementation of BICTCP in
  * Lison-Xu, Kahaled Harfoush, and Injong Rhee.
  *  "Binary Increase Congestion Control for Fast, Long Distance
  *  Networks" in InfoComm 2004
  * Available from:
- *  http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf
+ *  http://netsrv.csc.ncsu.edu/export/bitcp.pdf
  *
  * Unless BIC is enabled and congestion window is large
  * this behaves the same as the original Reno.
index 19c449f62672d7cf41f03a65f6d6dbc0cc3b3c04..7facdb0f69608be661e409c2ead641f5d0fb1bc8 100644 (file)
@@ -1367,7 +1367,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
  * a normal way
  */
 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
-                                       u32 skip_to_seq)
+                                       u32 skip_to_seq, int *fack_count)
 {
        tcp_for_write_queue_from(skb, sk) {
                if (skb == tcp_send_head(sk))
@@ -1375,6 +1375,8 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
 
                if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
                        break;
+
+               *fack_count += tcp_skb_pcount(skb);
        }
        return skb;
 }
@@ -1390,7 +1392,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                return skb;
 
        if (before(next_dup->start_seq, skip_to_seq)) {
-               skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
+               skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
                tcp_sacktag_walk(skb, sk, NULL,
                                 next_dup->start_seq, next_dup->end_seq,
                                 1, fack_count, reord, flag);
@@ -1537,7 +1539,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 
                        /* Head todo? */
                        if (before(start_seq, cache->start_seq)) {
-                               skb = tcp_sacktag_skip(skb, sk, start_seq);
+                               skb = tcp_sacktag_skip(skb, sk, start_seq,
+                                                      &fack_count);
                                skb = tcp_sacktag_walk(skb, sk, next_dup,
                                                       start_seq,
                                                       cache->start_seq,
@@ -1565,7 +1568,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                goto walk;
                        }
 
-                       skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
+                       skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
+                                              &fack_count);
                        /* Check overlap against next cached too (past this one already) */
                        cache++;
                        continue;
@@ -1577,7 +1581,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                break;
                        fack_count = tp->fackets_out;
                }
-               skb = tcp_sacktag_skip(skb, sk, start_seq);
+               skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
 
 walk:
                skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
index 3ffb0323668cc8fe40cba537589f061a68923a6c..58219dfffef8a499c4508181ccdd0cf13a8af49b 100644 (file)
@@ -85,7 +85,7 @@ config INET6_ESP
        depends on IPV6
        select XFRM
        select CRYPTO
-       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
        select CRYPTO_HMAC
        select CRYPTO_MD5
        select CRYPTO_CBC
index e40213db9e4c6355e665c8684e4549feca77614d..101e0e70ba276966a281b99601c17cfaf648f0c5 100644 (file)
@@ -1557,6 +1557,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                .fc_expires = expires,
                .fc_dst_len = plen,
                .fc_flags = RTF_UP | flags,
+               .fc_nlinfo.nl_net = &init_net,
        };
 
        ipv6_addr_copy(&cfg.fc_dst, pfx);
@@ -1583,6 +1584,7 @@ static void addrconf_add_mroute(struct net_device *dev)
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 8,
                .fc_flags = RTF_UP,
+               .fc_nlinfo.nl_net = &init_net,
        };
 
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
@@ -1599,6 +1601,7 @@ static void sit_route_add(struct net_device *dev)
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 96,
                .fc_flags = RTF_UP | RTF_NONEXTHOP,
+               .fc_nlinfo.nl_net = &init_net,
        };
 
        /* prefix length - 96 bits "::d.d.d.d" */
index 2a124e9a1b2d5b46bcd70c18364ce1e9775b5cbc..78f438880923b74e24dfd545b9d586b7a54fecd6 100644 (file)
@@ -238,17 +238,24 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
        if (dev == NULL)
                goto failed;
 
+       if (strchr(name, '%')) {
+               if (dev_alloc_name(dev, name) < 0)
+                       goto failed_free;
+       }
+
        t = netdev_priv(dev);
        dev->init = ip6_tnl_dev_init;
        t->parms = *p;
 
-       if ((err = register_netdevice(dev)) < 0) {
-               free_netdev(dev);
-               goto failed;
-       }
+       if ((err = register_netdevice(dev)) < 0)
+               goto failed_free;
+
        dev_hold(dev);
        ip6_tnl_link(t);
        return t;
+
+failed_free:
+       free_netdev(dev);
 failed:
        return NULL;
 }
index b90039593a7f0873c0d8af1b7c67144a2a34f531..e3dcfa2f436bade207d6b03711868cd30f5531bf 100644 (file)
@@ -146,7 +146,9 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
        scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
        tfm = *per_cpu_ptr(ipcd->tfms, cpu);
 
+       local_bh_disable();
        err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
+       local_bh_enable();
        if (err || (dlen + sizeof(*ipch)) >= plen) {
                put_cpu();
                goto out_ok;
index 35e502a72495f97efc72c7f386e25ca9d53f95e0..199ef379e5017e10d1e1da74cfb0d865e8266f1c 100644 (file)
@@ -217,12 +217,12 @@ int snmp6_register_dev(struct inet6_dev *idev)
        if (!proc_net_devsnmp6)
                return -ENOENT;
 
-       p = create_proc_entry(idev->dev->name, S_IRUGO, proc_net_devsnmp6);
+       p = proc_create(idev->dev->name, S_IRUGO,
+                       proc_net_devsnmp6, &snmp6_seq_fops);
        if (!p)
                return -ENOMEM;
 
        p->data = idev;
-       p->proc_fops = &snmp6_seq_fops;
 
        idev->stats.proc_dir_entry = p;
        return 0;
index 6e7b56ef44499c0bf5bf48af941f8b77181131df..e8b241cb60bc434a4240e0d955f0a1d168b39e07 100644 (file)
@@ -1719,6 +1719,8 @@ static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg,
        cfg->fc_src_len = rtmsg->rtmsg_src_len;
        cfg->fc_flags = rtmsg->rtmsg_flags;
 
+       cfg->fc_nlinfo.nl_net = &init_net;
+
        ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
        ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
        ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
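
The addrconf and route hunks above all plug the same hole: a struct
fib6_config built inside the kernel now records the network namespace it
belongs to in fc_nlinfo.nl_net, which the route insertion path dereferences;
at this point everything is still hard-wired to init_net. A sketch of the
pattern, modelled on the addrconf_add_mroute() hunk above (the helper name is
made up):

    #include <linux/netdevice.h>
    #include <net/ip6_fib.h>
    #include <net/ip6_route.h>
    #include <net/ipv6.h>
    #include <net/net_namespace.h>

    static void example_add_mcast_route(struct net_device *dev)
    {
            struct fib6_config cfg = {
                    .fc_table       = RT6_TABLE_LOCAL,
                    .fc_metric      = IP6_RT_PRIO_ADDRCONF,
                    .fc_ifindex     = dev->ifindex,
                    .fc_dst_len     = 8,
                    .fc_flags       = RTF_UP,
                    /* The fib code looks up its tables and sends its
                     * notifications through this namespace pointer. */
                    .fc_nlinfo.nl_net = &init_net,
            };

            ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
            ip6_route_add(&cfg);
    }
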
index dde7801abeffb83240ac4460c773369843eaa384..1656c003b98958df7afbeaec66a3c7654d4d64ee 100644 (file)
@@ -171,6 +171,11 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
        if (dev == NULL)
                return NULL;
 
+       if (strchr(name, '%')) {
+               if (dev_alloc_name(dev, name) < 0)
+                       goto failed_free;
+       }
+
        nt = netdev_priv(dev);
        dev->init = ipip6_tunnel_init;
        nt->parms = *parms;
@@ -178,16 +183,16 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
        if (parms->i_flags & SIT_ISATAP)
                dev->priv_flags |= IFF_ISATAP;
 
-       if (register_netdevice(dev) < 0) {
-               free_netdev(dev);
-               goto failed;
-       }
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
 
        dev_hold(dev);
 
        ipip6_tunnel_link(nt);
        return nt;
 
+failed_free:
+       free_netdev(dev);
 failed:
        return NULL;
 }
index 408691b777c226c2d0468137d5839cd658f8523d..d6d3e68086f8d85052ad47d31319561c50fc6fb6 100644 (file)
@@ -101,9 +101,6 @@ static int ipv6_sysctl_net_init(struct net *net)
 
        net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
                                                           ipv6_table);
-       if (!net->ipv6.sysctl.table)
-               return -ENOMEM;
-
        if (!net->ipv6.sysctl.table)
                goto out_ipv6_icmp_table;
 
index d483a00dc427994abf293771391b28a4194c8b03..5ed97ad0e2e33e438cf482324b9b06d806a6d5fb 100644 (file)
@@ -358,22 +358,19 @@ int __init ipx_proc_init(void)
 
        if (!ipx_proc_dir)
                goto out;
-       p = create_proc_entry("interface", S_IRUGO, ipx_proc_dir);
+       p = proc_create("interface", S_IRUGO,
+                       ipx_proc_dir, &ipx_seq_interface_fops);
        if (!p)
                goto out_interface;
 
-       p->proc_fops = &ipx_seq_interface_fops;
-       p = create_proc_entry("route", S_IRUGO, ipx_proc_dir);
+       p = proc_create("route", S_IRUGO, ipx_proc_dir, &ipx_seq_route_fops);
        if (!p)
                goto out_route;
 
-       p->proc_fops = &ipx_seq_route_fops;
-       p = create_proc_entry("socket", S_IRUGO, ipx_proc_dir);
+       p = proc_create("socket", S_IRUGO, ipx_proc_dir, &ipx_seq_socket_fops);
        if (!p)
                goto out_socket;
 
-       p->proc_fops = &ipx_seq_socket_fops;
-
        rc = 0;
 out:
        return rc;
index b825399fc16002dc98a3983c8475462286072afc..6eef1f2a75535503f5c1562fb196bd56a3626c05 100644 (file)
@@ -76,9 +76,11 @@ static int __init ircomm_init(void)
 
 #ifdef CONFIG_PROC_FS
        { struct proc_dir_entry *ent;
-       ent = create_proc_entry("ircomm", 0, proc_irda);
-       if (ent)
-               ent->proc_fops = &ircomm_proc_fops;
+       ent = proc_create("ircomm", 0, proc_irda, &ircomm_proc_fops);
+       if (!ent) {
+               printk(KERN_ERR "ircomm_init: can't create /proc entry!\n");
+               return -ENODEV;
+       }
        }
 #endif /* CONFIG_PROC_FS */
 
index a4b56e25a91705abb9899559f7fb6ae07a535b52..1eb4bbcb1c9e9ef59aee2d8474275fac3dc79b20 100644 (file)
@@ -128,13 +128,11 @@ static int __init irlan_init(void)
 
 #ifdef CONFIG_PROC_FS
        { struct proc_dir_entry *proc;
-       proc = create_proc_entry("irlan", 0, proc_irda);
+       proc = proc_create("irlan", 0, proc_irda, &irlan_fops);
        if (!proc) {
                printk(KERN_ERR "irlan_init: can't create /proc entry!\n");
                return -ENODEV;
        }
-
-       proc->proc_fops = &irlan_fops;
        }
 #endif /* CONFIG_PROC_FS */
 
index cae24fbda966f654d3817dcb38077b334ddd0b1a..88e80a312732a9ed18e7e1d511d79ac6f0f9d682 100644 (file)
@@ -72,11 +72,9 @@ void __init irda_proc_register(void)
                return;
        proc_irda->owner = THIS_MODULE;
 
-       for (i=0; i<ARRAY_SIZE(irda_dirs); i++) {
-               d = create_proc_entry(irda_dirs[i].name, 0, proc_irda);
-               if (d)
-                       d->proc_fops = irda_dirs[i].fops;
-       }
+       for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
+               d = proc_create(irda_dirs[i].name, 0, proc_irda,
+                               irda_dirs[i].fops);
 }
 
 /*
index 2753b0c448f374f3f27c26f991e97d94c43459ad..d764f4c1b7e4eac68962e3bb95b37df2d57ece97 100644 (file)
@@ -621,7 +621,6 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
        return iucv_call_b2f0(IUCV_SEVER, parm);
 }
 
-#ifdef CONFIG_SMP
 /**
  * __iucv_cleanup_queue
  * @dummy: unused dummy argument
@@ -632,7 +631,6 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 static void __iucv_cleanup_queue(void *dummy)
 {
 }
-#endif
 
 /**
  * iucv_cleanup_queue
index 1c853927810add38d862838c702de91bb687e4f0..8b5f486ac80f298fe6034a2a8b576c3c2ad9ea83 100644 (file)
@@ -3807,17 +3807,16 @@ static int pfkey_init_proc(void)
 {
        struct proc_dir_entry *e;
 
-       e = create_proc_entry("pfkey", 0, init_net.proc_net);
+       e = proc_net_fops_create(&init_net, "pfkey", 0, &pfkey_proc_ops);
        if (e == NULL)
                return -ENOMEM;
 
-       e->proc_fops = &pfkey_proc_ops;
        return 0;
 }
 
 static void pfkey_exit_proc(void)
 {
-       remove_proc_entry("net/pfkey", NULL);
+       proc_net_remove(&init_net, "pfkey");
 }
 #else
 static inline int pfkey_init_proc(void)
index cb34bc0518e80acf384e10cdcdf58968ab61b215..48212c0a961ca2f5f73d77a239203c4a206562dc 100644 (file)
@@ -239,18 +239,14 @@ int __init llc_proc_init(void)
                goto out;
        llc_proc_dir->owner = THIS_MODULE;
 
-       p = create_proc_entry("socket", S_IRUGO, llc_proc_dir);
+       p = proc_create("socket", S_IRUGO, llc_proc_dir, &llc_seq_socket_fops);
        if (!p)
                goto out_socket;
 
-       p->proc_fops = &llc_seq_socket_fops;
-
-       p = create_proc_entry("core", S_IRUGO, llc_proc_dir);
+       p = proc_create("core", S_IRUGO, llc_proc_dir, &llc_seq_core_fops);
        if (!p)
                goto out_core;
 
-       p->proc_fops = &llc_seq_core_fops;
-
        rc = 0;
 out:
        return rc;
index 2019b4f0528d8f063e94e3bdcc6450b854765e60..9aeed5320228bfd0e1df34d600c0c725498be7f9 100644 (file)
@@ -1116,9 +1116,10 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
        /* prepare reordering buffer */
        tid_agg_rx->reorder_buf =
                kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
-       if ((!tid_agg_rx->reorder_buf) && net_ratelimit()) {
-               printk(KERN_ERR "can not allocate reordering buffer "
-                                               "to tid %d\n", tid);
+       if (!tid_agg_rx->reorder_buf) {
+               if (net_ratelimit())
+                       printk(KERN_ERR "can not allocate reordering buffer "
+                              "to tid %d\n", tid);
                goto end;
        }
        memset(tid_agg_rx->reorder_buf, 0,
index c339571632b2f102fdc8a81a8416fb71826ccf98..3b77410588e734258706245c8057d741a7916e8d 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005, Devicescape Software, Inc.
  * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
- * Copyright 2007, Stefano Brivio <stefano.brivio@polimi.it>
+ * Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * RC_PID_ARITH_SHIFT.
  */
 
-
-/* Shift the adjustment so that we won't switch to a lower rate if it exhibited
- * a worse failed frames behaviour and we'll choose the highest rate whose
- * failed frames behaviour is not worse than the one of the original rate
- * target. While at it, check that the adjustment is within the ranges. Then,
- * provide the new rate index. */
-static int rate_control_pid_shift_adjust(struct rc_pid_rateinfo *r,
-                                        int adj, int cur, int l)
-{
-       int i, j, k, tmp;
-
-       j = r[cur].rev_index;
-       i = j + adj;
-
-       if (i < 0)
-               return r[0].index;
-       if (i >= l - 1)
-               return r[l - 1].index;
-
-       tmp = i;
-
-       if (adj < 0) {
-               for (k = j; k >= i; k--)
-                       if (r[k].diff <= r[j].diff)
-                               tmp = k;
-       } else {
-               for (k = i + 1; k + i < l; k++)
-                       if (r[k].diff <= r[i].diff)
-                               tmp = k;
-       }
-
-       return r[tmp].index;
-}
-
+/* Adjust the rate while ensuring that we won't switch to a lower rate if it
+ * exhibited a worse failed frames behaviour and we'll choose the highest rate
+ * whose failed frames behaviour is not worse than the one of the original rate
+ * target. While at it, check that the new rate is valid. */
 static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
                                         struct sta_info *sta, int adj,
                                         struct rc_pid_rateinfo *rinfo)
 {
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_hw_mode *mode;
-       int newidx;
-       int maxrate;
-       int back = (adj > 0) ? 1 : -1;
+       int cur_sorted, new_sorted, probe, tmp, n_bitrates;
+       int cur = sta->txrate;
 
        sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
 
        mode = local->oper_hw_mode;
-       maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1;
+       n_bitrates = mode->num_rates;
 
-       newidx = rate_control_pid_shift_adjust(rinfo, adj, sta->txrate,
-                                              mode->num_rates);
+       /* Map passed arguments to sorted values. */
+       cur_sorted = rinfo[cur].rev_index;
+       new_sorted = cur_sorted + adj;
 
-       while (newidx != sta->txrate) {
-               if (rate_supported(sta, mode, newidx) &&
-                   (maxrate < 0 || newidx <= maxrate)) {
-                       sta->txrate = newidx;
-                       break;
-               }
+       /* Check limits. */
+       if (new_sorted < 0)
+               new_sorted = rinfo[0].rev_index;
+       else if (new_sorted >= n_bitrates)
+               new_sorted = rinfo[n_bitrates - 1].rev_index;
 
-               newidx += back;
+       tmp = new_sorted;
+
+       if (adj < 0) {
+               /* Ensure that the rate decrease isn't disadvantageous. */
+               for (probe = cur_sorted; probe >= new_sorted; probe--)
+                       if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
+                           rate_supported(sta, mode, rinfo[probe].index))
+                               tmp = probe;
+       } else {
+               /* Look for rate increase with zero (or below) cost. */
+               for (probe = new_sorted + 1; probe < n_bitrates; probe++)
+                       if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
+                           rate_supported(sta, mode, rinfo[probe].index))
+                               tmp = probe;
        }
 
+       /* Fit the rate found to the nearest supported rate. */
+       do {
+               if (rate_supported(sta, mode, rinfo[tmp].index)) {
+                       sta->txrate = rinfo[tmp].index;
+                       break;
+               }
+               if (adj < 0)
+                       tmp--;
+               else
+                       tmp++;
+       } while (tmp < n_bitrates && tmp >= 0);
+
 #ifdef CONFIG_MAC80211_DEBUGFS
        rate_control_pid_event_rate_change(
                &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events,
-               newidx, mode->rates[newidx].rate);
+               cur, mode->rates[cur].rate);
 #endif
 }
 
index 327e847d2702d8e647ca656b5e89db6ba2ca74fd..b77eb56a87e33a7ec85bd351e5b6d512922ebbc3 100644 (file)
@@ -256,13 +256,19 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);
 
+       /* Disable BHs the entire time since we normally need to disable them
+        * at least once for the stats anyway.
+        */
+       local_bh_disable();
        hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
+                       local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }
+       local_bh_enable();
 
        return NULL;
 }
@@ -400,17 +406,20 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);
 
-       rcu_read_lock();
+       /* Disable BHs the entire time since we need to disable them at
+        * least once for the stats anyway.
+        */
+       rcu_read_lock_bh();
        hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
-                       rcu_read_unlock();
+                       rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(searched);
        }
-       rcu_read_unlock();
+       rcu_read_unlock_bh();
 
        return 0;
 }
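
Both nf_conntrack hunks above swap plain RCU protection for BH-disabling
variants: local_bh_disable() around the walk in __nf_conntrack_find() and
rcu_read_lock_bh() in nf_conntrack_tuple_taken(). As the added comments say,
the per-CPU NF_CT_STAT_INC counters have to be updated with bottom halves off
anyway, so disabling BHs once around the whole hash walk is cheaper than
toggling them per increment, and the _bh variants still give the RCU read-side
critical section the lookup needs. A rough sketch with a hypothetical hash
table and per-CPU counter:

    #include <linux/list.h>
    #include <linux/percpu.h>
    #include <linux/rcupdate.h>

    struct example_entry {
            struct hlist_node node;
            int key;
    };

    static struct hlist_head example_hash[256];
    static DEFINE_PER_CPU(unsigned long, example_searched);

    static struct example_entry *example_find(int key)
    {
            struct example_entry *e;
            struct hlist_node *n;

            /* BH-disabling read lock: covers the RCU hash walk and makes
             * the per-CPU counter update safe against softirqs. */
            rcu_read_lock_bh();
            hlist_for_each_entry_rcu(e, n, &example_hash[key & 255], node) {
                    __get_cpu_var(example_searched)++;
                    if (e->key == key) {
                            rcu_read_unlock_bh();
                            return e;
                    }
            }
            rcu_read_unlock_bh();
            return NULL;
    }
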
index 85330856a29c52065771c413737f958639d7cd20..0c50b289405537348d4004108e7559169ff9031e 100644 (file)
@@ -122,7 +122,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr,
                   const union nf_inet_addr *umask, unsigned int l3proto)
 {
        if (l3proto == AF_INET)
-               return (kaddr->ip & umask->ip) == uaddr->ip;
+               return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
        else if (l3proto == AF_INET6)
                return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
                       &uaddr->in6) == 0;
@@ -231,7 +231,7 @@ conntrack_mt(const struct sk_buff *skb, const struct net_device *in,
                        if (test_bit(IPS_DST_NAT_BIT, &ct->status))
                                statebit |= XT_CONNTRACK_STATE_DNAT;
                }
-               if ((info->state_mask & statebit) ^
+               if (!!(info->state_mask & statebit) ^
                    !(info->invert_flags & XT_CONNTRACK_STATE))
                        return false;
        }
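
Two separate fixes sit in the xt_conntrack hunks above. The IPv4 address test
used to mask only the kernel-side address and compare it against the unmasked
user-supplied one, so any bits outside the mask in the user value broke the
match; XOR-ing the addresses first and masking the difference ignores those
bits. Separately, in conntrack_mt(), (info->state_mask & statebit) can be any
nonzero value, so XOR-ing it directly against the 0/1 result of !(...) gave
the wrong answer; the added !! collapses it to 0 or 1 first. The masked
compare is easy to demonstrate in plain userspace C:

    #include <stdint.h>
    #include <stdio.h>

    /* Broken: only the kernel-side address is masked, so unmasked bits in
     * the user-supplied address make the comparison fail. */
    static int match_wrong(uint32_t kaddr, uint32_t uaddr, uint32_t mask)
    {
            return (kaddr & mask) == uaddr;
    }

    /* Fixed: compute the difference first, then ignore everything that
     * lies outside the mask. */
    static int match_fixed(uint32_t kaddr, uint32_t uaddr, uint32_t mask)
    {
            return ((kaddr ^ uaddr) & mask) == 0;
    }

    int main(void)
    {
            uint32_t kaddr = 0x0a000001;    /* 10.0.0.1   */
            uint32_t uaddr = 0x0a0000ff;    /* 10.0.0.255 */
            uint32_t mask  = 0xffffff00;    /* /24        */

            /* Prints "wrong: 0 fixed: 1": both are in 10.0.0.0/24, but the
             * old test misses because uaddr has bits below the mask set. */
            printf("wrong: %d fixed: %d\n",
                   match_wrong(kaddr, uaddr, mask),
                   match_fixed(kaddr, uaddr, mask));
            return 0;
    }
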
index 8bb79f281774b690aec0f42973889de07e7d7fcd..675a5c3e68a6f82806b4b8d97411af24774d87fe 100644 (file)
@@ -838,11 +838,11 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
        }
 
        /* Create a new key data based on the info passed in */
-       key = sctp_auth_create_key(auth_key->sca_keylen, GFP_KERNEL);
+       key = sctp_auth_create_key(auth_key->sca_keylength, GFP_KERNEL);
        if (!key)
                goto nomem;
 
-       memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylen);
+       memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
 
        /* If we are replacing, remove the old keys data from the
         * key id.  If we are adding new key id, add it to the
index 4d7ec961ae1da98c919fdb1927be5b297f94627c..87f940587d5fe8d3cd72ff33cf0a995498e4c093 100644 (file)
@@ -966,7 +966,7 @@ static struct inet6_protocol sctpv6_protocol = {
        .flags        = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
 };
 
-static struct sctp_af sctp_ipv6_specific = {
+static struct sctp_af sctp_af_inet6 = {
        .sa_family         = AF_INET6,
        .sctp_xmit         = sctp_v6_xmit,
        .setsockopt        = ipv6_setsockopt,
@@ -998,7 +998,7 @@ static struct sctp_af sctp_ipv6_specific = {
 #endif
 };
 
-static struct sctp_pf sctp_pf_inet6_specific = {
+static struct sctp_pf sctp_pf_inet6 = {
        .event_msgname = sctp_inet6_event_msgname,
        .skb_msgname   = sctp_inet6_skb_msgname,
        .af_supported  = sctp_inet6_af_supported,
@@ -1008,7 +1008,7 @@ static struct sctp_pf sctp_pf_inet6_specific = {
        .supported_addrs = sctp_inet6_supported_addrs,
        .create_accept_sk = sctp_v6_create_accept_sk,
        .addr_v4map    = sctp_v6_addr_v4map,
-       .af            = &sctp_ipv6_specific,
+       .af            = &sctp_af_inet6,
 };
 
 /* Initialize IPv6 support and register with socket layer.  */
@@ -1017,10 +1017,10 @@ int sctp_v6_init(void)
        int rc;
 
        /* Register the SCTP specific PF_INET6 functions. */
-       sctp_register_pf(&sctp_pf_inet6_specific, PF_INET6);
+       sctp_register_pf(&sctp_pf_inet6, PF_INET6);
 
        /* Register the SCTP specific AF_INET6 functions. */
-       sctp_register_af(&sctp_ipv6_specific);
+       sctp_register_af(&sctp_af_inet6);
 
        rc = proto_register(&sctpv6_prot, 1);
        if (rc)
@@ -1051,7 +1051,7 @@ void sctp_v6_exit(void)
        inet6_unregister_protosw(&sctpv6_seqpacket_protosw);
        inet6_unregister_protosw(&sctpv6_stream_protosw);
        proto_unregister(&sctpv6_prot);
-       list_del(&sctp_ipv6_specific.list);
+       list_del(&sctp_af_inet6.list);
 }
 
 /* Unregister with inet6 layer. */
index 14e294e3762665759c514846334551d82a1e9ddf..cfeb07ea1b046a814592207e1da3e6b3e387f498 100644 (file)
@@ -132,12 +132,11 @@ void sctp_dbg_objcnt_init(void)
 {
        struct proc_dir_entry *ent;
 
-       ent = create_proc_entry("sctp_dbg_objcnt", 0, proc_net_sctp);
+       ent = proc_create("sctp_dbg_objcnt", 0,
+                         proc_net_sctp, &sctp_objcnt_ops);
        if (!ent)
                printk(KERN_WARNING
                        "sctp_dbg_objcnt: Unable to create /proc entry.\n");
-       else
-               ent->proc_fops = &sctp_objcnt_ops;
 }
 
 /* Cleanup the objcount entry in the proc filesystem.  */
index 69bb5a63fd8bc3774817e67f2e80d3857f87768d..973f1dbc2ec3a8555542601cf900abb4b22d022a 100644 (file)
@@ -108,12 +108,10 @@ int __init sctp_snmp_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = create_proc_entry("snmp", S_IRUGO, proc_net_sctp);
+       p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops);
        if (!p)
                return -ENOMEM;
 
-       p->proc_fops = &sctp_snmp_seq_fops;
-
        return 0;
 }
 
@@ -258,12 +256,10 @@ int __init sctp_eps_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = create_proc_entry("eps", S_IRUGO, proc_net_sctp);
+       p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
        if (!p)
                return -ENOMEM;
 
-       p->proc_fops = &sctp_eps_seq_fops;
-
        return 0;
 }
 
@@ -369,12 +365,11 @@ int __init sctp_assocs_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = create_proc_entry("assocs", S_IRUGO, proc_net_sctp);
+       p = proc_create("assocs", S_IRUGO, proc_net_sctp,
+                       &sctp_assocs_seq_fops);
        if (!p)
                return -ENOMEM;
 
-       p->proc_fops = &sctp_assocs_seq_fops;
-
        return 0;
 }
 
index 22a16571499c3bc82902941d96f2bdbdfb00eca7..688546dccd828f0821736a301535049cc3d8ca04 100644 (file)
@@ -832,7 +832,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
        return ip_queue_xmit(skb, ipfragok);
 }
 
-static struct sctp_af sctp_ipv4_specific;
+static struct sctp_af sctp_af_inet;
 
 static struct sctp_pf sctp_pf_inet = {
        .event_msgname = sctp_inet_event_msgname,
@@ -844,7 +844,7 @@ static struct sctp_pf sctp_pf_inet = {
        .supported_addrs = sctp_inet_supported_addrs,
        .create_accept_sk = sctp_v4_create_accept_sk,
        .addr_v4map     = sctp_v4_addr_v4map,
-       .af            = &sctp_ipv4_specific,
+       .af            = &sctp_af_inet
 };
 
 /* Notifier for inetaddr addition/deletion events.  */
@@ -906,7 +906,7 @@ static struct net_protocol sctp_protocol = {
 };
 
 /* IPv4 address related functions.  */
-static struct sctp_af sctp_ipv4_specific = {
+static struct sctp_af sctp_af_inet = {
        .sa_family         = AF_INET,
        .sctp_xmit         = sctp_v4_xmit,
        .setsockopt        = ip_setsockopt,
@@ -1192,7 +1192,7 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_sysctl_register();
 
        INIT_LIST_HEAD(&sctp_address_families);
-       sctp_register_af(&sctp_ipv4_specific);
+       sctp_register_af(&sctp_af_inet);
 
        status = proto_register(&sctp_prot, 1);
        if (status)
@@ -1249,7 +1249,7 @@ err_v6_init:
        proto_unregister(&sctp_prot);
 err_proto_register:
        sctp_sysctl_unregister();
-       list_del(&sctp_ipv4_specific.list);
+       list_del(&sctp_af_inet.list);
        free_pages((unsigned long)sctp_port_hashtable,
                   get_order(sctp_port_hashsize *
                             sizeof(struct sctp_bind_hashbucket)));
@@ -1299,7 +1299,7 @@ SCTP_STATIC __exit void sctp_exit(void)
        inet_unregister_protosw(&sctp_seqpacket_protosw);
 
        sctp_sysctl_unregister();
-       list_del(&sctp_ipv4_specific.list);
+       list_del(&sctp_af_inet.list);
 
        free_pages((unsigned long)sctp_assoc_hashtable,
                   get_order(sctp_assoc_hashsize *
index 44797ad88a05b9527e48f6fca47765308829e083..939892691a260e46f5901581123c723c007d0f5c 100644 (file)
@@ -1964,7 +1964,7 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
                                        int optlen)
 {
-       if (optlen != sizeof(struct sctp_event_subscribe))
+       if (optlen > sizeof(struct sctp_event_subscribe))
                return -EINVAL;
        if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
                return -EFAULT;
@@ -5070,6 +5070,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        struct sctp_authchunks val;
        struct sctp_association *asoc;
        struct sctp_chunks_param *ch;
+       u32    num_chunks;
        char __user *to;
 
        if (len <= sizeof(struct sctp_authchunks))
@@ -5086,12 +5087,15 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        ch = asoc->peer.peer_chunks;
 
        /* See if the user provided enough room for all the data */
-       if (len < ntohs(ch->param_hdr.length))
+       num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
+       if (len < num_chunks)
                return -EINVAL;
 
-       len = ntohs(ch->param_hdr.length);
+       len = num_chunks;
        if (put_user(len, optlen))
                return -EFAULT;
+       if (put_user(num_chunks, &p->gauth_number_of_chunks))
+               return -EFAULT;
        if (copy_to_user(to, ch->chunks, len))
                return -EFAULT;
 
@@ -5105,6 +5109,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        struct sctp_authchunks val;
        struct sctp_association *asoc;
        struct sctp_chunks_param *ch;
+       u32    num_chunks;
        char __user *to;
 
        if (len <= sizeof(struct sctp_authchunks))
@@ -5123,12 +5128,15 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        else
                ch = sctp_sk(sk)->ep->auth_chunk_list;
 
-       if (len < ntohs(ch->param_hdr.length))
+       num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
+       if (len < num_chunks)
                return -EINVAL;
 
-       len = ntohs(ch->param_hdr.length);
+       len = num_chunks;
        if (put_user(len, optlen))
                return -EFAULT;
+       if (put_user(num_chunks, &p->gauth_number_of_chunks))
+               return -EFAULT;
        if (copy_to_user(to, ch->chunks, len))
                return -EFAULT;
 
index e27b11f18b7f2a32935e9de3a24dd8a4030e7b91..b43f1f110f8738069c59f425bd9e40a8e81ecbee 100644 (file)
@@ -206,7 +206,7 @@ struct sctp_ulpevent  *sctp_ulpevent_make_assoc_change(
         * This field is the total length of the notification data, including
         * the notification header.
         */
-       sac->sac_length = sizeof(struct sctp_assoc_change);
+       sac->sac_length = skb->len;
 
        /* Socket Extensions for SCTP
         * 5.3.1.1 SCTP_ASSOC_CHANGE
index 636c8e04e0bebb0fff9b33e34a23bd8e40941dfc..b5f2786251b95368f4bccda69290683e1c3f0b28 100644 (file)
@@ -316,31 +316,29 @@ static int create_cache_proc_entries(struct cache_detail *cd)
        cd->proc_ent->owner = cd->owner;
        cd->channel_ent = cd->content_ent = NULL;
 
-       p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
+       p = proc_create("flush", S_IFREG|S_IRUSR|S_IWUSR,
+                       cd->proc_ent, &cache_flush_operations);
        cd->flush_ent = p;
        if (p == NULL)
                goto out_nomem;
-       p->proc_fops = &cache_flush_operations;
        p->owner = cd->owner;
        p->data = cd;
 
        if (cd->cache_request || cd->cache_parse) {
-               p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
-                                     cd->proc_ent);
+               p = proc_create("channel", S_IFREG|S_IRUSR|S_IWUSR,
+                               cd->proc_ent, &cache_file_operations);
                cd->channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
-               p->proc_fops = &cache_file_operations;
                p->owner = cd->owner;
                p->data = cd;
        }
        if (cd->cache_show) {
-               p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
-                                     cd->proc_ent);
+               p = proc_create("content", S_IFREG|S_IRUSR|S_IWUSR,
+                               cd->proc_ent, &content_file_operations);
                cd->content_ent = p;
                if (p == NULL)
                        goto out_nomem;
-               p->proc_fops = &content_file_operations;
                p->owner = cd->owner;
                p->data = cd;
        }
index 5a16875f5ac8b0686ccdf5e9448dac850aab7064..c6061a4346c8ea73c94edd6d99f3e55e28d134c2 100644 (file)
@@ -229,9 +229,8 @@ do_register(const char *name, void *data, const struct file_operations *fops)
        rpc_proc_init();
        dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
 
-       ent = create_proc_entry(name, 0, proc_net_rpc);
+       ent = proc_create(name, 0, proc_net_rpc, fops);
        if (ent) {
-               ent->proc_fops = fops;
                ent->data = data;
        }
        return ent;
index 95b373913aa0dd21ae33a73dc730b92fd2e5c2fa..4bb3404f610b4c7f96cb74666d894efc2e2aae57 100644 (file)
@@ -142,7 +142,7 @@ void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
                max_n_num = tipc_highest_allowed_slave;
        assert(n_num > 0);
        assert(n_num <= max_n_num);
-       assert(c_ptr->nodes[n_num] == 0);
+       assert(c_ptr->nodes[n_num] == NULL);
        c_ptr->nodes[n_num] = n_ptr;
        if (n_num > c_ptr->highest_node)
                c_ptr->highest_node = n_num;
index 1b17fecee74784cd2a2f701a374eb0769607fb29..cefa99824c58a1a1f061db74b6491ec09805d7cf 100644 (file)
@@ -3251,7 +3251,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
                if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
                         msg_seqno(buf_msg(l_ptr->first_out)))
                     != (l_ptr->out_queue_size - 1))
-                   || (l_ptr->last_out->next != 0)) {
+                   || (l_ptr->last_out->next != NULL)) {
                        tipc_printf(buf, "\nSend queue inconsistency\n");
                        tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
                        tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
index 6704a58c785162e40989711bd061c44e6b8f25e8..c38744c96ed144509b34a4751c8143cfdbd149e1 100644 (file)
@@ -148,7 +148,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
                reference = (next_plus_upper & ~index_mask) + index;
                entry->data.reference = reference;
                entry->object = object;
-               if (lock != 0)
+               if (lock != NULL)
                        *lock = &entry->lock;
                spin_unlock_bh(&entry->lock);
        }
index 114e173f11a5fbbb6b459219984778fe44ebd2d8..3506f856344162669d43a7b63ffebb6b04da0807 100644 (file)
@@ -82,7 +82,7 @@ void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
 
        assert(c_ptr->addr);
        assert(c_num <= tipc_max_clusters);
-       assert(z_ptr->clusters[c_num] == 0);
+       assert(z_ptr->clusters[c_num] == NULL);
        z_ptr->clusters[c_num] = c_ptr;
 }
 
index f2e54c3f064ebb64360339d9b68d5137df570124..5bebe40bf4e68584d7531eca76e89957381d2adb 100644 (file)
@@ -292,14 +292,12 @@ int __init wanrouter_proc_init(void)
        if (!proc_router)
                goto fail;
 
-       p = create_proc_entry("config", S_IRUGO, proc_router);
+       p = proc_create("config", S_IRUGO, proc_router, &config_fops);
        if (!p)
                goto fail_config;
-       p->proc_fops = &config_fops;
-       p = create_proc_entry("status", S_IRUGO, proc_router);
+       p = proc_create("status", S_IRUGO, proc_router, &status_fops);
        if (!p)
                goto fail_stat;
-       p->proc_fops = &status_fops;
        return 0;
 fail_stat:
        remove_proc_entry("config", proc_router);
@@ -329,10 +327,10 @@ int wanrouter_proc_add(struct wan_device* wandev)
        if (wandev->magic != ROUTER_MAGIC)
                return -EINVAL;
 
-       wandev->dent = create_proc_entry(wandev->name, S_IRUGO, proc_router);
+       wandev->dent = proc_create(wandev->name, S_IRUGO,
+                                  proc_router, &wandev_fops);
        if (!wandev->dent)
                return -ENOMEM;
-       wandev->dent->proc_fops = &wandev_fops;
        wandev->dent->data      = wandev;
        return 0;
 }
index 3f52b09bed033fbb8f77e4b37342fd757183744d..1afa44d25beb8825e41ac5d263e826352b19a03e 100644 (file)
@@ -312,20 +312,18 @@ int __init x25_proc_init(void)
        if (!x25_proc_dir)
                goto out;
 
-       p = create_proc_entry("route", S_IRUGO, x25_proc_dir);
+       p = proc_create("route", S_IRUGO, x25_proc_dir, &x25_seq_route_fops);
        if (!p)
                goto out_route;
-       p->proc_fops = &x25_seq_route_fops;
 
-       p = create_proc_entry("socket", S_IRUGO, x25_proc_dir);
+       p = proc_create("socket", S_IRUGO, x25_proc_dir, &x25_seq_socket_fops);
        if (!p)
                goto out_socket;
-       p->proc_fops = &x25_seq_socket_fops;
 
-       p = create_proc_entry("forward", S_IRUGO, x25_proc_dir);
+       p = proc_create("forward", S_IRUGO, x25_proc_dir,
+                       &x25_seq_forward_fops);
        if (!p)
                goto out_forward;
-       p->proc_fops = &x25_seq_forward_fops;
        rc = 0;
 
 out:
index 74d97cc247872216fc179bf5fdce5b0b9c2cd671..e1fb471cc50182e5e907bef3caa566f8ede7e6d4 100644 (file)
@@ -22,5 +22,16 @@ config SAMPLE_KOBJECT
 
          If in doubt, say "N" here.
 
+config SAMPLE_KPROBES
+       tristate "Build kprobes examples -- loadable modules only"
+       depends on KPROBES && m
+       help
+         This builds several kprobes example modules.
+
+config SAMPLE_KRETPROBES
+       tristate "Build kretprobes example -- loadable modules only"
+       default m
+       depends on SAMPLE_KPROBES && KRETPROBES
+
 endif # SAMPLES
 
index 8652d0f268ad0fb25e24c65375828ec932df2e96..2e02575f779441bf1babe686f50f0b7fb1d3e03e 100644 (file)
@@ -1,3 +1,3 @@
 # Makefile for Linux samples code
 
-obj-$(CONFIG_SAMPLES)  += markers/ kobject/
+obj-$(CONFIG_SAMPLES)  += markers/ kobject/ kprobes/
diff --git a/samples/kprobes/Makefile b/samples/kprobes/Makefile
new file mode 100644 (file)
index 0000000..68739bc
--- /dev/null
@@ -0,0 +1,5 @@
+# builds the kprobes example kernel modules;
+# then to use one (as root):  insmod <module_name.ko>
+
+obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o jprobe_example.o
+obj-$(CONFIG_SAMPLE_KRETPROBES) += kretprobe_example.o
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
new file mode 100644 (file)
index 0000000..b754135
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Here's a sample kernel module showing the use of jprobes to dump
+ * the arguments of do_fork().
+ *
+ * For more information on theory of operation of jprobes, see
+ * Documentation/kprobes.txt
+ *
+ * Build and insert the kernel module as done in the kprobe example.
+ * You will see the trace data in /var/log/messages and on the
+ * console whenever do_fork() is invoked to create a new process.
+ * (Some messages may be suppressed if syslogd is configured to
+ * eliminate duplicate messages.)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+
+/*
+ * Jumper probe for do_fork.
+ * Mirror principle enables access to arguments of the probed routine
+ * from the probe handler.
+ */
+
+/* Proxy routine having the same arguments as actual do_fork() routine */
+static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+             struct pt_regs *regs, unsigned long stack_size,
+             int __user *parent_tidptr, int __user *child_tidptr)
+{
+       printk(KERN_INFO "jprobe: clone_flags = 0x%lx, stack_size = 0x%lx,"
+                       " regs = 0x%p\n",
+              clone_flags, stack_size, regs);
+
+       /* Always end with a call to jprobe_return(). */
+       jprobe_return();
+       return 0;
+}
+
+static struct jprobe my_jprobe = {
+       .entry                  = jdo_fork,
+       .kp = {
+               .symbol_name    = "do_fork",
+       },
+};
+
+static int __init jprobe_init(void)
+{
+       int ret;
+
+       ret = register_jprobe(&my_jprobe);
+       if (ret < 0) {
+               printk(KERN_INFO "register_jprobe failed, returned %d\n", ret);
+               return -1;
+       }
+       printk(KERN_INFO "Planted jprobe at %p, handler addr %p\n",
+              my_jprobe.kp.addr, my_jprobe.entry);
+       return 0;
+}
+
+static void __exit jprobe_exit(void)
+{
+       unregister_jprobe(&my_jprobe);
+       printk(KERN_INFO "jprobe at %p unregistered\n", my_jprobe.kp.addr);
+}
+
+module_init(jprobe_init)
+module_exit(jprobe_exit)
+MODULE_LICENSE("GPL");
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
new file mode 100644 (file)
index 0000000..a681998
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * NOTE: This example works on x86 and powerpc.
+ * Here's a sample kernel module showing the use of kprobes to dump a
+ * stack trace and selected registers when do_fork() is called.
+ *
+ * For more information on theory of operation of kprobes, see
+ * Documentation/kprobes.txt
+ *
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever do_fork() is invoked to create a new process.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+
+/* For each probe you need to allocate a kprobe structure */
+static struct kprobe kp = {
+       .symbol_name    = "do_fork",
+};
+
+/* kprobe pre_handler: called just before the probed instruction is executed */
+static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+#ifdef CONFIG_X86
+       printk(KERN_INFO "pre_handler: p->addr = 0x%p, ip = %lx,"
+                       " flags = 0x%lx\n",
+               p->addr, regs->ip, regs->flags);
+#endif
+#ifdef CONFIG_PPC
+       printk(KERN_INFO "pre_handler: p->addr = 0x%p, nip = 0x%lx,"
+                       " msr = 0x%lx\n",
+               p->addr, regs->nip, regs->msr);
+#endif
+
+       /* A dump_stack() here will give a stack backtrace */
+       return 0;
+}
+
+/* kprobe post_handler: called after the probed instruction is executed */
+static void handler_post(struct kprobe *p, struct pt_regs *regs,
+                               unsigned long flags)
+{
+#ifdef CONFIG_X86
+       printk(KERN_INFO "post_handler: p->addr = 0x%p, flags = 0x%lx\n",
+               p->addr, regs->flags);
+#endif
+#ifdef CONFIG_PPC
+       printk(KERN_INFO "post_handler: p->addr = 0x%p, msr = 0x%lx\n",
+               p->addr, regs->msr);
+#endif
+}
+
+/*
+ * fault_handler: this is called if an exception is generated for any
+ * instruction within the pre- or post-handler, or when Kprobes
+ * single-steps the probed instruction.
+ */
+static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+       printk(KERN_INFO "fault_handler: p->addr = 0x%p, trap #%d\n",
+               p->addr, trapnr);
+       /* Return 0 because we don't handle the fault. */
+       return 0;
+}
+
+static int __init kprobe_init(void)
+{
+       int ret;
+       kp.pre_handler = handler_pre;
+       kp.post_handler = handler_post;
+       kp.fault_handler = handler_fault;
+
+       ret = register_kprobe(&kp);
+       if (ret < 0) {
+               printk(KERN_INFO "register_kprobe failed, returned %d\n", ret);
+               return ret;
+       }
+       printk(KERN_INFO "Planted kprobe at %p\n", kp.addr);
+       return 0;
+}
+
+static void __exit kprobe_exit(void)
+{
+       unregister_kprobe(&kp);
+       printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr);
+}
+
+module_init(kprobe_init)
+module_exit(kprobe_exit)
+MODULE_LICENSE("GPL");
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
new file mode 100644 (file)
index 0000000..4e764b3
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * kretprobe_example.c
+ *
+ * Here's a sample kernel module showing the use of return probes to
+ * report the return value and total time taken for probed function
+ * to run.
+ *
+ * usage: insmod kretprobe_example.ko func=<func_name>
+ *
+ * If no func_name is specified, do_fork is instrumented
+ *
+ * For more information on theory of operation of kretprobes, see
+ * Documentation/kprobes.txt
+ *
+ * Build and insert the kernel module as done in the kprobe example.
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever the probed function returns. (Some messages may be suppressed
+ * if syslogd is configured to eliminate duplicate messages.)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+
+static char func_name[NAME_MAX] = "do_fork";
+module_param_string(func, func_name, NAME_MAX, S_IRUGO);
+MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
+                       " function's execution time");
+
+/* per-instance private data */
+struct my_data {
+       ktime_t entry_stamp;
+};
+
+/* Here we use the entry_handler to timestamp function entry */
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+       struct my_data *data;
+
+       if (!current->mm)
+               return 1;       /* Skip kernel threads */
+
+       data = (struct my_data *)ri->data;
+       data->entry_stamp = ktime_get();
+       return 0;
+}
+
+/*
+ * Return-probe handler: Log the return value and duration. Duration may turn
+ * out to be zero consistently, depending upon the granularity of time
+ * accounting on the platform.
+ */
+static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+       int retval = regs_return_value(regs);
+       struct my_data *data = (struct my_data *)ri->data;
+       s64 delta;
+       ktime_t now;
+
+       now = ktime_get();
+       delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
+       printk(KERN_INFO "%s returned %d and took %lld ns to execute\n",
+                       func_name, retval, (long long)delta);
+       return 0;
+}
+
+static struct kretprobe my_kretprobe = {
+       .handler                = ret_handler,
+       .entry_handler          = entry_handler,
+       .data_size              = sizeof(struct my_data),
+       /* Probe up to 20 instances concurrently. */
+       .maxactive              = 20,
+};
+
+static int __init kretprobe_init(void)
+{
+       int ret;
+
+       my_kretprobe.kp.symbol_name = func_name;
+       ret = register_kretprobe(&my_kretprobe);
+       if (ret < 0) {
+               printk(KERN_INFO "register_kretprobe failed, returned %d\n",
+                               ret);
+               return -1;
+       }
+       printk(KERN_INFO "Planted return probe at %s: %p\n",
+                       my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr);
+       return 0;
+}
+
+static void __exit kretprobe_exit(void)
+{
+       unregister_kretprobe(&my_kretprobe);
+       printk(KERN_INFO "kretprobe at %p unregistered\n",
+                       my_kretprobe.kp.addr);
+
+       /* nmissed > 0 suggests that maxactive was set too low. */
+       printk(KERN_INFO "Missed probing %d instances of %s\n",
+               my_kretprobe.nmissed, my_kretprobe.kp.symbol_name);
+}
+
+module_init(kretprobe_init)
+module_exit(kretprobe_exit)
+MODULE_LICENSE("GPL");
index 2086a856400a9c7258958b96fe0f3d53d3d70ff8..2a7cef9726e4892b3454e7198dca4c529aeed891 100755 (executable)
@@ -9,7 +9,7 @@ use strict;
 my $P = $0;
 $P =~ s@.*/@@g;
 
-my $V = '0.14';
+my $V = '0.15';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -105,8 +105,7 @@ our $Sparse = qr{
                        __iomem|
                        __must_check|
                        __init_refok|
-                       __kprobes|
-                       fastcall
+                       __kprobes
                }x;
 our $Attribute = qr{
                        const|
@@ -158,7 +157,10 @@ sub build_types {
                        \b
                        (?:const\s+)?
                        (?:unsigned\s+)?
-                       $all
+                       (?:
+                               $all|
+                               (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)
+                       )
                        (?:\s+$Sparse|\s+const)*
                        \b
                  }x;
@@ -362,6 +364,7 @@ sub ctx_statement_block {
 
        my $type = '';
        my $level = 0;
+       my $p;
        my $c;
        my $len = 0;
 
@@ -386,6 +389,7 @@ sub ctx_statement_block {
                                last;
                        }
                }
+               $p = $c;
                $c = substr($blk, $off, 1);
                $remainder = substr($blk, $off);
 
@@ -397,8 +401,9 @@ sub ctx_statement_block {
                }
 
                # An else is really a conditional as long as its not else if
-               if ($level == 0 && $remainder =~ /(\s+else)(?:\s|{)/ &&
-                                       $remainder !~ /\s+else\s+if\b/) {
+               if ($level == 0 && (!defined($p) || $p =~ /(?:\s|\})/) &&
+                               $remainder =~ /(else)(?:\s|{)/ &&
+                               $remainder !~ /else\s+if\b/) {
                        $coff = $off + length($1);
                }
 
@@ -445,21 +450,73 @@ sub ctx_statement_block {
                        $line, $remain + 1, $off - $loff + 1, $level);
 }
 
+sub statement_lines {
+       my ($stmt) = @_;
+
+       # Strip the diff line prefixes and rip blank lines at start and end.
+       $stmt =~ s/(^|\n)./$1/g;
+       $stmt =~ s/^\s*//;
+       $stmt =~ s/\s*$//;
+
+       my @stmt_lines = ($stmt =~ /\n/g);
+
+       return $#stmt_lines + 2;
+}
+
+sub statement_rawlines {
+       my ($stmt) = @_;
+
+       my @stmt_lines = ($stmt =~ /\n/g);
+
+       return $#stmt_lines + 2;
+}
+
+sub statement_block_size {
+       my ($stmt) = @_;
+
+       $stmt =~ s/(^|\n)./$1/g;
+       $stmt =~ s/^\s*{//;
+       $stmt =~ s/}\s*$//;
+       $stmt =~ s/^\s*//;
+       $stmt =~ s/\s*$//;
+
+       my @stmt_lines = ($stmt =~ /\n/g);
+       my @stmt_statements = ($stmt =~ /;/g);
+
+       my $stmt_lines = $#stmt_lines + 2;
+       my $stmt_statements = $#stmt_statements + 1;
+
+       if ($stmt_lines > $stmt_statements) {
+               return $stmt_lines;
+       } else {
+               return $stmt_statements;
+       }
+}
+
 sub ctx_statement_full {
        my ($linenr, $remain, $off) = @_;
        my ($statement, $condition, $level);
 
        my (@chunks);
 
+       # Grab the first conditional/block pair.
        ($statement, $condition, $linenr, $remain, $off, $level) =
                                ctx_statement_block($linenr, $remain, $off);
        #print "F: c<$condition> s<$statement>\n";
+       push(@chunks, [ $condition, $statement ]);
+       if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) {
+               return ($level, $linenr, @chunks);
+       }
+
+       # Pull in the following conditional/block pairs and see if they
+       # could continue the statement.
        for (;;) {
-               push(@chunks, [ $condition, $statement ]);
-               last if (!($remain > 0 && $condition =~ /^.\s*(?:if|else|do)/));
                ($statement, $condition, $linenr, $remain, $off, $level) =
                                ctx_statement_block($linenr, $remain, $off);
-               #print "C: c<$condition> s<$statement>\n";
+               #print "C: c<$condition> s<$statement> remain<$remain>\n";
+               last if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:else|do)\b/s));
+               #print "C: push\n";
+               push(@chunks, [ $condition, $statement ]);
        }
 
        return ($level, $linenr, @chunks);
@@ -593,13 +650,13 @@ sub cat_vet {
 }
 
 my $av_preprocessor = 0;
-my $av_paren = 0;
+my $av_pending;
 my @av_paren_type;
 
 sub annotate_reset {
        $av_preprocessor = 0;
-       $av_paren = 0;
-       @av_paren_type = ();
+       $av_pending = '_';
+       @av_paren_type = ('E');
 }
 
 sub annotate_values {
@@ -611,12 +668,13 @@ sub annotate_values {
        print "$stream\n" if ($dbg_values > 1);
 
        while (length($cur)) {
-               print " <$type> " if ($dbg_values > 1);
+               print " <" . join('', @av_paren_type) .
+                                       "> <$type> " if ($dbg_values > 1);
                if ($cur =~ /^(\s+)/o) {
                        print "WS($1)\n" if ($dbg_values > 1);
                        if ($1 =~ /\n/ && $av_preprocessor) {
+                               $type = pop(@av_paren_type);
                                $av_preprocessor = 0;
-                               $type = 'N';
                        }
 
                } elsif ($cur =~ /^($Type)/) {
@@ -626,11 +684,33 @@ sub annotate_values {
                } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) {
                        print "DEFINE($1)\n" if ($dbg_values > 1);
                        $av_preprocessor = 1;
-                       $av_paren_type[$av_paren] = 'N';
+                       $av_pending = 'N';
 
-               } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if|else|elif|endif))/o) {
-                       print "PRE($1)\n" if ($dbg_values > 1);
+               } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) {
+                       print "PRE_START($1)\n" if ($dbg_values > 1);
                        $av_preprocessor = 1;
+
+                       push(@av_paren_type, $type);
+                       push(@av_paren_type, $type);
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(#\s*(?:else|elif))/o) {
+                       print "PRE_RESTART($1)\n" if ($dbg_values > 1);
+                       $av_preprocessor = 1;
+
+                       push(@av_paren_type, $av_paren_type[$#av_paren_type]);
+
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(#\s*(?:endif))/o) {
+                       print "PRE_END($1)\n" if ($dbg_values > 1);
+
+                       $av_preprocessor = 1;
+
+                       # Assume all arms of the conditional end as this
+                       # one does, and continue as if the #endif was not here.
+                       pop(@av_paren_type);
+                       push(@av_paren_type, $type);
                        $type = 'N';
 
                } elsif ($cur =~ /^(\\\n)/o) {
@@ -639,13 +719,13 @@ sub annotate_values {
                } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
                        print "SIZEOF($1)\n" if ($dbg_values > 1);
                        if (defined $2) {
-                               $av_paren_type[$av_paren] = 'V';
+                               $av_pending = 'V';
                        }
                        $type = 'N';
 
                } elsif ($cur =~ /^(if|while|typeof|__typeof__|for)\b/o) {
                        print "COND($1)\n" if ($dbg_values > 1);
-                       $av_paren_type[$av_paren] = 'N';
+                       $av_pending = 'N';
                        $type = 'N';
 
                } elsif ($cur =~/^(return|case|else)/o) {
@@ -654,14 +734,14 @@ sub annotate_values {
 
                } elsif ($cur =~ /^(\()/o) {
                        print "PAREN('$1')\n" if ($dbg_values > 1);
-                       $av_paren++;
+                       push(@av_paren_type, $av_pending);
+                       $av_pending = '_';
                        $type = 'N';
 
                } elsif ($cur =~ /^(\))/o) {
-                       $av_paren-- if ($av_paren > 0);
-                       if (defined $av_paren_type[$av_paren]) {
-                               $type = $av_paren_type[$av_paren];
-                               undef $av_paren_type[$av_paren];
+                       my $new_type = pop(@av_paren_type);
+                       if ($new_type ne '_') {
+                               $type = $new_type;
                                print "PAREN('$1') -> $type\n"
                                                        if ($dbg_values > 1);
                        } else {
@@ -670,7 +750,7 @@ sub annotate_values {
 
                } elsif ($cur =~ /^($Ident)\(/o) {
                        print "FUNC($1)\n" if ($dbg_values > 1);
-                       $av_paren_type[$av_paren] = 'V';
+                       $av_pending = 'V';
 
                } elsif ($cur =~ /^($Ident|$Constant)/o) {
                        print "IDENT($1)\n" if ($dbg_values > 1);
@@ -680,11 +760,11 @@ sub annotate_values {
                        print "ASSIGN($1)\n" if ($dbg_values > 1);
                        $type = 'N';
 
-               } elsif ($cur =~/^(;)/) {
+               } elsif ($cur =~/^(;|{|})/) {
                        print "END($1)\n" if ($dbg_values > 1);
                        $type = 'E';
 
-               } elsif ($cur =~ /^(;|{|}|\?|:|\[)/o) {
+               } elsif ($cur =~ /^(;|\?|:|\[)/o) {
                        print "CLOSE($1)\n" if ($dbg_values > 1);
                        $type = 'N';
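
The preprocessor handling above pushes the current annotation type at #if/#ifdef/#ifndef, restores it for each #else/#elif arm, and assumes every arm ends in the same state at #endif. A hedged C sketch of the shape this is meant to keep consistent; the config symbol and helpers are made up, not taken from the patch:

static int example_init(void)
{
	int ret;

#ifdef CONFIG_EXAMPLE_FAST_PATH		/* hypothetical option */
	ret = example_fast_setup();
#else
	ret = example_slow_setup();
#endif
	/* whichever arm was compiled, this line should be annotated from
	 * the same starting state */
	if (ret)
		return ret;
	return 0;
}
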
 
@@ -988,7 +1068,7 @@ sub process {
                }
 
 # check for RCS/CVS revision markers
-               if ($rawline =~ /\$(Revision|Log|Id)(?:\$|)/) {
+               if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
                        WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr);
                }
 
@@ -999,41 +1079,44 @@ sub process {
 
 # Check for potential 'bare' types
                if ($realcnt) {
+                       my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
+                       $s =~ s/\n./ /g;
+                       $s =~ s/{.*$//;
+
                        # Ignore goto labels.
-                       if ($line =~ /$Ident:\*$/) {
+                       if ($s =~ /$Ident:\*$/) {
 
                        # Ignore functions being called
-                       } elsif ($line =~ /^.\s*$Ident\s*\(/) {
+                       } elsif ($s =~ /^.\s*$Ident\s*\(/) {
 
                        # definitions in global scope can only start with types
-                       } elsif ($line =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) {
-                               possible($1, $line);
+                       } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) {
+                               possible($1, $s);
 
                        # declarations always start with types
-                       } elsif ($prev_values eq 'E' && $line =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) {
-                               possible($1);
+                       } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) {
+                               possible($1, $s);
                        }
 
                        # any (foo ... *) is a pointer cast, and foo is a type
-                       while ($line =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) {
-                               possible($1, $line);
+                       while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) {
+                               possible($1, $s);
                        }
 
                        # Check for any sort of function declaration.
                        # int foo(something bar, other baz);
                        # void (*store_gdt)(x86_descr_ptr *);
-                       if ($prev_values eq 'E' && $line =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) {
+                       if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) {
                                my ($name_len) = length($1);
-                               my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, $name_len);
-                               my $ctx = join("\n", @ctx);
 
-                               $ctx =~ s/\n.//;
+                               my $ctx = $s;
                                substr($ctx, 0, $name_len + 1) = '';
                                $ctx =~ s/\)[^\)]*$//;
+
                                for my $arg (split(/\s*,\s*/, $ctx)) {
                                        if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/ || $arg =~ /^($Ident)$/) {
 
-                                               possible($1, $line);
+                                               possible($1, $s);
                                        }
                                }
                        }
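
With the check now running over the whole statement rather than a single diff line, split declarations and prototypes are classified the same way as one-liners. Roughly the constructs it learns candidate type names from; the fragments below are illustrative, every identifier is invented, and they are not meant as one compilable unit:

/* a global-scope definition starts with a type: "sample_state_t" is learnt */
static sample_state_t sample_state;

/* any "(ident ... *)" is taken as a pointer cast: "sample_buf" is learnt */
out = (sample_buf *)raw;

/* argument type names in declarations are candidates: "sample_device"
 * and "sample_config" are learnt */
int sample_probe(sample_device *dev, const sample_config *cfg);
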
@@ -1100,8 +1183,8 @@ sub process {
                $curr_values = $prev_values . $curr_values;
                if ($dbg_values) {
                        my $outline = $opline; $outline =~ s/\t/ /g;
-                       warn "--> .$outline\n";
-                       warn "--> $curr_values\n";
+                       print "$linenr > .$outline\n";
+                       print "$linenr > $curr_values\n";
                }
                $prev_values = substr($curr_values, -1);
 
@@ -1148,7 +1231,9 @@ sub process {
                        if (($prevline !~ /^}/) &&
                           ($prevline !~ /^\+}/) &&
                           ($prevline !~ /^ }/) &&
-                          ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=)/)) {
+                          ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) &&
+                          ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) &&
+                          ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) {
                                WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
                        }
                }
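
The relaxed check above also accepts a DECLARE_*() or LIST_HEAD() definition, or an array definition, immediately before the export. Kernel-style examples that should now pass without the warning; the symbol names are illustrative:

#include <linux/list.h>
#include <linux/module.h>
#include <linux/wait.h>

LIST_HEAD(sample_list);
EXPORT_SYMBOL(sample_list);

DECLARE_WAIT_QUEUE_HEAD(sample_wait);
EXPORT_SYMBOL(sample_wait);

int sample_table[16];
EXPORT_SYMBOL(sample_table);
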
@@ -1266,7 +1351,7 @@ sub process {
                                =>|->|<<|>>|<|>|=|!|~|
                                &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%
                        }x;
-                       my @elements = split(/($;+|$ops|;)/, $opline);
+                       my @elements = split(/($ops|;)/, $opline);
                        my $off = 0;
 
                        my $blank = copy_spacing($opline);
@@ -1277,6 +1362,7 @@ sub process {
                                my $a = '';
                                $a = 'V' if ($elements[$n] ne '');
                                $a = 'W' if ($elements[$n] =~ /\s$/);
+                               $a = 'C' if ($elements[$n] =~ /$;$/);
                                $a = 'B' if ($elements[$n] =~ /(\[|\()$/);
                                $a = 'O' if ($elements[$n] eq '');
                                $a = 'E' if ($elements[$n] eq '' && $n == 0);
@@ -1287,6 +1373,7 @@ sub process {
                                if (defined $elements[$n + 2]) {
                                        $c = 'V' if ($elements[$n + 2] ne '');
                                        $c = 'W' if ($elements[$n + 2] =~ /^\s/);
+                                       $c = 'C' if ($elements[$n + 2] =~ /^$;/);
                                        $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/);
                                        $c = 'O' if ($elements[$n + 2] eq '');
                                        $c = 'E' if ($elements[$n + 2] =~ /\s*\\$/);
@@ -1330,13 +1417,13 @@ sub process {
                                if ($op_type ne 'V' &&
                                    $ca =~ /\s$/ && $cc =~ /^\s*,/) {
 
-                               # Ignore comments
-                               } elsif ($op =~ /^$;+$/) {
+#                              # Ignore comments
+#                              } elsif ($op =~ /^$;+$/) {
 
                                # ; should have either the end of line or a space or \ after it
                                } elsif ($op eq ';') {
-                                       if ($ctx !~ /.x[WEB]/ && $cc !~ /^\\/ &&
-                                           $cc !~ /^;/) {
+                                       if ($ctx !~ /.x[WEBC]/ &&
+                                           $cc !~ /^\\/ && $cc !~ /^;/) {
                                                ERROR("need space after that '$op' $at\n" . $hereptr);
                                        }
 
@@ -1351,7 +1438,7 @@ sub process {
 
                                # , must have a space on the right.
                                } elsif ($op eq ',') {
-                                       if ($ctx !~ /.xW|.xE/ && $cc !~ /^}/) {
+                                       if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
                                                ERROR("need space after that '$op' $at\n" . $hereptr);
                                        }
 
@@ -1364,7 +1451,7 @@ sub process {
                                # unary operator, or a cast
                                } elsif ($op eq '!' || $op eq '~' ||
                                         ($is_unary && ($op eq '*' || $op eq '-' || $op eq '&'))) {
-                                       if ($ctx !~ /[WEB]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
+                                       if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
                                                ERROR("need space before that '$op' $at\n" . $hereptr);
                                        }
                                        if ($ctx =~ /.xW/) {
@@ -1373,7 +1460,7 @@ sub process {
 
                                # unary ++ and unary -- are allowed no space on one side.
                                } elsif ($op eq '++' or $op eq '--') {
-                                       if ($ctx !~ /[WOB]x[^W]/ && $ctx !~ /[^W]x[WOBE]/) {
+                                       if ($ctx !~ /[WOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
                                                ERROR("need space one side of that '$op' $at\n" . $hereptr);
                                        }
                                        if ($ctx =~ /WxB/ || ($ctx =~ /Wx./ && $cc =~ /^;/)) {
@@ -1387,13 +1474,13 @@ sub process {
                                         $op eq '*' or $op eq '/' or
                                         $op eq '%')
                                {
-                                       if ($ctx !~ /VxV|WxW|VxE|WxE|VxO/) {
+                                       if ($ctx !~ /VxV|WxW|VxE|WxE|VxO|Cx.|.xC/) {
                                                ERROR("need consistent spacing around '$op' $at\n" .
                                                        $hereptr);
                                        }
 
                                # All the others need spaces both sides.
-                               } elsif ($ctx !~ /[EW]x[WE]/) {
+                               } elsif ($ctx !~ /[EWC]x[CWE]/) {
                                        # Ignore email addresses <foo@bar>
                                        if (!($op eq '<' && $cb =~ /$;\S+\@\S+>/) &&
                                            !($op eq '>' && $cb =~ /<\S+\@\S+$;/)) {
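
The new 'C' context marks comment placeholders in the spacing checks, so an operator that sits directly against a comment is not treated as missing whitespace. A plausible case, using a made-up helper and assuming this reading of the placeholder handling is right:

	/* the ';' sits directly against the comment; with the comment
	 * context in place this should not be reported as a spacing error */
	while (example_controller_busy())
		;/* spin until the controller goes idle */
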
@@ -1551,7 +1638,7 @@ sub process {
 
 # multi-statement macros should be enclosed in a do while loop, grab the
 # first statement and ensure its the whole macro if its not enclosed
-# in a known goot container
+# in a known good container
                if ($prevline =~ /\#define.*\\/ &&
                   $prevline !~/(?:do\s+{|\(\{|\{)/ &&
                   $line !~ /(?:do\s+{|\(\{|\{)/ &&
@@ -1599,84 +1686,95 @@ sub process {
 # check for redundant bracing round if etc
                if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) {
                        my ($level, $endln, @chunks) =
-                               ctx_statement_full($linenr, $realcnt, 0);
+                               ctx_statement_full($linenr, $realcnt, 1);
                        #print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n";
-                       if ($#chunks > 1 && $level == 0) {
+                       #print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n";
+                       if ($#chunks > 0 && $level == 0) {
                                my $allowed = 0;
                                my $seen = 0;
+                               my $herectx = $here . "\n";;
+                               my $ln = $linenr - 1;
                                for my $chunk (@chunks) {
                                        my ($cond, $block) = @{$chunk};
 
+                                       $herectx .= "$rawlines[$ln]\n[...]\n";
+                                       $ln += statement_rawlines($block) - 1;
+
                                        substr($block, 0, length($cond)) = '';
 
                                        $seen++ if ($block =~ /^\s*{/);
 
-                                       $block =~ s/(^|\n)./$1/g;
-                                       $block =~ s/^\s*{//;
-                                       $block =~ s/}\s*$//;
-                                       $block =~ s/^\s*//;
-                                       $block =~ s/\s*$//;
-
-                                       my @lines = ($block =~ /\n/g);
-                                       my @statements = ($block =~ /;/g);
-
-                                       #print "cond<$cond> block<$block> lines<" . scalar(@lines) . "> statements<" . scalar(@statements) . "> seen<$seen> allowed<$allowed>\n";
-                                       if (scalar(@lines) != 0) {
+                                       #print "cond<$cond> block<$block> allowed<$allowed>\n";
+                                       if (statement_lines($cond) > 1) {
+                                               #print "APW: ALLOWED: cond<$cond>\n";
                                                $allowed = 1;
                                        }
                                        if ($block =~/\b(?:if|for|while)\b/) {
+                                               #print "APW: ALLOWED: block<$block>\n";
                                                $allowed = 1;
                                        }
-                                       if (scalar(@statements) > 1) {
+                                       if (statement_block_size($block) > 1) {
+                                               #print "APW: ALLOWED: lines block<$block>\n";
                                                $allowed = 1;
                                        }
                                }
                                if ($seen && !$allowed) {
-                                       WARN("braces {} are not necessary for any arm of this statement\n" . $herecurr);
-                                       $suppress_ifbraces = $endln;
+                                       WARN("braces {} are not necessary for any arm of this statement\n" . $herectx);
                                }
+                               # Either way we have looked over this whole
+                               # statement and said what needs to be said.
+                               $suppress_ifbraces = $endln;
                        }
                }
                if ($linenr > $suppress_ifbraces &&
                                        $line =~ /\b(if|while|for|else)\b/) {
-                       # Locate the end of the opening statement.
-                       my @control = ctx_statement($linenr, $realcnt, 0);
-                       my $nr = $linenr + (scalar(@control) - 1);
-                       my $cnt = $realcnt - (scalar(@control) - 1);
-
-                       my $off = $realcnt - $cnt;
-                       #print "$off: line<$line>end<" . $lines[$nr - 1] . ">\n";
-
-                       # If this is is a braced statement group check it
-                       if ($lines[$nr - 1] =~ /{\s*$/) {
-                               my ($lvl, @block) = ctx_block_level($nr, $cnt);
-
-                               my $stmt = join("\n", @block);
-                               # Drop the diff line leader.
-                               $stmt =~ s/\n./\n/g;
-                               # Drop the code outside the block.
-                               $stmt =~ s/(^[^{]*){\s*//;
-                               my $before = $1;
-                               $stmt =~ s/\s*}([^}]*$)//;
-                               my $after = $1;
-
-                               #print "block<" . join(' ', @block) . "><" . scalar(@block) . ">\n";
-                               #print "before<$before> stmt<$stmt> after<$after>\n\n";
-
-                               # Count the newlines, if there is only one
-                               # then the block should not have {}'s.
-                               my @lines = ($stmt =~ /\n/g);
-                               my @statements = ($stmt =~ /;/g);
-                               #print "lines<" . scalar(@lines) . ">\n";
-                               #print "statements<" . scalar(@statements) . ">\n";
-                               if ($lvl == 0 && scalar(@lines) == 0 &&
-                                   scalar(@statements) < 2 &&
-                                   $stmt !~ /{/ && $stmt !~ /\bif\b/ &&
-                                   $before !~ /}/ && $after !~ /{/) {
-                                       my $herectx = "$here\n" . join("\n", @control, @block[1 .. $#block]) . "\n";
-                                       shift(@block);
-                                       WARN("braces {} are not necessary for single statement blocks\n" . $herectx);
+                       my ($level, $endln, @chunks) =
+                               ctx_statement_full($linenr, $realcnt, $-[0]);
+
+                       my $allowed = 0;
+
+                       # Check the pre-context.
+                       if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) {
+                               #print "APW: ALLOWED: pre<$1>\n";
+                               $allowed = 1;
+                       }
+                       # Check the condition.
+                       my ($cond, $block) = @{$chunks[0]};
+                       if (defined $cond) {
+                               substr($block, 0, length($cond)) = '';
+                       }
+                       if (statement_lines($cond) > 1) {
+                               #print "APW: ALLOWED: cond<$cond>\n";
+                               $allowed = 1;
+                       }
+                       if ($block =~/\b(?:if|for|while)\b/) {
+                               #print "APW: ALLOWED: block<$block>\n";
+                               $allowed = 1;
+                       }
+                       if (statement_block_size($block) > 1) {
+                               #print "APW: ALLOWED: lines block<$block>\n";
+                               $allowed = 1;
+                       }
+                       # Check the post-context.
+                       if (defined $chunks[1]) {
+                               my ($cond, $block) = @{$chunks[1]};
+                               if (defined $cond) {
+                                       substr($block, 0, length($cond)) = '';
+                               }
+                               if ($block =~ /^\s*\{/) {
+                                       #print "APW: ALLOWED: chunk-1 block<$block>\n";
+                                       $allowed = 1;
+                               }
+                       }
+                       if ($level == 0 && $block =~ /^\s*\{/ && !$allowed) {
+                               my $herectx = $here . "\n";;
+                               my $end = $linenr + statement_rawlines($block) - 1;
+
+                               for (my $ln = $linenr - 1; $ln < $end; $ln++) {
+                                       $herectx .= $rawlines[$ln] . "\n";;
                                }
+
+                               WARN("braces {} are not necessary for single statement blocks\n" . $herectx);
                        }
                }
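
Taken together, the rework above flags braces that no arm of a statement needs, and lone braced single-statement blocks, while permitting braces whenever any arm spans multiple lines or statements or contains a nested conditional. Hedged C illustrations, with invented names:

static int example_write(int err, int retries)
{
	/* flagged: braces around a lone single-statement block */
	if (err) {
		return err;
	}

	/* allowed: the first arm holds two statements, so braces may stay
	 * on every arm */
	if (retries > 3) {
		example_report_failure();	/* hypothetical helper */
		return -1;
	} else {
		retries++;
	}
	return 0;
}
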
 
@@ -1828,15 +1926,6 @@ sub process {
                print "are false positives report them to the maintainer, see\n";
                print "CHECKPATCH in MAINTAINERS.\n";
        }
-       print <<EOL if ($file == 1 && $quiet == 0);
-
-WARNING: Using --file mode. Please do not send patches to linux-kernel
-that change whole existing files if you did not significantly change most
-of the the file for other reasons anyways or just wrote the file newly
-from scratch. Pure code style patches have a significant cost in a
-quickly changing code base like Linux because they cause rejects
-with other changes.
-EOL
 
        return $clean;
 }
index 6304c3a89ba0650b5a644c555f81dfded2cd2620..fe03bb820532beed11afd25dbfe8c67959ead1fa 100644 (file)
@@ -277,7 +277,7 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
        } else {
                snd_sbdsp_command(chip, 256 - runtime->rate_den);
        }
-       if (chip->capture_format != SB_DSP_OUTPUT) {
+       if (chip->capture_format != SB_DSP_INPUT) {
                count--;
                snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE);
                snd_sbdsp_command(chip, count & 0xff);
index 19f08846d6fcaa84a800a7fcf932b0b52858172c..c8649282c2cfec54b191468f52b98bc18b82e385 100644 (file)
@@ -1778,9 +1778,9 @@ static hda_nid_t ad1988_capsrc_nids[3] = {
 static struct hda_input_mux ad1988_6stack_capture_source = {
        .num_items = 5,
        .items = {
-               { "Front Mic", 0x0 },
-               { "Line", 0x1 },
-               { "Mic", 0x4 },
+               { "Front Mic", 0x1 },   /* port-B */
+               { "Line", 0x2 },        /* port-C */
+               { "Mic", 0x4 },         /* port-E */
                { "CD", 0x5 },
                { "Mix", 0x9 },
        },
@@ -1789,7 +1789,7 @@ static struct hda_input_mux ad1988_6stack_capture_source = {
 static struct hda_input_mux ad1988_laptop_capture_source = {
        .num_items = 3,
        .items = {
-               { "Mic/Line", 0x0 },
+               { "Mic/Line", 0x1 },    /* port-B */
                { "CD", 0x5 },
                { "Mix", 0x9 },
        },
index f7cd3a804b11b5faa9283646cf0817efce8069e1..7206b30cbf9454c30e19f0c2c49e9dbe9684df2f 100644 (file)
@@ -1230,6 +1230,11 @@ static struct hda_verb cxt5047_toshiba_init_verbs[] = {
 static struct hda_verb cxt5047_hp_init_verbs[] = {
        /* pin sensing on HP jack */
        {0x13, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+       /* 0x13 is actually shared by both HP and speaker;
+        * setting the connection to 0 (=0x19) makes the master volume control
+        * work mysteriously...
+        */
+       {0x13, AC_VERB_SET_CONNECT_SEL, 0x0},
        /* Record selector: Ext Mic */
        {0x12, AC_VERB_SET_CONNECT_SEL,0x03},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE,
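
The added verb pins the shared headphone/speaker pin complex (0x13) to connection index 0 at init time; the in-line comment above carries the reasoning. For orientation, an HDA init-verb table entry of this shape looks like the following sketch, where the table name and surrounding entries are placeholders:

static const struct hda_verb example_init_verbs[] = {
	/* route the shared headphone/speaker pin to connection index 0 */
	{0x13, AC_VERB_SET_CONNECT_SEL, 0x00},
	{}	/* terminator */
};
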
index 777f8c01ca7a6c804d0050d785aaa163f0f42780..33282f9c01c7f9a586038243ea50695a3572a982 100644 (file)
@@ -3973,8 +3973,8 @@ static struct snd_kcontrol_new alc260_fujitsu_mixer[] = {
        ALC_PIN_MODE("Mic/Line Jack Mode", 0x12, ALC_PIN_DIR_IN),
        HDA_CODEC_VOLUME("Beep Playback Volume", 0x07, 0x05, HDA_INPUT),
        HDA_CODEC_MUTE("Beep Playback Switch", 0x07, 0x05, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Speaker Playback Volume", 0x09, 0x0, HDA_OUTPUT),
-       HDA_BIND_MUTE("Internal Speaker Playback Switch", 0x09, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME("Speaker Playback Volume", 0x09, 0x0, HDA_OUTPUT),
+       HDA_BIND_MUTE("Speaker Playback Switch", 0x09, 2, HDA_INPUT),
        { } /* end */
 };
 
@@ -4005,9 +4005,9 @@ static struct snd_kcontrol_new alc260_acer_mixer[] = {
        HDA_CODEC_VOLUME("Master Playback Volume", 0x08, 0x0, HDA_OUTPUT),
        HDA_BIND_MUTE("Master Playback Switch", 0x08, 2, HDA_INPUT),
        ALC_PIN_MODE("Headphone Jack Mode", 0x0f, ALC_PIN_DIR_INOUT),
-       HDA_CODEC_VOLUME_MONO("Mono Speaker Playback Volume", 0x0a, 1, 0x0,
+       HDA_CODEC_VOLUME_MONO("Speaker Playback Volume", 0x0a, 1, 0x0,
                              HDA_OUTPUT),
-       HDA_BIND_MUTE_MONO("Mono Speaker Playback Switch", 0x0a, 1, 2,
+       HDA_BIND_MUTE_MONO("Speaker Playback Switch", 0x0a, 1, 2,
                           HDA_INPUT),
        HDA_CODEC_VOLUME("CD Playback Volume", 0x07, 0x04, HDA_INPUT),
        HDA_CODEC_MUTE("CD Playback Switch", 0x07, 0x04, HDA_INPUT),
@@ -7639,6 +7639,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763),
        SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763),
        SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2),
+       SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
        SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
        SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
        {}
@@ -8102,7 +8103,7 @@ static struct snd_kcontrol_new alc262_base_mixer[] = {
        HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
        HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
        /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT),
-          HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */
+          HDA_CODEC_MUTE("PC Beep Playback Switch", 0x0b, 0x05, HDA_INPUT), */
        HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),
        HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
        HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
@@ -8124,7 +8125,7 @@ static struct snd_kcontrol_new alc262_hippo1_mixer[] = {
        HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
        HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
        /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT),
-          HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */
+          HDA_CODEC_MUTE("PC Beep Playback Switch", 0x0b, 0x05, HDA_INPUT), */
        /*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/
        HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
        { } /* end */
@@ -9238,6 +9239,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x900e, "Sony ASSAMD", ALC262_SONY_ASSAMD),
        SND_PCI_QUIRK(0x104d, 0x9015, "Sony 0x9015", ALC262_SONY_ASSAMD),
        SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FUJITSU),
+       SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU),
        SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA),
        SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8),
        SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31),
@@ -12993,8 +12995,8 @@ static struct snd_kcontrol_new alc662_lenovo_101e_mixer[] = {
 static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = {
        HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 
-       HDA_CODEC_VOLUME("LineOut Playback Volume", 0x02, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("LineOut Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x02, 0x0, HDA_OUTPUT),
+       HDA_CODEC_MUTE("Line-Out Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
        HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT),
        HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
@@ -13007,8 +13009,8 @@ static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = {
 };
 
 static struct snd_kcontrol_new alc662_eeepc_ep20_mixer[] = {
-       HDA_CODEC_VOLUME("LineOut Playback Volume", 0x02, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("LineOut Playback Switch", 0x14, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x02, 0x0, HDA_OUTPUT),
+       HDA_CODEC_MUTE("Line-Out Playback Switch", 0x14, 0x0, HDA_OUTPUT),
        HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT),
        HDA_BIND_MUTE("Surround Playback Switch", 0x03, 2, HDA_INPUT),
        HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x04, 1, 0x0, HDA_OUTPUT),
index 9ab4a9f383cbb19b69834a7b27be19e759ea80db..5a158b73dcaa2050211bf55291d3de67b0621ebd 100644 (file)
@@ -51,7 +51,7 @@
 struct phase28_spec {
        unsigned short master[2];
        unsigned short vol[8];
-} phase28;
+};
 
 /* WM8770 registers */
 #define WM_DAC_ATTEN           0x00    /* DAC1-8 analog attenuation */
index ddd5fc8d4fe12e9bbd3e61db39e95b8e07381826..301bf929acd9018310fe96b94c1c37ce521f05e2 100644 (file)
@@ -36,7 +36,7 @@
 struct revo51_spec {
        struct snd_i2c_device *dev;
        struct snd_pt2258 *pt2258;
-} revo51;
+};
 
 static void revo_i2s_mclk_changed(struct snd_ice1712 *ice)
 {
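
Both ice1712 hunks fix the same slip: the closing brace of the chip-specific spec struct also defined an unused file-scope variable of that type. A minimal illustration with an invented tag:

struct example_spec {
	unsigned short master[2];
	unsigned short vol[8];
};	/* "};" ends the type; writing "} example;" here would also have
	 * defined an unused file-scope variable named "example" */
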
index 061072c7db034722398571b253cfba6eb50be8e3..c52abd0bf22e3026b03274851f71c15d1fbf947b 100644 (file)
@@ -1708,6 +1708,12 @@ static struct ac97_pcm ac97_pcm_defs[] __devinitdata = {
 };
 
 static struct ac97_quirk ac97_quirks[] __devinitdata = {
+        {
+               .subvendor = 0x0e11,
+               .subdevice = 0x000e,
+               .name = "Compaq Deskpro EN",    /* AD1885 */
+               .type = AC97_TUNE_HP_ONLY
+        },
        {
                .subvendor = 0x0e11,
                .subdevice = 0x008a,
@@ -1738,6 +1744,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "IBM NetVista A30p",    /* AD1981B */
                .type = AC97_TUNE_HP_ONLY
        },
+       {
+               .subvendor = 0x1025,
+               .subdevice = 0x0082,
+               .name = "Acer Travelmate 2310",
+               .type = AC97_TUNE_HP_ONLY
+       },
        {
                .subvendor = 0x1025,
                .subdevice = 0x0083,
index 3ea1f05228a18f04b090ff4b07fb0f804b8bfa84..666f69a3312e9a3c08cd90ac91008105f482fa3d 100644 (file)
@@ -150,6 +150,7 @@ static const struct oxygen_model model_hifier = {
        .shortname = "C-Media CMI8787",
        .longname = "C-Media Oxygen HD Audio",
        .chip = "CMI8788",
+       .owner = THIS_MODULE,
        .init = hifier_init,
        .control_filter = hifier_control_filter,
        .mixer_init = hifier_mixer_init,
index 40e92f5cd69c760111073cfe8e8a9c60e1f00cce..d163397b85cc3d351cc60b1b3b187d191c0ae549 100644 (file)
@@ -389,6 +389,7 @@ static const struct oxygen_model model_xonar = {
        .shortname = "Asus AV200",
        .longname = "Asus Virtuoso 200",
        .chip = "AV200",
+       .owner = THIS_MODULE,
        .init = xonar_init,
        .control_filter = xonar_control_filter,
        .mixer_init = xonar_mixer_init,
index 710e0287ef8c13d2b70f72ef42e58e8a4bd8a9cf..569ecaca0e8b9caf7723d9a393367ab6853242c9 100644 (file)
@@ -681,8 +681,8 @@ static const struct aic3x_rate_divs aic3x_divs[] = {
        {22579200, 48000, 48000, 0x0, 8, 7075},
        {33868800, 48000, 48000, 0x0, 5, 8049},
        /* 64k */
-       {22579200, 96000, 96000, 0x1, 8, 7075},
-       {33868800, 96000, 96000, 0x1, 5, 8049},
+       {22579200, 64000, 96000, 0x1, 8, 7075},
+       {33868800, 64000, 96000, 0x1, 5, 8049},
        /* 88.2k */
        {22579200, 88200, 88200, 0x0, 8, 0},
        {33868800, 88200, 88200, 0x0, 5, 3333},
index 590baea3c4c3d9d3a2666025615b27f3c506545c..524f7450804f07cc506547618c17bdff96c925e6 100644 (file)
@@ -176,7 +176,8 @@ static int wm9712_add_controls(struct snd_soc_codec *codec)
  * the codec only has a single control that is shared by both channels.
  * This makes it impossible to determine the audio path.
  */
-static int mixer_event (struct snd_soc_dapm_widget *w, int event)
+static int mixer_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *k, int event)
 {
        u16 l, r, beep, line, phone, mic, pcm, aux;
 
index 3f34e531bebf7921d210a05e583c937535095de1..1a70a6ac98ced1a2a9a5830e7a5678df8bea7b36 100644 (file)
@@ -215,7 +215,8 @@ static int corgi_set_spk(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
-static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event)
+static int corgi_amp_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *k, int event)
 {
        if (SND_SOC_DAPM_EVENT_ON(event))
                set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_APM_ON);
@@ -225,7 +226,8 @@ static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event)
        return 0;
 }
 
-static int corgi_mic_event(struct snd_soc_dapm_widget *w, int event)
+static int corgi_mic_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *k, int event)
 {
        if (SND_SOC_DAPM_EVENT_ON(event))
                set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_MIC_BIAS);
index 5ae59bd309a33863d05b3173c7e879df77a6372a..4fbf8bba9627322cd628fb8d21d545ebe608c658 100644 (file)
@@ -196,7 +196,8 @@ static int poodle_set_spk(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
-static int poodle_amp_event(struct snd_soc_dapm_widget *w, int event)
+static int poodle_amp_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *k, int event)
 {
        if (SND_SOC_DAPM_EVENT_ON(event))
                locomo_gpio_write(&poodle_locomo_device.dev,
index d56709e15435e175cd533f7cd2badd558c3aded8..ecca39033fcceff2ae449cec9c48ab3f17f3061f 100644 (file)
@@ -215,7 +215,8 @@ static int spitz_set_spk(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
-static int spitz_mic_bias(struct snd_soc_dapm_widget *w, int event)
+static int spitz_mic_bias(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *k, int event)
 {
        if (machine_is_borzoi() || machine_is_spitz()) {
                if (SND_SOC_DAPM_EVENT_ON(event))
index e4d40b528ca47e6b85017ac52529b34fece23c55..7346d7e5d066e71b84669f0fe6c054e77b003ca1 100644 (file)
@@ -135,7 +135,8 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol,
 }
 
 /* tosa dapm event handlers */
-static int tosa_hp_event(struct snd_soc_dapm_widget *w, int event)
+static int tosa_hp_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *k, int event)
 {
        if (SND_SOC_DAPM_EVENT_ON(event))
                set_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE);
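
The machine-driver hunks above all track the same ASoC change: DAPM widget event callbacks now also receive the struct snd_kcontrol that triggered them. A minimal sketch of the new callback shape follows; the power helper is a hypothetical board-specific function, not part of the patch:

static int example_amp_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	/* power the external amplifier only while the audio path is active */
	if (SND_SOC_DAPM_EVENT_ON(event))
		example_set_amp_power(1);	/* hypothetical board helper */
	else
		example_set_amp_power(0);
	return 0;
}
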
index 8fa93566570272036365009776ec0fa952c1303d..675672f313be322138c8902ce95a889a78c72782 100644 (file)
@@ -479,6 +479,33 @@ static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs,
        return 0;
 }
 
+/*
+ * process after E-Mu 0202/0404 high speed playback sync complete
+ *
+ * These devices return the number of samples per packet instead of the number
+ * of samples per microframe.
+ */
+static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs,
+                                          struct snd_pcm_runtime *runtime,
+                                          struct urb *urb)
+{
+       unsigned int f;
+       unsigned long flags;
+
+       if (urb->iso_frame_desc[0].status == 0 &&
+           urb->iso_frame_desc[0].actual_length == 4) {
+               f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
+               f >>= subs->datainterval;
+               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
+                       spin_lock_irqsave(&subs->lock, flags);
+                       subs->freqm = f;
+                       spin_unlock_irqrestore(&subs->lock, flags);
+               }
+       }
+
+       return 0;
+}
+
 /* determine the number of frames in the next packet */
 static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
 {
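
Per the comment in the new handler, these E-Mu devices report samples per packet rather than per microframe, so the value is shifted right by datainterval before being accepted as the measured rate. A rough worked example, with illustrative numbers and assuming a packet interval of 2^datainterval microframes:

	/* a device reporting 48 samples per 1 ms packet, with datainterval = 3
	 * (8 microframes per packet), is really running at 48 >> 3 = 6 samples
	 * per microframe */
	unsigned int samples_per_packet = 48;		/* illustrative */
	unsigned int datainterval = 3;			/* illustrative */
	unsigned int per_microframe = samples_per_packet >> datainterval;
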
@@ -2219,10 +2246,17 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
        subs->stream = as;
        subs->direction = stream;
        subs->dev = as->chip->dev;
-       if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
+       if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
                subs->ops = audio_urb_ops[stream];
-       else
+       } else {
                subs->ops = audio_urb_ops_high_speed[stream];
+               switch (as->chip->usb_id) {
+               case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
+               case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
+                       subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
+                       break;
+               }
+       }
        snd_pcm_set_ops(as->pcm, stream,
                        stream == SNDRV_PCM_STREAM_PLAYBACK ?
                        &snd_usb_playback_ops : &snd_usb_capture_ops);
index 317f8e211cd2a136f6a07c8333731dadd0389cbe..4232fd75dd20b710a4ca16c857b80a83948f2fe9 100644 (file)
@@ -211,6 +211,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
        case IOAPIC_LOWEST_PRIORITY:
                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
                                deliver_bitmask);
+#ifdef CONFIG_X86
+               if (irq == 0)
+                       vcpu = ioapic->kvm->vcpus[0];
+#endif
                if (vcpu != NULL)
                        ioapic_inj_irq(ioapic, vcpu, vector,
                                       trig_mode, delivery_mode);
@@ -220,6 +224,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
                                     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
                break;
        case IOAPIC_FIXED:
+#ifdef CONFIG_X86
+               if (irq == 0)
+                       deliver_bitmask = 1;
+#endif
                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
                        if (!(deliver_bitmask & (1 << vcpu_id)))
                                continue;
index 32fbf800696901866b760cef48b880a151c58399..b2e12893e3f4d4b0f39d64ff733333171e3a0dfb 100644 (file)
@@ -169,6 +169,7 @@ static struct kvm *kvm_create_vm(void)
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
+       init_rwsem(&kvm->slots_lock);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
@@ -339,9 +340,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 {
        int r;
 
-       down_write(&current->mm->mmap_sem);
+       down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
-       up_write(&current->mm->mmap_sem);
+       up_write(&kvm->slots_lock);
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
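
The kvm_main.c hunks introduce a dedicated slots_lock rwsem, set up once at VM creation and taken for writing around memory-slot updates, instead of piggybacking on the caller's mmap_sem. A condensed sketch of the pattern, with simplified names standing in for the real structures and helpers:

/* condensed sketch; the real struct kvm carries many more fields */
struct kvm_sketch {
	struct rw_semaphore slots_lock;		/* serializes memslot updates */
};

static void example_create_vm(struct kvm_sketch *kvm)
{
	init_rwsem(&kvm->slots_lock);		/* done once at VM creation */
}

static int example_set_memory_region(struct kvm_sketch *kvm, void *mem)
{
	int r;

	down_write(&kvm->slots_lock);		/* writers are exclusive */
	r = example_update_memslots(kvm, mem);	/* stand-in for __kvm_set_memory_region() */
	up_write(&kvm->slots_lock);
	return r;
}
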