NULL, (void *)&pr);
/* Check ACPI support for C3 state */
- if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) {
+ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
cx = &pr->power.states[ACPI_STATE_C3];
if (cx->address > 0 && cx->latency <= 1000) {
longhaul_flags |= USE_ACPI_C3;
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
- mfc0 t2, CP0_STATUS
+ mfc0 t1, CP0_STATUS
+ sw t1, THREAD_STATUS(a0)
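+ # saving status here avoids carrying it in a register across the FPU save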
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
lw t3, TASK_THREAD_INFO(a0)
lw t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
- and t1, t0
- beqz t1, 1f
+ and t2, t0, t1
+ beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
li t1, ~ST0_CU1
and t0, t0, t1
sw t0, ST_OFF(t3)
- /* clear thread_struct CU1 bit */
- and t2, t1
fpu_save_single a0, t0 # clobbers t0
1:
- sw t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
- mfc0 t2, CP0_STATUS
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
- and t1, t0
- beqz t1, 1f
+ and t2, t0, t1
+ beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
- /* clear thread_struct CU1 bit */
- and t2, t1
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
1:
- LONG_S t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
+/* Make sure we will not lose FPU ownership */
+#ifdef CONFIG_PREEMPT
+#define lock_fpu_owner() preempt_disable()
+#define unlock_fpu_owner() preempt_enable()
+#else
+#define lock_fpu_owner() pagefault_disable()
+#define unlock_fpu_owner() pagefault_enable()
+#endif
+
#endif /* __SIGNAL_COMMON_H */
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
+#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
-#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
/*
* Helper routines
*/
+static int protected_save_fp_context(struct sigcontext __user *sc)
+{
+ int err;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(1);
+ err = save_fp_context(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
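+ /* (writing both ends faults in each page the FP save area may span) */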
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int protected_restore_fp_context(struct sigcontext __user *sc)
+{
+ int err, tmp;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(0);
+ err = restore_fp_context(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
- own_fpu(1);
- enable_fp_in_kernel();
- err |= save_fp_context(sc);
- disable_fp_in_kernel();
+ err |= protected_save_fp_context(sc);
}
return err;
}
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
- err |= restore_fp_context(sc);
+ err |= protected_restore_fp_context(sc);
return err ?: sig;
}
if (used_math) {
/* restore fpu context if we have used it before */
- own_fpu(0);
- enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context(sc);
- disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
+#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
-#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/system.h>
#include <asm/fpu.h>
/*
* sigcontext handlers
*/
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(1);
+ err = save_fp_context32(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err, tmp;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(0);
+ err = restore_fp_context32(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
- own_fpu(1);
- enable_fp_in_kernel();
- err |= save_fp_context32(sc);
- disable_fp_in_kernel();
+ err |= protected_save_fp_context32(sc);
}
return err;
}
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
- err |= restore_fp_context32(sc);
+ err |= protected_restore_fp_context32(sc);
return err ?: sig;
}
if (used_math) {
/* restore fpu context if we have used it before */
- own_fpu(0);
- enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context32(sc);
- disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
unsigned int opcode, bcode;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
unsigned int opcode, tcode = 0;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
{
unsigned int cpid;
+ die_if_kernel("do_cpu invoked from kernel context!", regs);
+
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
- die_if_kernel("do_cpu invoked from kernel context!", regs);
if (!cpu_has_llsc)
if (!simulate_llsc(regs))
return;
break;
case 1:
- if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
- die_if_kernel("do_cpu invoked from kernel context!",
- regs);
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
set_used_math();
}
- if (raw_cpu_has_fpu) {
- if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
- local_irq_disable();
- if (cpu_has_fpu)
- regs->cp0_status |= ST0_CU1;
- /*
- * We must return without enabling
- * interrupts to ensure keep FPU
- * ownership until resume.
- */
- return;
- }
- } else {
+ if (!raw_cpu_has_fpu) {
int sig;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0);
case 2:
case 3:
- die_if_kernel("do_cpu invoked from kernel context!", regs);
break;
}
periph_rev = 3;
pass_str = "A2";
break;
+ case K_SYS_REVISION_BCM112x_A3:
+ periph_rev = 3;
+ pass_str = "A3";
+ break;
+ case K_SYS_REVISION_BCM112x_A4:
+ periph_rev = 3;
+ pass_str = "A4";
+ break;
+ case K_SYS_REVISION_BCM112x_B0:
+ periph_rev = 3;
+ pass_str = "B0";
+ break;
default:
printk("Unknown %s rev %x\n", soc_str, soc_pass);
ret = 1;
* expire an async queue immediately if it has used up its slice. idle
* queues always expire after 1 dispatch round.
*/
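+	/* (only when other queues are busy; expiring the sole queue gains nothing) */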
- if ((!cfq_cfqq_sync(cfqq) &&
+ if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq)) {
+ cfq_class_idle(cfqq))) {
cfqq->slice_end = jiffies + 1;
cfq_slice_expired(cfqd, 0, 0);
}
while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
int max_dispatch;
- /*
- * Don't repeat dispatch from the previous queue.
- */
- if (prev_cfqq == cfqq)
- break;
+ if (cfqd->busy_queues > 1) {
+ /*
+ * Don't repeat dispatch from the previous queue.
+ */
+ if (prev_cfqq == cfqq)
+ break;
- /*
- * So we have dispatched before in this round, if the
- * next queue has idling enabled (must be sync), don't
- * allow it service until the previous have continued.
- */
- if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
- break;
+ /*
+ * So we have dispatched before in this round, if the
+ * next queue has idling enabled (must be sync), don't
+ * allow it service until the previous have continued.
+ */
+ if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+ break;
+ }
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
atomic_set(&cfqq->ref, 0);
cfqq->cfqd = cfqd;
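+	/* idle-window idling only makes sense for sync queues */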
- cfq_mark_cfqq_idle_window(cfqq);
+ if (key != CFQ_KEY_ASYNC)
+ cfq_mark_cfqq_idle_window(cfqq);
+
cfq_mark_cfqq_prio_changed(cfqq);
cfq_mark_cfqq_queue_new(cfqq);
cfq_init_prio_data(cfqq);
struct ata_port_info *port;
struct pci_dev *host = NULL;
struct sis_chipset *chipset = NULL;
+ struct sis_chipset *sets;
static struct sis_chipset sis_chipsets[] = {
/* We have to find the bridge first */
- for (chipset = &sis_chipsets[0]; chipset->device; chipset++) {
- host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL);
+ for (sets = &sis_chipsets[0]; sets->device; sets++) {
+ host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
if (host != NULL) {
- if (chipset->device == 0x630) { /* SIS630 */
+ chipset = sets; /* Match found */
+ if (sets->device == 0x630) { /* SIS630 */
u8 host_rev;
pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
if (host_rev >= 0x30) /* 630 ET */
}
/* Look for concealed bridges */
- if (host == NULL) {
+ if (chipset == NULL) {
/* Second check */
u32 idemisc;
u16 trueid;
config IDE_MAX_HWIFS
int "Max IDE interfaces"
depends on ALPHA || SUPERH || IA64 || EMBEDDED
+ range 1 10
default 4
help
This is the maximum number of IDE hardware interfaces that will
static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
/*
- * linux/drivers/ide/pci/hpt366.c Version 1.01 Dec 23, 2006
+ * linux/drivers/ide/pci/hpt366.c Version 1.02 Apr 18, 2007
*
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2006 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
*
* Thanks to HighPoint Technologies for their assistance, and hardware.
* Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
.chip_type = HPT302N,
.max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3,
.dpll_clk = 77,
+ .settings = hpt37x_settings
};
static struct hpt_info hpt371n __devinitdata = {
tristate "Gianfar Ethernet"
depends on 85xx || 83xx || PPC_86xx
select PHYLIB
+ select CRC32
help
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
- return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+ struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+ &(t->tid_tab[tid]) : NULL;
+
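+	/* entries parked on the TID-release list have client == NULL */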
+ return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
}
/*
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
+ p->client = NULL;
td->tid_release_list = p;
if (!p->ctx)
schedule_work(&td->tid_release_task);
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
- if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+ t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
t3c_tid->
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
}
}
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+ int gfp)
+{
+ if (likely(!skb_cloned(skb))) {
+ BUG_ON(skb->len < len);
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
union opcode_tid *p = cplhdr(skb);
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
struct cpl_abort_req_rss *req = cplhdr(skb);
struct cpl_abort_rpl *rpl;
+ struct sk_buff *reply_skb;
+ unsigned int tid = GET_TID(req);
+ u8 cmd = req->status;
+
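+	/* no reply is generated for negative-advice aborts; just drop them */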
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ goto out;
- struct sk_buff *skb =
- alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
- if (!skb) {
+ reply_skb = cxgb3_get_cpl_reply_skb(skb,
+ sizeof(struct cpl_abort_rpl),
+ GFP_ATOMIC);
+
+ if (!reply_skb) {
printk("do_abort_req_rss: couldn't get skb!\n");
goto out;
}
- skb->priority = CPL_PRIORITY_DATA;
- __skb_put(skb, sizeof(struct cpl_abort_rpl));
- rpl = cplhdr(skb);
+ reply_skb->priority = CPL_PRIORITY_DATA;
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ rpl = cplhdr(reply_skb);
rpl->wr.wr_hi =
htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
- OPCODE_TID(rpl) =
- htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
- rpl->cmd = req->status;
- cxgb3_ofld_send(dev, skb);
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+ cxgb3_ofld_send(dev, reply_skb);
out:
return CPL_RET_BUF_DONE;
}
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[opcode]) {
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
for (tid = 0; tid < ti->ntids; tid++) {
te = lookup_tid(ti, tid);
BUG_ON(!te);
- if (te->ctx && te->client && te->client->redirect) {
+ if (te && te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
l2t_hold(L2DATA(tdev), e);
*/
int t3_phy_intr_handler(struct adapter *adapter)
{
- static const int intr_gpio_bits[] = { 8, 0x20 };
-
+ u32 mask, gpi = adapter_info(adapter)->gpio_intr;
u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
for_each_port(adapter, i) {
- if (cause & intr_gpio_bits[i]) {
- struct cphy *phy = &adap2pinfo(adapter, i)->phy;
- int phy_cause = phy->ops->intr_handler(phy);
+ struct port_info *p = adap2pinfo(adapter, i);
+
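+		/* peel off the lowest set bit of gpi: this port's GPIO interrupt mask */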
+ mask = gpi - (gpi & (gpi - 1));
+ gpi -= mask;
+
+ if (!(p->port_type->caps & SUPPORTED_IRQ))
+ continue;
+
+ if (cause & mask) {
+ int phy_cause = p->phy.ops->intr_handler(&p->phy);
if (phy_cause & cphy_cause_link_change)
t3_link_changed(adapter, i);
if (phy_cause & cphy_cause_fifo_error)
- phy->fifo_errors++;
+ p->phy.fifo_errors++;
}
}
for (i = 0; i < E1000_MAX_INTR; i++)
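+		/* single & is deliberate: both cleaners must run on every pass */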
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
- e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
if (likely(adapter->itr_setting & 3))
for (i = 0; i < E1000_MAX_INTR; i++)
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
- e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
if (likely(adapter->itr_setting & 3))
poll_dev->quota -= work_done;
/* If no Tx and no Rx work done, exit the polling mode */
- if ((tx_cleaned && (work_done < work_to_do)) ||
+ if ((!tx_cleaned && (work_done == 0)) ||
!netif_running(poll_dev)) {
quit_polling:
if (likely(adapter->itr_setting & 3))
#ifdef CONFIG_E1000_NAPI
unsigned int count = 0;
#endif
- boolean_t cleaned = TRUE;
+ boolean_t cleaned = FALSE;
unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
#ifdef CONFIG_E1000_NAPI
#define E1000_TX_WEIGHT 64
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (count++ == E1000_TX_WEIGHT) {
- cleaned = FALSE;
- break;
- }
+ if (count++ == E1000_TX_WEIGHT)
+ break;
#endif
}
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.13"
+#define DRV_VERSION "1.14"
#define PFX DRV_NAME " "
/*
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+#ifdef broken
+ /* This device causes data corruption problems that are not resolved */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+#endif
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
- if (hw->dev[port]->mtu > ETH_DATA_LEN) {
- /* set Tx GMAC FIFO Almost Empty Threshold */
- sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
- /* Disable Store & Forward mode for TX */
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
- }
+
+ /* set Tx GMAC FIFO Almost Empty Threshold */
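+ /* (the jumbo watermark occupies the upper halfword) */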
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_ENA | TX_STFW_DIS);
+ else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_DIS | TX_STFW_ENA);
}
}
/* Set almost empty threshold */
if (hw->chip_id == CHIP_ID_YUKON_EC_U
&& hw->chip_rev == CHIP_REV_YU_EC_U_A0)
- sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
+ sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1);
sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
RB_RST_SET | RB_DIS_OP_MD);
- /* WA for dev. #4.209 */
- if (hw->chip_id == CHIP_ID_YUKON_EC_U
- && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- sky2->speed != SPEED_1000 ?
- TX_STFW_ENA : TX_STFW_DIS);
-
ctrl = gma_read16(hw, port, GM_GP_CTRL);
ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
gma_write16(hw, port, GM_GP_CTRL, ctrl);
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
int err;
u16 ctl, mode;
u32 imask;
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
- /* TSO on Yukon Ultra and MTU > 1500 not supported */
- if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
- dev->features &= ~NETIF_F_TSO;
+ if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE)
+ return -EINVAL;
if (!netif_running(dev)) {
dev->mtu = new_mtu;
synchronize_irq(hw->pdev->irq);
- ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
- gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
+ if (new_mtu > ETH_DATA_LEN) {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_ENA | TX_STFW_DIS);
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_DIS | TX_STFW_ENA);
+ }
+
+ ctl = gma_read16(hw, port, GM_GP_CTRL);
+ gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
sky2_rx_stop(sky2);
sky2_rx_clean(sky2);
if (dev->mtu > ETH_DATA_LEN)
mode |= GM_SMOD_JUMBO_ENA;
- gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
+ gma_write16(hw, port, GM_SERIAL_MODE, mode);
- sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
+ sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
err = sky2_rx_start(sky2);
sky2_write32(hw, B0_IMSK, imask);
if (err)
dev_close(dev);
else {
- gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
+ gma_write16(hw, port, GM_GP_CTRL, ctl);
netif_poll_enable(hw->dev[0]);
netif_wake_queue(dev);
}
}
-/* This should never happen it is a fatal situation */
-static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
- const char *rxtx, u32 mask)
+/* This should never happen; it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port,
+ u16 q, unsigned ring_size)
{
struct net_device *dev = hw->dev[port];
struct sky2_port *sky2 = netdev_priv(dev);
- u32 imask;
-
- printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
- dev ? dev->name : "<not registered>", rxtx);
+ unsigned idx;
+ const u64 *le = (q == Q_R1 || q == Q_R2)
+ ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~mask;
- sky2_write32(hw, B0_IMSK, imask);
+ idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+ printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
+ dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
+ (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
- if (dev) {
- spin_lock(&sky2->phy_lock);
- sky2_link_down(sky2);
- spin_unlock(&sky2->phy_lock);
- }
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
}
/* If idle then force a fake soft NAPI poll once a second
mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
}
-
-static int sky2_poll(struct net_device *dev0, int *budget)
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
{
- struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
- int work_limit = min(dev0->quota, *budget);
- int work_done = 0;
- u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
- if (status & Y2_IS_IRQ_PHY1)
- sky2_phy_intr(hw, 0);
-
- if (status & Y2_IS_IRQ_PHY2)
- sky2_phy_intr(hw, 1);
-
if (status & Y2_IS_IRQ_MAC1)
sky2_mac_intr(hw, 0);
sky2_mac_intr(hw, 1);
if (status & Y2_IS_CHK_RX1)
- sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+ sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
if (status & Y2_IS_CHK_RX2)
- sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+ sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
if (status & Y2_IS_CHK_TXA1)
- sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+ sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
if (status & Y2_IS_CHK_TXA2)
- sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
+ sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
+}
+
+static int sky2_poll(struct net_device *dev0, int *budget)
+{
+ struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
+ int work_limit = min(dev0->quota, *budget);
+ int work_done = 0;
+ u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+
+ if (unlikely(status & Y2_IS_ERROR))
+ sky2_err_intr(hw, status);
+
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
work_done = sky2_status_intr(hw, work_limit);
if (work_done < work_limit) {
int i;
/* disable ASF */
- if (hw->chip_id <= CHIP_ID_YUKON_EC) {
- if (hw->chip_id == CHIP_ID_YUKON_EX) {
- status = sky2_read16(hw, HCU_CCSR);
- status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
- HCU_CCSR_UC_STATE_MSK);
- sky2_write16(hw, HCU_CCSR, status);
- } else
- sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
- sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
- }
+ if (hw->chip_id == CHIP_ID_YUKON_EX) {
+ status = sky2_read16(hw, HCU_CCSR);
+ status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+ HCU_CCSR_UC_STATE_MSK);
+ sky2_write16(hw, HCU_CCSR, status);
+ } else
+ sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+ sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
/* do a SW reset */
sky2_write8(hw, B0_CTST, CS_RST_SET);
regs->len - B3_RI_WTO_R1);
}
+/* In order to do Jumbo packets on these chips, we need to turn off the
+ * transmit store/forward. Therefore checksum offload won't work.
+ */
+static int no_tx_offload(struct net_device *dev)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
+
+ return dev->mtu > ETH_DATA_LEN &&
+ (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_EC_U);
+}
+
+static int sky2_set_tx_csum(struct net_device *dev, u32 data)
+{
+ if (data && no_tx_offload(dev))
+ return -EINVAL;
+
+ return ethtool_op_set_tx_csum(dev, data);
+}
+
+static int sky2_set_tso(struct net_device *dev, u32 data)
+{
+ if (data && no_tx_offload(dev))
+ return -EINVAL;
+
+ return ethtool_op_set_tso(dev, data);
+}
+
static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
.set_settings = sky2_set_settings,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_tx_csum = sky2_set_tx_csum,
.get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
+ .set_tso = sky2_set_tso,
.get_rx_csum = sky2_get_rx_csum,
.set_rx_csum = sky2_set_rx_csum,
.get_strings = sky2_get_strings,
| Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
| Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+ Y2_IS_ERROR = Y2_IS_HW_ERR |
+ Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
+ Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
};
/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Threshold values for Yukon-EC Ultra and Extreme */
+ ECU_AE_THR = 0x0070, /* Almost Empty Threshold */
+ ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */
+ ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */
};
/* Descriptor Poll Timer Registers */
TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
+ TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+ TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode disable (Yukon-EC Ultra) */
+
GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
spin_unlock_irqrestore(&chain->lock, flags);
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL)
switch (skb->nh.iph->protocol) {
case IPPROTO_TCP:
hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
struct page *,
unsigned int, unsigned int);
-static void nfs_mark_request_dirty(struct nfs_page *req);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static int nfs_page_mark_flush(struct page *page)
{
struct nfs_page *req;
- spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ spinlock_t *req_lock = &nfsi->req_lock;
int ret;
spin_lock(req_lock);
return ret;
spin_lock(req_lock);
}
- spin_unlock(req_lock);
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ /* This request is marked for commit */
+ spin_unlock(req_lock);
+ nfs_unlock_request(req);
+ return 1;
+ }
if (nfs_set_page_writeback(page) == 0) {
nfs_list_remove_request(req);
- nfs_mark_request_dirty(req);
- }
+ /* add the request to the inode's dirty list. */
+ radix_tree_tag_set(&nfsi->nfs_page_tree,
+ req->wb_index, NFS_PAGE_TAG_DIRTY);
+ nfs_list_add_request(req, &nfsi->dirty);
+ nfsi->ndirty++;
+ spin_unlock(req_lock);
+ __mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES);
+ } else
+ spin_unlock(req_lock);
ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
nfs_unlock_request(req);
return ret;
}
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
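+	/* a page dirtied before this request must be re-dirtied when it goes away */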
+ if (PageDirty(req->wb_page))
+ set_bit(PG_NEED_FLUSH, &req->wb_flags);
nfsi->npages++;
atomic_inc(&req->wb_count);
return 0;
set_page_private(req->wb_page, 0);
ClearPagePrivate(req->wb_page);
radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+ if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+ __set_page_dirty_nobuffers(req->wb_page);
nfsi->npages--;
if (!nfsi->npages) {
spin_unlock(&nfsi->req_lock);
nfs_release_request(req);
}
-/*
- * Add a request to the inode's dirty list.
- */
-static void
-nfs_mark_request_dirty(struct nfs_page *req)
-{
- struct inode *inode = req->wb_context->dentry->d_inode;
- struct nfs_inode *nfsi = NFS_I(inode);
-
- spin_lock(&nfsi->req_lock);
- radix_tree_tag_set(&nfsi->nfs_page_tree,
- req->wb_index, NFS_PAGE_TAG_DIRTY);
- nfs_list_add_request(req, &nfsi->dirty);
- nfsi->ndirty++;
- spin_unlock(&nfsi->req_lock);
- __mark_inode_dirty(inode, I_DIRTY_PAGES);
-}
-
static void
nfs_redirty_request(struct nfs_page *req)
{
{
struct page *page = req->wb_page;
- if (page == NULL)
+ if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
return 0;
return !PageWriteback(req->wb_page);
}
spin_lock(&nfsi->req_lock);
nfs_list_add_request(req, &nfsi->commit);
nfsi->ncommit++;
+ set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
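+	/* the flush path leaves commit-pending requests alone */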
spin_unlock(&nfsi->req_lock);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return data->verf.committed != NFS_FILE_SYNC;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ nfs_mark_request_commit(req);
+ return 1;
+ }
+ if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ nfs_redirty_request(req);
+ return 1;
+ }
+ return 0;
+}
+#else
+static inline void
+nfs_mark_request_commit(struct nfs_page *req)
+{
+}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return 0;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+ return 0;
+}
#endif
/*
req = nfs_list_entry(head->next);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
nfs_list_remove_request(req);
+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
nfs_inode_remove_request(req);
nfs_unlock_request(req);
}
static void nfs_writepage_release(struct nfs_page *req)
{
- nfs_end_page_writeback(req->wb_page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (!PageError(req->wb_page)) {
- if (NFS_NEED_RESCHED(req)) {
- nfs_redirty_request(req);
- goto out;
- } else if (NFS_NEED_COMMIT(req)) {
- nfs_mark_request_commit(req);
- goto out;
- }
- }
- nfs_inode_remove_request(req);
-
-out:
- nfs_clear_commit(req);
- nfs_clear_reschedule(req);
-#else
- nfs_inode_remove_request(req);
-#endif
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
+ nfs_end_page_writeback(req->wb_page);
+ nfs_inode_remove_request(req);
+ } else
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
list_del(&data->pages);
nfs_writedata_release(data);
}
- nfs_end_page_writeback(req->wb_page);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
return -ENOMEM;
}
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_end_page_writeback(req->wb_page);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_end_page_writeback(req->wb_page);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
return error;
nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
dprintk(", error = %d\n", task->tk_status);
- } else {
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (data->verf.committed < NFS_FILE_SYNC) {
- if (!NFS_NEED_COMMIT(req)) {
- nfs_defer_commit(req);
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- dprintk(" defer commit\n");
- } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
- nfs_defer_reschedule(req);
- dprintk(" server reboot detected\n");
- }
- } else
-#endif
- dprintk(" OK\n");
+ goto out;
}
+ if (nfs_write_need_commit(data)) {
+ spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+ spin_lock(req_lock);
+ if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ /* Do nothing, we need to resend the writes */
+ } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ dprintk(" defer commit\n");
+ } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+ set_bit(PG_NEED_RESCHED, &req->wb_flags);
+ clear_bit(PG_NEED_COMMIT, &req->wb_flags);
+ dprintk(" server reboot detected\n");
+ }
+ spin_unlock(req_lock);
+ } else
+ dprintk(" OK\n");
+
+out:
if (atomic_dec_and_test(&req->wb_complete))
nfs_writepage_release(req);
}
if (task->tk_status < 0) {
nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
- nfs_end_page_writeback(page);
- nfs_inode_remove_request(req);
dprintk(", error = %d\n", task->tk_status);
- goto next;
+ goto remove_request;
}
- nfs_end_page_writeback(page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
- nfs_inode_remove_request(req);
- dprintk(" OK\n");
+ if (nfs_write_need_commit(data)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ nfs_mark_request_commit(req);
+ nfs_end_page_writeback(page);
+ dprintk(" marked for commit\n");
goto next;
}
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req);
- dprintk(" marked for commit\n");
-#else
+ dprintk(" OK\n");
+remove_request:
+ nfs_end_page_writeback(page);
nfs_inode_remove_request(req);
-#endif
next:
nfs_clear_page_writeback(req);
}
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
dprintk("NFS: commit (%s/%Ld %d@%Ld)",
int nfs_set_page_dirty(struct page *page)
{
+ spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
struct nfs_page *req;
+ int ret;
- req = nfs_page_find_request(page);
+ spin_lock(req_lock);
+ req = nfs_page_find_request_locked(page);
if (req != NULL) {
/* Mark any existing write requests for flushing */
- set_bit(PG_NEED_FLUSH, &req->wb_flags);
+ ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+ spin_unlock(req_lock);
nfs_release_request(req);
+ return ret;
}
- return __set_page_dirty_nobuffers(page);
+ ret = __set_page_dirty_nobuffers(page);
+ spin_unlock(req_lock);
+ return ret;
}
#define BUG_ON(condition) \
do { \
- __asm__ __volatile__("tne $0, %0" : : "r" (condition)); \
+ __asm__ __volatile__("tne $0, %0, %1" \
+ : : "r" (condition), "i" (BRK_BUG)); \
} while (0)
#define HAVE_ARCH_BUG_ON
#else
"r" (proto + len),
#endif
- "r" (sum));
+ "r" ((__force unsigned long)sum));
return sum;
}
/* We don't care about the c0 hazard here */ \
} while (0)
-#define __fpu_enabled() (read_c0_status() & ST0_CU1)
-
#define enable_fpu() \
do { \
if (cpu_has_fpu) \
set_thread_flag(TIF_USEDFPU);
}
-static inline void own_fpu(int restore)
+static inline void own_fpu_inatomic(int restore)
{
- preempt_disable();
if (cpu_has_fpu && !__is_fpu_owner()) {
__own_fpu();
if (restore)
_restore_fp(current);
}
+}
+
+static inline void own_fpu(int restore)
+{
+ preempt_disable();
+ own_fpu_inatomic(restore);
preempt_enable();
}
return tsk->thread.fpu.fpr;
}
-static inline void enable_fp_in_kernel(void)
-{
- set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
- /* make sure CU1 and FPU ownership are consistent */
- if (!__is_fpu_owner() && __fpu_enabled())
- __disable_fpu();
-}
-
-static inline void disable_fp_in_kernel(void)
-{
- BUG_ON(!__is_fpu_owner() && __fpu_enabled());
- clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
-}
-
#endif /* _ASM_FPU_H */
#define K_SYS_REVISION_BCM112x_A2 0x21
#define K_SYS_REVISION_BCM112x_A3 0x22
#define K_SYS_REVISION_BCM112x_A4 0x23
+#define K_SYS_REVISION_BCM112x_B0 0x30
#define K_SYS_REVISION_BCM1480_S0 0x01
#define K_SYS_REVISION_BCM1480_A1 0x02
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define TIF_FREEZE 19
-#define TIF_ALLOW_FP_IN_KERNEL 20
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-#define NFS_NEED_COMMIT(req) (test_bit(PG_NEED_COMMIT,&(req)->wb_flags))
-#define NFS_NEED_RESCHED(req) (test_bit(PG_NEED_RESCHED,&(req)->wb_flags))
extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
struct inode *inode,
req->wb_list_head = NULL;
}
-static inline int
-nfs_defer_commit(struct nfs_page *req)
-{
- return !test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags);
-}
-
-static inline void
-nfs_clear_commit(struct nfs_page *req)
-{
- smp_mb__before_clear_bit();
- clear_bit(PG_NEED_COMMIT, &req->wb_flags);
- smp_mb__after_clear_bit();
-}
-
-static inline int
-nfs_defer_reschedule(struct nfs_page *req)
-{
- return !test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags);
-}
-
-static inline void
-nfs_clear_reschedule(struct nfs_page *req)
-{
- smp_mb__before_clear_bit();
- clear_bit(PG_NEED_RESCHED, &req->wb_flags);
- smp_mb__after_clear_bit();
-}
-
static inline struct nfs_page *
nfs_list_entry(struct list_head *head)
{
rpc_delay(task, 3*HZ);
case -ETIMEDOUT:
task->tk_action = call_timeout;
+ if (task->tk_client->cl_discrtry)
+ xprt_disconnect(task->tk_xprt);
break;
case -ECONNREFUSED:
case -ENOTCONN:
out_retry:
req->rq_received = req->rq_private_buf.len = 0;
task->tk_status = 0;
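+	/* force a fresh transport connection before the retry when cl_discrtry is set */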
+ if (task->tk_client->cl_discrtry)
+ xprt_disconnect(task->tk_xprt);
}
/*
xprt_reset_majortimeo(req);
/* Turn off autodisconnect */
del_singleshot_timer_sync(&xprt->timer);
- } else {
- /* If all request bytes have been sent,
- * then we must be retransmitting this one */
- if (!req->rq_bytes_sent) {
- if (task->tk_client->cl_discrtry) {
- xprt_disconnect(xprt);
- task->tk_status = -ENOTCONN;
- return;
- }
- }
}
} else if (!req->rq_bytes_sent)
return;