kinfo->spi_piobufbase = (u64) pd->port_piobufs +
                        dd->ipath_palign * kinfo->spi_piocnt * slave;
        }
+
+       /*
+        * Clamp the PIO avail update threshold so it is no larger
+        * than the number of buffers per process.  Note that we
+        * only ever decrease it here; it is never increased.
+        */
+       if (dd->ipath_pioupd_thresh &&
+           kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
+               unsigned long flags;
+
+               dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
+               ipath_dbg("Decreased pio update threshold to %u\n",
+                       dd->ipath_pioupd_thresh);
+               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
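+               /*
+                * Replace the UPDTHRESH field in sendctrl: clear the
+                * old value, then OR in the new, smaller threshold.
+                */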
+               dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
+                       << INFINIPATH_S_UPDTHRESH_SHIFT);
+               dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+                       << INFINIPATH_S_UPDTHRESH_SHIFT;
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                       dd->ipath_sendctrl);
+               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+       }
+
        if (shared) {
                kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
                        dd->ipath_ureg_align * pd->port_port;
 
 {
        u32 rtmp;
        int i;
+       unsigned long flags;
 
        /*
         * ensure chip does no sends or receives, tail updates, or
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                dd->ipath_rcvctrl);
 
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl = 0U; /* no sdma, etc */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
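+       /* read scratch to flush the sendctrl write out to the chip */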
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
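+       /* clear the control register, leaving the chip in a known state */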
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
 
        rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
        if (rtmp != dd->ipath_rcvtidcnt)
        /* Enable PIO send, and update of PIOavail regs to memory. */
        dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
                INFINIPATH_S_PIOBUFAVAILUPD;
+
+       /*
+        * On chips that support it, set the threshold for PIO
+        * avail updates to host memory.
+        */
+       if (dd->ipath_pioupd_thresh)
+               dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+                       << INFINIPATH_S_UPDTHRESH_SHIFT;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
                   "each for %u user ports\n", kpiobufs,
                   piobufs, dd->ipath_pbufsport, uports);
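+       /*
+        * The update threshold must not exceed the number of PIO
+        * buffers given to any user port or to the kernel.
+        */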
+       if (dd->ipath_pioupd_thresh) {
+               if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
+                       dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
+               if (kpiobufs < dd->ipath_pioupd_thresh)
+                       dd->ipath_pioupd_thresh = kpiobufs;
+       }
 
        dd->ipath_f_early_init(dd);
        /*