struct ehci_qh  *qh = (struct ehci_qh *) urb->hcpriv;
 
                /* S-mask in a QH means it's an interrupt urb */
-               if ((qh->hw_info2 & __constant_cpu_to_le32 (0x00ff)) != 0) {
+               if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
 
                        /* ... update hc-wide periodic stats (for usbfs) */
                        ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
                        /* should be rare for periodic transfers,
                         * except maybe high bandwidth ...
                         */
-                       if (qh->period) {
+                       if ((__constant_cpu_to_le32 (QH_SMASK)
+                                       & qh->hw_info2) != 0) {
                                intr_deschedule (ehci, qh);
                                (void) qh_schedule (ehci, qh);
                        } else
 
 
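The hunk above switches the test from qh->period to the S-mask in hw_info2: a QH that is polled every microframe (high-bandwidth interrupt, period 0) carries a nonzero S-mask even though its period is zero, so it is still recognized as periodic. A minimal sketch of that predicate, assuming only the QH_SMASK definition added later in this patch (the helper name is invented for illustration):

	/* Sketch: a nonzero S-mask in hw_info2 means the HC runs this QH
	 * from the periodic schedule (interrupt transfers), including the
	 * high-bandwidth case where qh->period is 0.
	 */
	static inline int qh_is_interrupt(const struct ehci_qh *qh)
	{
		return (qh->hw_info2 & __constant_cpu_to_le32(QH_SMASK)) != 0;
	}
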
        dev_dbg (&qh->dev->dev,
                "link qh%d-%04x/%p start %d [%d/%d us]\n",
-               period, le32_to_cpup (&qh->hw_info2) & 0xffff,
+               period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);
 
        /* high bandwidth, or otherwise every microframe */
 
        dev_dbg (&qh->dev->dev,
                "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
-               qh->period, le32_to_cpup (&qh->hw_info2) & 0xffff,
+               qh->period,
+               le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
                qh, qh->start, qh->usecs, qh->c_usecs);
 
        /* qh->qh_next still "live" to HC */
         * active high speed queues may need bigger delays...
         */
        if (list_empty (&qh->qtd_list)
-                       || (__constant_cpu_to_le32 (0x0ff << 8)
+                       || (__constant_cpu_to_le32 (QH_CMASK)
                                        & qh->hw_info2) != 0)
                wait = 2;
        else
 
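For reference, the magic numbers retired by this patch all live in the scheduling half of hw_info2; the mapping, taken directly from the hunks above and below, is:

	/* hw_info2 magic values replaced by named masks in this patch:
	 *   0x00ff       -> QH_SMASK              interrupt schedule mask (S-mask)
	 *   0x0ff << 8   -> QH_CMASK              split-completion mask (C-mask)
	 *   0xffff       -> QH_CMASK | QH_SMASK   both scheduling masks
	 */
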
        /* reuse the previous schedule slots, if we can */
        if (frame < qh->period) {
-               uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
+               uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
                status = check_intr_schedule (ehci, frame, --uframe,
                                qh, &c_mask);
        } else {
                qh->start = frame;
 
                /* reset S-frame and (maybe) C-frame masks */
-               qh->hw_info2 &= __constant_cpu_to_le32 (~0xffff);
+               qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
                qh->hw_info2 |= qh->period
                        ? cpu_to_le32 (1 << uframe)
-                       : __constant_cpu_to_le32 (0xff);
+                       : __constant_cpu_to_le32 (QH_SMASK);
                qh->hw_info2 |= c_mask;
        } else
                ehci_dbg (ehci, "reused qh %p schedule\n", qh);
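The second qh_schedule hunk rebuilds the scheduling masks from scratch: both masks are cleared, then either a single S-mask bit is set for a whole-frame period, or all eight bits are set when the period is zero (poll every microframe), and finally the precomputed C-mask is ORed in. A standalone sketch of that composition, assuming the QH_SMASK/QH_CMASK definitions from this patch (the helper name is invented here):

	/* Sketch: recompute the S-mask and C-mask bits of hw_info2.
	 * uframe is the chosen start microframe (0..7) for a QH with a
	 * whole-frame period; c_mask already holds the Complete-Split bits
	 * (within QH_CMASK) for full/low speed endpoints, else 0.
	 */
	static void qh_refresh_sched_masks(struct ehci_qh *qh, unsigned uframe,
			__le32 c_mask)
	{
		/* drop the old masks, keep hub address/port and mult fields */
		qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));

		/* period 0 means every microframe, else one microframe per frame */
		qh->hw_info2 |= qh->period
			? cpu_to_le32(1 << uframe)
			: __constant_cpu_to_le32(QH_SMASK);

		/* add the Complete-Split schedule, already shifted into place */
		qh->hw_info2 |= c_mask;
	}
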
 
        __le32                  hw_info1;        /* see EHCI 3.6.2 */
 #define        QH_HEAD         0x00008000
        __le32                  hw_info2;        /* see EHCI 3.6.2 */
+#define        QH_SMASK        0x000000ff
+#define        QH_CMASK        0x0000ff00
+#define        QH_HUBADDR      0x007f0000
+#define        QH_HUBPORT      0x3f800000
+#define        QH_MULT         0xc0000000
        __le32                  hw_current;      /* qtd list - see EHCI 3.6.4 */
        
        /* qtd overlay (hardware parts of a struct ehci_qtd) */
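Together the new masks cover all of hw_info2 (EHCI 3.6.2): S-mask in bits 7:0, C-mask in bits 15:8, hub address in 22:16, hub port in 29:23 and the high-bandwidth multiplier in 31:30. A small sketch of decoding those fields for debugging, assuming only the definitions above (the helper and its output format are illustrative, not part of the patch):

	/* Sketch: dump the endpoint-capabilities fields of a QH.  hw_info2
	 * is little-endian in memory, so convert before masking.
	 */
	static void qh_dbg_info2(const struct ehci_qh *qh)
	{
		u32 info2 = le32_to_cpu(qh->hw_info2);

		printk(KERN_DEBUG "smask %02x cmask %02x hub %u port %u mult %u\n",
			info2 & QH_SMASK,
			(info2 & QH_CMASK) >> 8,
			(info2 & QH_HUBADDR) >> 16,
			(info2 & QH_HUBPORT) >> 23,
			(info2 & QH_MULT) >> 30);
	}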