return 0;
 }
 
+#ifdef DEBUG
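+/*
+ * Check that (k1, k2) are in order: bnobt keys sort by startblock,
+ * cntbt keys sort by blockcount and break ties on startblock.
+ */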
+STATIC int
+xfs_allocbt_keys_inorder(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *k1,
+       union xfs_btree_key     *k2)
+{
+       if (cur->bc_btnum == XFS_BTNUM_BNO) {
+               return be32_to_cpu(k1->alloc.ar_startblock) <
+                      be32_to_cpu(k2->alloc.ar_startblock);
+       } else {
+               return be32_to_cpu(k1->alloc.ar_blockcount) <
+                       be32_to_cpu(k2->alloc.ar_blockcount) ||
+                       (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
+                        be32_to_cpu(k1->alloc.ar_startblock) <
+                        be32_to_cpu(k2->alloc.ar_startblock));
+       }
+}
+
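+/*
+ * Check that (r1, r2) are in order: bnobt records must not overlap,
+ * so r1 must end at or before r2 starts; cntbt records sort by
+ * blockcount and break ties on startblock.
+ */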
+STATIC int
+xfs_allocbt_recs_inorder(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_rec     *r1,
+       union xfs_btree_rec     *r2)
+{
+       if (cur->bc_btnum == XFS_BTNUM_BNO) {
+               return be32_to_cpu(r1->alloc.ar_startblock) +
+                       be32_to_cpu(r1->alloc.ar_blockcount) <=
+                       be32_to_cpu(r2->alloc.ar_startblock);
+       } else {
+               return be32_to_cpu(r1->alloc.ar_blockcount) <
+                       be32_to_cpu(r2->alloc.ar_blockcount) ||
+                       (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
+                        be32_to_cpu(r1->alloc.ar_startblock) <
+                        be32_to_cpu(r2->alloc.ar_startblock));
+       }
+}
+#endif /* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t       *xfs_allocbt_trace_buf;
 
        .init_ptr_from_cur      = xfs_allocbt_init_ptr_from_cur,
        .key_diff               = xfs_allocbt_key_diff,
 
+#ifdef DEBUG
+       .keys_inorder           = xfs_allocbt_keys_inorder,
+       .recs_inorder           = xfs_allocbt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
        .trace_enter            = xfs_allocbt_trace_enter,
        .trace_cursor           = xfs_allocbt_trace_cursor,
 
                }
 
                if (prevp) {
-                       xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
+                       ASSERT(be64_to_cpu(prevp->br_startoff) <
+                              be64_to_cpu(keyp->br_startoff));
                }
                prevp = keyp;
 
 
                ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
                if (i) {
-                       xfs_btree_check_rec(XFS_BTNUM_BMAP, &last, ep);
+                       ASSERT(xfs_bmbt_disk_get_startoff(&last) +
+                              xfs_bmbt_disk_get_blockcount(&last) <=
+                              xfs_bmbt_disk_get_startoff(ep));
                }
                for (j = 1; j < num_recs; j++) {
                        nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
-                       xfs_btree_check_rec(XFS_BTNUM_BMAP, ep, nextp);
+                       ASSERT(xfs_bmbt_disk_get_startoff(ep) +
+                              xfs_bmbt_disk_get_blockcount(ep) <=
+                              xfs_bmbt_disk_get_startoff(nextp));
                        ep = nextp;
                }
 
 
                                      cur->bc_rec.b.br_startoff;
 }
 
+#ifdef DEBUG
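+/*
+ * Check that bmap btree keys are in order by file offset.
+ */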
+STATIC int
+xfs_bmbt_keys_inorder(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *k1,
+       union xfs_btree_key     *k2)
+{
+       return be64_to_cpu(k1->bmbt.br_startoff) <
+               be64_to_cpu(k2->bmbt.br_startoff);
+}
+
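+/*
+ * Check that bmap btree records do not overlap: r1 must end at or
+ * before r2 starts.
+ */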
+STATIC int
+xfs_bmbt_recs_inorder(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_rec     *r1,
+       union xfs_btree_rec     *r2)
+{
+       return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
+               xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
+               xfs_bmbt_disk_get_startoff(&r2->bmbt);
+}
+#endif /* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t       *xfs_bmbt_trace_buf;
 
        .init_ptr_from_cur      = xfs_bmbt_init_ptr_from_cur,
        .key_diff               = xfs_bmbt_key_diff,
 
+#ifdef DEBUG
+       .keys_inorder           = xfs_bmbt_keys_inorder,
+       .recs_inorder           = xfs_bmbt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
        .trace_enter            = xfs_bmbt_trace_enter,
        .trace_cursor           = xfs_bmbt_trace_cursor,
 
        XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
 };
 
-/*
- * External routines.
- */
-
-#ifdef DEBUG
-/*
- * Debug routine: check that keys are in the right order.
- */
-void
-xfs_btree_check_key(
-       xfs_btnum_t     btnum,          /* btree identifier */
-       void            *ak1,           /* pointer to left (lower) key */
-       void            *ak2)           /* pointer to right (higher) key */
-{
-       switch (btnum) {
-       case XFS_BTNUM_BNO: {
-               xfs_alloc_key_t *k1;
-               xfs_alloc_key_t *k2;
-
-               k1 = ak1;
-               k2 = ak2;
-               ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock));
-               break;
-           }
-       case XFS_BTNUM_CNT: {
-               xfs_alloc_key_t *k1;
-               xfs_alloc_key_t *k2;
-
-               k1 = ak1;
-               k2 = ak2;
-               ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) ||
-                      (k1->ar_blockcount == k2->ar_blockcount &&
-                       be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)));
-               break;
-           }
-       case XFS_BTNUM_BMAP: {
-               xfs_bmbt_key_t  *k1;
-               xfs_bmbt_key_t  *k2;
-
-               k1 = ak1;
-               k2 = ak2;
-               ASSERT(be64_to_cpu(k1->br_startoff) < be64_to_cpu(k2->br_startoff));
-               break;
-           }
-       case XFS_BTNUM_INO: {
-               xfs_inobt_key_t *k1;
-               xfs_inobt_key_t *k2;
-
-               k1 = ak1;
-               k2 = ak2;
-               ASSERT(be32_to_cpu(k1->ir_startino) < be32_to_cpu(k2->ir_startino));
-               break;
-           }
-       default:
-               ASSERT(0);
-       }
-}
-
-/*
- * Debug routine: check that records are in the right order.
- */
-void
-xfs_btree_check_rec(
-       xfs_btnum_t     btnum,          /* btree identifier */
-       void            *ar1,           /* pointer to left (lower) record */
-       void            *ar2)           /* pointer to right (higher) record */
-{
-       switch (btnum) {
-       case XFS_BTNUM_BNO: {
-               xfs_alloc_rec_t *r1;
-               xfs_alloc_rec_t *r2;
-
-               r1 = ar1;
-               r2 = ar2;
-               ASSERT(be32_to_cpu(r1->ar_startblock) +
-                      be32_to_cpu(r1->ar_blockcount) <=
-                      be32_to_cpu(r2->ar_startblock));
-               break;
-           }
-       case XFS_BTNUM_CNT: {
-               xfs_alloc_rec_t *r1;
-               xfs_alloc_rec_t *r2;
-
-               r1 = ar1;
-               r2 = ar2;
-               ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) ||
-                      (r1->ar_blockcount == r2->ar_blockcount &&
-                       be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock)));
-               break;
-           }
-       case XFS_BTNUM_BMAP: {
-               xfs_bmbt_rec_t  *r1;
-               xfs_bmbt_rec_t  *r2;
-
-               r1 = ar1;
-               r2 = ar2;
-               ASSERT(xfs_bmbt_disk_get_startoff(r1) +
-                      xfs_bmbt_disk_get_blockcount(r1) <=
-                      xfs_bmbt_disk_get_startoff(r2));
-               break;
-           }
-       case XFS_BTNUM_INO: {
-               xfs_inobt_rec_t *r1;
-               xfs_inobt_rec_t *r2;
-
-               r1 = ar1;
-               r2 = ar2;
-               ASSERT(be32_to_cpu(r1->ir_startino) + XFS_INODES_PER_CHUNK <=
-                      be32_to_cpu(r2->ir_startino));
-               break;
-           }
-       default:
-               ASSERT(0);
-       }
-}
-#endif /* DEBUG */
 
 int                                    /* error (0 or EFSCORRUPTED) */
 xfs_btree_check_lblock(
                xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
                xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
 
-               xfs_btree_check_key(cur->bc_btnum,
-                                   xfs_btree_key_addr(cur, lrecs - 1, left),
-                                   lkp);
+               ASSERT(cur->bc_ops->keys_inorder(cur,
+                       xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
        } else {
                /* It's a leaf.  Move records.  */
                union xfs_btree_rec     *lrp;   /* left record pointer */
                xfs_btree_copy_recs(cur, lrp, rrp, 1);
                xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
 
-               xfs_btree_check_rec(cur->bc_btnum,
-                                   xfs_btree_rec_addr(cur, lrecs - 1, left),
-                                   lrp);
+               ASSERT(cur->bc_ops->recs_inorder(cur,
+                       xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
        }
 
        xfs_btree_set_numrecs(left, lrecs);
                xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
                xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
 
-               xfs_btree_check_key(cur->bc_btnum, rkp,
-                                   xfs_btree_key_addr(cur, 2, right));
+               ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
+                       xfs_btree_key_addr(cur, 2, right)));
        } else {
                /* It's a leaf.  Make a hole in the records.  */
                union xfs_btree_rec     *lrp;
                cur->bc_ops->init_key_from_rec(&key, rrp);
                rkp = &key;
 
-               xfs_btree_check_rec(cur->bc_btnum, rrp,
-                                   xfs_btree_rec_addr(cur, 2, right));
+               ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
+                       xfs_btree_rec_addr(cur, 2, right)));
        }
 
        /*
        /* Check that the new entry is being inserted in the right place. */
        if (ptr <= numrecs) {
                if (level == 0) {
-                       xfs_btree_check_rec(cur->bc_btnum, recp,
-                                       xfs_btree_rec_addr(cur, ptr, block));
+                       ASSERT(cur->bc_ops->recs_inorder(cur, recp,
+                               xfs_btree_rec_addr(cur, ptr, block)));
                } else {
-                       xfs_btree_check_key(cur->bc_btnum, &key,
-                                       xfs_btree_key_addr(cur, ptr, block));
+                       ASSERT(cur->bc_ops->keys_inorder(cur, &key,
+                               xfs_btree_key_addr(cur, ptr, block)));
                }
        }
 #endif
                xfs_btree_log_keys(cur, bp, ptr, numrecs);
 #ifdef DEBUG
                if (ptr < numrecs) {
-                       xfs_btree_check_key(cur->bc_btnum, kp,
-                               xfs_btree_key_addr(cur, ptr + 1, block));
+                       ASSERT(cur->bc_ops->keys_inorder(cur, kp,
+                               xfs_btree_key_addr(cur, ptr + 1, block)));
                }
 #endif
        } else {
                xfs_btree_log_recs(cur, bp, ptr, numrecs);
 #ifdef DEBUG
                if (ptr < numrecs) {
-                       xfs_btree_check_rec(cur->bc_btnum, rp,
-                               xfs_btree_rec_addr(cur, ptr + 1, block));
+                       ASSERT(cur->bc_ops->recs_inorder(cur, rp,
+                               xfs_btree_rec_addr(cur, ptr + 1, block)));
                }
 #endif
        }
 
        __int64_t (*key_diff)(struct xfs_btree_cur *cur,
                              union xfs_btree_key *key);
 
+#ifdef DEBUG
+       /* check that k1 is lower than k2 */
+       int     (*keys_inorder)(struct xfs_btree_cur *cur,
+                               union xfs_btree_key *k1,
+                               union xfs_btree_key *k2);
+
+       /* check that r1 is lower than r2 */
+       int     (*recs_inorder)(struct xfs_btree_cur *cur,
+                               union xfs_btree_rec *r1,
+                               union xfs_btree_rec *r2);
+#endif
+
        /* btree tracing */
 #ifdef XFS_BTREE_TRACE
        void            (*trace_enter)(struct xfs_btree_cur *, const char *,
        int                     index,  /* offset from ptr to check */
        int                     level); /* btree block level */
 
-#ifdef DEBUG
-
-/*
- * Debug routine: check that keys are in the right order.
- */
-void
-xfs_btree_check_key(
-       xfs_btnum_t             btnum,  /* btree identifier */
-       void                    *ak1,   /* pointer to left (lower) key */
-       void                    *ak2);  /* pointer to right (higher) key */
-
-/*
- * Debug routine: check that records are in the right order.
- */
-void
-xfs_btree_check_rec(
-       xfs_btnum_t             btnum,  /* btree identifier */
-       void                    *ar1,   /* pointer to left (lower) record */
-       void                    *ar2);  /* pointer to right (higher) record */
-#else
-#define        xfs_btree_check_key(a, b, c)
-#define        xfs_btree_check_rec(a, b, c)
-#endif /* DEBUG */
-
 /*
  * Delete the btree cursor.
  */
 
        return 0;
 }
 
+#ifdef DEBUG
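+/*
+ * Check that inobt keys are in order by starting inode number.
+ */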
+STATIC int
+xfs_inobt_keys_inorder(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_key     *k1,
+       union xfs_btree_key     *k2)
+{
+       return be32_to_cpu(k1->inobt.ir_startino) <
+               be32_to_cpu(k2->inobt.ir_startino);
+}
+
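+/*
+ * Check that inobt records are in order: each record covers
+ * XFS_INODES_PER_CHUNK inodes, so r1's chunk must end at or before
+ * r2's starting inode.
+ */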
+STATIC int
+xfs_inobt_recs_inorder(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_rec     *r1,
+       union xfs_btree_rec     *r2)
+{
+       return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
+               be32_to_cpu(r2->inobt.ir_startino);
+}
+#endif /* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t       *xfs_inobt_trace_buf;
 
        .init_ptr_from_cur      = xfs_inobt_init_ptr_from_cur,
        .key_diff               = xfs_inobt_key_diff,
 
+#ifdef DEBUG
+       .keys_inorder           = xfs_inobt_keys_inorder,
+       .recs_inorder           = xfs_inobt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
        .trace_enter            = xfs_inobt_trace_enter,
        .trace_cursor           = xfs_inobt_trace_cursor,