[XFS] Add a debug flag for allocations which are known to be larger than
one page.
SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26800a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
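
A minimal usage sketch of the new flag (hypothetical caller and names; kmem_zalloc
and the KM_* flags are the ones shown in the patch below): an allocation that is
known to legitimately exceed one page is annotated with KM_LARGE, which keeps the
new DEBUG-only warning quiet, while unannotated multi-page allocations trigger the
printk/dump_stack check.

	/* Hypothetical caller: a table sized from a tunable may well exceed
	 * PAGE_SIZE, so it is explicitly marked with KM_LARGE. */
	table = kmem_zalloc(ntables * sizeof(*table),
			    KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	if (unlikely(table == NULL))
		return ENOMEM;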
        gfp_t   lflags = kmem_flags_convert(flags);
        void    *ptr;
 
+#ifdef DEBUG
+       if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
+               printk(KERN_WARNING "Large %s attempt, size=%ld\n",
+                       __FUNCTION__, (long)size);
+               dump_stack();
+       }
+#endif
+
        do {
                if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
                        ptr = kmalloc(size, lflags);
 
 #define KM_NOSLEEP     0x0002u
 #define KM_NOFS                0x0004u
 #define KM_MAYFAIL     0x0008u
+#define KM_LARGE       0x0010u
 
 /*
  * We use a special process flag to avoid recursive callbacks into
 {
        gfp_t   lflags;
 
-       BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+       BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
 
        if (flags & KM_NOSLEEP) {
                lflags = GFP_ATOMIC | __GFP_NOWARN;
 
        _xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
-       data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+       data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
        if (unlikely(data == NULL))
                goto fail_free_buf;
 
 
 {
        xfs_dqhash_t    *udqhash, *gdqhash;
        xfs_qm_t        *xqm;
-       uint            i, hsize, flags = KM_SLEEP | KM_MAYFAIL;
+       uint            i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
        /*
         * Initialize the dquot hash tables.
         */
        hsize = XFS_QM_HASHSIZE_HIGH;
-       while (!(udqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), flags))) {
+       while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
                if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
                        flags = KM_SLEEP;
        }
-       gdqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), KM_SLEEP);
+       gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
        ndquot = hsize << 8;
 
        xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
 
                                                            sleep);
        } else {
                ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
-                                                           sleep);
+                                                           sleep | KM_LARGE);
        }
 
        if (ktep == NULL) {
 
 xfs_ihash_init(xfs_mount_t *mp)
 {
        __uint64_t      icount;
-       uint            i, flags = KM_SLEEP | KM_MAYFAIL;
+       uint            i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
        if (!mp->m_ihsize) {
                icount = mp->m_maxicount ? mp->m_maxicount :
        mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
        mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
                                                 * sizeof(xfs_chash_t),
-                                                KM_SLEEP);
+                                                KM_SLEEP | KM_LARGE);
        for (i = 0; i < mp->m_chsize; i++) {
                spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
        }
 
                          kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
                iclog = *iclogp;
                iclog->hic_data = (xlog_in_core_2_t *)
-                         kmem_zalloc(iclogsize, KM_SLEEP);
+                         kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE);
 
                iclog->ic_prev = prev_iclog;
                prev_iclog = iclog;