static int rq_promote(struct gfs2_holder *gh)
 {
        struct gfs2_glock *gl = gh->gh_gl;
-       struct gfs2_sbd *sdp = gl->gl_sbd;
 
        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
-
-                       if (atomic_read(&sdp->sd_reclaim_count) >
-                           gfs2_tune_get(sdp, gt_reclaim_limit) &&
-                           !(gh->gh_flags & LM_FLAG_PRIORITY)) {
-                               gfs2_reclaim_glock(sdp);
-                               gfs2_reclaim_glock(sdp);
-                       }
-
                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
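For reference, a sketch of the throttle removed above, built only from the deleted lines (the wrapper name is hypothetical; in the original this ran inline after gl_spin was dropped): a non-priority lock requester that found the reclaim list over the reclaim_limit tunable had to reclaim two glocks itself before its own request was sent out.

/* Sketch of the removed throttle; reclaim_throttle_example() is a
 * hypothetical name, the body is copied from the lines deleted above. */
static void reclaim_throttle_example(struct gfs2_sbd *sdp,
				     struct gfs2_holder *gh)
{
	/* If too many glocks sit on the reclaim list, make this
	 * (non-priority) requester do a share of the reclaim work
	 * before issuing its own lock request. */
	if (atomic_read(&sdp->sd_reclaim_count) >
	    gfs2_tune_get(sdp, gt_reclaim_limit) &&
	    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
		gfs2_reclaim_glock(sdp);
		gfs2_reclaim_glock(sdp);
	}
}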
 
        unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
        unsigned int gt_stall_secs; /* Detects trouble! */
        unsigned int gt_complain_secs;
-       unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */
        unsigned int gt_statfs_quantum;
        unsigned int gt_statfs_slow;
 };
 
        gt->gt_max_readahead = 1 << 18;
        gt->gt_stall_secs = 600;
        gt->gt_complain_secs = 10;
-       gt->gt_reclaim_limit = 5000;
        gt->gt_statfs_quantum = 30;
        gt->gt_statfs_slow = 0;
 }
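The 5000 default deleted above is the value gfs2_tune_get(sdp, gt_reclaim_limit) returned in the check removed from rq_promote(). As a rough sketch of what such a tunable read amounts to (hypothetical names; the lock member is an assumption, not taken from this patch):

/* Illustration only: a spinlock-protected read of one tunable field.
 * The struct, lock and helper names here are hypothetical. */
struct example_tune {
	spinlock_t lock;		/* assumed guard for the tunables */
	unsigned int reclaim_limit;	/* defaulted to 5000 before this patch */
};

static unsigned int example_tune_get_reclaim_limit(struct example_tune *gt)
{
	unsigned int v;

	spin_lock(&gt->lock);
	v = gt->reclaim_limit;
	spin_unlock(&gt->lock);
	return v;
}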
 
 TUNE_ATTR(atime_quantum, 0);
 TUNE_ATTR(max_readahead, 0);
 TUNE_ATTR(complain_secs, 0);
-TUNE_ATTR(reclaim_limit, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
 TUNE_ATTR(new_files_directio, 0);
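Dropping the tunable takes two matching edits here: the TUNE_ATTR(reclaim_limit, 0) line above and the &tune_attr_reclaim_limit.attr pointer in the array below, since TUNE_ATTR(name, ...) defines the tune_attr_<name> object that the array collects for sysfs registration. A simplified, hypothetical illustration of that pattern (plain kobj_attribute sysfs API, not GFS2's exact macro):

/* Illustration only: one tunable exposed as a read/write sysfs file
 * and gathered into a NULL-terminated attribute array, mirroring the
 * TUNE_ATTR(name, ...) / &tune_attr_<name>.attr pairing above and
 * below.  All "example_" names are hypothetical. */
static unsigned int example_limit = 5000;

static ssize_t example_limit_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", example_limit);
}

static ssize_t example_limit_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t len)
{
	unsigned int v;

	if (kstrtouint(buf, 0, &v))
		return -EINVAL;
	example_limit = v;
	return len;
}

static struct kobj_attribute tune_attr_example_limit =
	__ATTR(example_limit, 0644, example_limit_show, example_limit_store);

static struct attribute *example_tune_attrs[] = {
	&tune_attr_example_limit.attr,
	NULL,				/* sysfs requires NULL termination */
};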
        &tune_attr_atime_quantum.attr,
        &tune_attr_max_readahead.attr,
        &tune_attr_complain_secs.attr,
-       &tune_attr_reclaim_limit.attr,
        &tune_attr_statfs_slow.attr,
        &tune_attr_quota_simul_sync.attr,
        &tune_attr_quota_cache_secs.attr,