blktrace: fix blk_probes_ref chaos
Author:     Li Zefan <lizf@cn.fujitsu.com>
AuthorDate: Fri, 27 Mar 2009 02:20:09 +0000 (10:20 +0800)
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 31 Mar 2009 15:27:45 +0000 (17:27 +0200)
Impact: fix blk_probes_ref refcount bugs when ioctl-based and ftrace-plugin blktrace are mixed

ioctl-based blktrace allocates bt and registers tracepoints on
ioctl(BLKTRACESETUP), and does all cleanups on ioctl(BLKTRACETEARDOWN).
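
Schematically, the ioctl path keeps the bt lifetime and the global
blk_probes_ref count paired in one place (a simplified sketch, not
verbatim blktrace.c code; the allocation and free logic is elided):

  /* ioctl(BLKTRACESETUP): allocate bt, then take a reference;
   * the first tracer registers the block tracepoints. */
  if (atomic_inc_return(&blk_probes_ref) == 1)
          blk_register_tracepoints();

  /* ioctl(BLKTRACETEARDOWN): free bt, then drop the reference;
   * the last tracer unregisters them. */
  if (atomic_dec_and_test(&blk_probes_ref))
          blk_unregister_tracepoints();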

Ftrace-based blktrace, in contrast, allocates/frees bt when:
  # echo 1/0 > /sys/block/sda/sda1/trace/enable

and registers/unregisters tracepoints when:
  # echo blk/nop > /debugfs/tracing/current_tracer
or
  # echo 1/0 > /debugfs/tracing/tracing_enabled

The separation of allocation and registration causes 2 problems (a small
user-space model of the resulting underflow follows the list):

  1. current user-space blktrace still calls ioctl(TEARDOWN) when
     ioctl(SETUP) failed:
       # echo 1 > /sys/block/sda/sda1/trace/enable
       # blktrace /dev/sda
         BLKTRACESETUP: Device or resource busy
         ^C
     and now blk_probes_ref == -1

  2. Another way to make blk_probes_ref == -1:
     # plug in sdb && mount sdb1
     # echo 1 > /sys/block/sdb/sdb1/trace/enable
     # remove sdb
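
To make the underflow concrete, here is a small, purely illustrative
user-space model of the pre-patch accounting (the names are hypothetical,
not the kernel's):

  #include <stdio.h>

  static int probes_ref;        /* stands in for blk_probes_ref */
  static void *queue_bt;        /* stands in for q->blk_trace   */

  /* echo 1 > .../trace/enable (pre-patch): allocates bt, takes no ref */
  static void sysfs_enable(void)
  {
          queue_bt = (void *)1;
  }

  /* ioctl(BLKTRACESETUP): fails if a trace is already installed */
  static int ioctl_setup(void)
  {
          if (queue_bt)
                  return -1;    /* "Device or resource busy" */
          queue_bt = (void *)1;
          probes_ref++;
          return 0;
  }

  /* ioctl(BLKTRACETEARDOWN): unconditionally drops a reference */
  static void ioctl_teardown(void)
  {
          queue_bt = NULL;
          probes_ref--;
  }

  int main(void)
  {
          sysfs_enable();           /* bt exists, probes_ref == 0     */
          if (ioctl_setup())        /* setup fails...                 */
                  ioctl_teardown(); /* ...blktrace tears down on ^C   */
          printf("probes_ref = %d\n", probes_ref);   /* prints -1 */
          return 0;
  }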

This patch does both the allocation and the registration when writing
sdaX/trace/enable, so the ftrace path pairs them the same way the ioctl
path does.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 5b28f0f119c53bbc9e46775786cf6923439d64d7..8d6bd12aab10a1d1deae03c07ccc7e88cffba4cc 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -478,7 +478,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                goto err;
        }
 
-       if (atomic_add_return(1, &blk_probes_ref) == 1)
+       if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
 
        return 0;
@@ -1091,8 +1091,6 @@ static void blk_tracer_print_header(struct seq_file *m)
 
 static void blk_tracer_start(struct trace_array *tr)
 {
-       if (atomic_add_return(1, &blk_probes_ref) == 1)
-               blk_register_tracepoints();
        trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
 }
 
@@ -1107,15 +1105,10 @@ static int blk_tracer_init(struct trace_array *tr)
 static void blk_tracer_stop(struct trace_array *tr)
 {
        trace_flags |= TRACE_ITER_CONTEXT_INFO;
-       if (atomic_dec_and_test(&blk_probes_ref))
-               blk_unregister_tracepoints();
 }
 
 static void blk_tracer_reset(struct trace_array *tr)
 {
-       if (!atomic_read(&blk_probes_ref))
-               return;
-
        blk_tracer_enabled = false;
        blk_tracer_stop(tr);
 }
@@ -1254,6 +1247,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
        if (bt == NULL)
                return -EINVAL;
 
+       if (atomic_dec_and_test(&blk_probes_ref))
+               blk_unregister_tracepoints();
+
        kfree(bt);
        return 0;
 }
@@ -1280,6 +1276,9 @@ static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
                return -EBUSY;
        }
 
+       if (atomic_inc_return(&blk_probes_ref) == 1)
+               blk_register_tracepoints();
+
        return 0;
 }