Merge branch 'linus' into core/rcu
[linux-2.6-omap-h63xx.git] / kernel / rcuclassic.c
index aad93cdc9f68656b95eb496003c593d5d03beed4..d4271146a9bd7497ebd21419f44e210629d8d346 100644 (file)
@@ -60,12 +60,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
+       .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
 };
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
+       .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
 };
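
The three counters work together: cur is the number of the current grace period, completed is the last one known to have finished, and the new pending field records the highest batch number any CPU has asked for, replacing the old next_pending flag. Starting all three at -300 makes the counters wrap early in the life of the system, so bugs in the wraparound handling surface quickly. Below is a rough user-space sketch of the scheme; the comparison helpers mirror rcu_batch_before()/rcu_batch_after() from include/linux/rcuclassic.h, and the ctrl_model/model_start_batch() names are hypothetical.

#include <assert.h>

/* Batch numbers compare by signed difference, so wraparound is harmless. */
static inline int rcu_batch_before(long a, long b) { return (a - b) < 0; }
static inline int rcu_batch_after(long a, long b)  { return (a - b) > 0; }

/* Model of the three counters and of rcu_start_batch() below: a new
 * grace period begins only when one is wanted (cur != pending) and the
 * previous one has finished (completed == cur). */
struct ctrl_model { long cur, completed, pending; };

static int model_start_batch(struct ctrl_model *c)
{
        if (c->cur != c->pending && c->completed == c->cur) {
                c->cur++;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct ctrl_model c = { .cur = -300, .completed = -300, .pending = -300 };

        assert(!model_start_batch(&c));         /* nothing requested yet */
        c.pending = c.cur + 1;                  /* a CPU queued a callback */
        assert(model_start_batch(&c));          /* grace period -299 starts */
        assert(!model_start_batch(&c));         /* -299 has not completed */
        assert(rcu_batch_after(c.cur, c.completed));
        return 0;
}
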
@@ -118,6 +120,43 @@ static inline void force_quiescent_state(struct rcu_data *rdp,
 }
 #endif
 
+static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
+               struct rcu_data *rdp)
+{
+       long batch;
+       smp_mb(); /* ensure we read the most recently updated value of rcp->cur. */
+
+       /*
+        * Determine the batch number of this callback.
+        *
+        * Use ACCESS_ONCE() to avoid the following error when gcc
+        * eliminates the local variable "batch" and emits code like:
+        *      1) rdp->batch = rcp->cur + 1 # gets old value
+        *      ......
+        *      2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
+        * then [*nxttail[0], *nxttail[1]) may contain callbacks whose
+        * batch # equals rdp->batch; see the comment in struct rcu_data.
+        */
+       batch = ACCESS_ONCE(rcp->cur) + 1;
+
+       if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
+               /* a new batch has started: advance the segment tails */
+               rdp->nxttail[0] = rdp->nxttail[1];
+               rdp->nxttail[1] = rdp->nxttail[2];
+               if (rcu_batch_after(batch - 1, rdp->batch))
+                       rdp->nxttail[0] = rdp->nxttail[2];
+       }
+
+       rdp->batch = batch;
+       *rdp->nxttail[2] = head;
+       rdp->nxttail[2] = &head->next;
+
+       if (unlikely(++rdp->qlen > qhimark)) {
+               rdp->blimit = INT_MAX;
+               force_quiescent_state(rdp, rcp);
+       }
+}
+
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -133,18 +172,11 @@ void call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
 {
        unsigned long flags;
-       struct rcu_data *rdp;
 
        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
-       rdp = &__get_cpu_var(rcu_data);
-       *rdp->nxttail = head;
-       rdp->nxttail = &head->next;
-       if (unlikely(++rdp->qlen > qhimark)) {
-               rdp->blimit = INT_MAX;
-               force_quiescent_state(rdp, &rcu_ctrlblk);
-       }
+       __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
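
For context, this patch does not change the call_rcu() interface; a typical caller still embeds the rcu_head in its own structure and frees it from the callback. The struct foo and foo_free() names below are hypothetical:

struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_free(struct rcu_head *head)
{
        struct foo *fp = container_of(head, struct foo, rcu);

        kfree(fp);
}

/* ... after fp has been unlinked from all RCU-protected structures: */
        call_rcu(&fp->rcu, foo_free);
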
@@ -169,20 +201,11 @@ void call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
 {
        unsigned long flags;
-       struct rcu_data *rdp;
 
        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
-       rdp = &__get_cpu_var(rcu_bh_data);
-       *rdp->nxttail = head;
-       rdp->nxttail = &head->next;
-
-       if (unlikely(++rdp->qlen > qhimark)) {
-               rdp->blimit = INT_MAX;
-               force_quiescent_state(rdp, &rcu_bh_ctrlblk);
-       }
-
+       __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
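
The heart of the rework is that each CPU now keeps a single queue, rdp->nxtlist, carved into three segments by the tail pointers nxttail[0..2], instead of the separate nxtlist/curlist pair. A rough user-space model of the queueing step in __call_rcu() follows; the seg_model/seg_enqueue() names are hypothetical and the batch comparisons are inlined as signed subtraction:

#include <stddef.h>

struct cb { struct cb *next; };

struct seg_model {
        struct cb *nxtlist;     /* single list of queued callbacks */
        struct cb **nxttail[3]; /* ends of the three batch segments */
        long batch;             /* batch # of the newest segment */
};

static void seg_init(struct seg_model *l)
{
        l->nxtlist = NULL;
        l->nxttail[0] = l->nxttail[1] = l->nxttail[2] = &l->nxtlist;
        l->batch = 0;
}

/* Mirrors __call_rcu(): if the batch number has moved on since the last
 * enqueue, the existing segments slide down before the new entry is added. */
static void seg_enqueue(struct seg_model *l, struct cb *head, long batch)
{
        head->next = NULL;
        if (l->nxtlist && batch - l->batch > 0) {
                l->nxttail[0] = l->nxttail[1];
                l->nxttail[1] = l->nxttail[2];
                if ((batch - 1) - l->batch > 0)
                        l->nxttail[0] = l->nxttail[2];
        }
        l->batch = batch;
        *l->nxttail[2] = head;
        l->nxttail[2] = &head->next;
}

After an enqueue, [nxtlist, *nxttail[0]) holds entries of batch - 2 and older (their grace period is already over), [*nxttail[0], *nxttail[1]) holds entries of batch - 1, and [*nxttail[1], *nxttail[2]) holds entries of the current batch.
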
@@ -211,12 +234,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 static inline void raise_rcu_softirq(void)
 {
        raise_softirq(RCU_SOFTIRQ);
-       /*
-        * The smp_mb() here is required to ensure that this cpu's
-        * __rcu_process_callbacks() reads the most recently updated
-        * value of rcu->cur.
-        */
-       smp_mb();
 }
 
 /*
@@ -276,14 +293,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 {
-       if (rcp->next_pending &&
+       if (rcp->cur != rcp->pending &&
                        rcp->completed == rcp->cur) {
-               rcp->next_pending = 0;
-               /*
-                * next_pending == 0 must be visible in
-                * __rcu_process_callbacks() before it can see new value of cur.
-                */
-               smp_wmb();
                rcp->cur++;
 
                /*
@@ -364,13 +375,15 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
  * which is dead and hence not processing interrupts.
  */
 static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
-                               struct rcu_head **tail)
+                               struct rcu_head **tail, long batch)
 {
-       local_irq_disable();
-       *this_rdp->nxttail = list;
-       if (list)
-               this_rdp->nxttail = tail;
-       local_irq_enable();
+       if (list) {
+               local_irq_disable();
+               this_rdp->batch = batch;
+               *this_rdp->nxttail[2] = list;
+               this_rdp->nxttail[2] = tail;
+               local_irq_enable();
+       }
 }
 
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
@@ -384,9 +397,9 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        spin_unlock_bh(&rcp->lock);
-       rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
-       rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
-       rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+       /* spin_lock implies smp_mb() */
+       rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
+       rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
 
        local_irq_disable();
        this_rdp->qlen += rdp->qlen;
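
Tagging every adopted callback with rcp->cur + 1 is deliberately conservative: even entries whose grace period had already completed on the dead CPU simply wait out one more. That is safe because a larger batch number can only delay, never hasten, processing. In the seg_model sketch above, adoption is a plain append to the newest segment (seg_adopt() is hypothetical):

/* Append a dead CPU's list to the newest segment, as rcu_move_batch() does. */
static void seg_adopt(struct seg_model *l, struct cb *list,
                        struct cb **tail, long batch)
{
        if (list) {
                l->batch = batch;       /* conservatively rcp->cur + 1 */
                *l->nxttail[2] = list;
                l->nxttail[2] = tail;
        }
}
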
@@ -420,37 +433,45 @@ static void rcu_offline_cpu(int cpu)
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
 {
-       if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
-               *rdp->donetail = rdp->curlist;
-               rdp->donetail = rdp->curtail;
-               rdp->curlist = NULL;
-               rdp->curtail = &rdp->curlist;
-       }
-
-       if (rdp->nxtlist && !rdp->curlist) {
+       if (rdp->nxtlist) {
                local_irq_disable();
-               rdp->curlist = rdp->nxtlist;
-               rdp->curtail = rdp->nxttail;
-               rdp->nxtlist = NULL;
-               rdp->nxttail = &rdp->nxtlist;
-               local_irq_enable();
 
                /*
-                * start the next batch of callbacks
+                * temporarily move the other grace-period-completed
+                * entries into [rdp->nxtlist, *rdp->nxttail[0])
                 */
+               if (!rcu_batch_before(rcp->completed, rdp->batch))
+                       rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
+               else if (!rcu_batch_before(rcp->completed, rdp->batch - 1))
+                       rdp->nxttail[0] = rdp->nxttail[1];
 
-               /* determine batch number */
-               rdp->batch = rcp->cur + 1;
-               /* see the comment and corresponding wmb() in
-                * the rcu_start_batch()
+               /*
+                * the grace period for the entries in
+                * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
+                * move these entries to the donelist
                 */
-               smp_rmb();
+               if (rdp->nxttail[0] != &rdp->nxtlist) {
+                       *rdp->donetail = rdp->nxtlist;
+                       rdp->donetail = rdp->nxttail[0];
+                       rdp->nxtlist = *rdp->nxttail[0];
+                       *rdp->donetail = NULL;
+
+                       if (rdp->nxttail[1] == rdp->nxttail[0])
+                               rdp->nxttail[1] = &rdp->nxtlist;
+                       if (rdp->nxttail[2] == rdp->nxttail[0])
+                               rdp->nxttail[2] = &rdp->nxtlist;
+                       rdp->nxttail[0] = &rdp->nxtlist;
+               }
+
+               local_irq_enable();
 
-               if (!rcp->next_pending) {
+               if (rcu_batch_after(rdp->batch, rcp->pending)) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rcp->lock);
-                       rcp->next_pending = 1;
-                       rcu_start_batch(rcp);
+                       if (rcu_batch_after(rdp->batch, rcp->pending)) {
+                               rcp->pending = rdp->batch;
+                               rcu_start_batch(rcp);
+                       }
                        spin_unlock(&rcp->lock);
                }
        }
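
Continuing the seg_model sketch, the splice performed here (promote whole segments whose grace period has ended, then detach [nxtlist, *nxttail[0]) for the donelist) can be modeled as follows; seg_reap() and its return convention are hypothetical:

/* Detach and return the entries whose grace period (batch <= completed)
 * is over, mirroring the bookkeeping in __rcu_process_callbacks(). */
static struct cb *seg_reap(struct seg_model *l, long completed)
{
        struct cb *done = NULL;

        if (!l->nxtlist)
                return NULL;

        /* promote whole segments whose grace period has ended */
        if (!(completed - l->batch < 0))                /* completed >= batch */
                l->nxttail[0] = l->nxttail[1] = l->nxttail[2];
        else if (!(completed - (l->batch - 1) < 0))     /* completed >= batch - 1 */
                l->nxttail[0] = l->nxttail[1];

        if (l->nxttail[0] != &l->nxtlist) {
                done = l->nxtlist;
                l->nxtlist = *l->nxttail[0];    /* new head: still-waiting entries */
                *l->nxttail[0] = NULL;          /* terminate the reaped sublist */

                if (l->nxttail[1] == l->nxttail[0])
                        l->nxttail[1] = &l->nxtlist;
                if (l->nxttail[2] == l->nxttail[0])
                        l->nxttail[2] = &l->nxtlist;
                l->nxttail[0] = &l->nxtlist;
        }
        return done;
}
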
@@ -468,15 +489,26 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 
 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
-       /* This cpu has pending rcu entries and the grace period
-        * for them has completed.
-        */
-       if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
-               return 1;
+       if (rdp->nxtlist) {
+               /*
+                * This cpu has pending rcu entries and the grace period
+                * for them has completed.
+                */
+               if (!rcu_batch_before(rcp->completed, rdp->batch))
+                       return 1;
+               if (!rcu_batch_before(rcp->completed, rdp->batch - 1) &&
+                               rdp->nxttail[0] != rdp->nxttail[1])
+                       return 1;
+               if (rdp->nxttail[0] != &rdp->nxtlist)
+                       return 1;
 
-       /* This cpu has no pending entries, but there are new entries */
-       if (!rdp->curlist && rdp->nxtlist)
-               return 1;
+               /*
+                * This cpu has pending rcu entries and the new batch
+                * for them has not yet been started or scheduled to start
+                */
+               if (rcu_batch_after(rdp->batch, rcp->pending))
+                       return 1;
+       }
 
        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
@@ -512,7 +544,7 @@ int rcu_needs_cpu(int cpu)
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
 
-       return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+       return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
 }
 
 void rcu_check_callbacks(int cpu, int user)
@@ -559,8 +591,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
 {
        memset(rdp, 0, sizeof(*rdp));
-       rdp->curtail = &rdp->curlist;
-       rdp->nxttail = &rdp->nxtlist;
+       rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;