X-Git-Url: http://www.pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=net%2Funix%2Fgarbage.c;h=6d4a9a8de5ef1d8974a4f8fe17956dfd31b63fe6;hb=6209344f5a3795d34b7f2c0061f49802283b6bdd;hp=406b6433e467b4b4cbe697d24ec03f4cfa33e22e;hpb=f79e3185dd0f8650022518d7624c876d8929061b;p=linux-2.6-omap-h63xx.git

diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 406b6433e46..6d4a9a8de5e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -92,7 +92,7 @@
 static LIST_HEAD(gc_inflight_list);
 static LIST_HEAD(gc_candidates);
 static DEFINE_SPINLOCK(unix_gc_lock);
 
-atomic_t unix_tot_inflight = ATOMIC_INIT(0);
+unsigned int unix_tot_inflight;
 
 static struct sock *unix_get_socket(struct file *filp)
@@ -127,13 +127,13 @@ void unix_inflight(struct file *fp)
 	if(s) {
 		struct unix_sock *u = unix_sk(s);
 		spin_lock(&unix_gc_lock);
-		if (atomic_inc_return(&u->inflight) == 1) {
+		if (atomic_long_inc_return(&u->inflight) == 1) {
 			BUG_ON(!list_empty(&u->link));
 			list_add_tail(&u->link, &gc_inflight_list);
 		} else {
 			BUG_ON(list_empty(&u->link));
 		}
-		atomic_inc(&unix_tot_inflight);
+		unix_tot_inflight++;
 		spin_unlock(&unix_gc_lock);
 	}
 }
@@ -145,9 +145,9 @@ void unix_notinflight(struct file *fp)
 		struct unix_sock *u = unix_sk(s);
 		spin_lock(&unix_gc_lock);
 		BUG_ON(list_empty(&u->link));
-		if (atomic_dec_and_test(&u->inflight))
+		if (atomic_long_dec_and_test(&u->inflight))
 			list_del_init(&u->link);
-		atomic_dec(&unix_tot_inflight);
+		unix_tot_inflight--;
 		spin_unlock(&unix_gc_lock);
 	}
 }
@@ -161,7 +161,7 @@ static inline struct sk_buff *sock_queue_head(struct sock *sk)
 	for (skb = sock_queue_head(sk)->next, next = skb->next; \
 	     skb != sock_queue_head(sk); skb = next, next = skb->next)
 
-static void scan_inflight(struct sock *x, void (*func)(struct sock *),
+static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 			  struct sk_buff_head *hitlist)
 {
 	struct sk_buff *skb;
@@ -185,9 +185,18 @@ static void scan_inflight(struct sock *x, void (*func)(struct sock *),
 				 *	if it indeed does so
 				 */
 				struct sock *sk = unix_get_socket(*fp++);
-				if(sk) {
-					hit = true;
-					func(sk);
+				if (sk) {
+					struct unix_sock *u = unix_sk(sk);
+
+					/*
+					 * Ignore non-candidates, they could
+					 * have been added to the queues after
+					 * starting the garbage collection
+					 */
+					if (u->gc_candidate) {
+						hit = true;
+						func(u);
+					}
 				}
 			}
 			if (hit && hitlist != NULL) {
@@ -199,7 +208,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct sock *),
 	spin_unlock(&x->sk_receive_queue.lock);
 }
 
-static void scan_children(struct sock *x, void (*func)(struct sock *),
+static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
 			  struct sk_buff_head *hitlist)
 {
 	if (x->sk_state != TCP_LISTEN)
@@ -235,27 +244,25 @@ static void scan_children(struct sock *x, void (*func)(struct sock *),
 	}
 }
 
-static void dec_inflight(struct sock *sk)
+static void dec_inflight(struct unix_sock *usk)
 {
-	atomic_dec(&unix_sk(sk)->inflight);
+	atomic_long_dec(&usk->inflight);
 }
 
-static void inc_inflight(struct sock *sk)
+static void inc_inflight(struct unix_sock *usk)
 {
-	atomic_inc(&unix_sk(sk)->inflight);
+	atomic_long_inc(&usk->inflight);
 }
 
-static void inc_inflight_move_tail(struct sock *sk)
+static void inc_inflight_move_tail(struct unix_sock *u)
 {
-	struct unix_sock *u = unix_sk(sk);
-
-	atomic_inc(&u->inflight);
+	atomic_long_inc(&u->inflight);
 	/*
-	 * If this is still a candidate, move it to the end of the
-	 * list, so that it's checked even if it was already passed
-	 * over
+	 * If this still might be part of a cycle, move it to the end
+	 * of the list, so that it's checked even if it was already
+	 * passed over
 	 */
-	if (u->gc_candidate)
+	if (u->gc_maybe_cycle)
 		list_move_tail(&u->link, &gc_candidates);
 }
 
@@ -269,6 +276,7 @@ void unix_gc(void)
 	struct unix_sock *next;
 	struct sk_buff_head hitlist;
 	struct list_head cursor;
+	LIST_HEAD(not_cycle_list);
 
 	spin_lock(&unix_gc_lock);
 
@@ -284,23 +292,28 @@ void unix_gc(void)
 	 *
 	 * Holding unix_gc_lock will protect these candidates from
 	 * being detached, and hence from gaining an external
-	 * reference. This also means, that since there are no
-	 * possible receivers, the receive queues of these sockets are
-	 * static during the GC, even though the dequeue is done
-	 * before the detach without atomicity guarantees.
+	 * reference. Since there are no possible receivers, all
+	 * buffers currently on the candidates' queues stay there
+	 * during the garbage collection.
+	 *
+	 * We also know that no new candidate can be added onto the
+	 * receive queues. Other, non candidate sockets _can_ be
+	 * added to queue, so we must make sure only to touch
+	 * candidates.
 	 */
 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
-		int total_refs;
-		int inflight_refs;
+		long total_refs;
+		long inflight_refs;
 
 		total_refs = file_count(u->sk.sk_socket->file);
-		inflight_refs = atomic_read(&u->inflight);
+		inflight_refs = atomic_long_read(&u->inflight);
 
 		BUG_ON(inflight_refs < 1);
 		BUG_ON(total_refs < inflight_refs);
 		if (total_refs == inflight_refs) {
 			list_move_tail(&u->link, &gc_candidates);
 			u->gc_candidate = 1;
+			u->gc_maybe_cycle = 1;
 		}
 	}
 
@@ -326,14 +339,24 @@ void unix_gc(void)
 		/* Move cursor to after the current position. */
 		list_move(&cursor, &u->link);
 
-		if (atomic_read(&u->inflight) > 0) {
-			list_move_tail(&u->link, &gc_inflight_list);
-			u->gc_candidate = 0;
+		if (atomic_long_read(&u->inflight) > 0) {
+			list_move_tail(&u->link, &not_cycle_list);
+			u->gc_maybe_cycle = 0;
 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
 		}
 	}
 	list_del(&cursor);
 
+	/*
+	 * not_cycle_list contains those sockets which do not make up a
+	 * cycle. Restore these to the inflight list.
+	 */
+	while (!list_empty(&not_cycle_list)) {
+		u = list_entry(not_cycle_list.next, struct unix_sock, link);
+		u->gc_candidate = 0;
+		list_move_tail(&u->link, &gc_inflight_list);
+	}
+
 	/*
 	 * Now gc_candidates contains only garbage. Restore original
 	 * inflight counters for these as well, and remove the skbuffs
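
The counting scheme this patch refines can be seen in miniature below. The invariant from unix_gc() above: a socket whose file reference count equals its in-flight count is referenced only by SCM_RIGHTS messages, so it becomes a GC candidate; after discounting references that candidates hold on other candidates, anything left with a non-zero in-flight count is externally reachable. The following user-space sketch models that idea on a toy graph. It is not kernel code: the node layout, the field names, and the single queued_ref edge per node are invented for illustration, and the iterative restore pass (the not_cycle_list / gc_maybe_cycle machinery added above) is omitted for brevity.

/* Hypothetical user-space model of the unix_gc() counting scheme;
 * not kernel code.  Each node stands for one AF_UNIX socket. */
#include <stdbool.h>
#include <stdio.h>

struct node {
	long total_refs; /* models file_count(u->sk.sk_socket->file) */
	long inflight;   /* models atomic_long_read(&u->inflight)    */
	int queued_ref;  /* node referenced by an skb sitting in this
			  * node's receive queue, -1 if none         */
	bool candidate;  /* models u->gc_candidate                   */
};

int main(void)
{
	/* Nodes 0 and 1 have passed each other over SCM_RIGHTS and are
	 * otherwise closed (a dead cycle); node 2 is in flight but
	 * still open in some process, so it has an extra file ref. */
	struct node n[3] = {
		{ .total_refs = 1, .inflight = 1, .queued_ref = 1  },
		{ .total_refs = 1, .inflight = 1, .queued_ref = 0  },
		{ .total_refs = 2, .inflight = 1, .queued_ref = -1 },
	};
	int i;

	/* Step 1: a socket whose references are all in-flight ones has
	 * no external reference -- the total_refs == inflight_refs
	 * test in unix_gc(). */
	for (i = 0; i < 3; i++)
		n[i].candidate = (n[i].total_refs == n[i].inflight);

	/* Step 2: discount in-flight references that candidates hold
	 * on other candidates (the dec_inflight walk).  Whatever
	 * remains must come from a non-candidate, i.e. from something
	 * externally reachable. */
	for (i = 0; i < 3; i++)
		if (n[i].candidate && n[i].queued_ref >= 0 &&
		    n[n[i].queued_ref].candidate)
			n[n[i].queued_ref].inflight--;

	/* Step 3: candidates left at zero are unreachable cycles. */
	for (i = 0; i < 3; i++)
		printf("node %d: %s\n", i,
		       n[i].candidate && n[i].inflight == 0
		       ? "garbage cycle" : "reachable");
	return 0;
}

Running the sketch reports nodes 0 and 1 as a garbage cycle and node 2 as reachable. The real collector must additionally restore candidates found reachable this way (and re-increment counts along their queues), which is exactly what the not_cycle_list and gc_maybe_cycle additions in this patch keep track of.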