}
/**
- * skb_recv_datagram - Receive a datagram skbuff
+ * __skb_recv_datagram - Receive a datagram skbuff
* @sk: socket
* @flags: MSG_ flags
- * @noblock: blocking operation?
+ * @peeked: returns non-zero if this packet has been seen before
* @err: error code returned
*
* Get a datagram skbuff, understands the peeking, nonblocking wakeups
* and possible races.
*
* The order of the tests when we find no data waiting is specified
* quite explicitly by POSIX 1003.1g; don't change them without having
* the standard around please.
*/
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
- int noblock, int *err)
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *err)
{
struct sk_buff *skb;
long timeo;
if (error)
goto no_packet;
- timeo = sock_rcvtimeo(sk, noblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
/* Again only user level code calls this function, so nothing
* interrupt level will suddenly eat the receive_queue.
*
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
- if (flags & MSG_PEEK) {
- unsigned long cpu_flags;
-
- spin_lock_irqsave(&sk->sk_receive_queue.lock,
- cpu_flags);
- skb = skb_peek(&sk->sk_receive_queue);
- if (skb)
+ unsigned long cpu_flags;
+
+ spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb) {
+ *peeked = skb->peeked;
+ if (flags & MSG_PEEK) {
+ skb->peeked = 1;
atomic_inc(&skb->users);
- spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
- cpu_flags);
- } else
- skb = skb_dequeue(&sk->sk_receive_queue);
+ } else
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ }
+ spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
if (skb)
return skb;
*err = error;
return NULL;
}
+EXPORT_SYMBOL(__skb_recv_datagram);
+
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+ int noblock, int *err)
+{
+ int peeked;
+
+ return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+ &peeked, err);
+}
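
Under the new scheme skb->peeked marks a packet that has already been handed out at least once via MSG_PEEK, and *peeked reports that to the caller. A minimal caller sketch, assuming a hypothetical per-socket counter helper, showing the intended pattern of counting a datagram only on its first sighting:

/* Sketch only: count each datagram once across repeated MSG_PEEK calls.
 * example_count_datagram() is hypothetical, not part of this patch.
 */
static struct sk_buff *example_recv_one(struct sock *sk, unsigned flags,
					int *err)
{
	int peeked;
	struct sk_buff *skb;

	skb = __skb_recv_datagram(sk, flags, &peeked, err);
	if (skb && !peeked)
		example_count_datagram(sk);	/* first time seen */
	return skb;
}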
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
kfree_skb(skb);
+ sk_mem_reclaim(sk);
}
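
The added sk_mem_reclaim() ties the free path into per-socket memory accounting, returning the receive-buffer charge held by the skb once it is gone. The usual receive/free pairing is unchanged; a minimal sketch:

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;
	/* ... copy the payload to user space ... */
	skb_free_datagram(sk, skb);	/* frees the skb and reclaims the charge */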
/**
* This function currently only disables BH when acquiring the
* sk_receive_queue lock. Therefore it must not be used in a
* context where that lock is acquired in an IRQ context.
+ *
+ * It returns 0 if the packet was removed by us.
*/
-void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
+ int err = 0;
+
if (flags & MSG_PEEK) {
+ err = -ENOENT;
spin_lock_bh(&sk->sk_receive_queue.lock);
if (skb == skb_peek(&sk->sk_receive_queue)) {
__skb_unlink(skb, &sk->sk_receive_queue);
atomic_dec(&skb->users);
+ err = 0;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
}
kfree_skb(skb);
+ sk_mem_reclaim(sk);
+ return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
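
The new return value tells a caller that found a bad packet under MSG_PEEK whether it was the one to actually unlink the skb: 0 means we removed it, -ENOENT means another thread already took it off the queue. A hedged sketch of the intended use; the counter helper is illustrative only:

	/* Only the thread that actually removed the packet accounts the
	 * error; otherwise concurrent peekers would count it repeatedly.
	 */
	if (skb_kill_datagram(sk, skb, flags) == 0)
		example_csum_error_inc(sk);	/* hypothetical counter */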
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
struct iovec *to, int len)
{
- int end = skb_headlen(skb);
- int i, copy = end - offset;
+ int start = skb_headlen(skb);
+ int i, copy = start - offset;
/* Copy header. */
if (copy > 0) {
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- BUG_TRAP(len >= 0);
+ int end;
- end = offset + skb_shinfo(skb)->frags[i].size;
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
if (copy > len)
copy = len;
vaddr = kmap(page);
- err = memcpy_toiovec(to, vaddr + frag->page_offset,
- copy);
+ err = memcpy_toiovec(to, vaddr + frag->page_offset +
+ offset - start, copy);
kunmap(page);
if (err)
goto fault;
if (!(len -= copy))
	return 0;
offset += copy;
}
+ start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- BUG_TRAP(len >= 0);
+ int end;
+
+ WARN_ON(start > offset + len);
- end = offset + list->len;
+ end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_datagram_iovec(list, 0, to, copy))
+ if (skb_copy_datagram_iovec(list,
+ offset - start,
+ to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
}
+ start = end;
}
}
if (!len)
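
Throughout this walker, start is the linear offset of the current region (linear header, page frag, or frag_list member) within the skb; offset - start is therefore the byte position inside the region, and end = start + region size is where the next region begins. A self-contained user-space sketch of the same pattern, purely illustrative:

#include <string.h>

struct seg { const char *data; int size; };

/* Copy [offset, offset + len) out of consecutive segments, the way the
 * skb walker maps a linear range onto head, frags and frag_list.
 */
static int copy_range(const struct seg *segs, int nsegs, int offset,
		      char *to, int len)
{
	int i, start = 0;

	for (i = 0; i < nsegs; i++) {
		int end = start + segs[i].size;
		int copy = end - offset;

		if (copy > 0) {
			if (copy > len)
				copy = len;
			memcpy(to, segs[i].data + offset - start, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	return -1;	/* range ran past the last segment */
}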
u8 __user *to, int len,
__wsum *csump)
{
- int end = skb_headlen(skb);
+ int start = skb_headlen(skb);
int pos = 0;
- int i, copy = end - offset;
+ int i, copy = start - offset;
/* Copy header. */
if (copy > 0) {
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- BUG_TRAP(len >= 0);
+ int end;
- end = offset + skb_shinfo(skb)->frags[i].size;
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
__wsum csum2;
int err = 0;
copy = len;
vaddr = kmap(page);
csum2 = csum_and_copy_to_user(vaddr +
- frag->page_offset,
+ frag->page_offset +
+ offset - start,
to, copy, 0, &err);
kunmap(page);
if (err)
to += copy;
pos += copy;
}
+ start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list=list->next) {
- BUG_TRAP(len >= 0);
+ int end;
+
+ WARN_ON(start > offset + len);
- end = offset + list->len;
+ end = start + list->len;
if ((copy = end - offset) > 0) {
__wsum csum2 = 0;
if (copy > len)
copy = len;
- if (skb_copy_and_csum_datagram(list, 0,
+ if (skb_copy_and_csum_datagram(list,
+ offset - start,
to, copy,
&csum2))
goto fault;
to += copy;
pos += copy;
}
+ start = end;
}
}
if (!len)
__wsum csum;
int chunk = skb->len - hlen;
+ if (!chunk)
+ return 0;
+
/* Skip filled elements.
* Pretty silly, look at memcpy_toiovec, though 8)
*/
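
Two notes on this final hunk: a datagram whose remaining payload (chunk) is zero is reported as success immediately, since there is nothing left to copy or checksum and flagging an empty packet as a checksum error would be wrong. "Filled elements" are iovec entries already consumed by an earlier copy; because memcpy_toiovec() decrements iov_len as it fills an entry, skipping them amounts to stepping past zero-length entries, roughly (variable name assumed):

	while (!iov->iov_len)
		iov++;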