/*
 * ip_vs_proto_udp.c:   UDP load balancing support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/udp.h>

#include <net/ip_vs.h>
#include <net/ip.h>

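/*
 *      Look up the connection entry that matches an incoming packet
 *      (client -> virtual service direction).  Addresses come from the
 *      IP header and the UDP ports are read at proto_off; with "inverse"
 *      set, the tuple is swapped so the same helper can also be used for
 *      reply-direction lookups.
 */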
static struct ip_vs_conn *
udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
                const struct iphdr *iph, unsigned int proto_off, int inverse)
{
        struct ip_vs_conn *cp;
        __be16 _ports[2], *pptr;

        pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
        if (pptr == NULL)
                return NULL;

        if (likely(!inverse)) {
                cp = ip_vs_conn_in_get(iph->protocol,
                                       iph->saddr, pptr[0],
                                       iph->daddr, pptr[1]);
        } else {
                cp = ip_vs_conn_in_get(iph->protocol,
                                       iph->daddr, pptr[1],
                                       iph->saddr, pptr[0]);
        }

        return cp;
}


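/*
 *      Same lookup for the outgoing (real server -> client) direction,
 *      against the table of connections created by the NAT forwarder.
 */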
static struct ip_vs_conn *
udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
                 const struct iphdr *iph, unsigned int proto_off, int inverse)
{
        struct ip_vs_conn *cp;
        __be16 _ports[2], *pptr;

        pptr = skb_header_pointer(skb, ip_hdrlen(skb),
                                  sizeof(_ports), _ports);
        if (pptr == NULL)
                return NULL;

        if (likely(!inverse)) {
                cp = ip_vs_conn_out_get(iph->protocol,
                                        iph->saddr, pptr[0],
                                        iph->daddr, pptr[1]);
        } else {
                cp = ip_vs_conn_out_get(iph->protocol,
                                        iph->daddr, pptr[1],
                                        iph->saddr, pptr[0]);
        }

        return cp;
}


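/*
 *      Decide whether a new connection should be scheduled.  Returns 1
 *      to let the packet continue through IPVS, or 0 with *verdict set
 *      (NF_DROP, or whatever ip_vs_leave() decides) when a final
 *      decision has already been made here.
 */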
static int
udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
                  int *verdict, struct ip_vs_conn **cpp)
{
        struct ip_vs_service *svc;
        struct udphdr _udph, *uh;

        uh = skb_header_pointer(skb, ip_hdrlen(skb),
                                sizeof(_udph), &_udph);
        if (uh == NULL) {
                *verdict = NF_DROP;
                return 0;
        }

        if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
                                     ip_hdr(skb)->daddr, uh->dest))) {
                if (ip_vs_todrop()) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
                         */
                        ip_vs_service_put(svc);
                        *verdict = NF_DROP;
                        return 0;
                }

                /*
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
                *cpp = ip_vs_schedule(svc, skb);
                if (!*cpp) {
                        *verdict = ip_vs_leave(svc, skb, pp);
                        return 0;
                }
                ip_vs_service_put(svc);
        }
        return 1;
}


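/*
 *      Incrementally patch the UDP checksum when only an address and a
 *      port have been rewritten, instead of summing the whole datagram
 *      again: fold out the old address/port and fold in the new ones
 *      (the RFC 1624 style update done by ip_vs_check_diff4/_diff2).
 *      A result of 0 must be sent as CSUM_MANGLED_0 (0xffff), since a
 *      zero checksum field means "no checksum" for UDP over IPv4.
 */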
static inline void
udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip,
                     __be16 oldport, __be16 newport)
{
        uhdr->check =
                csum_fold(ip_vs_check_diff4(oldip, newip,
                                 ip_vs_check_diff2(oldport, newport,
                                        ~csum_unfold(uhdr->check))));
        if (!uhdr->check)
                uhdr->check = CSUM_MANGLED_0;
}

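/*
 *      Source NAT for replies leaving the real server: rewrite the
 *      source port back to the virtual service port and fix up the UDP
 *      checksum (fast update when no application helper touched the
 *      payload, full recomputation otherwise).
 */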
static int
udp_snat_handler(struct sk_buff *skb,
                 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
        struct udphdr *udph;
        const unsigned int udphoff = ip_hdrlen(skb);

        /* csum_check requires unshared skb */
        if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
                return 0;

        if (unlikely(cp->app != NULL)) {
                /* Some checks before mangling */
                if (pp->csum_check && !pp->csum_check(skb, pp))
                        return 0;

                /*
                 *      Call application helper if needed
                 */
                if (!ip_vs_app_pkt_out(cp, skb))
                        return 0;
        }

        udph = (void *)ip_hdr(skb) + udphoff;
        udph->source = cp->vport;

        /*
         *      Adjust UDP checksums
         */
        if (!cp->app && (udph->check != 0)) {
                /* Only port and addr are changed, do fast csum update */
                udp_fast_csum_update(udph, cp->daddr, cp->vaddr,
                                     cp->dport, cp->vport);
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->ip_summed = CHECKSUM_NONE;
        } else {
                /* full checksum calculation */
                udph->check = 0;
                skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
                udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr,
                                                skb->len - udphoff,
                                                cp->protocol, skb->csum);
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;
                IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
                          pp->name, udph->check,
                          (char*)&(udph->check) - (char*)udph);
        }
        return 1;
}


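/*
 *      Destination NAT for packets heading to the chosen real server:
 *      rewrite the destination port and adjust the checksum, calling the
 *      application helper (if any) first so it can mangle the payload
 *      before the checksum is recomputed.
 */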
static int
udp_dnat_handler(struct sk_buff *skb,
                 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
        struct udphdr *udph;
        unsigned int udphoff = ip_hdrlen(skb);

        /* csum_check requires unshared skb */
        if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
                return 0;

        if (unlikely(cp->app != NULL)) {
                /* Some checks before mangling */
                if (pp->csum_check && !pp->csum_check(skb, pp))
                        return 0;

                /*
                 *      Attempt ip_vs_app call.
                 *      It will fix ip_vs_conn
                 */
                if (!ip_vs_app_pkt_in(cp, skb))
                        return 0;
        }

        udph = (void *)ip_hdr(skb) + udphoff;
        udph->dest = cp->dport;

        /*
         *      Adjust UDP checksums
         */
        if (!cp->app && (udph->check != 0)) {
                /* Only port and addr are changed, do fast csum update */
                udp_fast_csum_update(udph, cp->vaddr, cp->daddr,
                                     cp->vport, cp->dport);
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->ip_summed = CHECKSUM_NONE;
        } else {
                /* full checksum calculation */
                udph->check = 0;
                skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
                udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr,
                                                skb->len - udphoff,
                                                cp->protocol, skb->csum);
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        return 1;
}


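/*
 *      Verify the UDP checksum of a packet.  A zero checksum field means
 *      the sender did not compute one, which is legal for UDP over IPv4,
 *      so such packets are accepted as-is.
 */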
static int
udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
{
        struct udphdr _udph, *uh;
        const unsigned int udphoff = ip_hdrlen(skb);

        uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
        if (uh == NULL)
                return 0;

        if (uh->check != 0) {
                switch (skb->ip_summed) {
                case CHECKSUM_NONE:
                        skb->csum = skb_checksum(skb, udphoff,
                                                 skb->len - udphoff, 0);
                        /* fall through to verify the computed sum */
                case CHECKSUM_COMPLETE:
                        if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                              ip_hdr(skb)->daddr,
                                              skb->len - udphoff,
                                              ip_hdr(skb)->protocol,
                                              skb->csum)) {
                                IP_VS_DBG_RL_PKT(0, pp, skb, 0,
                                                 "Failed checksum for");
                                return 0;
                        }
                        break;
                default:
                        /* No need to checksum. */
                        break;
                }
        }
        return 1;
}


/*
 *      Note: the caller guarantees that only one of register_app,
 *      unregister_app or app_conn_bind is called each time.
 */

#define UDP_APP_TAB_BITS        4
#define UDP_APP_TAB_SIZE        (1 << UDP_APP_TAB_BITS)
#define UDP_APP_TAB_MASK        (UDP_APP_TAB_SIZE - 1)

static struct list_head udp_apps[UDP_APP_TAB_SIZE];
static DEFINE_SPINLOCK(udp_app_lock);

static inline __u16 udp_app_hashkey(__be16 port)
{
        return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
                & UDP_APP_TAB_MASK;
}


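/*
 *      Register an application helper incarnation for its UDP port in
 *      the small hash table above; returns -EEXIST if that port already
 *      has a helper bound to it.
 */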
static int udp_register_app(struct ip_vs_app *inc)
{
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;

        hash = udp_app_hashkey(port);

        spin_lock_bh(&udp_app_lock);
        list_for_each_entry(i, &udp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
        list_add(&inc->p_list, &udp_apps[hash]);
        atomic_inc(&ip_vs_protocol_udp.appcnt);

  out:
        spin_unlock_bh(&udp_app_lock);
        return ret;
}


static void
udp_unregister_app(struct ip_vs_app *inc)
{
        spin_lock_bh(&udp_app_lock);
        atomic_dec(&ip_vs_protocol_udp.appcnt);
        list_del(&inc->p_list);
        spin_unlock_bh(&udp_app_lock);
}


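/*
 *      Bind a freshly created NAT connection to the application helper
 *      registered for its virtual port, if there is one, and let the
 *      helper initialize its per-connection state.
 */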
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
        int hash;
        struct ip_vs_app *inc;
        int result = 0;

        /* Default binding: bind app only for NAT */
        if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
                return 0;

        /* Lookup application incarnations and bind the right one */
        hash = udp_app_hashkey(cp->vport);

        spin_lock(&udp_app_lock);
        list_for_each_entry(inc, &udp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
                        spin_unlock(&udp_app_lock);

                        IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
                                  "%u.%u.%u.%u:%u to app %s on port %u\n",
                                  __func__,
                                  NIPQUAD(cp->caddr), ntohs(cp->cport),
                                  NIPQUAD(cp->vaddr), ntohs(cp->vport),
                                  inc->name, ntohs(inc->port));
                        cp->app = inc;
                        if (inc->init_conn)
                                result = inc->init_conn(inc, cp);
                        goto out;
                }
        }
        spin_unlock(&udp_app_lock);

  out:
        return result;
}


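/*
 *      UDP is connectionless, so IPVS tracks only a single NORMAL state
 *      per connection entry; the table below gives its idle timeout
 *      (5 minutes by default, tunable via set_state_timeout).
 */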
static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
        [IP_VS_UDP_S_NORMAL]            =       5*60*HZ,
        [IP_VS_UDP_S_LAST]              =       2*HZ,
};

static char * udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
        [IP_VS_UDP_S_NORMAL]            =       "UDP",
        [IP_VS_UDP_S_LAST]              =       "BUG!",
};


static int
udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
{
        return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
                                       udp_state_name_table, sname, to);
}

static const char * udp_state_name(int state)
{
        if (state >= IP_VS_UDP_S_LAST)
                return "ERR!";
        return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
}

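/*
 *      There is no real state machine for UDP: every packet in either
 *      direction simply refreshes the NORMAL timeout.
 */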
static int
udp_state_transition(struct ip_vs_conn *cp, int direction,
                     const struct sk_buff *skb,
                     struct ip_vs_protocol *pp)
{
        cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
        return 1;
}

static void udp_init(struct ip_vs_protocol *pp)
{
        IP_VS_INIT_HASH_TABLE(udp_apps);
        pp->timeout_table = udp_timeouts;
}

static void udp_exit(struct ip_vs_protocol *pp)
{
}


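/*
 *      The protocol handler table that ties the callbacks above together.
 *      The IPVS core registers it at start-up (in this tree presumably
 *      from ip_vs_protocol_init() in ip_vs_proto.c) and then dispatches
 *      to these hooks for every UDP packet it handles.
 */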
struct ip_vs_protocol ip_vs_protocol_udp = {
        .name =                 "UDP",
        .protocol =             IPPROTO_UDP,
        .num_states =           IP_VS_UDP_S_LAST,
        .dont_defrag =          0,
        .init =                 udp_init,
        .exit =                 udp_exit,
        .conn_schedule =        udp_conn_schedule,
        .conn_in_get =          udp_conn_in_get,
        .conn_out_get =         udp_conn_out_get,
        .snat_handler =         udp_snat_handler,
        .dnat_handler =         udp_dnat_handler,
        .csum_check =           udp_csum_check,
        .state_transition =     udp_state_transition,
        .state_name =           udp_state_name,
        .register_app =         udp_register_app,
        .unregister_app =       udp_unregister_app,
        .app_conn_bind =        udp_app_conn_bind,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       NULL,
        .set_state_timeout =    udp_set_state_timeout,
};