ia64/linux-2.6.18-xen.hg

view net/sunrpc/svcsock.c @ 871:9cbcc9008446

xen/x86: don't initialize cpu_data[]'s apicid field on generic code

Afaict, this is not only redundant with the initialization done in
drivers/xen/core/smpboot.c, but actually results - at least for
secondary CPUs - in the Xen-specific value written to be later
overwritten with whatever the generic code determines (with no
guarantee that the two values are identical).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 14 10:09:15 2009 +0100 (2009-05-14)
parents 3e8752eb6d9c
line source
/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
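
/*
 * For reference, a sketch of the standard RPC record marking the text
 * above alludes to (cf. RFC 1831): every record on a TCP stream is
 * preceded by a 4-byte big-endian word whose top bit flags the last
 * fragment and whose low 31 bits give the fragment length:
 *
 *      marker   = htonl(0x80000000 | frag_len);        (send side)
 *      last     = ntohl(marker) & 0x80000000;          (receive side)
 *      frag_len = ntohl(marker) & 0x7fffffff;
 *
 * svc_tcp_recvfrom() and svc_tcp_sendto() below rely on exactly this
 * layout.
 */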

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 *      svc_serv->sv_lock protects most stuff for that service.
 *
 *      Some flags can be set to certain values at any time
 *      providing that certain rules are followed:
 *
 *      SK_BUSY  can be set to 0 at any time.
 *              svc_sock_enqueue must be called afterwards
 *      SK_CONN, SK_DATA, can be set or cleared at any time.
 *              after a set, svc_sock_enqueue must be called.
 *              after a clear, the socket must be read/accepted;
 *              if this succeeds, it must be set again.
 *      SK_CLOSE can be set at any time. It is never cleared.
 *
 */
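
/*
 * A sketch of those rules from a receive path's point of view (the real
 * sequencing lives in svc_sock_received() and the *_recvfrom routines
 * below; try_to_read() here is only a hypothetical stand-in for the
 * actual read/accept step):
 *
 *      clear_bit(SK_DATA, &svsk->sk_flags);
 *      if (try_to_read(svsk) succeeded)
 *              set_bit(SK_DATA, &svsk->sk_flags);  (there may be more)
 *      clear_bit(SK_BUSY, &svsk->sk_flags);
 *      svc_sock_enqueue(svsk);
 */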

#define RPCDBG_FACILITY RPCDBG_SVCSOCK

static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
                                         int *errp, int pmap_reg);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/*
 * Queue up an idle server thread.  Must have serv->sv_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_add(&rqstp->rq_list, &serv->sv_threads);
}
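
/*
 * Note: list_add() inserts at the head of sv_threads, and
 * svc_sock_enqueue() below picks sv_threads.next, so the most recently
 * idled thread is reused first -- the stack behaviour described above.
 */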

/*
 * Dequeue an nfsd thread.  Must have serv->sv_lock held.
 */
static inline void
svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
        struct sk_buff *skb = rqstp->rq_skbuff;
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        if (skb) {
                rqstp->rq_skbuff = NULL;

                dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
                skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
        }
        if (dr) {
                rqstp->rq_deferred = NULL;
                kfree(dr);
        }
}

/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
        int wspace;

        if (svsk->sk_sock->type == SOCK_STREAM)
                wspace = sk_stream_wspace(svsk->sk_sk);
        else
                wspace = sock_wspace(svsk->sk_sk);

        return wspace;
}

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;
        struct svc_rqst *rqstp;

        if (!(svsk->sk_flags &
              ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
                return;
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                return;

        spin_lock_bh(&serv->sv_lock);

        if (!list_empty(&serv->sv_threads) &&
            !list_empty(&serv->sv_sockets))
                printk(KERN_ERR
                       "svc_sock_enqueue: threads and sockets both waiting??\n");

        if (test_bit(SK_DEAD, &svsk->sk_flags)) {
                /* Don't enqueue dead sockets */
                dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        if (test_bit(SK_BUSY, &svsk->sk_flags)) {
                /* Don't enqueue socket while daemon is receiving */
                dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
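        /* Only hand the socket to a thread if the send buffer can hold
         * the replies already reserved plus one more full-sized reply,
         * doubled for slack.  SOCK_NOSPACE is set first so that, if we
         * bail out here, svc_write_space() will re-enqueue the socket
         * once space becomes available.
         */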
        if (((svsk->sk_reserved + serv->sv_bufsz)*2
             > svc_sock_wspace(svsk))
            && !test_bit(SK_CLOSE, &svsk->sk_flags)
            && !test_bit(SK_CONN, &svsk->sk_flags)) {
                /* Don't enqueue while not enough space for reply */
                dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
                        svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz,
                        svc_sock_wspace(svsk));
                goto out_unlock;
        }
        clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

        /* Mark socket as busy. It will remain in this state until the
         * server has processed all pending data and put the socket back
         * on the idle list.
         */
        set_bit(SK_BUSY, &svsk->sk_flags);

        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: socket %p served by daemon %p\n",
                        svsk->sk_sk, rqstp);
                svc_serv_dequeue(serv, rqstp);
                if (rqstp->rq_sock)
                        printk(KERN_ERR
                               "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                               rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
                rqstp->rq_reserved = serv->sv_bufsz;
                svsk->sk_reserved += rqstp->rq_reserved;
                wake_up(&rqstp->rq_wait);
        } else {
                dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
                list_add_tail(&svsk->sk_ready, &serv->sv_sockets);
        }

out_unlock:
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Dequeue the first socket.  Must be called with the serv->sv_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_serv *serv)
{
        struct svc_sock *svsk;

        if (list_empty(&serv->sv_sockets))
                return NULL;

        svsk = list_entry(serv->sv_sockets.next,
                          struct svc_sock, sk_ready);
        list_del_init(&svsk->sk_ready);

        dprintk("svc: socket %p dequeued, inuse=%d\n",
                svsk->sk_sk, svsk->sk_inuse);

        return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        space += rqstp->rq_res.head[0].iov_len;

        if (space < rqstp->rq_reserved) {
                struct svc_sock *svsk = rqstp->rq_sock;
                spin_lock_bh(&svsk->sk_server->sv_lock);
                svsk->sk_reserved -= (rqstp->rq_reserved - space);
                rqstp->rq_reserved = space;
                spin_unlock_bh(&svsk->sk_server->sv_lock);

                svc_sock_enqueue(svsk);
        }
}
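
/*
 * Illustrative use (hypothetical caller): a service that has parsed a
 * request and knows its reply will be small could call
 *
 *      svc_reserve(rqstp, 512);
 *
 * to shrink its reservation, letting svc_sock_enqueue() hand the
 * socket to other threads sooner.
 */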

/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;

        spin_lock_bh(&serv->sv_lock);
        if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
                spin_unlock_bh(&serv->sv_lock);
                dprintk("svc: releasing dead socket\n");
                sock_release(svsk->sk_sock);
                kfree(svsk);
        }
        else
                spin_unlock_bh(&serv->sv_lock);
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;

        svc_release_skb(rqstp);

        svc_free_allpages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;

        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        rqstp->rq_sock = NULL;

        svc_sock_put(svsk);
}

/*
 * External function to wake up a server waiting for data
 */
void
svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: daemon %p woken up.\n", rqstp);
                /*
                svc_serv_dequeue(serv, rqstp);
                rqstp->rq_sock = NULL;
                 */
                wake_up(&rqstp->rq_wait);
        }
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct socket *sock = svsk->sk_sock;
        int slen;
        char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct cmsghdr *cmh = (struct cmsghdr *)buffer;
        struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
        int len = 0;
        int result;
        int size;
        struct page **ppage = xdr->pages;
        size_t base = xdr->page_base;
        unsigned int pglen = xdr->page_len;
        unsigned int flags = MSG_MORE;

        slen = xdr->len;

        if (rqstp->rq_prot == IPPROTO_UDP) {
                /* set the source and destination */
                struct msghdr msg;
                msg.msg_name    = &rqstp->rq_addr;
                msg.msg_namelen = sizeof(rqstp->rq_addr);
                msg.msg_iov     = NULL;
                msg.msg_iovlen  = 0;
                msg.msg_flags   = MSG_MORE;

                msg.msg_control = cmh;
                msg.msg_controllen = sizeof(buffer);
                cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                cmh->cmsg_level = SOL_IP;
                cmh->cmsg_type = IP_PKTINFO;
                pki->ipi_ifindex = 0;
                pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

                if (sock_sendmsg(sock, &msg, 0) < 0)
                        goto out;
        }

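        /* Everything below goes out with MSG_MORE set until the final
         * chunk, so the network layer can coalesce the head, the page
         * data and the tail into as few packets as possible.
         */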
        /* send head */
        if (slen == xdr->head[0].iov_len)
                flags = 0;
        len = sock->ops->sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
        if (len != xdr->head[0].iov_len)
                goto out;
        slen -= xdr->head[0].iov_len;
        if (slen == 0)
                goto out;

        /* send page data */
        size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
        while (pglen > 0) {
                if (slen == size)
                        flags = 0;
                result = sock->ops->sendpage(sock, *ppage, base, size, flags);
                if (result > 0)
                        len += result;
                if (result != size)
                        goto out;
                slen -= size;
                pglen -= size;
                size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
                base = 0;
                ppage++;
        }
        /* send tail */
        if (xdr->tail[0].iov_len) {
                result = sock->ops->sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
                                             ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1),
                                             xdr->tail[0].iov_len, 0);

                if (result > 0)
                        len += result;
        }
out:
        dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
                rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
                rqstp->rq_addr.sin_addr.s_addr);

        return len;
}

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
        mm_segment_t oldfs;
        struct socket *sock = svsk->sk_sock;
        int avail, err;

        oldfs = get_fs(); set_fs(KERNEL_DS);
        err = sock->ops->ioctl(sock, TIOCINQ, (unsigned long) &avail);
        set_fs(oldfs);

        return (err >= 0)? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
        struct msghdr msg;
        struct socket *sock;
        int len, alen;

        rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
        sock = rqstp->rq_sock->sk_sock;

        msg.msg_name    = &rqstp->rq_addr;
        msg.msg_namelen = sizeof(rqstp->rq_addr);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;

        msg.msg_flags   = MSG_DONTWAIT;

        len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

        /* sock_recvmsg doesn't fill in the name/namelen, so we must...
         * possibly we should cache this in the svc_sock structure
         * at accept time. FIXME
         */
        alen = sizeof(rqstp->rq_addr);
        sock->ops->getname(sock, (struct sockaddr *)&rqstp->rq_addr, &alen, 1);

        dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
                rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

        return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
        mm_segment_t oldfs;
        oldfs = get_fs(); set_fs(KERNEL_DS);
        sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                        (char*)&snd, sizeof(snd));
        sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
                        (char*)&rcv, sizeof(rcv));
#else
        /* sock_setsockopt limits use to sysctl_?mem_max,
         * which isn't acceptable.  Until that is made conditional
         * on not having CAP_SYS_RESOURCE or similar, we go direct...
         * DaveM said I could!
         */
        lock_sock(sock->sk);
        sock->sk->sk_sndbuf = snd * 2;
        sock->sk->sk_rcvbuf = rcv * 2;
        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
        release_sock(sock->sk);
#endif
}
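
/*
 * Note: the "* 2" above matches what sock_setsockopt() itself does with
 * SO_SNDBUF/SO_RCVBUF values -- the kernel doubles the requested size
 * to leave room for its own bookkeeping overhead.
 */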
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        if (svsk) {
                dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
                        svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}

/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
                        svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
                svc_sock_enqueue(svsk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
                dprintk("RPC svc_write_space: someone sleeping on %p\n",
                        svsk);
                wake_up_interruptible(sk->sk_sleep);
        }
}

/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        struct sk_buff *skb;
        int err, len;

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* udp sockets need large rcvbuf as all pending
                 * requests are still in that buffer.  sndbuf must
                 * also be large enough that there is enough space
                 * for one reply per thread.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz);

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        clear_bit(SK_DATA, &svsk->sk_flags);
        while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
                if (err == -EAGAIN) {
                        svc_sock_received(svsk);
                        return err;
                }
                /* possibly an icmp error */
                dprintk("svc: recvfrom returned error %d\n", -err);
        }
        if (skb->tstamp.off_sec == 0) {
                struct timeval tv;

                tv.tv_sec = xtime.tv_sec;
                tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
                skb_set_timestamp(skb, &tv);
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
        skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
        set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

        /*
         * Maybe more packets - kick another thread ASAP.
         */
        svc_sock_received(svsk);

        len = skb->len - sizeof(struct udphdr);
        rqstp->rq_arg.len = len;

        rqstp->rq_prot = IPPROTO_UDP;

        /* Get sender address */
        rqstp->rq_addr.sin_family = AF_INET;
        rqstp->rq_addr.sin_port = skb->h.uh->source;
        rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
        rqstp->rq_daddr = skb->nh.iph->daddr;

        if (skb_is_nonlinear(skb)) {
                /* we have to copy */
                local_bh_disable();
                if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
                        local_bh_enable();
                        /* checksum error */
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                local_bh_enable();
                skb_free_datagram(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
                rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
                rqstp->rq_arg.head[0].iov_len = len;
                if (skb_checksum_complete(skb)) {
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                rqstp->rq_skbuff = skb;
        }

        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
                rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
        }

        if (serv->sv_stats)
                serv->sv_stats->netudpcnt++;

        return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
        int error;

        error = svc_sendto(rqstp, &rqstp->rq_res);
        if (error == -ECONNREFUSED)
                /* ICMP error on earlier request. */
                error = svc_sendto(rqstp, &rqstp->rq_res);

        return error;
}

static void
svc_udp_init(struct svc_sock *svsk)
{
        svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
        svsk->sk_sk->sk_write_space = svc_write_space;
        svsk->sk_recvfrom = svc_udp_recvfrom;
        svsk->sk_sendto = svc_udp_sendto;

        /* initial setting: must have enough space to
         * receive and respond to one request.
         * svc_udp_recvfrom will re-adjust if necessary
         */
        svc_sock_setbufsize(svsk->sk_sock,
                            3 * svsk->sk_server->sv_bufsz,
                            3 * svsk->sk_server->sv_bufsz);

        set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
        set_bit(SK_CHNGBUF, &svsk->sk_flags);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);

        /*
         * This callback may be called twice when a new connection
         * is established as a child socket inherits everything
         * from a parent LISTEN socket.
         * 1) data_ready method of the parent socket will be called
         *    when one of the child sockets becomes ESTABLISHED.
         * 2) data_ready method of the child socket may be called
         *    when it receives data before the socket is accepted.
         * In case of 2, we should ignore it silently.
         */
        if (sk->sk_state == TCP_LISTEN) {
                if (svsk) {
                        set_bit(SK_CONN, &svsk->sk_flags);
                        svc_sock_enqueue(svsk);
                } else
                        printk("svc: socket %p: no user data\n", sk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);

        if (!svsk)
                printk("svc: socket %p: no user data\n", sk);
        else {
                set_bit(SK_CLOSE, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
        if (svsk) {
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}

/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
        struct sockaddr_in sin;
        struct svc_serv *serv = svsk->sk_server;
        struct socket *sock = svsk->sk_sock;
        struct socket *newsock;
        const struct proto_ops *ops;
        struct svc_sock *newsvsk;
        int err, slen;

        dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
        if (!sock)
                return;

        err = sock_create_lite(PF_INET, SOCK_STREAM, IPPROTO_TCP, &newsock);
        if (err) {
                if (err == -ENOMEM)
                        printk(KERN_WARNING "%s: no more sockets!\n",
                               serv->sv_name);
                return;
        }

        dprintk("svc: tcp_accept %p allocated\n", newsock);
        newsock->ops = ops = sock->ops;

        clear_bit(SK_CONN, &svsk->sk_flags);
        if ((err = ops->accept(sock, newsock, O_NONBLOCK)) < 0) {
                if (err != -EAGAIN && net_ratelimit())
                        printk(KERN_WARNING "%s: accept failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }
        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);

        slen = sizeof(sin);
        err = ops->getname(newsock, (struct sockaddr *) &sin, &slen, 1);
        if (err < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: peername failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }

        /* Ideally, we would want to reject connections from unauthorized
         * hosts here, but when we get encryption, the IP of the host won't
         * tell us anything. For now just warn about unprivileged connections.
         */
        if (ntohs(sin.sin_port) >= 1024) {
                dprintk(KERN_WARNING
                        "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
                        serv->sv_name,
                        NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
        }

        dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
                NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

        /* make sure that a write doesn't block forever when
         * low on memory
         */
        newsock->sk->sk_sndtimeo = HZ*30;

        if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
                goto failed;

        /* make sure that we don't have too many active connections.
         * If we have, something must be dropped.
         *
         * There's no point in trying to do random drop here for
         * DoS prevention. NFS clients do one reconnect every
         * 15 seconds; an attacker can easily beat that.
         *
         * The only somewhat efficient mechanism would be to drop
         * old connections from the same IP first. But right now
         * we don't even record the client IP in svc_sock.
         */
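        /* The threshold below allows roughly twenty temporary sockets
         * per thread (plus a little slack), so raising the thread count
         * also raises the connection ceiling.
         */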
        if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
                struct svc_sock *svsk = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        if (net_ratelimit()) {
                                /* Try to help the admin */
                                printk(KERN_NOTICE "%s: too many open TCP "
                                       "sockets, consider increasing the "
                                       "number of nfsd threads\n",
                                       serv->sv_name);
                                printk(KERN_NOTICE "%s: last TCP connect from "
                                       "%u.%u.%u.%u:%d\n",
                                       serv->sv_name,
                                       NIPQUAD(sin.sin_addr.s_addr),
                                       ntohs(sin.sin_port));
                        }
                        /*
                         * Always select the oldest socket. It's not fair,
                         * but so is life
                         */
                        svsk = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_sock,
                                          sk_list);
                        set_bit(SK_CLOSE, &svsk->sk_flags);
                        svsk->sk_inuse++;
                }
                spin_unlock_bh(&serv->sv_lock);

                if (svsk) {
                        svc_sock_enqueue(svsk);
                        svc_sock_put(svsk);
                }

        }

        if (serv->sv_stats)
                serv->sv_stats->nettcpconn++;

        return;

failed:
        sock_release(newsock);
        return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        int len;
        struct kvec vec[RPCSVC_MAXPAGES];
        int pnum, vlen;

        dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
                svsk, test_bit(SK_DATA, &svsk->sk_flags),
                test_bit(SK_CONN, &svsk->sk_flags),
                test_bit(SK_CLOSE, &svsk->sk_flags));

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
                svc_delete_socket(svsk);
                return 0;
        }

        if (svsk->sk_sk->sk_state == TCP_LISTEN) {
                svc_tcp_accept(svsk);
                svc_sock_received(svsk);
                return 0;
        }

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* sndbuf needs to have room for one request
                 * per thread, otherwise we can stall even when the
                 * network isn't a bottleneck.
                 * rcvbuf just needs to be able to hold a few requests.
                 * Normally they will be removed from the queue
                 * as soon as a complete request arrives.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    3 * serv->sv_bufsz);

        clear_bit(SK_DATA, &svsk->sk_flags);

        /* Receive data. If we haven't got the record length yet, get
         * the next four bytes. Otherwise try to gobble up as much as
         * possible up to the complete record length.
         */
        if (svsk->sk_tcplen < 4) {
                unsigned long want = 4 - svsk->sk_tcplen;
                struct kvec iov;

                iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
                iov.iov_len = want;
                if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
                        goto error;
                svsk->sk_tcplen += len;

                if (len < want) {
                        dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
                                len, want);
                        svc_sock_received(svsk);
                        return -EAGAIN; /* record header not complete */
                }

                svsk->sk_reclen = ntohl(svsk->sk_reclen);
                if (!(svsk->sk_reclen & 0x80000000)) {
                        /* FIXME: technically, a record can be fragmented,
                         * and non-terminal fragments will not have the top
                         * bit set in the fragment length header.
                         * But apparently no known nfs clients send fragmented
                         * records. */
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
                svsk->sk_reclen &= 0x7fffffff;
                dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
                if (svsk->sk_reclen > serv->sv_bufsz) {
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
        }

        /* Check whether enough data is available */
        len = svc_recv_available(svsk);
        if (len < 0)
                goto error;

        if (len < svsk->sk_reclen) {
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        len, svsk->sk_reclen);
                svc_sock_received(svsk);
                return -EAGAIN; /* record not complete */
        }
        len = svsk->sk_reclen;
        set_bit(SK_DATA, &svsk->sk_flags);
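
        /* Build a kvec whose first slot is head[0] (one page) followed
         * by as many whole argument pages as the record needs;
         * svc_recvfrom() then pulls the complete record in one call.
         */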
        vec[0] = rqstp->rq_arg.head[0];
        vlen = PAGE_SIZE;
        pnum = 1;
        while (vlen < len) {
                vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
                vec[pnum].iov_len = PAGE_SIZE;
                pnum++;
                vlen += PAGE_SIZE;
        }

        /* Now receive data */
        len = svc_recvfrom(rqstp, vec, pnum, len);
        if (len < 0)
                goto error;

        dprintk("svc: TCP complete record (%d bytes)\n", len);
        rqstp->rq_arg.len = len;
        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
        }

        rqstp->rq_skbuff = NULL;
        rqstp->rq_prot = IPPROTO_TCP;

        /* Reset TCP read info */
        svsk->sk_reclen = 0;
        svsk->sk_tcplen = 0;

        svc_sock_received(svsk);
        if (serv->sv_stats)
                serv->sv_stats->nettcpcnt++;

        return len;

err_delete:
        svc_delete_socket(svsk);
        return -EAGAIN;

error:
        if (len == -EAGAIN) {
                dprintk("RPC: TCP recvfrom got EAGAIN\n");
                svc_sock_received(svsk);
        } else {
                printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
                       svsk->sk_server->sv_name, -len);
                goto err_delete;
        }

        return len;
}

/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
        struct xdr_buf *xbufp = &rqstp->rq_res;
        int sent;
        u32 reclen;

        /* Set up the first element of the reply kvec.
         * Any other kvecs that may be in use have been taken
         * care of by the server implementation itself.
         */
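        /* The record marker occupies the first 4 bytes of head[0], and
         * xbufp->len counts them, hence the "- 4" when encoding the
         * fragment length; 0x80000000 is the last-fragment bit (see the
         * record-marking note at the top of this file).
         */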
        reclen = htonl(0x80000000 | (xbufp->len - 4));
        memcpy(xbufp->head[0].iov_base, &reclen, 4);

        if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
                return -ENOTCONN;

        sent = svc_sendto(rqstp, &rqstp->rq_res);
        if (sent != xbufp->len) {
                printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
                       rqstp->rq_sock->sk_server->sv_name,
                       (sent<0)?"got error":"sent only",
                       sent, xbufp->len);
                svc_delete_socket(rqstp->rq_sock);
                sent = -EAGAIN;
        }
        return sent;
}

static void
svc_tcp_init(struct svc_sock *svsk)
{
        struct sock *sk = svsk->sk_sk;
        struct tcp_sock *tp = tcp_sk(sk);

        svsk->sk_recvfrom = svc_tcp_recvfrom;
        svsk->sk_sendto = svc_tcp_sendto;

        if (sk->sk_state == TCP_LISTEN) {
                dprintk("setting up TCP socket for listening\n");
                sk->sk_data_ready = svc_tcp_listen_data_ready;
                set_bit(SK_CONN, &svsk->sk_flags);
        } else {
                dprintk("setting up TCP socket for reading\n");
                sk->sk_state_change = svc_tcp_state_change;
                sk->sk_data_ready = svc_tcp_data_ready;
                sk->sk_write_space = svc_write_space;

                svsk->sk_reclen = 0;
                svsk->sk_tcplen = 0;

                tp->nonagle = 1;        /* disable Nagle's algorithm */

                /* initial setting: must have enough space to
                 * receive and respond to one request.
                 * svc_tcp_recvfrom will re-adjust if necessary
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    3 * svsk->sk_server->sv_bufsz,
                                    3 * svsk->sk_server->sv_bufsz);

                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(SK_CLOSE, &svsk->sk_flags);
        }
}

void
svc_sock_update_bufs(struct svc_serv *serv)
{
        /*
         * The number of server threads has changed. Update
         * rcvbuf and sndbuf accordingly on all sockets
         */
        struct list_head *le;

        spin_lock_bh(&serv->sv_lock);
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        list_for_each(le, &serv->sv_tempsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Receive the next request on any socket.
 */
int
svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
{
        struct svc_sock *svsk = NULL;
        int len;
        int pages;
        struct xdr_buf *arg;
        DECLARE_WAITQUEUE(wait, current);

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_sock)
                printk(KERN_ERR
                       "svc_recv: service %p, socket not NULL!\n",
                       rqstp);
        if (waitqueue_active(&rqstp->rq_wait))
                printk(KERN_ERR
                       "svc_recv: service %p, wait queue active!\n",
                       rqstp);

        /* Initialize the buffers */
        /* first reclaim pages that were moved to response list */
        svc_pushback_allpages(rqstp);

        /* now allocate needed pages.  If we get a failure, sleep briefly */
        pages = 2 + (serv->sv_bufsz + PAGE_SIZE - 1) / PAGE_SIZE;
        while (rqstp->rq_arghi < pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p) {
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        continue;
                }
                rqstp->rq_argpages[rqstp->rq_arghi++] = p;
        }

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        rqstp->rq_argused = 1;
        arg->pages = rqstp->rq_argpages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;

        try_to_freeze();
        cond_resched();
        if (signalled())
                return -EINTR;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_tempsocks)) {
                svsk = list_entry(serv->sv_tempsocks.next,
                                  struct svc_sock, sk_list);
                /* apparently the "standard" is that clients close
                 * idle connections after 5 minutes, servers after
                 * 6 minutes
                 *   http://www.connectathon.org/talks96/nfstcp.pdf
                 */
                if (get_seconds() - svsk->sk_lastrecv < 6*60
                    || test_bit(SK_BUSY, &svsk->sk_flags))
                        svsk = NULL;
        }
        if (svsk) {
                set_bit(SK_BUSY, &svsk->sk_flags);
                set_bit(SK_CLOSE, &svsk->sk_flags);
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
        } else if ((svsk = svc_sock_dequeue(serv)) != NULL) {
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
                rqstp->rq_reserved = serv->sv_bufsz;
                svsk->sk_reserved += rqstp->rq_reserved;
        } else {
                /* No data pending. Go to sleep */
                svc_serv_enqueue(serv, rqstp);

                /*
                 * We have to be able to interrupt this wait
                 * to bring down the daemons ...
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&rqstp->rq_wait, &wait);
                spin_unlock_bh(&serv->sv_lock);

                schedule_timeout(timeout);

                try_to_freeze();

                spin_lock_bh(&serv->sv_lock);
                remove_wait_queue(&rqstp->rq_wait, &wait);

                if (!(svsk = rqstp->rq_sock)) {
                        svc_serv_dequeue(serv, rqstp);
                        spin_unlock_bh(&serv->sv_lock);
                        dprintk("svc: server %p, no data yet\n", rqstp);
                        return signalled()? -EINTR : -EAGAIN;
                }
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: server %p, socket %p, inuse=%d\n",
                rqstp, svsk, svsk->sk_inuse);
        len = svsk->sk_recvfrom(rqstp);
        dprintk("svc: got len=%d\n", len);

        /* No data, incomplete (TCP) read, or accept() */
        if (len == 0 || len == -EAGAIN) {
                rqstp->rq_res.len = 0;
                svc_sock_release(rqstp);
                return -EAGAIN;
        }
        svsk->sk_lastrecv = get_seconds();
        if (test_bit(SK_TEMP, &svsk->sk_flags)) {
                /* push active sockets to end of list */
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&svsk->sk_list))
                        list_move_tail(&svsk->sk_list, &serv->sv_tempsocks);
                spin_unlock_bh(&serv->sv_lock);
        }

        rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
        rqstp->rq_chandle.defer = svc_defer;

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        return len;
}

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
        dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
        svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk;
        int len;
        struct xdr_buf *xb;

        if ((svsk = rqstp->rq_sock) == NULL) {
                printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
                       __FILE__, __LINE__);
                return -EFAULT;
        }

        /* release the receive skb before sending the reply */
        svc_release_skb(rqstp);

        /* calculate over-all length */
        xb = &rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;

        /* Grab svsk->sk_mutex to serialize outgoing data. */
        mutex_lock(&svsk->sk_mutex);
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                len = -ENOTCONN;
        else
                len = svsk->sk_sendto(rqstp);
        mutex_unlock(&svsk->sk_mutex);
        svc_sock_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                return 0;
        return len;
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
                 int *errp, int pmap_register)
{
        struct svc_sock *svsk;
        struct sock *inet;

        dprintk("svc: svc_setup_socket %p\n", sock);
        if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
                *errp = -ENOMEM;
                return NULL;
        }

        inet = sock->sk;

        /* Register socket with portmapper */
        if (*errp >= 0 && pmap_register)
                *errp = svc_register(serv, inet->sk_protocol,
                                     ntohs(inet_sk(inet)->sport));

        if (*errp < 0) {
                kfree(svsk);
                return NULL;
        }

        set_bit(SK_BUSY, &svsk->sk_flags);
        inet->sk_user_data = svsk;
        svsk->sk_sock = sock;
        svsk->sk_sk = inet;
        svsk->sk_ostate = inet->sk_state_change;
        svsk->sk_odata = inet->sk_data_ready;
        svsk->sk_owspace = inet->sk_write_space;
        svsk->sk_server = serv;
        svsk->sk_lastrecv = get_seconds();
        INIT_LIST_HEAD(&svsk->sk_deferred);
        INIT_LIST_HEAD(&svsk->sk_ready);
        mutex_init(&svsk->sk_mutex);

        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
                svc_udp_init(svsk);
        else
                svc_tcp_init(svsk);

        spin_lock_bh(&serv->sv_lock);
        if (!pmap_register) {
                set_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_tempsocks);
                serv->sv_tmpcnt++;
        } else {
                clear_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_permsocks);
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                svsk, svsk->sk_sk);

        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        return svsk;
}

/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
        struct svc_sock *svsk;
        struct socket *sock;
        int error;
        int type;

        dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
                serv->sv_program->pg_name, protocol,
                NIPQUAD(sin->sin_addr.s_addr),
                ntohs(sin->sin_port));

        if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
                printk(KERN_WARNING "svc: only UDP and TCP "
                       "sockets supported\n");
                return -EINVAL;
        }
        type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

        if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                return error;

        if (sin != NULL) {
                if (type == SOCK_STREAM)
                        sock->sk->sk_reuse = 1; /* allow address reuse */
                error = sock->ops->bind(sock, (struct sockaddr *) sin,
                                        sizeof(*sin));
                if (error < 0)
                        goto bummer;
        }

        if (protocol == IPPROTO_TCP) {
                if ((error = sock->ops->listen(sock, 64)) < 0)
                        goto bummer;
        }

        if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
                return 0;

bummer:
        dprintk("svc: svc_create_socket error = %d\n", -error);
        sock_release(sock);
        return error;
}

/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
        struct svc_serv *serv;
        struct sock *sk;

        dprintk("svc: svc_delete_socket(%p)\n", svsk);

        serv = svsk->sk_server;
        sk = svsk->sk_sk;

        sk->sk_state_change = svsk->sk_ostate;
        sk->sk_data_ready = svsk->sk_odata;
        sk->sk_write_space = svsk->sk_owspace;

        spin_lock_bh(&serv->sv_lock);

        list_del_init(&svsk->sk_list);
        list_del_init(&svsk->sk_ready);
        if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
                if (test_bit(SK_TEMP, &svsk->sk_flags))
                        serv->sv_tmpcnt--;

        if (!svsk->sk_inuse) {
                spin_unlock_bh(&serv->sv_lock);
                sock_release(svsk->sk_sock);
                kfree(svsk);
        } else {
                spin_unlock_bh(&serv->sv_lock);
                dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
                /* svsk->sk_server = NULL; */
        }
}

/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
        struct sockaddr_in sin;

        dprintk("svc: creating socket proto = %d\n", protocol);
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = INADDR_ANY;
        sin.sin_port = htons(port);
        return svc_create_socket(serv, protocol, &sin);
}

/*
 * Handle defer and revisit of requests
 */
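
/*
 * In outline: svc_defer() snapshots the request arguments into a
 * svc_deferred_req, svc_revisit() puts that snapshot back on its
 * socket's sk_deferred list once whatever it was waiting for has
 * arrived, and svc_deferred_recv() replays it into a fresh svc_rqst.
 */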

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
        struct svc_serv *serv = dreq->owner;
        struct svc_sock *svsk;

        if (too_many) {
                svc_sock_put(dr->svsk);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        svsk = dr->svsk;
        dr->svsk = NULL;
        spin_lock_bh(&serv->sv_lock);
        list_add(&dr->handle.recent, &svsk->sk_deferred);
        spin_unlock_bh(&serv->sv_lock);
        set_bit(SK_DEFERRED, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        svc_sock_put(svsk);
}

static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
        int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
        struct svc_deferred_req *dr;

        if (rqstp->rq_arg.page_len)
                return NULL; /* if more than a page, give up FIXME */
        if (rqstp->rq_deferred) {
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
                int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                /* FIXME maybe discard if size too large */
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;

                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                dr->addr = rqstp->rq_addr;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
        }
        spin_lock_bh(&rqstp->rq_server->sv_lock);
        rqstp->rq_sock->sk_inuse++;
        dr->svsk = rqstp->rq_sock;
        spin_unlock_bh(&rqstp->rq_server->sv_lock);

        dr->handle.revisit = svc_revisit;
        return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        rqstp->rq_arg.head[0].iov_base = dr->args;
        rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
        rqstp->rq_arg.page_len = 0;
        rqstp->rq_arg.len = dr->argslen<<2;
        rqstp->rq_prot = dr->prot;
        rqstp->rq_addr = dr->addr;
        rqstp->rq_daddr = dr->daddr;
        return dr->argslen<<2;
}

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
        struct svc_deferred_req *dr = NULL;
        struct svc_serv *serv = svsk->sk_server;

        if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
                return NULL;
        spin_lock_bh(&serv->sv_lock);
        clear_bit(SK_DEFERRED, &svsk->sk_flags);
        if (!list_empty(&svsk->sk_deferred)) {
                dr = list_entry(svsk->sk_deferred.next,
                                struct svc_deferred_req,
                                handle.recent);
                list_del_init(&dr->handle.recent);
                set_bit(SK_DEFERRED, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
        return dr;
}