/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
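/*
 * In terms of the functions below, a request's life cycle is roughly:
 *
 *	xprt_reserve()           - allocate a slot, or sleep on ->backlog
 *	xprt_prepare_transmit()  - serialize write access to the transport
 *	xprt_transmit()          - queue the request on ->recv and send it
 *	xprt_complete_rqst()     - reply matched by XID; caller is woken
 *	xprt_release()           - return the slot to the free list
 *
 * When no reply arrives in time, xprt_timer() runs instead of
 * xprt_complete_rqst().
 */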
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
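/*
 * The congestion window is thus kept in fixed point, with RPC_CWNDSCALE
 * (256) representing one request. For example, with cwnd = 2 requests
 * (512), a reply received while the transport is congested grows the
 * window by (256 * 256 + 256) / 512 = 128, i.e. by half a request,
 * which is the 1/cwnd increment described above. A timeout halves cwnd,
 * but never below RPC_CWNDSCALE (one outstanding request), and
 * RPC_MAXCWND caps it at max_reqs requests. See xprt_adjust_cwnd().
 */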
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
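/*
 * A minimal sketch of the registration pattern (illustrative only; the
 * "example" names and ident below are placeholders, not a real transport):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE, /+ hypothetical unique id +/
 *		.setup	= example_xprt_setup,
 *	};
 *
 * The module calls xprt_register_transport(&example_transport) from its
 * init routine and xprt_unregister_transport() on exit.
 */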
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
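/*
 * Note that request_module("xprt%s", ...) can only find the module if it
 * declares a matching alias; for instance, an RDMA transport built as a
 * separate module would carry MODULE_ALIAS("xprtrdma") so that
 * xprt_load_transport("rdma") resolves to it.
 */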
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req != NULL && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
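/*
 * Note that when XPRT_CLOSE_WAIT is set (and the transport is not
 * shutting down), xprt_clear_locked() does not drop XPRT_LOCKED at all:
 * the write lock is handed to the autoclose worker instead, and
 * xprt_autoclose() releases it via xprt_release_write() once the close
 * has been carried out.
 */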
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req != NULL && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
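/*
 * For example, if rpc_calc_rto() returns an RTO of HZ jiffies (one
 * second) and the request has already been retransmitted twice
 * (rq_retries == 2) with no timer history, the next timeout is
 * HZ << 2 = four seconds, clamped to the client's to_maxval.
 */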
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
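/*
 * To illustrate with the linear (non-exponential) case: while jiffies is
 * still before rq_majortimeo, each minor timeout grows rq_timeout by
 * to_increment, capped at to_maxval. Once the major timeout has passed,
 * rq_timeout restarts from to_initval, the retry counter and RTT
 * estimator are reset, and -ETIMEDOUT is returned so the caller can
 * decide whether to retransmit or give up.
 */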
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}
/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
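/*
 * The write barrier above makes sure the reply data written into
 * rq_private_buf is visible before rq_reply_bytes_recvd becomes
 * non-zero; readers (such as call_decode() in clnt.c) are expected to
 * pair it with a read barrier before touching the reply buffer.
 */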
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
		err = req->rq_reply_bytes_recvd;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(xprt, task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
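/*
 * Note the ordering in xprt_transmit(): the request is added to the
 * ->recv list and rq_private_buf is set up *before* send_request() is
 * called, so a reply that races back immediately can always be matched
 * by xprt_lookup_rqst() and copied by the data_ready handler.
 */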
static void xprt_alloc_slot(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC:       waiting for request slot\n");
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	memset(req, 0, sizeof(*req));	/* mark unused */

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
{
	struct rpc_xprt *xprt;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;
	atomic_set(&xprt->count, 1);

	xprt->max_reqs = max_req;
	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL)
		goto out_free;

	xprt->xprt_net = get_net(net);
	return xprt;

out_free:
	kfree(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	kfree(xprt->slot);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	/* Note: grabbing the xprt_lock_write() here is not strictly needed,
	 * but ensures that we throttle new slot allocation if the transport
	 * is congested (e.g. if reconnecting or if we're out of socket
	 * write buffer space).
	 */
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_lock_write(xprt, task))
		return;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	xprt_alloc_slot(task);
	spin_unlock(&xprt->reserve_lock);
	xprt_release_write(xprt, task);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}
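/*
 * XIDs start at a random value and are simply incremented for each new
 * request, so incoming replies can be matched back to their requests by
 * the XID comparison in xprt_lookup_rqst().
 */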
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;

	if (!(req = task->tk_rqstp))
		return;

	xprt = req->rq_xprt;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
static void xprt_init(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
				(unsigned long)xprt);
	else
		init_timer(&xprt->timer);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);
}
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
		/* ->setup returned a pre-initialized xprt: */
		goto out;

	xprt_init(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->resend);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (atomic_inc_not_zero(&xprt->count))
		return xprt;
	return NULL;
}