2 * linux/fs/ncpfs/sock.c
4 * Copyright (C) 1992, 1993 Rick Sladkey
6 * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
7 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/time.h>
14 #include <linux/errno.h>
15 #include <linux/socket.h>
16 #include <linux/fcntl.h>
17 #include <linux/stat.h>
18 #include <linux/string.h>
19 #include <linux/sched/signal.h>
20 #include <linux/uaccess.h>
22 #include <linux/net.h>
24 #include <linux/netdevice.h>
25 #include <linux/signal.h>
26 #include <linux/slab.h>
29 #include <linux/ipx.h>
30 #include <linux/poll.h>
31 #include <linux/file.h>
35 #include "ncpsign_kernel.h"
/* Receive up to 'size' bytes from 'sock' into 'buf' with a single kvec,
 * passing 'flags' (e.g. MSG_PEEK | MSG_DONTWAIT) through to the socket layer.
 * Returns bytes received or a negative errno from kernel_recvmsg().
 * NOTE(review): listing is elided here — brace lines missing from excerpt. */
37 static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
39 struct msghdr msg = {NULL, };
40 struct kvec iov = {buf, size};
41 return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
/* Send a 'count'-element kvec array totalling 'len' bytes on 'sock'.
 * 'flags' become msg_flags (callers pass MSG_DONTWAIT / MSG_NOSIGNAL).
 * Returns bytes sent or a negative errno from kernel_sendmsg(). */
44 static inline int do_send(struct socket *sock, struct kvec *vec, int count,
45 int len, unsigned flags)
47 struct msghdr msg = { .msg_flags = flags };
48 return kernel_sendmsg(sock, &msg, vec, count, len);
/* Send a single flat buffer of 'len' bytes on 'sock' (blocking, flags = 0).
 * NOTE(review): the 'struct kvec vec;' declaration and 'vec.iov_len = len;'
 * lines are missing from this excerpt of the listing. */
51 static int _send(struct socket *sock, const void *buff, int len)
54 vec.iov_base = (void *) buff;
56 return do_send(sock, &vec, 1, len, 0);
/* One in-flight NCP RPC: reply destination buffer, request life-cycle state,
 * and up to three tx iovecs (framing/signature header + payload).
 * NOTE(review): several members (refcount, wait queue, lengths, tx_ciov,
 * sign[], list linkage) are elided from this excerpt. */
59 struct ncp_request_reply {
63 unsigned char* reply_buf;
66 enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
70 struct kvec tx_iov[3];
/* Allocate and minimally initialise a request: refcount 1, idle state,
 * private wait queue.  Returns NULL on allocation failure (the NULL check
 * line is elided from this excerpt).  Caller owns the initial reference. */
75 static inline struct ncp_request_reply* ncp_alloc_req(void)
77 struct ncp_request_reply *req;
79 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
83 init_waitqueue_head(&req->wq);
84 atomic_set(&req->refs, (1));
85 req->status = RQ_IDLE;
/* Take an additional reference on 'req'. */
90 static void ncp_req_get(struct ncp_request_reply *req)
92 atomic_inc(&req->refs)
/* Drop a reference; the kfree() on the final put is elided from this excerpt. */
95 static void ncp_req_put(struct ncp_request_reply *req)
97 if (atomic_dec_and_test(&req->refs))
/* sk->sk_data_ready hook: chain to the saved original callback, then kick
 * the receive worker.  sk_user_data was set to the ncp_server at mount. */
101 void ncp_tcp_data_ready(struct sock *sk)
103 struct ncp_server *server = sk->sk_user_data;
105 server->data_ready(sk);
106 schedule_work(&server->rcv.tq);
/* sk->sk_error_report hook: chain to the original callback, then schedule
 * the receive worker so the error is noticed and the request torn down. */
109 void ncp_tcp_error_report(struct sock *sk)
111 struct ncp_server *server = sk->sk_user_data;
113 server->error_report(sk);
114 schedule_work(&server->rcv.tq);
/* sk->sk_write_space hook: chain to the original callback, then kick the
 * transmit worker to continue a partially-sent request. */
117 void ncp_tcp_write_space(struct sock *sk)
119 struct ncp_server *server = sk->sk_user_data;
121 /* We do not need any locking: we first set tx.creq, and then we do sendmsg,
123 server->write_space(sk);
125 schedule_work(&server->tx.tq);
/* Datagram retransmit timer callback (old timer API: opaque unsigned long
 * argument is the ncp_server pointer); defers real work to timeout_tq. */
128 void ncpdgram_timeout_call(unsigned long v)
130 struct ncp_server *server = (void*)v;
132 schedule_work(&server->timeout_tq);
/* Complete 'req' with 'result': copy the reply out of the shared rx buffer
 * (unless the waiter already abandoned the request, in which case reply_buf
 * may no longer be valid), mark done, and wake all waiters. */
135 static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
137 req->result = result;
138 if (req->status != RQ_ABANDONED)
139 memcpy(req->reply_buf, server->rxbuf, req->datalen);
140 req->status = RQ_DONE;
141 wake_up_all(&req->wq);
/* Tear down the whole connection: invalidate it, stop the retransmit timer,
 * then fail every queued request plus the current receive- and transmit-side
 * requests with -EIO.  Caller holds rcv.creq_mutex.
 * NOTE(review): the 'if (req)' guards around the creq paths are elided
 * from this excerpt of the listing. */
145 static void __abort_ncp_connection(struct ncp_server *server)
147 struct ncp_request_reply *req;
149 ncp_invalidate_conn(server);
150 del_timer(&server->timeout_tm);
151 while (!list_empty(&server->tx.requests)) {
152 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
154 list_del_init(&req->req);
155 ncp_finish_request(server, req, -EIO);
157 req = server->rcv.creq;
159 server->rcv.creq = NULL;
160 ncp_finish_request(server, req, -EIO);
161 server->rcv.ptr = NULL;
162 server->rcv.state = 0;
164 req = server->tx.creq;
166 server->tx.creq = NULL;
167 ncp_finish_request(server, req, -EIO);
/* Reassemble the 16-bit connection number from its low/high header bytes. */
171 static inline int get_conn_number(struct ncp_reply_header *rp)
173 return rp->conn_low | (rp->conn_high << 8);
/* Abort one request according to its current state: a queued request is
 * unlinked and finished with 'err'; an in-progress one is only marked
 * RQ_ABANDONED so the rcv path won't copy into the caller's buffer.
 * Caller holds rcv.creq_mutex.  (Several case labels are elided here.) */
176 static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
178 /* If req is done, we got signal, but we also received answer... */
179 switch (req->status) {
184 list_del_init(&req->req);
185 ncp_finish_request(server, req, err);
188 req->status = RQ_ABANDONED;
/* Locked wrapper around __ncp_abort_request(). */
195 static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
197 mutex_lock(&server->rcv.creq_mutex);
198 __ncp_abort_request(server, req, err);
199 mutex_unlock(&server->rcv.creq_mutex);
/* TCP-flavoured connection abort; currently just the common teardown. */
202 static inline void __ncptcp_abort(struct ncp_server *server)
204 __abort_ncp_connection(server);
/* Send a whole datagram request non-blocking.  The iovec array is copied
 * to a local first because sendmsg may advance the iov pointers in place.
 * NOTE(review): the local 'vec' declaration is elided from this excerpt. */
207 static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
210 /* sock_sendmsg updates iov pointers for us :-( */
211 memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
212 return do_send(sock, vec, req->tx_iovlen,
213 req->tx_totallen, MSG_DONTWAIT);
/* Push the current TCP tx request (tx.creq) as far as the socket allows.
 * -EAGAIN leaves the request pending for the write_space callback; other
 * errors abort it.  On full transmit the request moves to rcv.creq; on a
 * partial send the remaining length and iovecs are adjusted in place.
 * Caller holds rcv.creq_mutex.  NOTE(review): local declarations and
 * several guard/brace lines are elided from this excerpt. */
216 static void __ncptcp_try_send(struct ncp_server *server)
218 struct ncp_request_reply *rq;
223 rq = server->tx.creq;
227 /* sock_sendmsg updates iov pointers for us :-( */
228 memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
229 result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
230 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
232 if (result == -EAGAIN)
236 pr_err("tcp: Send failed: %d\n", result);
237 __ncp_abort_request(server, rq, result);
240 if (result >= rq->tx_totallen) {
241 server->rcv.creq = rq;
242 server->tx.creq = NULL;
245 rq->tx_totallen -= result;
247 while (iov->iov_len <= result) {
248 result -= iov->iov_len;
252 iov->iov_base += result;
253 iov->iov_len -= result;
/* Mark the request in progress and fill the NCP request header with the
 * split 16-bit connection number and the next sequence number. */
257 static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
259 req->status = RQ_INPROGRESS;
260 h->conn_low = server->connection;
261 h->conn_high = server->connection >> 8;
262 h->sequence = ++server->sequence;
/* Launch a datagram (IPX/UDP) request: fill the header, optionally append a
 * packet signature as a trailing iovec (the CONFIG_NCPFS_PACKET_SIGNING
 * conditionals are elided from this excerpt), install the request as
 * rcv.creq, send it, and arm the retransmit timer.  Caller holds
 * rcv.creq_mutex. */
265 static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
268 struct ncp_request_header* h;
270 req->tx_ciov = req->tx_iov + 1;
272 h = req->tx_iov[1].iov_base;
273 ncp_init_header(server, req, h);
274 signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
275 req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
276 cpu_to_le32(req->tx_totallen), req->sign);
278 req->tx_ciov[1].iov_base = req->sign;
279 req->tx_ciov[1].iov_len = signlen;
281 req->tx_totallen += signlen;
283 server->rcv.creq = req;
284 server->timeout_last = server->m.time_out;
285 server->timeout_retries = server->m.retry_count;
286 ncpdgram_send(server->ncp_sock, req);
287 mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
290 #define NCP_TCP_XMIT_MAGIC (0x446D6454)
291 #define NCP_TCP_XMIT_VERSION (1)
292 #define NCP_TCP_RCVD_MAGIC (0x744E6350)
/* Launch a TCP request: fill the header, build the NCP-over-TCP framing
 * (magic, total length, version, reply-buffer size) in req->sign as a
 * leading iovec — plus an optional signature when signing is active (the
 * conditionals are elided here) — then hand off to __ncptcp_try_send().
 * Caller holds rcv.creq_mutex. */
294 static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
297 struct ncp_request_header* h;
299 req->tx_ciov = req->tx_iov;
300 h = req->tx_iov[1].iov_base;
301 ncp_init_header(server, req, h);
302 signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
303 req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
304 cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;
306 req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
307 req->sign[1] = htonl(req->tx_totallen + signlen);
308 req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
309 req->sign[3] = htonl(req->datalen + 8);
310 req->tx_iov[0].iov_base = req->sign;
311 req->tx_iov[0].iov_len = signlen;
313 req->tx_totallen += signlen;
315 server->tx.creq = req;
316 __ncptcp_try_send(server);
/* Copy the caller's request payload into the server-owned tx buffer (so the
 * caller's buffer need not stay valid during retransmits), then dispatch to
 * the TCP or datagram start path based on the socket type. */
319 static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
321 /* we copy the data so that we do not depend on the caller
323 memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
324 req->tx_iov[1].iov_base = server->txbuf;
326 if (server->ncp_sock->type == SOCK_STREAM)
327 ncptcp_start_request(server, req);
329 ncpdgram_start_request(server, req);
/* Submit a request under rcv.creq_mutex: fail fast if the connection died;
 * queue it (RQ_QUEUED) when another request is already in flight on either
 * the tx or rcv side; otherwise start it immediately.  The reference-count
 * bump and return-value lines are elided from this excerpt. */
332 static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
334 mutex_lock(&server->rcv.creq_mutex);
335 if (!ncp_conn_valid(server)) {
336 mutex_unlock(&server->rcv.creq_mutex);
337 pr_err("tcp: Server died\n");
341 if (server->tx.creq || server->rcv.creq) {
342 req->status = RQ_QUEUED;
343 list_add_tail(&req->req, &server->tx.requests);
344 mutex_unlock(&server->rcv.creq_mutex);
347 __ncp_start_request(server, req);
348 mutex_unlock(&server->rcv.creq_mutex);
/* Clear the completed rcv.creq and, if the queue is non-empty, dequeue and
 * start the next pending request.  Caller holds rcv.creq_mutex. */
352 static void __ncp_next_request(struct ncp_server *server)
354 struct ncp_request_reply *req;
356 server->rcv.creq = NULL;
357 if (list_empty(&server->tx.requests)) {
360 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
361 list_del_init(&req->req);
362 __ncp_start_request(server, req);
/* Forward an out-of-band/unexpected packet to the userspace info socket, if
 * one is attached, framed as [len+8][id] big-endian header + payload.
 * NOTE(review): the local hdr/iov declarations are elided from this excerpt. */
365 static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
367 if (server->info_sock) {
371 hdr[0] = cpu_to_be32(len + 8);
372 hdr[1] = cpu_to_be32(id);
374 iov[0].iov_base = hdr;
376 iov[1].iov_base = (void *) data;
377 iov[1].iov_len = len;
379 do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
/* Datagram receive worker (rcv.tq).  Loops peeking at incoming packets:
 * answers NCP_WATCHDOG keep-alives (echoing the buffer with a 'Y' reply —
 * the buf[9] check/assignment lines are elided here), forwards any other
 * non-ACK/non-reply packet to the info socket, and matches replies against
 * the current request by sequence + connection number (NCP_ALLOC_SLOT
 * requests match unconditionally since no connection exists yet).  A
 * positive ACK just re-arms the timeout; a real reply is read into rxbuf,
 * optionally signature-verified, and completes the request.  The final
 * plain _recv() discards the peeked packet when nothing consumed it.
 * NOTE(review): loop/brace/declaration lines are elided throughout. */
383 void ncpdgram_rcv_proc(struct work_struct *work)
385 struct ncp_server *server =
386 container_of(work, struct ncp_server, rcv.tq);
389 sock = server->ncp_sock;
392 struct ncp_reply_header reply;
395 result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
399 if (result >= sizeof(reply)) {
400 struct ncp_request_reply *req;
402 if (reply.type == NCP_WATCHDOG) {
403 unsigned char buf[10];
405 if (server->connection != get_conn_number(&reply)) {
408 result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
410 ncp_dbg(1, "recv failed with %d\n", result);
414 ncp_dbg(1, "too short (%u) watchdog packet\n", result);
418 ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
422 _send(sock, buf, sizeof(buf));
425 if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
426 result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
430 info_server(server, 0, server->unexpected_packet.data, result);
433 mutex_lock(&server->rcv.creq_mutex);
434 req = server->rcv.creq;
435 if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
436 server->connection == get_conn_number(&reply)))) {
437 if (reply.type == NCP_POSITIVE_ACK) {
438 server->timeout_retries = server->m.retry_count;
439 server->timeout_last = NCP_MAX_RPC_TIMEOUT;
440 mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
441 } else if (reply.type == NCP_REPLY) {
442 result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
443 #ifdef CONFIG_NCPFS_PACKET_SIGNING
444 if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
445 if (result < 8 + 8) {
451 hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
452 if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
453 pr_info("Signature violation\n");
459 del_timer(&server->timeout_tm);
460 server->rcv.creq = NULL;
461 ncp_finish_request(server, req, result);
462 __ncp_next_request(server);
463 mutex_unlock(&server->rcv.creq_mutex);
467 mutex_unlock(&server->rcv.creq_mutex);
470 _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
/* Datagram timeout handler body: if no retransmit timer is pending and a
 * request is outstanding, either give up (-ETIMEDOUT) once a soft mount
 * exhausts its retries, or retransmit and re-arm the timer with the timeout
 * doubled (capped at NCP_MAX_RPC_TIMEOUT).  Caller holds rcv.creq_mutex.
 * NOTE(review): the 'if (req)' guard and 'timeout' declaration are elided. */
474 static void __ncpdgram_timeout_proc(struct ncp_server *server)
476 /* If timer is pending, we are processing another request... */
477 if (!timer_pending(&server->timeout_tm)) {
478 struct ncp_request_reply* req;
480 req = server->rcv.creq;
484 if (server->m.flags & NCP_MOUNT_SOFT) {
485 if (server->timeout_retries-- == 0) {
486 __ncp_abort_request(server, req, -ETIMEDOUT);
491 ncpdgram_send(server->ncp_sock, req);
492 timeout = server->timeout_last << 1;
493 if (timeout > NCP_MAX_RPC_TIMEOUT) {
494 timeout = NCP_MAX_RPC_TIMEOUT;
496 server->timeout_last = timeout;
497 mod_timer(&server->timeout_tm, jiffies + timeout);
/* timeout_tq work entry point: run the timeout handler under rcv.creq_mutex. */
502 void ncpdgram_timeout_proc(struct work_struct *work)
504 struct ncp_server *server =
505 container_of(work, struct ncp_server, timeout_tq);
506 mutex_lock(&server->rcv.creq_mutex);
507 __ncpdgram_timeout_proc(server);
508 mutex_unlock(&server->rcv.creq_mutex);
/* Non-blocking TCP read of 'len' bytes into 'buffer'; when 'buffer' is NULL
 * the data is drained into a static dummy buffer (capped at its size) —
 * used to discard oversized or unwanted stream payload.  Also sanity-checks
 * that recvmsg never returns more than requested. */
511 static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
516 result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
518 static unsigned char dummy[1024];
520 if (len > sizeof(dummy)) {
523 result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
529 pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
/* TCP receive state machine.  Reads server->rcv.len bytes into rcv.ptr
 * (NULL ptr = discard), then advances rcv.state:
 *   0: framing header — validate NCP_TCP_RCVD_MAGIC and length, then set up
 *      to read the type word (signed connections take the state-4 detour to
 *      pick up the extra signature fields first);
 *   4: (signing) re-read length/type after the signature prefix;
 *   then: dispatch on type — non-REPLY packets are captured into
 *      unexpected_packet (state 5) or discarded (state 2/3), replies are
 *      read into rxbuf after checking the length against req->datalen;
 *   1: full reply received — verify sequence, connection number and (when
 *      signing) the reply signature, complete the request, start the next;
 *   finally reset to read the next 10-byte framing header (state 0).
 * -EAGAIN pauses until more data; errors/EOF abort the connection.
 * Caller holds rcv.creq_mutex.  NOTE(review): many guard, brace, case-label
 * and declaration lines are elided from this excerpt of the listing. */
535 static int __ncptcp_rcv_proc(struct ncp_server *server)
537 /* We have to check the result, so store the complete header */
540 struct ncp_request_reply *req;
544 while (server->rcv.len) {
545 result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
546 if (result == -EAGAIN) {
550 req = server->rcv.creq;
552 __ncp_abort_request(server, req, -EIO);
554 __ncptcp_abort(server);
557 pr_err("tcp: error in recvmsg: %d\n", result);
559 ncp_dbg(1, "tcp: EOF\n");
563 if (server->rcv.ptr) {
564 server->rcv.ptr += result;
566 server->rcv.len -= result;
568 switch (server->rcv.state) {
570 if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
571 pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
572 __ncptcp_abort(server);
575 datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
577 pr_err("tcp: Unexpected reply len %d\n", datalen);
578 __ncptcp_abort(server);
581 #ifdef CONFIG_NCPFS_PACKET_SIGNING
582 if (server->sign_active) {
584 pr_err("tcp: Unexpected reply len %d\n", datalen);
585 __ncptcp_abort(server);
588 server->rcv.buf.len = datalen - 8;
589 server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
591 server->rcv.state = 4;
595 type = ntohs(server->rcv.buf.type);
596 #ifdef CONFIG_NCPFS_PACKET_SIGNING
599 if (type != NCP_REPLY) {
600 if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
601 *(__u16*)(server->unexpected_packet.data) = htons(type);
602 server->unexpected_packet.len = datalen - 8;
604 server->rcv.state = 5;
605 server->rcv.ptr = server->unexpected_packet.data + 2;
606 server->rcv.len = datalen - 10;
609 ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
611 server->rcv.state = 2;
613 server->rcv.ptr = NULL;
614 server->rcv.len = datalen - 10;
617 req = server->rcv.creq;
619 ncp_dbg(1, "Reply without appropriate request\n");
622 if (datalen > req->datalen + 8) {
623 pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
624 server->rcv.state = 3;
627 req->datalen = datalen - 8;
628 ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
629 server->rcv.ptr = server->rxbuf + 2;
630 server->rcv.len = datalen - 10;
631 server->rcv.state = 1;
633 #ifdef CONFIG_NCPFS_PACKET_SIGNING
635 datalen = server->rcv.buf.len;
636 type = ntohs(server->rcv.buf.type2);
640 req = server->rcv.creq;
641 if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
642 if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
643 pr_err("tcp: Bad sequence number\n");
644 __ncp_abort_request(server, req, -EIO);
647 if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
648 pr_err("tcp: Connection number mismatch\n");
649 __ncp_abort_request(server, req, -EIO);
653 #ifdef CONFIG_NCPFS_PACKET_SIGNING
654 if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
655 if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
656 pr_err("tcp: Signature violation\n");
657 __ncp_abort_request(server, req, -EIO);
662 ncp_finish_request(server, req, req->datalen);
664 __ncp_next_request(server);
667 server->rcv.ptr = (unsigned char*)&server->rcv.buf;
668 server->rcv.len = 10;
669 server->rcv.state = 0;
672 ncp_finish_request(server, server->rcv.creq, -EIO);
675 info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
/* rcv.tq work entry point for TCP: run the receive state machine under
 * rcv.creq_mutex. */
681 void ncp_tcp_rcv_proc(struct work_struct *work)
683 struct ncp_server *server =
684 container_of(work, struct ncp_server, rcv.tq);
686 mutex_lock(&server->rcv.creq_mutex);
687 __ncptcp_rcv_proc(server);
688 mutex_unlock(&server->rcv.creq_mutex);
/* tx.tq work entry point for TCP: resume a partial send under rcv.creq_mutex. */
691 void ncp_tcp_tx_proc(struct work_struct *work)
693 struct ncp_server *server =
694 container_of(work, struct ncp_server, tx.tq);
696 mutex_lock(&server->rcv.creq_mutex);
697 __ncptcp_try_send(server);
698 mutex_unlock(&server->rcv.creq_mutex);
/* Synchronous RPC: allocate a request around server->packet ('size' bytes,
 * first 16 bits are the NCP packet type), submit it, and sleep interruptibly
 * until completion.  On a signal the request is aborted with -EINTR (the
 * -ERESTARTSYS return and the final ncp_req_put are elided from this
 * excerpt).  Returns req->result: reply length or negative errno. */
701 static int do_ncp_rpc_call(struct ncp_server *server, int size,
702 unsigned char* reply_buf, int max_reply_size)
705 struct ncp_request_reply *req;
707 req = ncp_alloc_req();
711 req->reply_buf = reply_buf;
712 req->datalen = max_reply_size;
713 req->tx_iov[1].iov_base = server->packet;
714 req->tx_iov[1].iov_len = size;
716 req->tx_totallen = size;
717 req->tx_type = *(u_int16_t*)server->packet;
719 result = ncp_add_request(server, req);
723 if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
724 ncp_abort_request(server, req, -EINTR);
729 result = req->result;
738 * We need the server to be locked here, so check!
/*
 * ncp_do_request - issue one NCP RPC with most signals blocked for the
 * duration of the call.  Only SIGKILL (plus SIGINT/SIGQUIT on NCP_MOUNT_INTR
 * mounts whose handlers are still SIG_DFL) stays deliverable; the previous
 * blocked-set is restored afterwards.  Requires the server to be locked by
 * the caller and the connection to be valid.
 * FIX(review): "&current" had been corrupted to the mis-decoded HTML entity
 * "¤t" ("&curren" -> U+00A4) in five places; restored to "&current".
 * NOTE(review): local declarations, error returns and several brace lines
 * are elided from this excerpt of the listing.
 */
741 static int ncp_do_request(struct ncp_server *server, int size,
742 void* reply, int max_reply_size)
746 if (server->lock == 0) {
747 pr_err("Server not locked!\n")
750 if (!ncp_conn_valid(server)) {
755 unsigned long mask, flags;
757 spin_lock_irqsave(&current->sighand->siglock, flags);
758 old_set = current->blocked;
759 if (current->flags & PF_EXITING)
762 mask = sigmask(SIGKILL);
763 if (server->m.flags & NCP_MOUNT_INTR) {
764 /* FIXME: This doesn't seem right at all. So, like,
765 we can't handle SIGINT and get whatever to stop?
766 What if we've blocked it ourselves? What about
767 alarms? Why, in fact, are we mucking with the
768 sigmask at all? -- r~ */
769 if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
770 mask |= sigmask(SIGINT);
771 if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
772 mask |= sigmask(SIGQUIT);
774 siginitsetinv(&current->blocked, mask);
776 spin_unlock_irqrestore(&current->sighand->siglock, flags);
778 result = do_ncp_rpc_call(server, size, reply, max_reply_size);
780 spin_lock_irqsave(&current->sighand->siglock, flags);
781 current->blocked = old_set;
783 spin_unlock_irqrestore(&current->sighand->siglock, flags);
786 ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);
791 /* ncp_do_request assures that at least a complete reply header is
792 * received. It assumes that server->current_size contains the ncp
/* Build and issue NCP request 'function' from server->packet (header already
 * partially filled by the caller; when has_subfunction is set the first data
 * word carries the payload size).  On success records completion code,
 * connection state and reply sizes on the server and returns the completion
 * code; on failure returns the negative error from ncp_do_request().
 * NOTE(review): parameter line(s) and error-path braces are elided from
 * this excerpt of the listing. */
795 int ncp_request2(struct ncp_server *server, int function,
798 struct ncp_request_header *h;
799 struct ncp_reply_header* reply = rpl;
802 h = (struct ncp_request_header *) (server->packet);
803 if (server->has_subfunction != 0) {
804 *(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
806 h->type = NCP_REQUEST;
808 * The server shouldn't know or care what task is making a
809 * request, so we always use the same task number.
811 h->task = 2; /* (current->pid) & 0xff; */
812 h->function = function;
814 result = ncp_do_request(server, server->current_size, reply, size);
816 ncp_dbg(1, "ncp_request_error: %d\n", result);
819 server->completion = reply->completion_code;
820 server->conn_status = reply->connection_state;
821 server->reply_size = result;
822 server->ncp_reply_size = result - sizeof(struct ncp_reply_header);
824 result = reply->completion_code;
827 ncp_vdbg("completion code=%x\n", result);
/* Open an NCP connection: send NCP_ALLOC_SLOT_REQUEST with the sentinel
 * connection number 0xFFFF and sequence reset, then record the connection
 * number the server assigned in the reply header. */
832 int ncp_connect(struct ncp_server *server)
834 struct ncp_request_header *h;
837 server->connection = 0xFFFF;
838 server->sequence = 255;
840 h = (struct ncp_request_header *) (server->packet);
841 h->type = NCP_ALLOC_SLOT_REQUEST;
842 h->task = 2; /* see above */
845 result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
848 server->connection = h->conn_low + (h->conn_high * 256);
/* Close the NCP connection by sending NCP_DEALLOC_SLOT_REQUEST. */
854 int ncp_disconnect(struct ncp_server *server)
856 struct ncp_request_header *h;
858 h = (struct ncp_request_header *) (server->packet);
859 h->type = NCP_DEALLOC_SLOT_REQUEST;
860 h->task = 2; /* see above */
863 return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
/* Take the per-server mutex and set the 'lock' flag that ncp_do_request()
 * asserts; warns on a double lock (the flag check/set lines are elided). */
866 void ncp_lock_server(struct ncp_server *server)
868 mutex_lock(&server->mutex);
870 pr_warn("%s: was locked!\n", __func__);
/* Clear the 'lock' flag (elided here) and release the per-server mutex;
 * warns if the server was not actually locked. */
874 void ncp_unlock_server(struct ncp_server *server)
877 pr_warn("%s: was not locked!\n", __func__);
881 mutex_unlock(&server->mutex);