fs/ceph/mds_client.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/fs.h>
4 #include <linux/wait.h>
5 #include <linux/slab.h>
6 #include <linux/gfp.h>
7 #include <linux/sched.h>
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10 #include <linux/utsname.h>
11
12 #include "super.h"
13 #include "mds_client.h"
14
15 #include <linux/ceph/ceph_features.h>
16 #include <linux/ceph/messenger.h>
17 #include <linux/ceph/decode.h>
18 #include <linux/ceph/pagelist.h>
19 #include <linux/ceph/auth.h>
20 #include <linux/ceph/debugfs.h>
21
22 /*
23  * A cluster of MDS (metadata server) daemons is responsible for
24  * managing the file system namespace (the directory hierarchy and
25  * inodes) and for coordinating shared access to storage.  Metadata is
26  * partitioned hierarchically across a number of servers, and that
27  * partition varies over time as the cluster adjusts the distribution
28  * in order to balance load.
29  *
30  * The MDS client is primarily responsible for managing synchronous
31  * metadata requests for operations like open, unlink, and so forth.
32  * If there is an MDS failure, we find out about it when we (possibly
33  * request and) receive a new MDS map, and can resubmit affected
34  * requests.
35  *
36  * For the most part, though, we take advantage of a lossless
37  * communications channel to the MDS, and do not need to worry about
38  * timing out or resubmitting requests.
39  *
40  * We maintain a stateful "session" with each MDS we interact with.
41  * Within each session, we send periodic heartbeat messages to ensure
42  * any capabilities or leases we have been issued remain valid.  If
43  * the session times out and goes stale, our leases and capabilities
44  * are no longer valid.
45  */
46
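/* state passed around while building an MDS reconnect message */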
47 struct ceph_reconnect_state {
48         int nr_caps;
49         struct ceph_pagelist *pagelist;
50         bool flock;
51 };
52
53 static void __wake_requests(struct ceph_mds_client *mdsc,
54                             struct list_head *head);
55
56 static const struct ceph_connection_operations mds_con_ops;
57
58
59 /*
60  * mds reply parsing
61  */
62
63 /*
64  * parse individual inode info
65  */
66 static int parse_reply_info_in(void **p, void *end,
67                                struct ceph_mds_reply_info_in *info,
68                                u64 features)
69 {
70         int err = -EIO;
71
72         info->in = *p;
73         *p += sizeof(struct ceph_mds_reply_inode) +
74                 sizeof(*info->in->fragtree.splits) *
75                 le32_to_cpu(info->in->fragtree.nsplits);
76
77         ceph_decode_32_safe(p, end, info->symlink_len, bad);
78         ceph_decode_need(p, end, info->symlink_len, bad);
79         info->symlink = *p;
80         *p += info->symlink_len;
81
82         if (features & CEPH_FEATURE_DIRLAYOUTHASH)
83                 ceph_decode_copy_safe(p, end, &info->dir_layout,
84                                       sizeof(info->dir_layout), bad);
85         else
86                 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
87
88         ceph_decode_32_safe(p, end, info->xattr_len, bad);
89         ceph_decode_need(p, end, info->xattr_len, bad);
90         info->xattr_data = *p;
91         *p += info->xattr_len;
92
93         if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
94                 ceph_decode_64_safe(p, end, info->inline_version, bad);
95                 ceph_decode_32_safe(p, end, info->inline_len, bad);
96                 ceph_decode_need(p, end, info->inline_len, bad);
97                 info->inline_data = *p;
98                 *p += info->inline_len;
99         } else
100                 info->inline_version = CEPH_INLINE_NONE;
101
102         return 0;
103 bad:
104         return err;
105 }
106
107 /*
108  * parse a normal reply, which may contain a (dir+)dentry and/or a
109  * target inode.
110  */
111 static int parse_reply_info_trace(void **p, void *end,
112                                   struct ceph_mds_reply_info_parsed *info,
113                                   u64 features)
114 {
115         int err;
116
117         if (info->head->is_dentry) {
118                 err = parse_reply_info_in(p, end, &info->diri, features);
119                 if (err < 0)
120                         goto out_bad;
121
122                 if (unlikely(*p + sizeof(*info->dirfrag) > end))
123                         goto bad;
124                 info->dirfrag = *p;
125                 *p += sizeof(*info->dirfrag) +
126                         sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
127                 if (unlikely(*p > end))
128                         goto bad;
129
130                 ceph_decode_32_safe(p, end, info->dname_len, bad);
131                 ceph_decode_need(p, end, info->dname_len, bad);
132                 info->dname = *p;
133                 *p += info->dname_len;
134                 info->dlease = *p;
135                 *p += sizeof(*info->dlease);
136         }
137
138         if (info->head->is_target) {
139                 err = parse_reply_info_in(p, end, &info->targeti, features);
140                 if (err < 0)
141                         goto out_bad;
142         }
143
144         if (unlikely(*p != end))
145                 goto bad;
146         return 0;
147
148 bad:
149         err = -EIO;
150 out_bad:
151         pr_err("problem parsing mds trace %d\n", err);
152         return err;
153 }
154
155 /*
156  * parse readdir results
157  */
158 static int parse_reply_info_dir(void **p, void *end,
159                                 struct ceph_mds_reply_info_parsed *info,
160                                 u64 features)
161 {
162         u32 num, i = 0;
163         int err;
164
165         info->dir_dir = *p;
166         if (*p + sizeof(*info->dir_dir) > end)
167                 goto bad;
168         *p += sizeof(*info->dir_dir) +
169                 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
170         if (*p > end)
171                 goto bad;
172
173         ceph_decode_need(p, end, sizeof(num) + 2, bad);
174         num = ceph_decode_32(p);
175         info->dir_end = ceph_decode_8(p);
176         info->dir_complete = ceph_decode_8(p);
177         if (num == 0)
178                 goto done;
179
180         BUG_ON(!info->dir_in);
181         info->dir_dname = (void *)(info->dir_in + num);
182         info->dir_dname_len = (void *)(info->dir_dname + num);
183         info->dir_dlease = (void *)(info->dir_dname_len + num);
184         if ((unsigned long)(info->dir_dlease + num) >
185             (unsigned long)info->dir_in + info->dir_buf_size) {
186                 pr_err("dir contents are larger than expected\n");
187                 WARN_ON(1);
188                 goto bad;
189         }
190
191         info->dir_nr = num;
192         while (num) {
193                 /* dentry */
194                 ceph_decode_need(p, end, sizeof(u32)*2, bad);
195                 info->dir_dname_len[i] = ceph_decode_32(p);
196                 ceph_decode_need(p, end, info->dir_dname_len[i], bad);
197                 info->dir_dname[i] = *p;
198                 *p += info->dir_dname_len[i];
199                 dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
200                      info->dir_dname[i]);
201                 info->dir_dlease[i] = *p;
202                 *p += sizeof(struct ceph_mds_reply_lease);
203
204                 /* inode */
205                 err = parse_reply_info_in(p, end, &info->dir_in[i], features);
206                 if (err < 0)
207                         goto out_bad;
208                 i++;
209                 num--;
210         }
211
212 done:
213         if (*p != end)
214                 goto bad;
215         return 0;
216
217 bad:
218         err = -EIO;
219 out_bad:
220         pr_err("problem parsing dir contents %d\n", err);
221         return err;
222 }
223
224 /*
225  * parse fcntl F_GETLK results
226  */
227 static int parse_reply_info_filelock(void **p, void *end,
228                                      struct ceph_mds_reply_info_parsed *info,
229                                      u64 features)
230 {
231         if (*p + sizeof(*info->filelock_reply) > end)
232                 goto bad;
233
234         info->filelock_reply = *p;
235         *p += sizeof(*info->filelock_reply);
236
237         if (unlikely(*p != end))
238                 goto bad;
239         return 0;
240
241 bad:
242         return -EIO;
243 }
244
245 /*
246  * parse create results
247  */
248 static int parse_reply_info_create(void **p, void *end,
249                                   struct ceph_mds_reply_info_parsed *info,
250                                   u64 features)
251 {
252         if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
253                 if (*p == end) {
254                         info->has_create_ino = false;
255                 } else {
256                         info->has_create_ino = true;
257                         info->ino = ceph_decode_64(p);
258                 }
259         }
260
261         if (unlikely(*p != end))
262                 goto bad;
263         return 0;
264
265 bad:
266         return -EIO;
267 }
268
269 /*
270  * parse extra results
271  */
272 static int parse_reply_info_extra(void **p, void *end,
273                                   struct ceph_mds_reply_info_parsed *info,
274                                   u64 features)
275 {
276         if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
277                 return parse_reply_info_filelock(p, end, info, features);
278         else if (info->head->op == CEPH_MDS_OP_READDIR ||
279                  info->head->op == CEPH_MDS_OP_LSSNAP)
280                 return parse_reply_info_dir(p, end, info, features);
281         else if (info->head->op == CEPH_MDS_OP_CREATE)
282                 return parse_reply_info_create(p, end, info, features);
283         else
284                 return -EIO;
285 }
286
287 /*
288  * parse entire mds reply
289  */
290 static int parse_reply_info(struct ceph_msg *msg,
291                             struct ceph_mds_reply_info_parsed *info,
292                             u64 features)
293 {
294         void *p, *end;
295         u32 len;
296         int err;
297
298         info->head = msg->front.iov_base;
299         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
300         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
301
302         /* trace */
303         ceph_decode_32_safe(&p, end, len, bad);
304         if (len > 0) {
305                 ceph_decode_need(&p, end, len, bad);
306                 err = parse_reply_info_trace(&p, p+len, info, features);
307                 if (err < 0)
308                         goto out_bad;
309         }
310
311         /* extra */
312         ceph_decode_32_safe(&p, end, len, bad);
313         if (len > 0) {
314                 ceph_decode_need(&p, end, len, bad);
315                 err = parse_reply_info_extra(&p, p+len, info, features);
316                 if (err < 0)
317                         goto out_bad;
318         }
319
320         /* snap blob */
321         ceph_decode_32_safe(&p, end, len, bad);
322         info->snapblob_len = len;
323         info->snapblob = p;
324         p += len;
325
326         if (p != end)
327                 goto bad;
328         return 0;
329
330 bad:
331         err = -EIO;
332 out_bad:
333         pr_err("mds parse_reply err %d\n", err);
334         return err;
335 }
336
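/* free the buffer that held parsed readdir entries, if one was allocated */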
337 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
338 {
339         if (!info->dir_in)
340                 return;
341         free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
342 }
343
344
345 /*
346  * sessions
347  */
348 const char *ceph_session_state_name(int s)
349 {
350         switch (s) {
351         case CEPH_MDS_SESSION_NEW: return "new";
352         case CEPH_MDS_SESSION_OPENING: return "opening";
353         case CEPH_MDS_SESSION_OPEN: return "open";
354         case CEPH_MDS_SESSION_HUNG: return "hung";
355         case CEPH_MDS_SESSION_CLOSING: return "closing";
356         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
357         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
358         default: return "???";
359         }
360 }
361
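/* take a session reference, unless the refcount has already reached zero */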
362 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
363 {
364         if (atomic_inc_not_zero(&s->s_ref)) {
365                 dout("mdsc get_session %p %d -> %d\n", s,
366                      atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
367                 return s;
368         } else {
369                 dout("mdsc get_session %p 0 -- FAIL\n", s);
370                 return NULL;
371         }
372 }
373
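/* drop a session reference; free the session (and its authorizer) on the last put */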
374 void ceph_put_mds_session(struct ceph_mds_session *s)
375 {
376         dout("mdsc put_session %p %d -> %d\n", s,
377              atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
378         if (atomic_dec_and_test(&s->s_ref)) {
379                 if (s->s_auth.authorizer)
380                         ceph_auth_destroy_authorizer(
381                                 s->s_mdsc->fsc->client->monc.auth,
382                                 s->s_auth.authorizer);
383                 kfree(s);
384         }
385 }
386
387 /*
388  * called under mdsc->mutex
389  */
390 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
391                                                    int mds)
392 {
393         struct ceph_mds_session *session;
394
395         if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
396                 return NULL;
397         session = mdsc->sessions[mds];
398         dout("lookup_mds_session %p %d\n", session,
399              atomic_read(&session->s_ref));
400         get_session(session);
401         return session;
402 }
403
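/* true if a session is already registered for the given mds */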
404 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
405 {
406         if (mds >= mdsc->max_sessions)
407                 return false;
408         return mdsc->sessions[mds];
409 }
410
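/* return -ENOENT if the session is no longer registered in mdsc->sessions[] */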
411 static int __verify_registered_session(struct ceph_mds_client *mdsc,
412                                        struct ceph_mds_session *s)
413 {
414         if (s->s_mds >= mdsc->max_sessions ||
415             mdsc->sessions[s->s_mds] != s)
416                 return -ENOENT;
417         return 0;
418 }
419
420 /*
421  * create+register a new session for given mds.
422  * called under mdsc->mutex.
423  */
424 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
425                                                  int mds)
426 {
427         struct ceph_mds_session *s;
428
429         if (mds >= mdsc->mdsmap->m_max_mds)
430                 return ERR_PTR(-EINVAL);
431
432         s = kzalloc(sizeof(*s), GFP_NOFS);
433         if (!s)
434                 return ERR_PTR(-ENOMEM);
435         s->s_mdsc = mdsc;
436         s->s_mds = mds;
437         s->s_state = CEPH_MDS_SESSION_NEW;
438         s->s_ttl = 0;
439         s->s_seq = 0;
440         mutex_init(&s->s_mutex);
441
442         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
443
444         spin_lock_init(&s->s_gen_ttl_lock);
445         s->s_cap_gen = 0;
446         s->s_cap_ttl = jiffies - 1;
447
448         spin_lock_init(&s->s_cap_lock);
449         s->s_renew_requested = 0;
450         s->s_renew_seq = 0;
451         INIT_LIST_HEAD(&s->s_caps);
452         s->s_nr_caps = 0;
453         s->s_trim_caps = 0;
454         atomic_set(&s->s_ref, 1);
455         INIT_LIST_HEAD(&s->s_waiting);
456         INIT_LIST_HEAD(&s->s_unsafe);
457         s->s_num_cap_releases = 0;
458         s->s_cap_reconnect = 0;
459         s->s_cap_iterator = NULL;
460         INIT_LIST_HEAD(&s->s_cap_releases);
461         INIT_LIST_HEAD(&s->s_cap_releases_done);
462         INIT_LIST_HEAD(&s->s_cap_flushing);
463         INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
464
465         dout("register_session mds%d\n", mds);
466         if (mds >= mdsc->max_sessions) {
467                 int newmax = 1 << get_count_order(mds+1);
468                 struct ceph_mds_session **sa;
469
470                 dout("register_session realloc to %d\n", newmax);
471                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
472                 if (sa == NULL)
473                         goto fail_realloc;
474                 if (mdsc->sessions) {
475                         memcpy(sa, mdsc->sessions,
476                                mdsc->max_sessions * sizeof(void *));
477                         kfree(mdsc->sessions);
478                 }
479                 mdsc->sessions = sa;
480                 mdsc->max_sessions = newmax;
481         }
482         mdsc->sessions[mds] = s;
483         atomic_inc(&mdsc->num_sessions);
484         atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
485
486         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
487                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
488
489         return s;
490
491 fail_realloc:
492         kfree(s);
493         return ERR_PTR(-ENOMEM);
494 }
495
496 /*
497  * called under mdsc->mutex
498  */
499 static void __unregister_session(struct ceph_mds_client *mdsc,
500                                struct ceph_mds_session *s)
501 {
502         dout("__unregister_session mds%d %p\n", s->s_mds, s);
503         BUG_ON(mdsc->sessions[s->s_mds] != s);
504         mdsc->sessions[s->s_mds] = NULL;
505         ceph_con_close(&s->s_con);
506         ceph_put_mds_session(s);
507         atomic_dec(&mdsc->num_sessions);
508 }
509
510 /*
511  * drop session refs in request.
512  *
513  * should be last request ref, or hold mdsc->mutex
514  */
515 static void put_request_session(struct ceph_mds_request *req)
516 {
517         if (req->r_session) {
518                 ceph_put_mds_session(req->r_session);
519                 req->r_session = NULL;
520         }
521 }
522
523 void ceph_mdsc_release_request(struct kref *kref)
524 {
525         struct ceph_mds_request *req = container_of(kref,
526                                                     struct ceph_mds_request,
527                                                     r_kref);
528         destroy_reply_info(&req->r_reply_info);
529         if (req->r_request)
530                 ceph_msg_put(req->r_request);
531         if (req->r_reply)
532                 ceph_msg_put(req->r_reply);
533         if (req->r_inode) {
534                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
535                 iput(req->r_inode);
536         }
537         if (req->r_locked_dir)
538                 ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
539         iput(req->r_target_inode);
540         if (req->r_dentry)
541                 dput(req->r_dentry);
542         if (req->r_old_dentry)
543                 dput(req->r_old_dentry);
544         if (req->r_old_dentry_dir) {
545                 /*
546                  * track (and drop pins for) r_old_dentry_dir
547                  * separately, since r_old_dentry's d_parent may have
548                  * changed between the dir mutex being dropped and
549                  * this request being freed.
550                  */
551                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
552                                   CEPH_CAP_PIN);
553                 iput(req->r_old_dentry_dir);
554         }
555         kfree(req->r_path1);
556         kfree(req->r_path2);
557         if (req->r_pagelist)
558                 ceph_pagelist_release(req->r_pagelist);
559         put_request_session(req);
560         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
561         kfree(req);
562 }
563
564 /*
565  * lookup request by tid, bump ref if found.
566  *
567  * called under mdsc->mutex.
568  */
569 static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
570                                              u64 tid)
571 {
572         struct ceph_mds_request *req;
573         struct rb_node *n = mdsc->request_tree.rb_node;
574
575         while (n) {
576                 req = rb_entry(n, struct ceph_mds_request, r_node);
577                 if (tid < req->r_tid)
578                         n = n->rb_left;
579                 else if (tid > req->r_tid)
580                         n = n->rb_right;
581                 else {
582                         ceph_mdsc_get_request(req);
583                         return req;
584                 }
585         }
586         return NULL;
587 }
588
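/*
 * Insert a request into the tid-ordered request tree.
 *
 * Called under mdsc->mutex.
 */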
589 static void __insert_request(struct ceph_mds_client *mdsc,
590                              struct ceph_mds_request *new)
591 {
592         struct rb_node **p = &mdsc->request_tree.rb_node;
593         struct rb_node *parent = NULL;
594         struct ceph_mds_request *req = NULL;
595
596         while (*p) {
597                 parent = *p;
598                 req = rb_entry(parent, struct ceph_mds_request, r_node);
599                 if (new->r_tid < req->r_tid)
600                         p = &(*p)->rb_left;
601                 else if (new->r_tid > req->r_tid)
602                         p = &(*p)->rb_right;
603                 else
604                         BUG();
605         }
606
607         rb_link_node(&new->r_node, parent, p);
608         rb_insert_color(&new->r_node, &mdsc->request_tree);
609 }
610
611 /*
612  * Register an in-flight request, and assign a tid.  Link to the directory
613  * we are modifying (if any).
614  *
615  * Called under mdsc->mutex.
616  */
617 static void __register_request(struct ceph_mds_client *mdsc,
618                                struct ceph_mds_request *req,
619                                struct inode *dir)
620 {
621         req->r_tid = ++mdsc->last_tid;
622         if (req->r_num_caps)
623                 ceph_reserve_caps(mdsc, &req->r_caps_reservation,
624                                   req->r_num_caps);
625         dout("__register_request %p tid %lld\n", req, req->r_tid);
626         ceph_mdsc_get_request(req);
627         __insert_request(mdsc, req);
628
629         req->r_uid = current_fsuid();
630         req->r_gid = current_fsgid();
631
632         if (dir) {
633                 struct ceph_inode_info *ci = ceph_inode(dir);
634
635                 ihold(dir);
636                 spin_lock(&ci->i_unsafe_lock);
637                 req->r_unsafe_dir = dir;
638                 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
639                 spin_unlock(&ci->i_unsafe_lock);
640         }
641 }
642
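/*
 * Remove a request from the request tree, drop its unsafe-dir link (if
 * any), and put the tree's reference.
 *
 * Called under mdsc->mutex.
 */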
643 static void __unregister_request(struct ceph_mds_client *mdsc,
644                                  struct ceph_mds_request *req)
645 {
646         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
647         rb_erase(&req->r_node, &mdsc->request_tree);
648         RB_CLEAR_NODE(&req->r_node);
649
650         if (req->r_unsafe_dir) {
651                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
652
653                 spin_lock(&ci->i_unsafe_lock);
654                 list_del_init(&req->r_unsafe_dir_item);
655                 spin_unlock(&ci->i_unsafe_lock);
656
657                 iput(req->r_unsafe_dir);
658                 req->r_unsafe_dir = NULL;
659         }
660
661         complete_all(&req->r_safe_completion);
662
663         ceph_mdsc_put_request(req);
664 }
665
666 /*
667  * Choose mds to send request to next.  If there is a hint set in the
668  * request (e.g., due to a prior forward hint from the mds), use that.
669  * Otherwise, consult frag tree and/or caps to identify the
670  * appropriate mds.  If all else fails, choose randomly.
671  *
672  * Called under mdsc->mutex.
673  */
674 static struct dentry *get_nonsnap_parent(struct dentry *dentry)
675 {
676         /*
677          * we don't need to worry about protecting the d_parent access
678          * here because we never rename inside the snapped namespace
679          * except to resplice to another snapdir, and either the old or new
680          * result is a valid result.
681          */
682         while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
683                 dentry = dentry->d_parent;
684         return dentry;
685 }
686
687 static int __choose_mds(struct ceph_mds_client *mdsc,
688                         struct ceph_mds_request *req)
689 {
690         struct inode *inode;
691         struct ceph_inode_info *ci;
692         struct ceph_cap *cap;
693         int mode = req->r_direct_mode;
694         int mds = -1;
695         u32 hash = req->r_direct_hash;
696         bool is_hash = req->r_direct_is_hash;
697
698         /*
699          * is there a specific mds we should try?  ignore hint if we have
700          * no session and the mds is not up (active or recovering).
701          */
702         if (req->r_resend_mds >= 0 &&
703             (__have_session(mdsc, req->r_resend_mds) ||
704              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
705                 dout("choose_mds using resend_mds mds%d\n",
706                      req->r_resend_mds);
707                 return req->r_resend_mds;
708         }
709
710         if (mode == USE_RANDOM_MDS)
711                 goto random;
712
713         inode = NULL;
714         if (req->r_inode) {
715                 inode = req->r_inode;
716         } else if (req->r_dentry) {
717                 /* ignore race with rename; old or new d_parent is okay */
718                 struct dentry *parent = req->r_dentry->d_parent;
719                 struct inode *dir = d_inode(parent);
720
721                 if (dir->i_sb != mdsc->fsc->sb) {
722                         /* not this fs! */
723                         inode = d_inode(req->r_dentry);
724                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
725                         /* direct snapped/virtual snapdir requests
726                          * based on parent dir inode */
727                         struct dentry *dn = get_nonsnap_parent(parent);
728                         inode = d_inode(dn);
729                         dout("__choose_mds using nonsnap parent %p\n", inode);
730                 } else {
731                         /* dentry target */
732                         inode = d_inode(req->r_dentry);
733                         if (!inode || mode == USE_AUTH_MDS) {
734                                 /* dir + name */
735                                 inode = dir;
736                                 hash = ceph_dentry_hash(dir, req->r_dentry);
737                                 is_hash = true;
738                         }
739                 }
740         }
741
742         dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
743              (int)hash, mode);
744         if (!inode)
745                 goto random;
746         ci = ceph_inode(inode);
747
748         if (is_hash && S_ISDIR(inode->i_mode)) {
749                 struct ceph_inode_frag frag;
750                 int found;
751
752                 ceph_choose_frag(ci, hash, &frag, &found);
753                 if (found) {
754                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
755                                 u8 r;
756
757                                 /* choose a random replica */
758                                 get_random_bytes(&r, 1);
759                                 r %= frag.ndist;
760                                 mds = frag.dist[r];
761                                 dout("choose_mds %p %llx.%llx "
762                                      "frag %u mds%d (%d/%d)\n",
763                                      inode, ceph_vinop(inode),
764                                      frag.frag, mds,
765                                      (int)r, frag.ndist);
766                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
767                                     CEPH_MDS_STATE_ACTIVE)
768                                         return mds;
769                         }
770
771                         /* since this file/dir wasn't known to be
772                          * replicated, we want to look for the
773                          * authoritative mds. */
774                         mode = USE_AUTH_MDS;
775                         if (frag.mds >= 0) {
776                                 /* choose auth mds */
777                                 mds = frag.mds;
778                                 dout("choose_mds %p %llx.%llx "
779                                      "frag %u mds%d (auth)\n",
780                                      inode, ceph_vinop(inode), frag.frag, mds);
781                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
782                                     CEPH_MDS_STATE_ACTIVE)
783                                         return mds;
784                         }
785                 }
786         }
787
788         spin_lock(&ci->i_ceph_lock);
789         cap = NULL;
790         if (mode == USE_AUTH_MDS)
791                 cap = ci->i_auth_cap;
792         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
793                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
794         if (!cap) {
795                 spin_unlock(&ci->i_ceph_lock);
796                 goto random;
797         }
798         mds = cap->session->s_mds;
799         dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
800              inode, ceph_vinop(inode), mds,
801              cap == ci->i_auth_cap ? "auth " : "", cap);
802         spin_unlock(&ci->i_ceph_lock);
803         return mds;
804
805 random:
806         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
807         dout("choose_mds chose random mds%d\n", mds);
808         return mds;
809 }
810
811
812 /*
813  * session messages
814  */
815 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
816 {
817         struct ceph_msg *msg;
818         struct ceph_mds_session_head *h;
819
820         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
821                            false);
822         if (!msg) {
823                 pr_err("create_session_msg ENOMEM creating msg\n");
824                 return NULL;
825         }
826         h = msg->front.iov_base;
827         h->op = cpu_to_le32(op);
828         h->seq = cpu_to_le64(seq);
829
830         return msg;
831 }
832
833 /*
834  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
835  * to include additional client metadata fields.
836  */
837 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
838 {
839         struct ceph_msg *msg;
840         struct ceph_mds_session_head *h;
841         int i = -1;
842         int metadata_bytes = 0;
843         int metadata_key_count = 0;
844         struct ceph_options *opt = mdsc->fsc->client->options;
845         void *p;
846
847         const char* metadata[][2] = {
848                 {"hostname", utsname()->nodename},
849                 {"kernel_version", utsname()->release},
850                 {"entity_id", opt->name ? opt->name : ""},
851                 {NULL, NULL}
852         };
853
854         /* Calculate serialized length of metadata */
855         metadata_bytes = 4;  /* map length */
856         for (i = 0; metadata[i][0] != NULL; ++i) {
857                 metadata_bytes += 8 + strlen(metadata[i][0]) +
858                         strlen(metadata[i][1]);
859                 metadata_key_count++;
860         }
861
862         /* Allocate the message */
863         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
864                            GFP_NOFS, false);
865         if (!msg) {
866                 pr_err("create_session_open_msg ENOMEM creating msg\n");
867                 return NULL;
868         }
869         h = msg->front.iov_base;
870         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
871         h->seq = cpu_to_le64(seq);
872
873         /*
874          * Serialize client metadata into waiting buffer space, using
875          * the format that userspace expects for map<string, string>
876          *
877          * ClientSession messages with metadata are v2
878          */
879         msg->hdr.version = cpu_to_le16(2);
880         msg->hdr.compat_version = cpu_to_le16(1);
881
882         /* The write pointer, following the session_head structure */
883         p = msg->front.iov_base + sizeof(*h);
884
885         /* Number of entries in the map */
886         ceph_encode_32(&p, metadata_key_count);
887
888         /* Two length-prefixed strings for each entry in the map */
889         for (i = 0; metadata[i][0] != NULL; ++i) {
890                 size_t const key_len = strlen(metadata[i][0]);
891                 size_t const val_len = strlen(metadata[i][1]);
892
893                 ceph_encode_32(&p, key_len);
894                 memcpy(p, metadata[i][0], key_len);
895                 p += key_len;
896                 ceph_encode_32(&p, val_len);
897                 memcpy(p, metadata[i][1], val_len);
898                 p += val_len;
899         }
900
901         return msg;
902 }
903
904 /*
905  * send session open request.
906  *
907  * called under mdsc->mutex
908  */
909 static int __open_session(struct ceph_mds_client *mdsc,
910                           struct ceph_mds_session *session)
911 {
912         struct ceph_msg *msg;
913         int mstate;
914         int mds = session->s_mds;
915
916         /* wait for mds to go active? */
917         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
918         dout("open_session to mds%d (%s)\n", mds,
919              ceph_mds_state_name(mstate));
920         session->s_state = CEPH_MDS_SESSION_OPENING;
921         session->s_renew_requested = jiffies;
922
923         /* send connect message */
924         msg = create_session_open_msg(mdsc, session->s_seq);
925         if (!msg)
926                 return -ENOMEM;
927         ceph_con_send(&session->s_con, msg);
928         return 0;
929 }
930
931 /*
932  * look up or open a session to the given export target mds
933  *
934  * called under mdsc->mutex
935  */
936 static struct ceph_mds_session *
937 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
938 {
939         struct ceph_mds_session *session;
940
941         session = __ceph_lookup_mds_session(mdsc, target);
942         if (!session) {
943                 session = register_session(mdsc, target);
944                 if (IS_ERR(session))
945                         return session;
946         }
947         if (session->s_state == CEPH_MDS_SESSION_NEW ||
948             session->s_state == CEPH_MDS_SESSION_CLOSING)
949                 __open_session(mdsc, session);
950
951         return session;
952 }
953
954 struct ceph_mds_session *
955 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
956 {
957         struct ceph_mds_session *session;
958
959         dout("open_export_target_session to mds%d\n", target);
960
961         mutex_lock(&mdsc->mutex);
962         session = __open_export_target_session(mdsc, target);
963         mutex_unlock(&mdsc->mutex);
964
965         return session;
966 }
967
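/*
 * open sessions to all export targets of the given mds
 *
 * called under mdsc->mutex
 */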
968 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
969                                           struct ceph_mds_session *session)
970 {
971         struct ceph_mds_info *mi;
972         struct ceph_mds_session *ts;
973         int i, mds = session->s_mds;
974
975         if (mds >= mdsc->mdsmap->m_max_mds)
976                 return;
977
978         mi = &mdsc->mdsmap->m_info[mds];
979         dout("open_export_target_sessions for mds%d (%d targets)\n",
980              session->s_mds, mi->num_export_targets);
981
982         for (i = 0; i < mi->num_export_targets; i++) {
983                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
984                 if (!IS_ERR(ts))
985                         ceph_put_mds_session(ts);
986         }
987 }
988
989 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
990                                            struct ceph_mds_session *session)
991 {
992         mutex_lock(&mdsc->mutex);
993         __open_export_target_sessions(mdsc, session);
994         mutex_unlock(&mdsc->mutex);
995 }
996
997 /*
998  * session caps
999  */
1000
1001 /*
1002  * Free preallocated cap messages assigned to this session
1003  */
1004 static void cleanup_cap_releases(struct ceph_mds_session *session)
1005 {
1006         struct ceph_msg *msg;
1007
1008         spin_lock(&session->s_cap_lock);
1009         while (!list_empty(&session->s_cap_releases)) {
1010                 msg = list_first_entry(&session->s_cap_releases,
1011                                        struct ceph_msg, list_head);
1012                 list_del_init(&msg->list_head);
1013                 ceph_msg_put(msg);
1014         }
1015         while (!list_empty(&session->s_cap_releases_done)) {
1016                 msg = list_first_entry(&session->s_cap_releases_done,
1017                                        struct ceph_msg, list_head);
1018                 list_del_init(&msg->list_head);
1019                 ceph_msg_put(msg);
1020         }
1021         spin_unlock(&session->s_cap_lock);
1022 }
1023
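/*
 * Drop this session's unsafe requests and reset r_attempts on its
 * remaining requests so they can be resent.
 */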
1024 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1025                                      struct ceph_mds_session *session)
1026 {
1027         struct ceph_mds_request *req;
1028         struct rb_node *p;
1029
1030         dout("cleanup_session_requests mds%d\n", session->s_mds);
1031         mutex_lock(&mdsc->mutex);
1032         while (!list_empty(&session->s_unsafe)) {
1033                 req = list_first_entry(&session->s_unsafe,
1034                                        struct ceph_mds_request, r_unsafe_item);
1035                 list_del_init(&req->r_unsafe_item);
1036                 pr_info(" dropping unsafe request %llu\n", req->r_tid);
1037                 __unregister_request(mdsc, req);
1038         }
1039         /* zero r_attempts, so kick_requests() will re-send requests */
1040         p = rb_first(&mdsc->request_tree);
1041         while (p) {
1042                 req = rb_entry(p, struct ceph_mds_request, r_node);
1043                 p = rb_next(p);
1044                 if (req->r_session &&
1045                     req->r_session->s_mds == session->s_mds)
1046                         req->r_attempts = 0;
1047         }
1048         mutex_unlock(&mdsc->mutex);
1049 }
1050
1051 /*
1052  * Helper to safely iterate over all caps associated with a session, with
1053  * special care taken to handle a racing __ceph_remove_cap().
1054  *
1055  * Caller must hold session s_mutex.
1056  */
1057 static int iterate_session_caps(struct ceph_mds_session *session,
1058                                  int (*cb)(struct inode *, struct ceph_cap *,
1059                                             void *), void *arg)
1060 {
1061         struct list_head *p;
1062         struct ceph_cap *cap;
1063         struct inode *inode, *last_inode = NULL;
1064         struct ceph_cap *old_cap = NULL;
1065         int ret;
1066
1067         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1068         spin_lock(&session->s_cap_lock);
1069         p = session->s_caps.next;
1070         while (p != &session->s_caps) {
1071                 cap = list_entry(p, struct ceph_cap, session_caps);
1072                 inode = igrab(&cap->ci->vfs_inode);
1073                 if (!inode) {
1074                         p = p->next;
1075                         continue;
1076                 }
1077                 session->s_cap_iterator = cap;
1078                 spin_unlock(&session->s_cap_lock);
1079
1080                 if (last_inode) {
1081                         iput(last_inode);
1082                         last_inode = NULL;
1083                 }
1084                 if (old_cap) {
1085                         ceph_put_cap(session->s_mdsc, old_cap);
1086                         old_cap = NULL;
1087                 }
1088
1089                 ret = cb(inode, cap, arg);
1090                 last_inode = inode;
1091
1092                 spin_lock(&session->s_cap_lock);
1093                 p = p->next;
1094                 if (cap->ci == NULL) {
1095                         dout("iterate_session_caps  finishing cap %p removal\n",
1096                              cap);
1097                         BUG_ON(cap->session != session);
1098                         list_del_init(&cap->session_caps);
1099                         session->s_nr_caps--;
1100                         cap->session = NULL;
1101                         old_cap = cap;  /* put_cap it w/o locks held */
1102                 }
1103                 if (ret < 0)
1104                         goto out;
1105         }
1106         ret = 0;
1107 out:
1108         session->s_cap_iterator = NULL;
1109         spin_unlock(&session->s_cap_lock);
1110
1111         iput(last_inode);
1112         if (old_cap)
1113                 ceph_put_cap(session->s_mdsc, old_cap);
1114
1115         return ret;
1116 }
1117
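/*
 * Callback for remove_session_caps(): drop the cap and, if the auth cap
 * is gone, discard any dirty or flushing cap state on the inode.
 */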
1118 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1119                                   void *arg)
1120 {
1121         struct ceph_inode_info *ci = ceph_inode(inode);
1122         int drop = 0;
1123
1124         dout("removing cap %p, ci is %p, inode is %p\n",
1125              cap, ci, &ci->vfs_inode);
1126         spin_lock(&ci->i_ceph_lock);
1127         __ceph_remove_cap(cap, false);
1128         if (!ci->i_auth_cap) {
1129                 struct ceph_mds_client *mdsc =
1130                         ceph_sb_to_client(inode->i_sb)->mdsc;
1131
1132                 spin_lock(&mdsc->cap_dirty_lock);
1133                 if (!list_empty(&ci->i_dirty_item)) {
1134                         pr_info(" dropping dirty %s state for %p %lld\n",
1135                                 ceph_cap_string(ci->i_dirty_caps),
1136                                 inode, ceph_ino(inode));
1137                         ci->i_dirty_caps = 0;
1138                         list_del_init(&ci->i_dirty_item);
1139                         drop = 1;
1140                 }
1141                 if (!list_empty(&ci->i_flushing_item)) {
1142                         pr_info(" dropping dirty+flushing %s state for %p %lld\n",
1143                                 ceph_cap_string(ci->i_flushing_caps),
1144                                 inode, ceph_ino(inode));
1145                         ci->i_flushing_caps = 0;
1146                         list_del_init(&ci->i_flushing_item);
1147                         mdsc->num_cap_flushing--;
1148                         drop = 1;
1149                 }
1150                 spin_unlock(&mdsc->cap_dirty_lock);
1151         }
1152         spin_unlock(&ci->i_ceph_lock);
1153         while (drop--)
1154                 iput(inode);
1155         return 0;
1156 }
1157
1158 /*
1159  * caller must hold session s_mutex
1160  */
1161 static void remove_session_caps(struct ceph_mds_session *session)
1162 {
1163         dout("remove_session_caps on %p\n", session);
1164         iterate_session_caps(session, remove_session_caps_cb, NULL);
1165
1166         spin_lock(&session->s_cap_lock);
1167         if (session->s_nr_caps > 0) {
1168                 struct super_block *sb = session->s_mdsc->fsc->sb;
1169                 struct inode *inode;
1170                 struct ceph_cap *cap, *prev = NULL;
1171                 struct ceph_vino vino;
1172                 /*
1173                  * iterate_session_caps() skips inodes that are being
1174                  * deleted, we need to wait until deletions are complete.
1175                  * __wait_on_freeing_inode() is designed for the job,
1176                  * but it is not exported, so use lookup inode function
1177                  * to access it.
1178                  */
1179                 while (!list_empty(&session->s_caps)) {
1180                         cap = list_entry(session->s_caps.next,
1181                                          struct ceph_cap, session_caps);
1182                         if (cap == prev)
1183                                 break;
1184                         prev = cap;
1185                         vino = cap->ci->i_vino;
1186                         spin_unlock(&session->s_cap_lock);
1187
1188                         inode = ceph_find_inode(sb, vino);
1189                         iput(inode);
1190
1191                         spin_lock(&session->s_cap_lock);
1192                 }
1193         }
1194         spin_unlock(&session->s_cap_lock);
1195
1196         BUG_ON(session->s_nr_caps > 0);
1197         BUG_ON(!list_empty(&session->s_cap_flushing));
1198         cleanup_cap_releases(session);
1199 }
1200
1201 /*
1202  * wake up any threads waiting on this session's caps.  if the cap is
1203  * old (didn't get renewed on the client reconnect), remove it now.
1204  *
1205  * caller must hold s_mutex.
1206  */
1207 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1208                               void *arg)
1209 {
1210         struct ceph_inode_info *ci = ceph_inode(inode);
1211
1212         wake_up_all(&ci->i_cap_wq);
1213         if (arg) {
1214                 spin_lock(&ci->i_ceph_lock);
1215                 ci->i_wanted_max_size = 0;
1216                 ci->i_requested_max_size = 0;
1217                 spin_unlock(&ci->i_ceph_lock);
1218         }
1219         return 0;
1220 }
1221
1222 static void wake_up_session_caps(struct ceph_mds_session *session,
1223                                  int reconnect)
1224 {
1225         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1226         iterate_session_caps(session, wake_up_session_cb,
1227                              (void *)(unsigned long)reconnect);
1228 }
1229
1230 /*
1231  * Send periodic message to MDS renewing all currently held caps.  The
1232  * ack will reset the expiration for all caps from this session.
1233  *
1234  * caller holds s_mutex
1235  */
1236 static int send_renew_caps(struct ceph_mds_client *mdsc,
1237                            struct ceph_mds_session *session)
1238 {
1239         struct ceph_msg *msg;
1240         int state;
1241
1242         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1243             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1244                 pr_info("mds%d caps stale\n", session->s_mds);
1245         session->s_renew_requested = jiffies;
1246
1247         /* do not try to renew caps until a recovering mds has reconnected
1248          * with its clients. */
1249         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1250         if (state < CEPH_MDS_STATE_RECONNECT) {
1251                 dout("send_renew_caps ignoring mds%d (%s)\n",
1252                      session->s_mds, ceph_mds_state_name(state));
1253                 return 0;
1254         }
1255
1256         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1257                 ceph_mds_state_name(state));
1258         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1259                                  ++session->s_renew_seq);
1260         if (!msg)
1261                 return -ENOMEM;
1262         ceph_con_send(&session->s_con, msg);
1263         return 0;
1264 }
1265
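/* acknowledge a CEPH_SESSION_FLUSHMSG from the mds */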
1266 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1267                              struct ceph_mds_session *session, u64 seq)
1268 {
1269         struct ceph_msg *msg;
1270
1271         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1272              session->s_mds, ceph_session_state_name(session->s_state), seq);
1273         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1274         if (!msg)
1275                 return -ENOMEM;
1276         ceph_con_send(&session->s_con, msg);
1277         return 0;
1278 }
1279
1280
1281 /*
1282  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1283  *
1284  * Called under session->s_mutex
1285  */
1286 static void renewed_caps(struct ceph_mds_client *mdsc,
1287                          struct ceph_mds_session *session, int is_renew)
1288 {
1289         int was_stale;
1290         int wake = 0;
1291
1292         spin_lock(&session->s_cap_lock);
1293         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1294
1295         session->s_cap_ttl = session->s_renew_requested +
1296                 mdsc->mdsmap->m_session_timeout*HZ;
1297
1298         if (was_stale) {
1299                 if (time_before(jiffies, session->s_cap_ttl)) {
1300                         pr_info("mds%d caps renewed\n", session->s_mds);
1301                         wake = 1;
1302                 } else {
1303                         pr_info("mds%d caps still stale\n", session->s_mds);
1304                 }
1305         }
1306         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1307              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1308              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1309         spin_unlock(&session->s_cap_lock);
1310
1311         if (wake)
1312                 wake_up_session_caps(session, 0);
1313 }
1314
1315 /*
1316  * send a session close request
1317  */
1318 static int request_close_session(struct ceph_mds_client *mdsc,
1319                                  struct ceph_mds_session *session)
1320 {
1321         struct ceph_msg *msg;
1322
1323         dout("request_close_session mds%d state %s seq %lld\n",
1324              session->s_mds, ceph_session_state_name(session->s_state),
1325              session->s_seq);
1326         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1327         if (!msg)
1328                 return -ENOMEM;
1329         ceph_con_send(&session->s_con, msg);
1330         return 0;
1331 }
1332
1333 /*
1334  * Called with s_mutex held.
1335  */
1336 static int __close_session(struct ceph_mds_client *mdsc,
1337                          struct ceph_mds_session *session)
1338 {
1339         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1340                 return 0;
1341         session->s_state = CEPH_MDS_SESSION_CLOSING;
1342         return request_close_session(mdsc, session);
1343 }
1344
1345 /*
1346  * Trim old(er) caps.
1347  *
1348  * Because we can't cache an inode without one or more caps, we do
1349  * this indirectly: if a cap is unused, we prune its aliases, at which
1350  * point the inode will hopefully get dropped too.
1351  *
1352  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1353  * memory pressure from the MDS, though, so it needn't be perfect.
1354  */
1355 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1356 {
1357         struct ceph_mds_session *session = arg;
1358         struct ceph_inode_info *ci = ceph_inode(inode);
1359         int used, wanted, oissued, mine;
1360
1361         if (session->s_trim_caps <= 0)
1362                 return -1;
1363
1364         spin_lock(&ci->i_ceph_lock);
1365         mine = cap->issued | cap->implemented;
1366         used = __ceph_caps_used(ci);
1367         wanted = __ceph_caps_file_wanted(ci);
1368         oissued = __ceph_caps_issued_other(ci, cap);
1369
1370         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1371              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1372              ceph_cap_string(used), ceph_cap_string(wanted));
1373         if (cap == ci->i_auth_cap) {
1374                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1375                     !list_empty(&ci->i_cap_snaps))
1376                         goto out;
1377                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1378                         goto out;
1379         }
1380         if ((used | wanted) & ~oissued & mine)
1381                 goto out;   /* we need these caps */
1382
1383         session->s_trim_caps--;
1384         if (oissued) {
1385                 /* we aren't the only cap.. just remove us */
1386                 __ceph_remove_cap(cap, true);
1387         } else {
1388                 /* try to drop referring dentries */
1389                 spin_unlock(&ci->i_ceph_lock);
1390                 d_prune_aliases(inode);
1391                 dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
1392                      inode, cap, atomic_read(&inode->i_count));
1393                 return 0;
1394         }
1395
1396 out:
1397         spin_unlock(&ci->i_ceph_lock);
1398         return 0;
1399 }
1400
1401 /*
1402  * Trim session cap count down to some max number.
1403  */
1404 static int trim_caps(struct ceph_mds_client *mdsc,
1405                      struct ceph_mds_session *session,
1406                      int max_caps)
1407 {
1408         int trim_caps = session->s_nr_caps - max_caps;
1409
1410         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1411              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1412         if (trim_caps > 0) {
1413                 session->s_trim_caps = trim_caps;
1414                 iterate_session_caps(session, trim_caps_cb, session);
1415                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1416                      session->s_mds, session->s_nr_caps, max_caps,
1417                         trim_caps - session->s_trim_caps);
1418                 session->s_trim_caps = 0;
1419         }
1420
1421         ceph_add_cap_releases(mdsc, session);
1422         ceph_send_cap_releases(mdsc, session);
1423         return 0;
1424 }
1425
1426 /*
1427  * Allocate cap_release messages.  If there is a partially full message
1428  * in the queue, try to allocate enough to cover its remainder, so that
1429  * we can send it immediately.
1430  *
1431  * Called under s_mutex.
1432  */
1433 int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
1434                           struct ceph_mds_session *session)
1435 {
1436         struct ceph_msg *msg, *partial = NULL;
1437         struct ceph_mds_cap_release *head;
1438         int err = -ENOMEM;
1439         int extra = mdsc->fsc->mount_options->cap_release_safety;
1440         int num;
1441
1442         dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
1443              extra);
1444
1445         spin_lock(&session->s_cap_lock);
1446
1447         if (!list_empty(&session->s_cap_releases)) {
1448                 msg = list_first_entry(&session->s_cap_releases,
1449                                        struct ceph_msg,
1450                                  list_head);
1451                 head = msg->front.iov_base;
1452                 num = le32_to_cpu(head->num);
1453                 if (num) {
1454                         dout(" partial %p with (%d/%d)\n", msg, num,
1455                              (int)CEPH_CAPS_PER_RELEASE);
1456                         extra += CEPH_CAPS_PER_RELEASE - num;
1457                         partial = msg;
1458                 }
1459         }
1460         while (session->s_num_cap_releases < session->s_nr_caps + extra) {
1461                 spin_unlock(&session->s_cap_lock);
1462                 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
1463                                    GFP_NOFS, false);
1464                 if (!msg)
1465                         goto out_unlocked;
1466                 dout("add_cap_releases %p msg %p now %d\n", session, msg,
1467                      (int)msg->front.iov_len);
1468                 head = msg->front.iov_base;
1469                 head->num = cpu_to_le32(0);
1470                 msg->front.iov_len = sizeof(*head);
1471                 spin_lock(&session->s_cap_lock);
1472                 list_add(&msg->list_head, &session->s_cap_releases);
1473                 session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
1474         }
1475
1476         if (partial) {
1477                 head = partial->front.iov_base;
1478                 num = le32_to_cpu(head->num);
1479                 dout(" queueing partial %p with %d/%d\n", partial, num,
1480                      (int)CEPH_CAPS_PER_RELEASE);
1481                 list_move_tail(&partial->list_head,
1482                                &session->s_cap_releases_done);
1483                 session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
1484         }
1485         err = 0;
1486         spin_unlock(&session->s_cap_lock);
1487 out_unlocked:
1488         return err;
1489 }
1490
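/*
 * Return true once this inode has flushed its caps through want_flush_seq
 * and its cap snaps through want_snap_seq.
 */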
1491 static int check_cap_flush(struct ceph_inode_info *ci,
1492                            u64 want_flush_seq, u64 want_snap_seq)
1493 {
1494         int ret1 = 1, ret2 = 1;
1495         spin_lock(&ci->i_ceph_lock);
1496         if (want_flush_seq > 0 && ci->i_flushing_caps)
1497                 ret1 = ci->i_cap_flush_seq >= want_flush_seq;
1498
1499         if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
1500                 struct ceph_cap_snap *capsnap =
1501                         list_first_entry(&ci->i_cap_snaps,
1502                                          struct ceph_cap_snap, ci_item);
1503                 ret2 = capsnap->follows >= want_snap_seq;
1504         }
1505         spin_unlock(&ci->i_ceph_lock);
1506         return ret1 && ret2;
1507 }
1508
1509 /*
1510  * wait for all sessions to finish flushing caps (and cap snaps).
1511  *
1512  * blocks until we've flushed through want_flush_seq and want_snap_seq
1513  */
1514 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1515                             u64 want_flush_seq, u64 want_snap_seq)
1516 {
1517         int mds;
1518
1519         dout("check_cap_flush want %lld\n", want_flush_seq);
1520         mutex_lock(&mdsc->mutex);
1521         for (mds = 0; mds < mdsc->max_sessions; ) {
1522                 struct ceph_mds_session *session = mdsc->sessions[mds];
1523                 struct inode *inode1 = NULL, *inode2 = NULL;
1524
1525                 if (!session) {
1526                         mds++;
1527                         continue;
1528                 }
1529                 get_session(session);
1530                 mutex_unlock(&mdsc->mutex);
1531
1532                 mutex_lock(&session->s_mutex);
1533                 if (!list_empty(&session->s_cap_flushing)) {
1534                         struct ceph_inode_info *ci =
1535                                 list_first_entry(&session->s_cap_flushing,
1536                                                  struct ceph_inode_info,
1537                                                  i_flushing_item);
1538
1539                         if (!check_cap_flush(ci, want_flush_seq, 0)) {
1540                                 dout("check_cap_flush still flushing %p "
1541                                      "seq %lld <= %lld to mds%d\n",
1542                                      &ci->vfs_inode, ci->i_cap_flush_seq,
1543                                      want_flush_seq, mds);
1544                                 inode1 = igrab(&ci->vfs_inode);
1545                         }
1546                 }
1547                 if (!list_empty(&session->s_cap_snaps_flushing)) {
1548                         struct ceph_cap_snap *capsnap =
1549                                 list_first_entry(&session->s_cap_snaps_flushing,
1550                                                  struct ceph_cap_snap,
1551                                                  flushing_item);
1552                         struct ceph_inode_info *ci = capsnap->ci;
1553                         if (!check_cap_flush(ci, 0, want_snap_seq)) {
1554                                 dout("check_cap_flush still flushing snap %p "
1555                                      "follows %lld <= %lld to mds%d\n",
1556                                      &ci->vfs_inode, capsnap->follows,
1557                                      want_snap_seq, mds);
1558                                 inode2 = igrab(&ci->vfs_inode);
1559                         }
1560                 }
1561                 mutex_unlock(&session->s_mutex);
1562                 ceph_put_mds_session(session);
1563
1564                 if (inode1) {
1565                         wait_event(mdsc->cap_flushing_wq,
1566                                    check_cap_flush(ceph_inode(inode1),
1567                                                    want_flush_seq, 0));
1568                         iput(inode1);
1569                 }
1570                 if (inode2) {
1571                         wait_event(mdsc->cap_flushing_wq,
1572                                    check_cap_flush(ceph_inode(inode2),
1573                                                    0, want_snap_seq));
1574                         iput(inode2);
1575                 }
1576
1577                 if (!inode1 && !inode2)
1578                         mds++;
1579
1580                 mutex_lock(&mdsc->mutex);
1581         }
1582
1583         mutex_unlock(&mdsc->mutex);
1584         dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
1585 }
1586
1587 /*
1588  * send queued cap release messages to the MDS.  called under s_mutex
1589  */
1590 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1591                             struct ceph_mds_session *session)
1592 {
1593         struct ceph_msg *msg;
1594
1595         dout("send_cap_releases mds%d\n", session->s_mds);
1596         spin_lock(&session->s_cap_lock);
1597         while (!list_empty(&session->s_cap_releases_done)) {
1598                 msg = list_first_entry(&session->s_cap_releases_done,
1599                                  struct ceph_msg, list_head);
1600                 list_del_init(&msg->list_head);
1601                 spin_unlock(&session->s_cap_lock);
1602                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1603                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1604                 ceph_con_send(&session->s_con, msg);
1605                 spin_lock(&session->s_cap_lock);
1606         }
1607         spin_unlock(&session->s_cap_lock);
1608 }
1609
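/*
 * Throw away queued cap release state for this session: zero out the
 * partially composed message at the head of s_cap_releases and move any
 * completed messages back onto s_cap_releases for reuse, crediting the
 * discarded cap counts back to s_num_cap_releases.  Used when we are
 * about to reestablish cap state with the MDS (see send_mds_reconnect).
 */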
1610 static void discard_cap_releases(struct ceph_mds_client *mdsc,
1611                                  struct ceph_mds_session *session)
1612 {
1613         struct ceph_msg *msg;
1614         struct ceph_mds_cap_release *head;
1615         unsigned num;
1616
1617         dout("discard_cap_releases mds%d\n", session->s_mds);
1618
1619         if (!list_empty(&session->s_cap_releases)) {
1620                 /* zero out the in-progress message */
1621                 msg = list_first_entry(&session->s_cap_releases,
1622                                         struct ceph_msg, list_head);
1623                 head = msg->front.iov_base;
1624                 num = le32_to_cpu(head->num);
1625                 dout("discard_cap_releases mds%d %p %u\n",
1626                      session->s_mds, msg, num);
1627                 head->num = cpu_to_le32(0);
1628                 msg->front.iov_len = sizeof(*head);
1629                 session->s_num_cap_releases += num;
1630         }
1631
1632         /* requeue completed messages */
1633         while (!list_empty(&session->s_cap_releases_done)) {
1634                 msg = list_first_entry(&session->s_cap_releases_done,
1635                                  struct ceph_msg, list_head);
1636                 list_del_init(&msg->list_head);
1637
1638                 head = msg->front.iov_base;
1639                 num = le32_to_cpu(head->num);
1640                 dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
1641                      num);
1642                 session->s_num_cap_releases += num;
1643                 head->num = cpu_to_le32(0);
1644                 msg->front.iov_len = sizeof(*head);
1645                 list_add(&msg->list_head, &session->s_cap_releases);
1646         }
1647 }
1648
1649 /*
1650  * requests
1651  */
1652
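/*
 * Allocate a reply buffer for a readdir request on @dir, sized from the
 * directory's current file/subdir counts and capped by the max_readdir
 * mount option, then fill in the request's readdir arguments
 * (r_num_caps, max_entries, max_bytes) to match.
 */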
1653 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1654                                     struct inode *dir)
1655 {
1656         struct ceph_inode_info *ci = ceph_inode(dir);
1657         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1658         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1659         size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
1660                       sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
1661         int order, num_entries;
1662
1663         spin_lock(&ci->i_ceph_lock);
1664         num_entries = ci->i_files + ci->i_subdirs;
1665         spin_unlock(&ci->i_ceph_lock);
1666         num_entries = max(num_entries, 1);
1667         num_entries = min(num_entries, opt->max_readdir);
1668
1669         order = get_order(size * num_entries);
1670         while (order >= 0) {
1671                 rinfo->dir_in = (void*)__get_free_pages(GFP_NOFS | __GFP_NOWARN,
1672                                                         order);
1673                 if (rinfo->dir_in)
1674                         break;
1675                 order--;
1676         }
1677         if (!rinfo->dir_in)
1678                 return -ENOMEM;
1679
1680         num_entries = (PAGE_SIZE << order) / size;
1681         num_entries = min(num_entries, opt->max_readdir);
1682
1683         rinfo->dir_buf_size = PAGE_SIZE << order;
1684         req->r_num_caps = num_entries + 1;
1685         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1686         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1687         return 0;
1688 }
1689
1690 /*
1691  * Create an mds request.
1692  */
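/*
 * Minimal sketch of a caller's request lifecycle (illustrative only;
 * real callers also fill in fields such as r_locked_dir and r_args,
 * and hold whatever VFS locks their operation requires):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */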
1693 struct ceph_mds_request *
1694 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1695 {
1696         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1697
1698         if (!req)
1699                 return ERR_PTR(-ENOMEM);
1700
1701         mutex_init(&req->r_fill_mutex);
1702         req->r_mdsc = mdsc;
1703         req->r_started = jiffies;
1704         req->r_resend_mds = -1;
1705         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1706         req->r_fmode = -1;
1707         kref_init(&req->r_kref);
1708         INIT_LIST_HEAD(&req->r_wait);
1709         init_completion(&req->r_completion);
1710         init_completion(&req->r_safe_completion);
1711         INIT_LIST_HEAD(&req->r_unsafe_item);
1712
1713         req->r_stamp = CURRENT_TIME;
1714
1715         req->r_op = op;
1716         req->r_direct_mode = mode;
1717         return req;
1718 }
1719
1720 /*
1721  * return the oldest (lowest tid) request in the request tree, or NULL if none.
1722  *
1723  * called under mdsc->mutex.
1724  */
1725 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1726 {
1727         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1728                 return NULL;
1729         return rb_entry(rb_first(&mdsc->request_tree),
1730                         struct ceph_mds_request, r_node);
1731 }
1732
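/* tid of the oldest registered request, or 0 if there are none */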
1733 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1734 {
1735         struct ceph_mds_request *req = __get_oldest_req(mdsc);
1736
1737         if (req)
1738                 return req->r_tid;
1739         return 0;
1740 }
1741
1742 /*
1743  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1744  * on build_path_from_dentry in fs/cifs/dir.c.
1745  *
1746  * If @stop_on_nosnap, generate path relative to the first non-snapped
1747  * inode.
1748  *
1749  * Encode hidden .snap dirs as a double /, i.e.
1750  *   foo/.snap/bar -> foo//bar
1751  */
1752 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1753                            int stop_on_nosnap)
1754 {
1755         struct dentry *temp;
1756         char *path;
1757         int len, pos;
1758         unsigned seq;
1759
1760         if (dentry == NULL)
1761                 return ERR_PTR(-EINVAL);
1762
1763 retry:
1764         len = 0;
1765         seq = read_seqbegin(&rename_lock);
1766         rcu_read_lock();
1767         for (temp = dentry; !IS_ROOT(temp);) {
1768                 struct inode *inode = d_inode(temp);
1769                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1770                         len++;  /* slash only */
1771                 else if (stop_on_nosnap && inode &&
1772                          ceph_snap(inode) == CEPH_NOSNAP)
1773                         break;
1774                 else
1775                         len += 1 + temp->d_name.len;
1776                 temp = temp->d_parent;
1777         }
1778         rcu_read_unlock();
1779         if (len)
1780                 len--;  /* no leading '/' */
1781
1782         path = kmalloc(len+1, GFP_NOFS);
1783         if (path == NULL)
1784                 return ERR_PTR(-ENOMEM);
1785         pos = len;
1786         path[pos] = 0;  /* trailing null */
1787         rcu_read_lock();
1788         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1789                 struct inode *inode;
1790
1791                 spin_lock(&temp->d_lock);
1792                 inode = d_inode(temp);
1793                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1794                         dout("build_path path+%d: %p SNAPDIR\n",
1795                              pos, temp);
1796                 } else if (stop_on_nosnap && inode &&
1797                            ceph_snap(inode) == CEPH_NOSNAP) {
1798                         spin_unlock(&temp->d_lock);
1799                         break;
1800                 } else {
1801                         pos -= temp->d_name.len;
1802                         if (pos < 0) {
1803                                 spin_unlock(&temp->d_lock);
1804                                 break;
1805                         }
1806                         strncpy(path + pos, temp->d_name.name,
1807                                 temp->d_name.len);
1808                 }
1809                 spin_unlock(&temp->d_lock);
1810                 if (pos)
1811                         path[--pos] = '/';
1812                 temp = temp->d_parent;
1813         }
1814         rcu_read_unlock();
1815         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1816                 pr_err("build_path did not end path lookup where "
1817                        "expected, namelen is %d, pos is %d\n", len, pos);
1818                 /* presumably this is only possible if racing with a
1819                    rename of one of the parent directories (we can not
1820                    lock the dentries above us to prevent this, but
1821                    retrying should be harmless) */
1822                 kfree(path);
1823                 goto retry;
1824         }
1825
1826         *base = ceph_ino(d_inode(temp));
1827         *plen = len;
1828         dout("build_path on %p %d built %llx '%.*s'\n",
1829              dentry, d_count(dentry), *base, len, path);
1830         return path;
1831 }
1832
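/*
 * Encode a dentry as request path arguments: if the parent directory is
 * not snapped, just send the parent ino plus the dentry name; otherwise
 * build a full path relative to the nearest non-snapped ancestor.  Sets
 * *pfreepath when the caller must kfree() *ppath.
 */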
1833 static int build_dentry_path(struct dentry *dentry,
1834                              const char **ppath, int *ppathlen, u64 *pino,
1835                              int *pfreepath)
1836 {
1837         char *path;
1838
1839         if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
1840                 *pino = ceph_ino(d_inode(dentry->d_parent));
1841                 *ppath = dentry->d_name.name;
1842                 *ppathlen = dentry->d_name.len;
1843                 return 0;
1844         }
1845         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1846         if (IS_ERR(path))
1847                 return PTR_ERR(path);
1848         *ppath = path;
1849         *pfreepath = 1;
1850         return 0;
1851 }
1852
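/*
 * Encode an inode as request path arguments: a non-snapped inode is
 * identified by its ino alone (empty path); otherwise find an alias
 * dentry and build a path from it.
 */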
1853 static int build_inode_path(struct inode *inode,
1854                             const char **ppath, int *ppathlen, u64 *pino,
1855                             int *pfreepath)
1856 {
1857         struct dentry *dentry;
1858         char *path;
1859
1860         if (ceph_snap(inode) == CEPH_NOSNAP) {
1861                 *pino = ceph_ino(inode);
1862                 *ppathlen = 0;
1863                 return 0;
1864         }
1865         dentry = d_find_alias(inode);
1866         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1867         dput(dentry);
1868         if (IS_ERR(path))
1869                 return PTR_ERR(path);
1870         *ppath = path;
1871         *pfreepath = 1;
1872         return 0;
1873 }
1874
1875 /*
1876  * request arguments may be specified via an inode *, a dentry *, or
1877  * an explicit ino+path.
1878  */
1879 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1880                                   const char *rpath, u64 rino,
1881                                   const char **ppath, int *pathlen,
1882                                   u64 *ino, int *freepath)
1883 {
1884         int r = 0;
1885
1886         if (rinode) {
1887                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1888                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1889                      ceph_snap(rinode));
1890         } else if (rdentry) {
1891                 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1892                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1893                      *ppath);
1894         } else if (rpath || rino) {
1895                 *ino = rino;
1896                 *ppath = rpath;
1897                 *pathlen = rpath ? strlen(rpath) : 0;
1898                 dout(" path %.*s\n", *pathlen, rpath);
1899         }
1900
1901         return r;
1902 }
1903
1904 /*
1905  * called under mdsc->mutex
1906  */
1907 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1908                                                struct ceph_mds_request *req,
1909                                                int mds, bool drop_cap_releases)
1910 {
1911         struct ceph_msg *msg;
1912         struct ceph_mds_request_head *head;
1913         const char *path1 = NULL;
1914         const char *path2 = NULL;
1915         u64 ino1 = 0, ino2 = 0;
1916         int pathlen1 = 0, pathlen2 = 0;
1917         int freepath1 = 0, freepath2 = 0;
1918         int len;
1919         u16 releases;
1920         void *p, *end;
1921         int ret;
1922
1923         ret = set_request_path_attr(req->r_inode, req->r_dentry,
1924                               req->r_path1, req->r_ino1.ino,
1925                               &path1, &pathlen1, &ino1, &freepath1);
1926         if (ret < 0) {
1927                 msg = ERR_PTR(ret);
1928                 goto out;
1929         }
1930
1931         ret = set_request_path_attr(NULL, req->r_old_dentry,
1932                               req->r_path2, req->r_ino2.ino,
1933                               &path2, &pathlen2, &ino2, &freepath2);
1934         if (ret < 0) {
1935                 msg = ERR_PTR(ret);
1936                 goto out_free1;
1937         }
1938
1939         len = sizeof(*head) +
1940                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1941                 sizeof(struct timespec);
1942
1943         /* calculate (max) length for cap releases */
1944         len += sizeof(struct ceph_mds_request_release) *
1945                 (!!req->r_inode_drop + !!req->r_dentry_drop +
1946                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1947         if (req->r_dentry_drop)
1948                 len += req->r_dentry->d_name.len;
1949         if (req->r_old_dentry_drop)
1950                 len += req->r_old_dentry->d_name.len;
1951
1952         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
1953         if (!msg) {
1954                 msg = ERR_PTR(-ENOMEM);
1955                 goto out_free2;
1956         }
1957
1958         msg->hdr.version = cpu_to_le16(2);
1959         msg->hdr.tid = cpu_to_le64(req->r_tid);
1960
1961         head = msg->front.iov_base;
1962         p = msg->front.iov_base + sizeof(*head);
1963         end = msg->front.iov_base + msg->front.iov_len;
1964
1965         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1966         head->op = cpu_to_le32(req->r_op);
1967         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
1968         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
1969         head->args = req->r_args;
1970
1971         ceph_encode_filepath(&p, end, ino1, path1);
1972         ceph_encode_filepath(&p, end, ino2, path2);
1973
1974         /* make note of release offset, in case we need to replay */
1975         req->r_request_release_offset = p - msg->front.iov_base;
1976
1977         /* cap releases */
1978         releases = 0;
1979         if (req->r_inode_drop)
1980                 releases += ceph_encode_inode_release(&p,
1981                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
1982                       mds, req->r_inode_drop, req->r_inode_unless, 0);
1983         if (req->r_dentry_drop)
1984                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1985                        mds, req->r_dentry_drop, req->r_dentry_unless);
1986         if (req->r_old_dentry_drop)
1987                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1988                        mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1989         if (req->r_old_inode_drop)
1990                 releases += ceph_encode_inode_release(&p,
1991                       d_inode(req->r_old_dentry),
1992                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1993
1994         if (drop_cap_releases) {
1995                 releases = 0;
1996                 p = msg->front.iov_base + req->r_request_release_offset;
1997         }
1998
1999         head->num_releases = cpu_to_le16(releases);
2000
2001         /* time stamp */
2002         {
2003                 struct ceph_timespec ts;
2004                 ceph_encode_timespec(&ts, &req->r_stamp);
2005                 ceph_encode_copy(&p, &ts, sizeof(ts));
2006         }
2007
2008         BUG_ON(p > end);
2009         msg->front.iov_len = p - msg->front.iov_base;
2010         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2011
2012         if (req->r_pagelist) {
2013                 struct ceph_pagelist *pagelist = req->r_pagelist;
2014                 atomic_inc(&pagelist->refcnt);
2015                 ceph_msg_data_add_pagelist(msg, pagelist);
2016                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2017         } else {
2018                 msg->hdr.data_len = 0;
2019         }
2020
2021         msg->hdr.data_off = cpu_to_le16(0);
2022
2023 out_free2:
2024         if (freepath2)
2025                 kfree((char *)path2);
2026 out_free1:
2027         if (freepath1)
2028                 kfree((char *)path1);
2029 out:
2030         return msg;
2031 }
2032
2033 /*
2034  * called under mdsc->mutex if error, under no mutex if
2035  * success.
2036  */
2037 static void complete_request(struct ceph_mds_client *mdsc,
2038                              struct ceph_mds_request *req)
2039 {
2040         if (req->r_callback)
2041                 req->r_callback(mdsc, req);
2042         else
2043                 complete_all(&req->r_completion);
2044 }
2045
2046 /*
2047  * called under mdsc->mutex
2048  */
2049 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2050                                   struct ceph_mds_request *req,
2051                                   int mds, bool drop_cap_releases)
2052 {
2053         struct ceph_mds_request_head *rhead;
2054         struct ceph_msg *msg;
2055         int flags = 0;
2056
2057         req->r_attempts++;
2058         if (req->r_inode) {
2059                 struct ceph_cap *cap =
2060                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2061
2062                 if (cap)
2063                         req->r_sent_on_mseq = cap->mseq;
2064                 else
2065                         req->r_sent_on_mseq = -1;
2066         }
2067         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2068              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2069
2070         if (req->r_got_unsafe) {
2071                 void *p;
2072                 /*
2073                  * Replay.  Do not regenerate message (and rebuild
2074                  * paths, etc.); just use the original message.
2075                  * Rebuilding paths will break for renames because
2076                  * d_move mangles the src name.
2077                  */
2078                 msg = req->r_request;
2079                 rhead = msg->front.iov_base;
2080
2081                 flags = le32_to_cpu(rhead->flags);
2082                 flags |= CEPH_MDS_FLAG_REPLAY;
2083                 rhead->flags = cpu_to_le32(flags);
2084
2085                 if (req->r_target_inode)
2086                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2087
2088                 rhead->num_retry = req->r_attempts - 1;
2089
2090                 /* remove cap/dentry releases from message */
2091                 rhead->num_releases = 0;
2092
2093                 /* time stamp */
2094                 p = msg->front.iov_base + req->r_request_release_offset;
2095                 {
2096                         struct ceph_timespec ts;
2097                         ceph_encode_timespec(&ts, &req->r_stamp);
2098                         ceph_encode_copy(&p, &ts, sizeof(ts));
2099                 }
2100
2101                 msg->front.iov_len = p - msg->front.iov_base;
2102                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2103                 return 0;
2104         }
2105
2106         if (req->r_request) {
2107                 ceph_msg_put(req->r_request);
2108                 req->r_request = NULL;
2109         }
2110         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2111         if (IS_ERR(msg)) {
2112                 req->r_err = PTR_ERR(msg);
2113                 complete_request(mdsc, req);
2114                 return PTR_ERR(msg);
2115         }
2116         req->r_request = msg;
2117
2118         rhead = msg->front.iov_base;
2119         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2120         if (req->r_got_unsafe)
2121                 flags |= CEPH_MDS_FLAG_REPLAY;
2122         if (req->r_locked_dir)
2123                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2124         rhead->flags = cpu_to_le32(flags);
2125         rhead->num_fwd = req->r_num_fwd;
2126         rhead->num_retry = req->r_attempts - 1;
2127         rhead->ino = 0;
2128
2129         dout(" r_locked_dir = %p\n", req->r_locked_dir);
2130         return 0;
2131 }
2132
2133 /*
2134  * send request, or put it on the appropriate wait list.
2135  */
2136 static int __do_request(struct ceph_mds_client *mdsc,
2137                         struct ceph_mds_request *req)
2138 {
2139         struct ceph_mds_session *session = NULL;
2140         int mds = -1;
2141         int err = -EAGAIN;
2142
2143         if (req->r_err || req->r_got_result) {
2144                 if (req->r_aborted)
2145                         __unregister_request(mdsc, req);
2146                 goto out;
2147         }
2148
2149         if (req->r_timeout &&
2150             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2151                 dout("do_request timed out\n");
2152                 err = -EIO;
2153                 goto finish;
2154         }
2155
2156         put_request_session(req);
2157
2158         mds = __choose_mds(mdsc, req);
2159         if (mds < 0 ||
2160             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2161                 dout("do_request no mds or not active, waiting for map\n");
2162                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2163                 goto out;
2164         }
2165
2166         /* get, open session */
2167         session = __ceph_lookup_mds_session(mdsc, mds);
2168         if (!session) {
2169                 session = register_session(mdsc, mds);
2170                 if (IS_ERR(session)) {
2171                         err = PTR_ERR(session);
2172                         goto finish;
2173                 }
2174         }
2175         req->r_session = get_session(session);
2176
2177         dout("do_request mds%d session %p state %s\n", mds, session,
2178              ceph_session_state_name(session->s_state));
2179         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2180             session->s_state != CEPH_MDS_SESSION_HUNG) {
2181                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2182                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2183                         __open_session(mdsc, session);
2184                 list_add(&req->r_wait, &session->s_waiting);
2185                 goto out_session;
2186         }
2187
2188         /* send request */
2189         req->r_resend_mds = -1;   /* forget any previous mds hint */
2190
2191         if (req->r_request_started == 0)   /* note request start time */
2192                 req->r_request_started = jiffies;
2193
2194         err = __prepare_send_request(mdsc, req, mds, false);
2195         if (!err) {
2196                 ceph_msg_get(req->r_request);
2197                 ceph_con_send(&session->s_con, req->r_request);
2198         }
2199
2200 out_session:
2201         ceph_put_mds_session(session);
2202 out:
2203         return err;
2204
2205 finish:
2206         req->r_err = err;
2207         complete_request(mdsc, req);
2208         goto out;
2209 }
2210
2211 /*
2212  * called under mdsc->mutex
2213  */
2214 static void __wake_requests(struct ceph_mds_client *mdsc,
2215                             struct list_head *head)
2216 {
2217         struct ceph_mds_request *req;
2218         LIST_HEAD(tmp_list);
2219
2220         list_splice_init(head, &tmp_list);
2221
2222         while (!list_empty(&tmp_list)) {
2223                 req = list_entry(tmp_list.next,
2224                                  struct ceph_mds_request, r_wait);
2225                 list_del_init(&req->r_wait);
2226                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2227                 __do_request(mdsc, req);
2228         }
2229 }
2230
2231 /*
2232  * Wake up threads with requests pending for @mds, so that they can
2233  * resubmit their requests to a possibly different mds.
2234  */
2235 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2236 {
2237         struct ceph_mds_request *req;
2238         struct rb_node *p = rb_first(&mdsc->request_tree);
2239
2240         dout("kick_requests mds%d\n", mds);
2241         while (p) {
2242                 req = rb_entry(p, struct ceph_mds_request, r_node);
2243                 p = rb_next(p);
2244                 if (req->r_got_unsafe)
2245                         continue;
2246                 if (req->r_attempts > 0)
2247                         continue; /* only new requests */
2248                 if (req->r_session &&
2249                     req->r_session->s_mds == mds) {
2250                         dout(" kicking tid %llu\n", req->r_tid);
2251                         list_del_init(&req->r_wait);
2252                         __do_request(mdsc, req);
2253                 }
2254         }
2255 }
2256
2257 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2258                               struct ceph_mds_request *req)
2259 {
2260         dout("submit_request on %p\n", req);
2261         mutex_lock(&mdsc->mutex);
2262         __register_request(mdsc, req, NULL);
2263         __do_request(mdsc, req);
2264         mutex_unlock(&mdsc->mutex);
2265 }
2266
2267 /*
2268  * Synchronously perform an mds request.  Take care of all of the
2269  * session setup, forwarding, and retry details.
2270  */
2271 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2272                          struct inode *dir,
2273                          struct ceph_mds_request *req)
2274 {
2275         int err;
2276
2277         dout("do_request on %p\n", req);
2278
2279         /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
2280         if (req->r_inode)
2281                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2282         if (req->r_locked_dir)
2283                 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
2284         if (req->r_old_dentry_dir)
2285                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2286                                   CEPH_CAP_PIN);
2287
2288         /* issue */
2289         mutex_lock(&mdsc->mutex);
2290         __register_request(mdsc, req, dir);
2291         __do_request(mdsc, req);
2292
2293         if (req->r_err) {
2294                 err = req->r_err;
2295                 __unregister_request(mdsc, req);
2296                 dout("do_request early error %d\n", err);
2297                 goto out;
2298         }
2299
2300         /* wait */
2301         mutex_unlock(&mdsc->mutex);
2302         dout("do_request waiting\n");
2303         if (req->r_timeout) {
2304                 err = (long)wait_for_completion_killable_timeout(
2305                         &req->r_completion, req->r_timeout);
2306                 if (err == 0)
2307                         err = -EIO;
2308         } else if (req->r_wait_for_completion) {
2309                 err = req->r_wait_for_completion(mdsc, req);
2310         } else {
2311                 err = wait_for_completion_killable(&req->r_completion);
2312         }
2313         dout("do_request waited, got %d\n", err);
2314         mutex_lock(&mdsc->mutex);
2315
2316         /* only abort if we didn't race with a real reply */
2317         if (req->r_got_result) {
2318                 err = le32_to_cpu(req->r_reply_info.head->result);
2319         } else if (err < 0) {
2320                 dout("aborted request %lld with %d\n", req->r_tid, err);
2321
2322                 /*
2323                  * ensure we aren't running concurrently with
2324                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2325                  * rely on locks (dir mutex) held by our caller.
2326                  */
2327                 mutex_lock(&req->r_fill_mutex);
2328                 req->r_err = err;
2329                 req->r_aborted = true;
2330                 mutex_unlock(&req->r_fill_mutex);
2331
2332                 if (req->r_locked_dir &&
2333                     (req->r_op & CEPH_MDS_OP_WRITE))
2334                         ceph_invalidate_dir_request(req);
2335         } else {
2336                 err = req->r_err;
2337         }
2338
2339 out:
2340         mutex_unlock(&mdsc->mutex);
2341         dout("do_request %p done, result %d\n", req, err);
2342         return err;
2343 }
2344
2345 /*
2346  * Invalidate dir's completeness and dentry lease state on an aborted MDS
2347  * namespace request.
2348  */
2349 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2350 {
2351         struct inode *inode = req->r_locked_dir;
2352
2353         dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
2354
2355         ceph_dir_clear_complete(inode);
2356         if (req->r_dentry)
2357                 ceph_invalidate_dentry_lease(req->r_dentry);
2358         if (req->r_old_dentry)
2359                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2360 }
2361
2362 /*
2363  * Handle mds reply.
2364  *
2365  * We take the session mutex and parse and process the reply immediately.
2366  * This preserves the logical ordering of replies, capabilities, etc., sent
2367  * by the MDS as they are applied to our local cache.
2368  */
2369 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2370 {
2371         struct ceph_mds_client *mdsc = session->s_mdsc;
2372         struct ceph_mds_request *req;
2373         struct ceph_mds_reply_head *head = msg->front.iov_base;
2374         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2375         struct ceph_snap_realm *realm;
2376         u64 tid;
2377         int err, result;
2378         int mds = session->s_mds;
2379
2380         if (msg->front.iov_len < sizeof(*head)) {
2381                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2382                 ceph_msg_dump(msg);
2383                 return;
2384         }
2385
2386         /* get request, session */
2387         tid = le64_to_cpu(msg->hdr.tid);
2388         mutex_lock(&mdsc->mutex);
2389         req = __lookup_request(mdsc, tid);
2390         if (!req) {
2391                 dout("handle_reply on unknown tid %llu\n", tid);
2392                 mutex_unlock(&mdsc->mutex);
2393                 return;
2394         }
2395         dout("handle_reply %p\n", req);
2396
2397         /* correct session? */
2398         if (req->r_session != session) {
2399                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2400                        " not mds%d\n", tid, session->s_mds,
2401                        req->r_session ? req->r_session->s_mds : -1);
2402                 mutex_unlock(&mdsc->mutex);
2403                 goto out;
2404         }
2405
2406         /* dup? */
2407         if ((req->r_got_unsafe && !head->safe) ||
2408             (req->r_got_safe && head->safe)) {
2409                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2410                            head->safe ? "safe" : "unsafe", tid, mds);
2411                 mutex_unlock(&mdsc->mutex);
2412                 goto out;
2413         }
2414         if (req->r_got_safe && !head->safe) {
2415                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2416                            tid, mds);
2417                 mutex_unlock(&mdsc->mutex);
2418                 goto out;
2419         }
2420
2421         result = le32_to_cpu(head->result);
2422
2423         /*
2424          * Handle an ESTALE:
2425          * if we're not talking to the authority, send the request to it;
2426          * if the authority has changed while we weren't looking, send to
2427          * the new authority.
2428          * Otherwise we just have to return an ESTALE.
2429          */
2430         if (result == -ESTALE) {
2431                 dout("got ESTALE on request %llu", req->r_tid);
2432                 req->r_resend_mds = -1;
2433                 if (req->r_direct_mode != USE_AUTH_MDS) {
2434                         dout("not using auth, setting for that now");
2435                         req->r_direct_mode = USE_AUTH_MDS;
2436                         __do_request(mdsc, req);
2437                         mutex_unlock(&mdsc->mutex);
2438                         goto out;
2439                 } else  {
2440                         int mds = __choose_mds(mdsc, req);
2441                         if (mds >= 0 && mds != req->r_session->s_mds) {
2442                                 dout("but auth changed, so resending");
2443                                 __do_request(mdsc, req);
2444                                 mutex_unlock(&mdsc->mutex);
2445                                 goto out;
2446                         }
2447                 }
2448                 dout("have to return ESTALE on request %llu", req->r_tid);
2449         }
2450
2451
2452         if (head->safe) {
2453                 req->r_got_safe = true;
2454                 __unregister_request(mdsc, req);
2455
2456                 if (req->r_got_unsafe) {
2457                         /*
2458                          * We already handled the unsafe response, now do the
2459                          * cleanup.  No need to examine the response; the MDS
2460                          * doesn't include any result info in the safe
2461                          * response.  And even if it did, there is nothing
2462                          * useful we could do with a revised return value.
2463                          */
2464                         dout("got safe reply %llu, mds%d\n", tid, mds);
2465                         list_del_init(&req->r_unsafe_item);
2466
2467                         /* last unsafe request during umount? */
2468                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2469                                 complete_all(&mdsc->safe_umount_waiters);
2470                         mutex_unlock(&mdsc->mutex);
2471                         goto out;
2472                 }
2473         } else {
2474                 req->r_got_unsafe = true;
2475                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2476         }
2477
2478         dout("handle_reply tid %lld result %d\n", tid, result);
2479         rinfo = &req->r_reply_info;
2480         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2481         mutex_unlock(&mdsc->mutex);
2482
2483         mutex_lock(&session->s_mutex);
2484         if (err < 0) {
2485                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2486                 ceph_msg_dump(msg);
2487                 goto out_err;
2488         }
2489
2490         /* snap trace */
2491         realm = NULL;
2492         if (rinfo->snapblob_len) {
2493                 down_write(&mdsc->snap_rwsem);
2494                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2495                                 rinfo->snapblob + rinfo->snapblob_len,
2496                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2497                                 &realm);
2498                 downgrade_write(&mdsc->snap_rwsem);
2499         } else {
2500                 down_read(&mdsc->snap_rwsem);
2501         }
2502
2503         /* insert trace into our cache */
2504         mutex_lock(&req->r_fill_mutex);
2505         err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2506         if (err == 0) {
2507                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2508                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2509                         ceph_readdir_prepopulate(req, req->r_session);
2510                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2511         }
2512         mutex_unlock(&req->r_fill_mutex);
2513
2514         up_read(&mdsc->snap_rwsem);
2515         if (realm)
2516                 ceph_put_snap_realm(mdsc, realm);
2517 out_err:
2518         mutex_lock(&mdsc->mutex);
2519         if (!req->r_aborted) {
2520                 if (err) {
2521                         req->r_err = err;
2522                 } else {
2523                         req->r_reply = msg;
2524                         ceph_msg_get(msg);
2525                         req->r_got_result = true;
2526                 }
2527         } else {
2528                 dout("reply arrived after request %lld was aborted\n", tid);
2529         }
2530         mutex_unlock(&mdsc->mutex);
2531
2532         ceph_add_cap_releases(mdsc, req->r_session);
2533         mutex_unlock(&session->s_mutex);
2534
2535         /* kick calling process */
2536         complete_request(mdsc, req);
2537 out:
2538         ceph_mdsc_put_request(req);
2539         return;
2540 }
2541
2542
2543
2544 /*
2545  * handle mds notification that our request has been forwarded.
2546  */
2547 static void handle_forward(struct ceph_mds_client *mdsc,
2548                            struct ceph_mds_session *session,
2549                            struct ceph_msg *msg)
2550 {
2551         struct ceph_mds_request *req;
2552         u64 tid = le64_to_cpu(msg->hdr.tid);
2553         u32 next_mds;
2554         u32 fwd_seq;
2555         int err = -EINVAL;
2556         void *p = msg->front.iov_base;
2557         void *end = p + msg->front.iov_len;
2558
2559         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2560         next_mds = ceph_decode_32(&p);
2561         fwd_seq = ceph_decode_32(&p);
2562
2563         mutex_lock(&mdsc->mutex);
2564         req = __lookup_request(mdsc, tid);
2565         if (!req) {
2566                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2567                 goto out;  /* dup reply? */
2568         }
2569
2570         if (req->r_aborted) {
2571                 dout("forward tid %llu aborted, unregistering\n", tid);
2572                 __unregister_request(mdsc, req);
2573         } else if (fwd_seq <= req->r_num_fwd) {
2574                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2575                      tid, next_mds, req->r_num_fwd, fwd_seq);
2576         } else {
2577                 /* resend. forward race not possible; mds would drop */
2578                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2579                 BUG_ON(req->r_err);
2580                 BUG_ON(req->r_got_result);
2581                 req->r_attempts = 0;
2582                 req->r_num_fwd = fwd_seq;
2583                 req->r_resend_mds = next_mds;
2584                 put_request_session(req);
2585                 __do_request(mdsc, req);
2586         }
2587         ceph_mdsc_put_request(req);
2588 out:
2589         mutex_unlock(&mdsc->mutex);
2590         return;
2591
2592 bad:
2593         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2594 }
2595
2596 /*
2597  * handle a mds session control message
2598  */
2599 static void handle_session(struct ceph_mds_session *session,
2600                            struct ceph_msg *msg)
2601 {
2602         struct ceph_mds_client *mdsc = session->s_mdsc;
2603         u32 op;
2604         u64 seq;
2605         int mds = session->s_mds;
2606         struct ceph_mds_session_head *h = msg->front.iov_base;
2607         int wake = 0;
2608
2609         /* decode */
2610         if (msg->front.iov_len != sizeof(*h))
2611                 goto bad;
2612         op = le32_to_cpu(h->op);
2613         seq = le64_to_cpu(h->seq);
2614
2615         mutex_lock(&mdsc->mutex);
2616         if (op == CEPH_SESSION_CLOSE)
2617                 __unregister_session(mdsc, session);
2618         /* FIXME: this ttl calculation is generous */
2619         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2620         mutex_unlock(&mdsc->mutex);
2621
2622         mutex_lock(&session->s_mutex);
2623
2624         dout("handle_session mds%d %s %p state %s seq %llu\n",
2625              mds, ceph_session_op_name(op), session,
2626              ceph_session_state_name(session->s_state), seq);
2627
2628         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2629                 session->s_state = CEPH_MDS_SESSION_OPEN;
2630                 pr_info("mds%d came back\n", session->s_mds);
2631         }
2632
2633         switch (op) {
2634         case CEPH_SESSION_OPEN:
2635                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2636                         pr_info("mds%d reconnect success\n", session->s_mds);
2637                 session->s_state = CEPH_MDS_SESSION_OPEN;
2638                 renewed_caps(mdsc, session, 0);
2639                 wake = 1;
2640                 if (mdsc->stopping)
2641                         __close_session(mdsc, session);
2642                 break;
2643
2644         case CEPH_SESSION_RENEWCAPS:
2645                 if (session->s_renew_seq == seq)
2646                         renewed_caps(mdsc, session, 1);
2647                 break;
2648
2649         case CEPH_SESSION_CLOSE:
2650                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2651                         pr_info("mds%d reconnect denied\n", session->s_mds);
2652                 cleanup_session_requests(mdsc, session);
2653                 remove_session_caps(session);
2654                 wake = 2; /* for good measure */
2655                 wake_up_all(&mdsc->session_close_wq);
2656                 break;
2657
2658         case CEPH_SESSION_STALE:
2659                 pr_info("mds%d caps went stale, renewing\n",
2660                         session->s_mds);
2661                 spin_lock(&session->s_gen_ttl_lock);
2662                 session->s_cap_gen++;
2663                 session->s_cap_ttl = jiffies - 1;
2664                 spin_unlock(&session->s_gen_ttl_lock);
2665                 send_renew_caps(mdsc, session);
2666                 break;
2667
2668         case CEPH_SESSION_RECALL_STATE:
2669                 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2670                 break;
2671
2672         case CEPH_SESSION_FLUSHMSG:
2673                 send_flushmsg_ack(mdsc, session, seq);
2674                 break;
2675
2676         case CEPH_SESSION_FORCE_RO:
2677                 dout("force_session_readonly %p\n", session);
2678                 spin_lock(&session->s_cap_lock);
2679                 session->s_readonly = true;
2680                 spin_unlock(&session->s_cap_lock);
2681                 wake_up_session_caps(session, 0);
2682                 break;
2683
2684         default:
2685                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2686                 WARN_ON(1);
2687         }
2688
2689         mutex_unlock(&session->s_mutex);
2690         if (wake) {
2691                 mutex_lock(&mdsc->mutex);
2692                 __wake_requests(mdsc, &session->s_waiting);
2693                 if (wake == 2)
2694                         kick_requests(mdsc, mds);
2695                 mutex_unlock(&mdsc->mutex);
2696         }
2697         return;
2698
2699 bad:
2700         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2701                (int)msg->front.iov_len);
2702         ceph_msg_dump(msg);
2703         return;
2704 }
2705
2706
2707 /*
2708  * called under session->s_mutex.
2709  */
2710 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2711                                    struct ceph_mds_session *session)
2712 {
2713         struct ceph_mds_request *req, *nreq;
2714         struct rb_node *p;
2715         int err;
2716
2717         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2718
2719         mutex_lock(&mdsc->mutex);
2720         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2721                 err = __prepare_send_request(mdsc, req, session->s_mds, true);
2722                 if (!err) {
2723                         ceph_msg_get(req->r_request);
2724                         ceph_con_send(&session->s_con, req->r_request);
2725                 }
2726         }
2727
2728         /*
2729          * Also re-send old requests when the MDS enters the reconnect stage,
2730          * so that the MDS can process completed requests in the clientreplay stage.
2731          */
2732         p = rb_first(&mdsc->request_tree);
2733         while (p) {
2734                 req = rb_entry(p, struct ceph_mds_request, r_node);
2735                 p = rb_next(p);
2736                 if (req->r_got_unsafe)
2737                         continue;
2738                 if (req->r_attempts == 0)
2739                         continue; /* only old requests */
2740                 if (req->r_session &&
2741                     req->r_session->s_mds == session->s_mds) {
2742                         err = __prepare_send_request(mdsc, req,
2743                                                      session->s_mds, true);
2744                         if (!err) {
2745                                 ceph_msg_get(req->r_request);
2746                                 ceph_con_send(&session->s_con, req->r_request);
2747                         }
2748                 }
2749         }
2750         mutex_unlock(&mdsc->mutex);
2751 }
2752
2753 /*
2754  * Encode information about a cap for a reconnect with the MDS.
2755  */
2756 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2757                           void *arg)
2758 {
2759         union {
2760                 struct ceph_mds_cap_reconnect v2;
2761                 struct ceph_mds_cap_reconnect_v1 v1;
2762         } rec;
2763         size_t reclen;
2764         struct ceph_inode_info *ci;
2765         struct ceph_reconnect_state *recon_state = arg;
2766         struct ceph_pagelist *pagelist = recon_state->pagelist;
2767         char *path;
2768         int pathlen, err;
2769         u64 pathbase;
2770         struct dentry *dentry;
2771
2772         ci = cap->ci;
2773
2774         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2775              inode, ceph_vinop(inode), cap, cap->cap_id,
2776              ceph_cap_string(cap->issued));
2777         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2778         if (err)
2779                 return err;
2780
2781         dentry = d_find_alias(inode);
2782         if (dentry) {
2783                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2784                 if (IS_ERR(path)) {
2785                         err = PTR_ERR(path);
2786                         goto out_dput;
2787                 }
2788         } else {
2789                 path = NULL;
2790                 pathlen = 0;
2791         }
2792         err = ceph_pagelist_encode_string(pagelist, path, pathlen);
2793         if (err)
2794                 goto out_free;
2795
2796         spin_lock(&ci->i_ceph_lock);
2797         cap->seq = 0;        /* reset cap seq */
2798         cap->issue_seq = 0;  /* and issue_seq */
2799         cap->mseq = 0;       /* and migrate_seq */
2800         cap->cap_gen = cap->session->s_cap_gen;
2801
2802         if (recon_state->flock) {
2803                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2804                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2805                 rec.v2.issued = cpu_to_le32(cap->issued);
2806                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2807                 rec.v2.pathbase = cpu_to_le64(pathbase);
2808                 rec.v2.flock_len = 0;
2809                 reclen = sizeof(rec.v2);
2810         } else {
2811                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2812                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2813                 rec.v1.issued = cpu_to_le32(cap->issued);
2814                 rec.v1.size = cpu_to_le64(inode->i_size);
2815                 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2816                 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2817                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2818                 rec.v1.pathbase = cpu_to_le64(pathbase);
2819                 reclen = sizeof(rec.v1);
2820         }
2821         spin_unlock(&ci->i_ceph_lock);
2822
2823         if (recon_state->flock) {
2824                 int num_fcntl_locks, num_flock_locks;
2825                 struct ceph_filelock *flocks;
2826
2827 encode_again:
2828                 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2829                 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2830                                  sizeof(struct ceph_filelock), GFP_NOFS);
2831                 if (!flocks) {
2832                         err = -ENOMEM;
2833                         goto out_free;
2834                 }
2835                 err = ceph_encode_locks_to_buffer(inode, flocks,
2836                                                   num_fcntl_locks,
2837                                                   num_flock_locks);
2838                 if (err) {
2839                         kfree(flocks);
2840                         if (err == -ENOSPC)
2841                                 goto encode_again;
2842                         goto out_free;
2843                 }
2844                 /*
2845                  * number of encoded locks is stable, so copy to pagelist
2846                  */
2847                 rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
2848                                     (num_fcntl_locks+num_flock_locks) *
2849                                     sizeof(struct ceph_filelock));
2850                 err = ceph_pagelist_append(pagelist, &rec, reclen);
2851                 if (!err)
2852                         err = ceph_locks_to_pagelist(flocks, pagelist,
2853                                                      num_fcntl_locks,
2854                                                      num_flock_locks);
2855                 kfree(flocks);
2856         } else {
2857                 err = ceph_pagelist_append(pagelist, &rec, reclen);
2858         }
2859
2860         recon_state->nr_caps++;
2861 out_free:
2862         kfree(path);
2863 out_dput:
2864         dput(dentry);
2865         return err;
2866 }
2867
2868
2869 /*
2870  * If an MDS fails and recovers, clients need to reconnect in order to
2871  * reestablish shared state.  This includes all caps issued through
2872  * this session _and_ the snap_realm hierarchy.  Because it's not
2873  * clear which snap realms the mds cares about, we send everything we
2874  * know about; that ensures we'll then get any new info the
2875  * recovering MDS might have.
2876  *
2877  * This is a relatively heavyweight operation, but it's rare.
2878  *
2879  * called with mdsc->mutex held.
2880  */
2881 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2882                                struct ceph_mds_session *session)
2883 {
2884         struct ceph_msg *reply;
2885         struct rb_node *p;
2886         int mds = session->s_mds;
2887         int err = -ENOMEM;
2888         int s_nr_caps;
2889         struct ceph_pagelist *pagelist;
2890         struct ceph_reconnect_state recon_state;
2891
2892         pr_info("mds%d reconnect start\n", mds);
2893
2894         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2895         if (!pagelist)
2896                 goto fail_nopagelist;
2897         ceph_pagelist_init(pagelist);
2898
2899         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
2900         if (!reply)
2901                 goto fail_nomsg;
2902
2903         mutex_lock(&session->s_mutex);
2904         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2905         session->s_seq = 0;
2906
2907         dout("session %p state %s\n", session,
2908              ceph_session_state_name(session->s_state));
2909
2910         spin_lock(&session->s_gen_ttl_lock);
2911         session->s_cap_gen++;
2912         spin_unlock(&session->s_gen_ttl_lock);
2913
2914         spin_lock(&session->s_cap_lock);
2915         /* don't know if session is readonly */
2916         session->s_readonly = 0;
2917         /*
2918          * notify __ceph_remove_cap() that we are composing cap reconnect.
2919          * If a cap gets released before being added to the cap reconnect,
2920          * __ceph_remove_cap() should skip queuing a cap release.
2921          */
2922         session->s_cap_reconnect = 1;
2923         /* drop old queued cap releases; we're about to reestablish that state */
2924         discard_cap_releases(mdsc, session);
2925         spin_unlock(&session->s_cap_lock);
2926
2927         /* trim unused caps to reduce MDS's cache rejoin time */
2928         if (mdsc->fsc->sb->s_root)
2929                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
2930
2931         ceph_con_close(&session->s_con);
2932         ceph_con_open(&session->s_con,
2933                       CEPH_ENTITY_TYPE_MDS, mds,
2934                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2935
2936         /* replay unsafe requests */
2937         replay_unsafe_requests(mdsc, session);
2938
2939         down_read(&mdsc->snap_rwsem);
2940
2941         /* traverse this session's caps */
2942         s_nr_caps = session->s_nr_caps;
2943         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2944         if (err)
2945                 goto fail;
2946
2947         recon_state.nr_caps = 0;
2948         recon_state.pagelist = pagelist;
2949         recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
2950         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2951         if (err < 0)
2952                 goto fail;
2953
2954         spin_lock(&session->s_cap_lock);
2955         session->s_cap_reconnect = 0;
2956         spin_unlock(&session->s_cap_lock);
2957
2958         /*
2959          * snaprealms.  we provide the mds with the ino, seq (version), and
2960          * parent for all of our realms.  If the mds has any newer info,
2961          * it will tell us.
2962          */
2963         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
2964                 struct ceph_snap_realm *realm =
2965                         rb_entry(p, struct ceph_snap_realm, node);
2966                 struct ceph_mds_snaprealm_reconnect sr_rec;
2967
2968                 dout(" adding snap realm %llx seq %lld parent %llx\n",
2969                      realm->ino, realm->seq, realm->parent_ino);
2970                 sr_rec.ino = cpu_to_le64(realm->ino);
2971                 sr_rec.seq = cpu_to_le64(realm->seq);
2972                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
2973                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
2974                 if (err)
2975                         goto fail;
2976         }
2977
2978         if (recon_state.flock)
2979                 reply->hdr.version = cpu_to_le16(2);
2980
2981         /* raced with cap release? */
2982         if (s_nr_caps != recon_state.nr_caps) {
2983                 struct page *page = list_first_entry(&pagelist->head,
2984                                                      struct page, lru);
2985                 __le32 *addr = kmap_atomic(page);
2986                 *addr = cpu_to_le32(recon_state.nr_caps);
2987                 kunmap_atomic(addr);
2988         }
2989
2990         reply->hdr.data_len = cpu_to_le32(pagelist->length);
2991         ceph_msg_data_add_pagelist(reply, pagelist);
2992         ceph_con_send(&session->s_con, reply);
2993
2994         mutex_unlock(&session->s_mutex);
2995
2996         mutex_lock(&mdsc->mutex);
2997         __wake_requests(mdsc, &session->s_waiting);
2998         mutex_unlock(&mdsc->mutex);
2999
3000         up_read(&mdsc->snap_rwsem);
3001         return;
3002
3003 fail:
3004         ceph_msg_put(reply);
3005         up_read(&mdsc->snap_rwsem);
3006         mutex_unlock(&session->s_mutex);
3007 fail_nomsg:
3008         ceph_pagelist_release(pagelist);
3009 fail_nopagelist:
3010         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3011         return;
3012 }
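/*
 * Schematically, the CEPH_MSG_CLIENT_RECONNECT payload assembled above is
 * (a sketch, not an authoritative wire spec):
 *
 *   __le32 nr_caps                           patched in place just before
 *                                            sending if caps were released
 *                                            while iterating
 *   <one record per cap, from encode_caps_cb()>
 *   struct ceph_mds_snaprealm_reconnect[]    ino, seq, parent for every
 *                                            realm we know about
 *
 * hdr.version is bumped to 2 when the peer advertised CEPH_FEATURE_FLOCK,
 * telling the MDS that the cap records carry file lock state.
 */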
3013
3014
3015 /*
3016  * compare old and new mdsmaps, kicking requests
3017  * and closing out old connections as necessary
3018  *
3019  * called under mdsc->mutex.
3020  */
3021 static void check_new_map(struct ceph_mds_client *mdsc,
3022                           struct ceph_mdsmap *newmap,
3023                           struct ceph_mdsmap *oldmap)
3024 {
3025         int i;
3026         int oldstate, newstate;
3027         struct ceph_mds_session *s;
3028
3029         dout("check_new_map new %u old %u\n",
3030              newmap->m_epoch, oldmap->m_epoch);
3031
3032         for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
3033                 if (mdsc->sessions[i] == NULL)
3034                         continue;
3035                 s = mdsc->sessions[i];
3036                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3037                 newstate = ceph_mdsmap_get_state(newmap, i);
3038
3039                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3040                      i, ceph_mds_state_name(oldstate),
3041                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3042                      ceph_mds_state_name(newstate),
3043                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3044                      ceph_session_state_name(s->s_state));
3045
3046                 if (i >= newmap->m_max_mds ||
3047                     memcmp(ceph_mdsmap_get_addr(oldmap, i),
3048                            ceph_mdsmap_get_addr(newmap, i),
3049                            sizeof(struct ceph_entity_addr))) {
3050                         if (s->s_state == CEPH_MDS_SESSION_OPENING) {
3051                                 /* the session never opened, just close it
3052                                  * out now */
3053                                 __wake_requests(mdsc, &s->s_waiting);
3054                                 __unregister_session(mdsc, s);
3055                         } else {
3056                                 /* just close it */
3057                                 mutex_unlock(&mdsc->mutex);
3058                                 mutex_lock(&s->s_mutex);
3059                                 mutex_lock(&mdsc->mutex);
3060                                 ceph_con_close(&s->s_con);
3061                                 mutex_unlock(&s->s_mutex);
3062                                 s->s_state = CEPH_MDS_SESSION_RESTARTING;
3063                         }
3064                 } else if (oldstate == newstate) {
3065                         continue;  /* nothing new with this mds */
3066                 }
3067
3068                 /*
3069                  * send reconnect?
3070                  */
3071                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3072                     newstate >= CEPH_MDS_STATE_RECONNECT) {
3073                         mutex_unlock(&mdsc->mutex);
3074                         send_mds_reconnect(mdsc, s);
3075                         mutex_lock(&mdsc->mutex);
3076                 }
3077
3078                 /*
3079          * kick requests on any mds that has gone active.
3080                  */
3081                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3082                     newstate >= CEPH_MDS_STATE_ACTIVE) {
3083                         if (oldstate != CEPH_MDS_STATE_CREATING &&
3084                             oldstate != CEPH_MDS_STATE_STARTING)
3085                                 pr_info("mds%d recovery completed\n", s->s_mds);
3086                         kick_requests(mdsc, i);
3087                         ceph_kick_flushing_caps(mdsc, s);
3088                         wake_up_session_caps(s, 1);
3089                 }
3090         }
3091
3092         for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
3093                 s = mdsc->sessions[i];
3094                 if (!s)
3095                         continue;
3096                 if (!ceph_mdsmap_is_laggy(newmap, i))
3097                         continue;
3098                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3099                     s->s_state == CEPH_MDS_SESSION_HUNG ||
3100                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
3101                         dout(" connecting to export targets of laggy mds%d\n",
3102                              i);
3103                         __open_export_target_sessions(mdsc, s);
3104                 }
3105         }
3106 }
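/*
 * A rough example of how the logic above plays out when mds0 fails over to
 * a standby:
 *
 *   map N:   mds0 addr changed, session was OPEN
 *              -> close s_con, mark session RESTARTING
 *   map N+1: mds0 state >= CEPH_MDS_STATE_RECONNECT, session RESTARTING
 *              -> send_mds_reconnect()
 *   map N+2: mds0 state >= CEPH_MDS_STATE_ACTIVE
 *              -> "recovery completed": kick_requests(),
 *                 ceph_kick_flushing_caps(), wake_up_session_caps()
 */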
3107
3108
3109
3110 /*
3111  * leases
3112  */
3113
3114 /*
3115  * caller must hold session s_mutex, dentry->d_lock
3116  */
3117 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3118 {
3119         struct ceph_dentry_info *di = ceph_dentry(dentry);
3120
3121         ceph_put_mds_session(di->lease_session);
3122         di->lease_session = NULL;
3123 }
3124
3125 static void handle_lease(struct ceph_mds_client *mdsc,
3126                          struct ceph_mds_session *session,
3127                          struct ceph_msg *msg)
3128 {
3129         struct super_block *sb = mdsc->fsc->sb;
3130         struct inode *inode;
3131         struct dentry *parent, *dentry;
3132         struct ceph_dentry_info *di;
3133         int mds = session->s_mds;
3134         struct ceph_mds_lease *h = msg->front.iov_base;
3135         u32 seq;
3136         struct ceph_vino vino;
3137         struct qstr dname;
3138         int release = 0;
3139
3140         dout("handle_lease from mds%d\n", mds);
3141
3142         /* decode */
3143         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3144                 goto bad;
3145         vino.ino = le64_to_cpu(h->ino);
3146         vino.snap = CEPH_NOSNAP;
3147         seq = le32_to_cpu(h->seq);
3148         dname.name = (void *)h + sizeof(*h) + sizeof(u32);
3149         dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
3150         if (dname.len != get_unaligned_le32(h+1))
3151                 goto bad;
3152
3153         /* lookup inode */
3154         inode = ceph_find_inode(sb, vino);
3155         dout("handle_lease %s, ino %llx %p %.*s\n",
3156              ceph_lease_op_name(h->action), vino.ino, inode,
3157              dname.len, dname.name);
3158
3159         mutex_lock(&session->s_mutex);
3160         session->s_seq++;
3161
3162         if (inode == NULL) {
3163                 dout("handle_lease no inode %llx\n", vino.ino);
3164                 goto release;
3165         }
3166
3167         /* dentry */
3168         parent = d_find_alias(inode);
3169         if (!parent) {
3170                 dout("no parent dentry on inode %p\n", inode);
3171                 WARN_ON(1);
3172                 goto release;  /* hrm... */
3173         }
3174         dname.hash = full_name_hash(dname.name, dname.len);
3175         dentry = d_lookup(parent, &dname);
3176         dput(parent);
3177         if (!dentry)
3178                 goto release;
3179
3180         spin_lock(&dentry->d_lock);
3181         di = ceph_dentry(dentry);
3182         switch (h->action) {
3183         case CEPH_MDS_LEASE_REVOKE:
3184                 if (di->lease_session == session) {
3185                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3186                                 h->seq = cpu_to_le32(di->lease_seq);
3187                         __ceph_mdsc_drop_dentry_lease(dentry);
3188                 }
3189                 release = 1;
3190                 break;
3191
3192         case CEPH_MDS_LEASE_RENEW:
3193                 if (di->lease_session == session &&
3194                     di->lease_gen == session->s_cap_gen &&
3195                     di->lease_renew_from &&
3196                     di->lease_renew_after == 0) {
3197                         unsigned long duration =
3198                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
3199
3200                         di->lease_seq = seq;
3201                         dentry->d_time = di->lease_renew_from + duration;
3202                         di->lease_renew_after = di->lease_renew_from +
3203                                 (duration >> 1);
3204                         di->lease_renew_from = 0;
3205                 }
3206                 break;
3207         }
3208         spin_unlock(&dentry->d_lock);
3209         dput(dentry);
3210
3211         if (!release)
3212                 goto out;
3213
3214 release:
3215         /* let's just reuse the same message */
3216         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
3217         ceph_msg_get(msg);
3218         ceph_con_send(&session->s_con, msg);
3219
3220 out:
3221         iput(inode);
3222         mutex_unlock(&session->s_mutex);
3223         return;
3224
3225 bad:
3226         pr_err("corrupt lease message\n");
3227         ceph_msg_dump(msg);
3228 }
3229
3230 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3231                               struct inode *inode,
3232                               struct dentry *dentry, char action,
3233                               u32 seq)
3234 {
3235         struct ceph_msg *msg;
3236         struct ceph_mds_lease *lease;
3237         int len = sizeof(*lease) + sizeof(u32);
3238         int dnamelen = 0;
3239
3240         dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3241              inode, dentry, ceph_lease_op_name(action), session->s_mds);
3242         dnamelen = dentry->d_name.len;
3243         len += dnamelen;
3244
3245         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3246         if (!msg)
3247                 return;
3248         lease = msg->front.iov_base;
3249         lease->action = action;
3250         lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3251         lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3252         lease->seq = cpu_to_le32(seq);
3253         put_unaligned_le32(dnamelen, lease + 1);
3254         memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
3255
3256         /*
3257          * if this is a preemptive lease RELEASE, no need to
3258          * flush request stream, since the actual request will
3259          * soon follow.
3260          */
3261         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3262
3263         ceph_con_send(&session->s_con, msg);
3264 }
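/*
 * Sketch of the message front built above (not a formal wire spec):
 *
 *   struct ceph_mds_lease    action, ino, first/last snap, seq, ...
 *   __le32                   dname length
 *   char[]                   dname bytes (not NUL-terminated)
 */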
3265
3266 /*
3267  * Preemptively release a lease we expect to invalidate anyway.
3268  * Both @inode and @dentry must be non-NULL.
3269  */
3270 void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
3271                              struct dentry *dentry)
3272 {
3273         struct ceph_dentry_info *di;
3274         struct ceph_mds_session *session;
3275         u32 seq;
3276
3277         BUG_ON(inode == NULL);
3278         BUG_ON(dentry == NULL);
3279
3280         /* is dentry lease valid? */
3281         spin_lock(&dentry->d_lock);
3282         di = ceph_dentry(dentry);
3283         if (!di || !di->lease_session ||
3284             di->lease_session->s_mds < 0 ||
3285             di->lease_gen != di->lease_session->s_cap_gen ||
3286             !time_before(jiffies, dentry->d_time)) {
3287                 dout("lease_release inode %p dentry %p -- "
3288                      "no lease\n",
3289                      inode, dentry);
3290                 spin_unlock(&dentry->d_lock);
3291                 return;
3292         }
3293
3294         /* we do have a lease on this dentry; note mds and seq */
3295         session = ceph_get_mds_session(di->lease_session);
3296         seq = di->lease_seq;
3297         __ceph_mdsc_drop_dentry_lease(dentry);
3298         spin_unlock(&dentry->d_lock);
3299
3300         dout("lease_release inode %p dentry %p to mds%d\n",
3301              inode, dentry, session->s_mds);
3302         ceph_mdsc_lease_send_msg(session, inode, dentry,
3303                                  CEPH_MDS_LEASE_RELEASE, seq);
3304         ceph_put_mds_session(session);
3305 }
3306
3307 /*
3308  * drop all leases (and dentry refs) in preparation for umount
3309  */
3310 static void drop_leases(struct ceph_mds_client *mdsc)
3311 {
3312         int i;
3313
3314         dout("drop_leases\n");
3315         mutex_lock(&mdsc->mutex);
3316         for (i = 0; i < mdsc->max_sessions; i++) {
3317                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3318                 if (!s)
3319                         continue;
3320                 mutex_unlock(&mdsc->mutex);
3321                 mutex_lock(&s->s_mutex);
3322                 mutex_unlock(&s->s_mutex);
3323                 ceph_put_mds_session(s);
3324                 mutex_lock(&mdsc->mutex);
3325         }
3326         mutex_unlock(&mdsc->mutex);
3327 }
3328
3329
3330
3331 /*
3332  * delayed work -- periodically trim expired leases, renew caps with mds
3333  */
3334 static void schedule_delayed(struct ceph_mds_client *mdsc)
3335 {
3336         int delay = 5;
3337         unsigned hz = round_jiffies_relative(HZ * delay);
3338         schedule_delayed_work(&mdsc->delayed_work, hz);
3339 }
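/*
 * Example: with delay = 5 and HZ = 250 this requests ~1250 jiffies;
 * round_jiffies_relative() nudges that so the expiry lands on a whole
 * second, so delayed_work() runs roughly every 5 seconds.
 */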
3340
3341 static void delayed_work(struct work_struct *work)
3342 {
3343         int i;
3344         struct ceph_mds_client *mdsc =
3345                 container_of(work, struct ceph_mds_client, delayed_work.work);
3346         int renew_interval;
3347         int renew_caps;
3348
3349         dout("mdsc delayed_work\n");
3350         ceph_check_delayed_caps(mdsc);
3351
3352         mutex_lock(&mdsc->mutex);
3353         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
3354         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3355                                    mdsc->last_renew_caps);
3356         if (renew_caps)
3357                 mdsc->last_renew_caps = jiffies;
3358
3359         for (i = 0; i < mdsc->max_sessions; i++) {
3360                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3361                 if (s == NULL)
3362                         continue;
3363                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3364                         dout("resending session close request for mds%d\n",
3365                              s->s_mds);
3366                         request_close_session(mdsc, s);
3367                         ceph_put_mds_session(s);
3368                         continue;
3369                 }
3370                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3371                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3372                                 s->s_state = CEPH_MDS_SESSION_HUNG;
3373                                 pr_info("mds%d hung\n", s->s_mds);
3374                         }
3375                 }
3376                 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3377                         /* this mds has failed or is recovering, just wait */
3378                         ceph_put_mds_session(s);
3379                         continue;
3380                 }
3381                 mutex_unlock(&mdsc->mutex);
3382
3383                 mutex_lock(&s->s_mutex);
3384                 if (renew_caps)
3385                         send_renew_caps(mdsc, s);
3386                 else
3387                         ceph_con_keepalive(&s->s_con);
3388                 ceph_add_cap_releases(mdsc, s);
3389                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3390                     s->s_state == CEPH_MDS_SESSION_HUNG)
3391                         ceph_send_cap_releases(mdsc, s);
3392                 mutex_unlock(&s->s_mutex);
3393                 ceph_put_mds_session(s);
3394
3395                 mutex_lock(&mdsc->mutex);
3396         }
3397         mutex_unlock(&mdsc->mutex);
3398
3399         schedule_delayed(mdsc);
3400 }
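/*
 * Worked example of the renewal cadence above, assuming the mdsmap
 * advertises a 60 second session timeout: renew_interval = 60 >> 2 = 15,
 * so renew_caps becomes true once HZ * 15 jiffies have passed since
 * last_renew_caps.  On those passes each established session gets
 * send_renew_caps(); otherwise it only gets a ceph_con_keepalive().
 */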
3401
3402 int ceph_mdsc_init(struct ceph_fs_client *fsc)
3403
3404 {
3405         struct ceph_mds_client *mdsc;
3406
3407         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3408         if (!mdsc)
3409                 return -ENOMEM;
3410         mdsc->fsc = fsc;
3411         fsc->mdsc = mdsc;
3412         mutex_init(&mdsc->mutex);
3413         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3414         if (mdsc->mdsmap == NULL) {
3415                 kfree(mdsc);
3416                 return -ENOMEM;
3417         }
3418
3419         init_completion(&mdsc->safe_umount_waiters);
3420         init_waitqueue_head(&mdsc->session_close_wq);
3421         INIT_LIST_HEAD(&mdsc->waiting_for_map);
3422         mdsc->sessions = NULL;
3423         atomic_set(&mdsc->num_sessions, 0);
3424         mdsc->max_sessions = 0;
3425         mdsc->stopping = 0;
3426         mdsc->last_snap_seq = 0;
3427         init_rwsem(&mdsc->snap_rwsem);
3428         mdsc->snap_realms = RB_ROOT;
3429         INIT_LIST_HEAD(&mdsc->snap_empty);
3430         spin_lock_init(&mdsc->snap_empty_lock);
3431         mdsc->last_tid = 0;
3432         mdsc->request_tree = RB_ROOT;
3433         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3434         mdsc->last_renew_caps = jiffies;
3435         INIT_LIST_HEAD(&mdsc->cap_delay_list);
3436         spin_lock_init(&mdsc->cap_delay_lock);
3437         INIT_LIST_HEAD(&mdsc->snap_flush_list);
3438         spin_lock_init(&mdsc->snap_flush_lock);
3439         mdsc->cap_flush_seq = 0;
3440         INIT_LIST_HEAD(&mdsc->cap_dirty);
3441         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3442         mdsc->num_cap_flushing = 0;
3443         spin_lock_init(&mdsc->cap_dirty_lock);
3444         init_waitqueue_head(&mdsc->cap_flushing_wq);
3445         spin_lock_init(&mdsc->dentry_lru_lock);
3446         INIT_LIST_HEAD(&mdsc->dentry_lru);
3447
3448         ceph_caps_init(mdsc);
3449         ceph_adjust_min_caps(mdsc, fsc->min_caps);
3450
3451         init_rwsem(&mdsc->pool_perm_rwsem);
3452         mdsc->pool_perm_tree = RB_ROOT;
3453
3454         return 0;
3455 }
3456
3457 /*
3458  * Wait for safe replies on open mds requests.  If we time out, drop
3459  * all requests from the tree to avoid dangling dentry refs.
3460  */
3461 static void wait_requests(struct ceph_mds_client *mdsc)
3462 {
3463         struct ceph_mds_request *req;
3464         struct ceph_fs_client *fsc = mdsc->fsc;
3465
3466         mutex_lock(&mdsc->mutex);
3467         if (__get_oldest_req(mdsc)) {
3468                 mutex_unlock(&mdsc->mutex);
3469
3470                 dout("wait_requests waiting for requests\n");
3471                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3472                                     fsc->client->options->mount_timeout * HZ);
3473
3474                 /* tear down remaining requests */
3475                 mutex_lock(&mdsc->mutex);
3476                 while ((req = __get_oldest_req(mdsc))) {
3477                         dout("wait_requests timed out on tid %llu\n",
3478                              req->r_tid);
3479                         __unregister_request(mdsc, req);
3480                 }
3481         }
3482         mutex_unlock(&mdsc->mutex);
3483         dout("wait_requests done\n");
3484 }
3485
3486 /*
3487  * called before the mount goes read-only, and before dentries are torn down.
3488  * (hmm, does this still race with new lookups?)
3489  */
3490 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3491 {
3492         dout("pre_umount\n");
3493         mdsc->stopping = 1;
3494
3495         drop_leases(mdsc);
3496         ceph_flush_dirty_caps(mdsc);
3497         wait_requests(mdsc);
3498
3499         /*
3500          * wait for reply handlers to drop their request refs and
3501          * their inode/dcache refs
3502          */
3503         ceph_msgr_flush();
3504 }
3505
3506 /*
3507  * wait for all write mds requests to flush.
3508  */
3509 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3510 {
3511         struct ceph_mds_request *req = NULL, *nextreq;
3512         struct rb_node *n;
3513
3514         mutex_lock(&mdsc->mutex);
3515         dout("wait_unsafe_requests want %lld\n", want_tid);
3516 restart:
3517         req = __get_oldest_req(mdsc);
3518         while (req && req->r_tid <= want_tid) {
3519                 /* find next request */
3520                 n = rb_next(&req->r_node);
3521                 if (n)
3522                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3523                 else
3524                         nextreq = NULL;
3525                 if ((req->r_op & CEPH_MDS_OP_WRITE)) {
3526                         /* write op */
3527                         ceph_mdsc_get_request(req);
3528                         if (nextreq)
3529                                 ceph_mdsc_get_request(nextreq);
3530                         mutex_unlock(&mdsc->mutex);
3531                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3532                              req->r_tid, want_tid);
3533                         wait_for_completion(&req->r_safe_completion);
3534                         mutex_lock(&mdsc->mutex);
3535                         ceph_mdsc_put_request(req);
3536                         if (!nextreq)
3537                                 break;  /* 'next' did not exist before we slept, so we're done */
3538                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
3539                                 /* next request was removed from tree */
3540                                 ceph_mdsc_put_request(nextreq);
3541                                 goto restart;
3542                         }
3543                         ceph_mdsc_put_request(nextreq);  /* won't go away */
3544                 }
3545                 req = nextreq;
3546         }
3547         mutex_unlock(&mdsc->mutex);
3548         dout("wait_unsafe_requests done\n");
3549 }
3550
3551 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3552 {
3553         u64 want_tid, want_flush, want_snap;
3554
3555         if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3556                 return;
3557
3558         dout("sync\n");
3559         mutex_lock(&mdsc->mutex);
3560         want_tid = mdsc->last_tid;
3561         mutex_unlock(&mdsc->mutex);
3562
3563         ceph_flush_dirty_caps(mdsc);
3564         spin_lock(&mdsc->cap_dirty_lock);
3565         want_flush = mdsc->cap_flush_seq;
3566         spin_unlock(&mdsc->cap_dirty_lock);
3567
3568         down_read(&mdsc->snap_rwsem);
3569         want_snap = mdsc->last_snap_seq;
3570         up_read(&mdsc->snap_rwsem);
3571
3572         dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
3573              want_tid, want_flush, want_snap);
3574
3575         wait_unsafe_requests(mdsc, want_tid);
3576         wait_caps_flush(mdsc, want_flush, want_snap);
3577 }
3578
3579 /*
3580  * true if all sessions are closed, or we force unmount
3581  */
3582 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
3583 {
3584         if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3585                 return true;
3586         return atomic_read(&mdsc->num_sessions) == 0;
3587 }
3588
3589 /*
3590  * called after sb is ro.
3591  */
3592 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3593 {
3594         struct ceph_mds_session *session;
3595         int i;
3596         struct ceph_fs_client *fsc = mdsc->fsc;
3597         unsigned long timeout = fsc->client->options->mount_timeout * HZ;
3598
3599         dout("close_sessions\n");
3600
3601         /* close sessions */
3602         mutex_lock(&mdsc->mutex);
3603         for (i = 0; i < mdsc->max_sessions; i++) {
3604                 session = __ceph_lookup_mds_session(mdsc, i);
3605                 if (!session)
3606                         continue;
3607                 mutex_unlock(&mdsc->mutex);
3608                 mutex_lock(&session->s_mutex);
3609                 __close_session(mdsc, session);
3610                 mutex_unlock(&session->s_mutex);
3611                 ceph_put_mds_session(session);
3612                 mutex_lock(&mdsc->mutex);
3613         }
3614         mutex_unlock(&mdsc->mutex);
3615
3616         dout("waiting for sessions to close\n");
3617         wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
3618                            timeout);
3619
3620         /* tear down remaining sessions */
3621         mutex_lock(&mdsc->mutex);
3622         for (i = 0; i < mdsc->max_sessions; i++) {
3623                 if (mdsc->sessions[i]) {
3624                         session = get_session(mdsc->sessions[i]);
3625                         __unregister_session(mdsc, session);
3626                         mutex_unlock(&mdsc->mutex);
3627                         mutex_lock(&session->s_mutex);
3628                         remove_session_caps(session);
3629                         mutex_unlock(&session->s_mutex);
3630                         ceph_put_mds_session(session);
3631                         mutex_lock(&mdsc->mutex);
3632                 }
3633         }
3634         WARN_ON(!list_empty(&mdsc->cap_delay_list));
3635         mutex_unlock(&mdsc->mutex);
3636
3637         ceph_cleanup_empty_realms(mdsc);
3638
3639         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3640
3641         dout("stopped\n");
3642 }
3643
3644 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3645 {
3646         dout("stop\n");
3647         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3648         if (mdsc->mdsmap)
3649                 ceph_mdsmap_destroy(mdsc->mdsmap);
3650         kfree(mdsc->sessions);
3651         ceph_caps_finalize(mdsc);
3652         ceph_pool_perm_destroy(mdsc);
3653 }
3654
3655 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3656 {
3657         struct ceph_mds_client *mdsc = fsc->mdsc;
3658
3659         dout("mdsc_destroy %p\n", mdsc);
3660         ceph_mdsc_stop(mdsc);
3661
3662         /* flush out any connection work with references to us */
3663         ceph_msgr_flush();
3664
3665         fsc->mdsc = NULL;
3666         kfree(mdsc);
3667         dout("mdsc_destroy %p done\n", mdsc);
3668 }
3669
3670
3671 /*
3672  * handle mds map update.
3673  */
3674 void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3675 {
3676         u32 epoch;
3677         u32 maplen;
3678         void *p = msg->front.iov_base;
3679         void *end = p + msg->front.iov_len;
3680         struct ceph_mdsmap *newmap, *oldmap;
3681         struct ceph_fsid fsid;
3682         int err = -EINVAL;
3683
3684         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3685         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3686         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3687                 return;
3688         epoch = ceph_decode_32(&p);
3689         maplen = ceph_decode_32(&p);
3690         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3691
3692         /* do we need it? */
3693         ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
3694         mutex_lock(&mdsc->mutex);
3695         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3696                 dout("handle_map epoch %u <= our %u\n",
3697                      epoch, mdsc->mdsmap->m_epoch);
3698                 mutex_unlock(&mdsc->mutex);
3699                 return;
3700         }
3701
3702         newmap = ceph_mdsmap_decode(&p, end);
3703         if (IS_ERR(newmap)) {
3704                 err = PTR_ERR(newmap);
3705                 goto bad_unlock;
3706         }
3707
3708         /* swap into place */
3709         if (mdsc->mdsmap) {
3710                 oldmap = mdsc->mdsmap;
3711                 mdsc->mdsmap = newmap;
3712                 check_new_map(mdsc, newmap, oldmap);
3713                 ceph_mdsmap_destroy(oldmap);
3714         } else {
3715                 mdsc->mdsmap = newmap;  /* first mds map */
3716         }
3717         mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3718
3719         __wake_requests(mdsc, &mdsc->waiting_for_map);
3720
3721         mutex_unlock(&mdsc->mutex);
3722         schedule_delayed(mdsc);
3723         return;
3724
3725 bad_unlock:
3726         mutex_unlock(&mdsc->mutex);
3727 bad:
3728         pr_err("error decoding mdsmap %d\n", err);
3729         return;
3730 }
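/*
 * For reference, a sketch of the mdsmap message front decoded above:
 *
 *   struct ceph_fsid fsid      must match our cluster's fsid
 *   __le32 epoch               ignored if <= the epoch we already have
 *   __le32 maplen              length of the encoded map that follows
 *   <mdsmap payload, parsed by ceph_mdsmap_decode()>
 */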
3731
3732 static struct ceph_connection *con_get(struct ceph_connection *con)
3733 {
3734         struct ceph_mds_session *s = con->private;
3735
3736         if (get_session(s)) {
3737                 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3738                 return con;
3739         }
3740         dout("mdsc con_get %p FAIL\n", s);
3741         return NULL;
3742 }
3743
3744 static void con_put(struct ceph_connection *con)
3745 {
3746         struct ceph_mds_session *s = con->private;
3747
3748         dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3749         ceph_put_mds_session(s);
3750 }
3751
3752 /*
3753  * if the client is unresponsive for long enough, the mds will kill
3754  * the session entirely.
3755  */
3756 static void peer_reset(struct ceph_connection *con)
3757 {
3758         struct ceph_mds_session *s = con->private;
3759         struct ceph_mds_client *mdsc = s->s_mdsc;
3760
3761         pr_warn("mds%d closed our session\n", s->s_mds);
3762         send_mds_reconnect(mdsc, s);
3763 }
3764
3765 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3766 {
3767         struct ceph_mds_session *s = con->private;
3768         struct ceph_mds_client *mdsc = s->s_mdsc;
3769         int type = le16_to_cpu(msg->hdr.type);
3770
3771         mutex_lock(&mdsc->mutex);
3772         if (__verify_registered_session(mdsc, s) < 0) {
3773                 mutex_unlock(&mdsc->mutex);
3774                 goto out;
3775         }
3776         mutex_unlock(&mdsc->mutex);
3777
3778         switch (type) {
3779         case CEPH_MSG_MDS_MAP:
3780                 ceph_mdsc_handle_map(mdsc, msg);
3781                 break;
3782         case CEPH_MSG_CLIENT_SESSION:
3783                 handle_session(s, msg);
3784                 break;
3785         case CEPH_MSG_CLIENT_REPLY:
3786                 handle_reply(s, msg);
3787                 break;
3788         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3789                 handle_forward(mdsc, s, msg);
3790                 break;
3791         case CEPH_MSG_CLIENT_CAPS:
3792                 ceph_handle_caps(s, msg);
3793                 break;
3794         case CEPH_MSG_CLIENT_SNAP:
3795                 ceph_handle_snap(mdsc, s, msg);
3796                 break;
3797         case CEPH_MSG_CLIENT_LEASE:
3798                 handle_lease(mdsc, s, msg);
3799                 break;
3800
3801         default:
3802                 pr_err("received unknown message type %d %s\n", type,
3803                        ceph_msg_type_name(type));
3804         }
3805 out:
3806         ceph_msg_put(msg);
3807 }
3808
3809 /*
3810  * authentication
3811  */
3812
3813 /*
3814  * Note: returned pointer is the address of a structure that's
3815  * managed separately.  Caller must *not* attempt to free it.
3816  */
3817 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3818                                         int *proto, int force_new)
3819 {
3820         struct ceph_mds_session *s = con->private;
3821         struct ceph_mds_client *mdsc = s->s_mdsc;
3822         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3823         struct ceph_auth_handshake *auth = &s->s_auth;
3824
3825         if (force_new && auth->authorizer) {
3826                 ceph_auth_destroy_authorizer(ac, auth->authorizer);
3827                 auth->authorizer = NULL;
3828         }
3829         if (!auth->authorizer) {
3830                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3831                                                       auth);
3832                 if (ret)
3833                         return ERR_PTR(ret);
3834         } else {
3835                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3836                                                       auth);
3837                 if (ret)
3838                         return ERR_PTR(ret);
3839         }
3840         *proto = ac->protocol;
3841
3842         return auth;
3843 }
3844
3845
3846 static int verify_authorizer_reply(struct ceph_connection *con, int len)
3847 {
3848         struct ceph_mds_session *s = con->private;
3849         struct ceph_mds_client *mdsc = s->s_mdsc;
3850         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3851
3852         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
3853 }
3854
3855 static int invalidate_authorizer(struct ceph_connection *con)
3856 {
3857         struct ceph_mds_session *s = con->private;
3858         struct ceph_mds_client *mdsc = s->s_mdsc;
3859         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3860
3861         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3862
3863         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3864 }
3865
3866 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
3867                                 struct ceph_msg_header *hdr, int *skip)
3868 {
3869         struct ceph_msg *msg;
3870         int type = (int) le16_to_cpu(hdr->type);
3871         int front_len = (int) le32_to_cpu(hdr->front_len);
3872
3873         if (con->in_msg)
3874                 return con->in_msg;
3875
3876         *skip = 0;
3877         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
3878         if (!msg) {
3879                 pr_err("unable to allocate msg type %d len %d\n",
3880                        type, front_len);
3881                 return NULL;
3882         }
3883
3884         return msg;
3885 }
3886
3887 static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
3888 {
3889         struct ceph_mds_session *s = con->private;
3890         struct ceph_auth_handshake *auth = &s->s_auth;
3891         return ceph_auth_sign_message(auth, msg);
3892 }
3893
3894 static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
3895 {
3896         struct ceph_mds_session *s = con->private;
3897         struct ceph_auth_handshake *auth = &s->s_auth;
3898         return ceph_auth_check_message_signature(auth, msg);
3899 }
3900
3901 static const struct ceph_connection_operations mds_con_ops = {
3902         .get = con_get,
3903         .put = con_put,
3904         .dispatch = dispatch,
3905         .get_authorizer = get_authorizer,
3906         .verify_authorizer_reply = verify_authorizer_reply,
3907         .invalidate_authorizer = invalidate_authorizer,
3908         .peer_reset = peer_reset,
3909         .alloc_msg = mds_alloc_msg,
3910         .sign_message = sign_message,
3911         .check_message_signature = check_message_signature,
3912 };
3913
3914 /* eof */