1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/fs.h>
4 #include <linux/wait.h>
5 #include <linux/slab.h>
6 #include <linux/gfp.h>
7 #include <linux/sched.h>
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10 #include <linux/utsname.h>
11 #include <linux/ratelimit.h>
12
13 #include "super.h"
14 #include "mds_client.h"
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
20 #include <linux/ceph/auth.h>
21 #include <linux/ceph/debugfs.h>
22
23 /*
24  * A cluster of MDS (metadata server) daemons is responsible for
25  * managing the file system namespace (the directory hierarchy and
26  * inodes) and for coordinating shared access to storage.  Metadata is
27  * partitioned hierarchically across a number of servers, and that
28  * partition varies over time as the cluster adjusts the distribution
29  * in order to balance load.
30  *
31  * The MDS client is primarily responsible for managing synchronous
32  * metadata requests for operations like open, unlink, and so forth.
33  * If there is an MDS failure, we find out about it when we (possibly
34  * request and) receive a new MDS map, and can resubmit affected
35  * requests.
36  *
37  * For the most part, though, we take advantage of a lossless
38  * communications channel to the MDS, and do not need to worry about
39  * timing out or resubmitting requests.
40  *
41  * We maintain a stateful "session" with each MDS we interact with.
42  * Within each session, we send periodic heartbeat messages to ensure
43  * any capabilities or leases we have been issued remain valid.  If
44  * the session times out and goes stale, our leases and capabilities
45  * are no longer valid.
46  */
47
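/*
 * State carried while encoding an MDS reconnect message: the pagelist
 * being filled, the number of caps encoded into it so far, and which
 * encoding version the resulting message should advertise.
 */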
48 struct ceph_reconnect_state {
49         int nr_caps;
50         struct ceph_pagelist *pagelist;
51         unsigned msg_version;
52 };
53
54 static void __wake_requests(struct ceph_mds_client *mdsc,
55                             struct list_head *head);
56
57 static const struct ceph_connection_operations mds_con_ops;
58
59
60 /*
61  * mds reply parsing
62  */
63
64 /*
65  * parse individual inode info
66  */
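/*
 * Wire layout decoded below: a fixed ceph_mds_reply_inode plus its
 * fragtree split entries, a length-prefixed symlink target, a dir
 * layout (only when the peer has CEPH_FEATURE_DIRLAYOUTHASH), a
 * length-prefixed xattr blob, and feature-dependent inline data and
 * pool namespace fields.  The *_safe decode helpers jump to the 'bad'
 * label on a short buffer, which we report as -EIO.
 */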
67 static int parse_reply_info_in(void **p, void *end,
68                                struct ceph_mds_reply_info_in *info,
69                                u64 features)
70 {
71         int err = -EIO;
72
73         info->in = *p;
74         *p += sizeof(struct ceph_mds_reply_inode) +
75                 sizeof(*info->in->fragtree.splits) *
76                 le32_to_cpu(info->in->fragtree.nsplits);
77
78         ceph_decode_32_safe(p, end, info->symlink_len, bad);
79         ceph_decode_need(p, end, info->symlink_len, bad);
80         info->symlink = *p;
81         *p += info->symlink_len;
82
83         if (features & CEPH_FEATURE_DIRLAYOUTHASH)
84                 ceph_decode_copy_safe(p, end, &info->dir_layout,
85                                       sizeof(info->dir_layout), bad);
86         else
87                 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
88
89         ceph_decode_32_safe(p, end, info->xattr_len, bad);
90         ceph_decode_need(p, end, info->xattr_len, bad);
91         info->xattr_data = *p;
92         *p += info->xattr_len;
93
94         if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
95                 ceph_decode_64_safe(p, end, info->inline_version, bad);
96                 ceph_decode_32_safe(p, end, info->inline_len, bad);
97                 ceph_decode_need(p, end, info->inline_len, bad);
98                 info->inline_data = *p;
99                 *p += info->inline_len;
100         } else
101                 info->inline_version = CEPH_INLINE_NONE;
102
103         info->pool_ns_len = 0;
104         info->pool_ns_data = NULL;
105         if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
106                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
107                 if (info->pool_ns_len > 0) {
108                         ceph_decode_need(p, end, info->pool_ns_len, bad);
109                         info->pool_ns_data = *p;
110                         *p += info->pool_ns_len;
111                 }
112         }
113
114         return 0;
115 bad:
116         return err;
117 }
118
119 /*
120  * parse a normal reply, which may contain a (dir+)dentry and/or a
121  * target inode.
122  */
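/*
 * When the reply names a dentry, the trace carries the directory
 * inode, its dirfrag (plus replica dist entries), the dentry name and
 * a dentry lease; when it names a target, the target inode record
 * follows.  Trailing bytes are treated as a protocol error.
 */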
123 static int parse_reply_info_trace(void **p, void *end,
124                                   struct ceph_mds_reply_info_parsed *info,
125                                   u64 features)
126 {
127         int err;
128
129         if (info->head->is_dentry) {
130                 err = parse_reply_info_in(p, end, &info->diri, features);
131                 if (err < 0)
132                         goto out_bad;
133
134                 if (unlikely(*p + sizeof(*info->dirfrag) > end))
135                         goto bad;
136                 info->dirfrag = *p;
137                 *p += sizeof(*info->dirfrag) +
138                         sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
139                 if (unlikely(*p > end))
140                         goto bad;
141
142                 ceph_decode_32_safe(p, end, info->dname_len, bad);
143                 ceph_decode_need(p, end, info->dname_len, bad);
144                 info->dname = *p;
145                 *p += info->dname_len;
146                 info->dlease = *p;
147                 *p += sizeof(*info->dlease);
148         }
149
150         if (info->head->is_target) {
151                 err = parse_reply_info_in(p, end, &info->targeti, features);
152                 if (err < 0)
153                         goto out_bad;
154         }
155
156         if (unlikely(*p != end))
157                 goto bad;
158         return 0;
159
160 bad:
161         err = -EIO;
162 out_bad:
163         pr_err("problem parsing mds trace %d\n", err);
164         return err;
165 }
166
167 /*
168  * parse readdir results
169  */
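/*
 * Layout: a dirfrag header (plus dist entries), a u32 entry count and
 * a u16 flags word (end/complete/hash_order), then one name + lease +
 * inode record per entry.  Entries are decoded into the preallocated
 * info->dir_entries buffer; per-entry offsets are filled in later by
 * ceph_readdir_prepopulate().
 */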
170 static int parse_reply_info_dir(void **p, void *end,
171                                 struct ceph_mds_reply_info_parsed *info,
172                                 u64 features)
173 {
174         u32 num, i = 0;
175         int err;
176
177         info->dir_dir = *p;
178         if (*p + sizeof(*info->dir_dir) > end)
179                 goto bad;
180         *p += sizeof(*info->dir_dir) +
181                 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
182         if (*p > end)
183                 goto bad;
184
185         ceph_decode_need(p, end, sizeof(num) + 2, bad);
186         num = ceph_decode_32(p);
187         {
188                 u16 flags = ceph_decode_16(p);
189                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
190                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
191                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
192         }
193         if (num == 0)
194                 goto done;
195
196         BUG_ON(!info->dir_entries);
197         if ((unsigned long)(info->dir_entries + num) >
198             (unsigned long)info->dir_entries + info->dir_buf_size) {
199                 pr_err("dir contents are larger than expected\n");
200                 WARN_ON(1);
201                 goto bad;
202         }
203
204         info->dir_nr = num;
205         while (num) {
206                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
207                 /* dentry */
208                 ceph_decode_need(p, end, sizeof(u32)*2, bad);
209                 rde->name_len = ceph_decode_32(p);
210                 ceph_decode_need(p, end, rde->name_len, bad);
211                 rde->name = *p;
212                 *p += rde->name_len;
213                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
214                 rde->lease = *p;
215                 *p += sizeof(struct ceph_mds_reply_lease);
216
217                 /* inode */
218                 err = parse_reply_info_in(p, end, &rde->inode, features);
219                 if (err < 0)
220                         goto out_bad;
221                 /* ceph_readdir_prepopulate() will update it */
222                 rde->offset = 0;
223                 i++;
224                 num--;
225         }
226
227 done:
228         if (*p != end)
229                 goto bad;
230         return 0;
231
232 bad:
233         err = -EIO;
234 out_bad:
235         pr_err("problem parsing dir contents %d\n", err);
236         return err;
237 }
238
239 /*
240  * parse fcntl F_GETLK results
241  */
242 static int parse_reply_info_filelock(void **p, void *end,
243                                      struct ceph_mds_reply_info_parsed *info,
244                                      u64 features)
245 {
246         if (*p + sizeof(*info->filelock_reply) > end)
247                 goto bad;
248
249         info->filelock_reply = *p;
250         *p += sizeof(*info->filelock_reply);
251
252         if (unlikely(*p != end))
253                 goto bad;
254         return 0;
255
256 bad:
257         return -EIO;
258 }
259
260 /*
261  * parse create results
262  */
263 static int parse_reply_info_create(void **p, void *end,
264                                   struct ceph_mds_reply_info_parsed *info,
265                                   u64 features)
266 {
267         if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
268                 if (*p == end) {
269                         info->has_create_ino = false;
270                 } else {
271                         info->has_create_ino = true;
272                         info->ino = ceph_decode_64(p);
273                 }
274         }
275
276         if (unlikely(*p != end))
277                 goto bad;
278         return 0;
279
280 bad:
281         return -EIO;
282 }
283
284 /*
285  * parse extra results
286  */
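/*
 * The "extra" section is interpreted according to the request op:
 * file lock state for GETFILELOCK, a directory listing for READDIR
 * and LSSNAP, and an optional created inode number for CREATE.
 */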
287 static int parse_reply_info_extra(void **p, void *end,
288                                   struct ceph_mds_reply_info_parsed *info,
289                                   u64 features)
290 {
291         if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
292                 return parse_reply_info_filelock(p, end, info, features);
293         else if (info->head->op == CEPH_MDS_OP_READDIR ||
294                  info->head->op == CEPH_MDS_OP_LSSNAP)
295                 return parse_reply_info_dir(p, end, info, features);
296         else if (info->head->op == CEPH_MDS_OP_CREATE)
297                 return parse_reply_info_create(p, end, info, features);
298         else
299                 return -EIO;
300 }
301
302 /*
303  * parse entire mds reply
304  */
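/*
 * Overall layout: a fixed ceph_mds_reply_head followed by three
 * length-prefixed sections in order: the trace, the op-specific
 * "extra" blob, and the snap blob.  Trailing bytes mean a malformed
 * reply and yield -EIO.
 */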
305 static int parse_reply_info(struct ceph_msg *msg,
306                             struct ceph_mds_reply_info_parsed *info,
307                             u64 features)
308 {
309         void *p, *end;
310         u32 len;
311         int err;
312
313         info->head = msg->front.iov_base;
314         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
315         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
316
317         /* trace */
318         ceph_decode_32_safe(&p, end, len, bad);
319         if (len > 0) {
320                 ceph_decode_need(&p, end, len, bad);
321                 err = parse_reply_info_trace(&p, p+len, info, features);
322                 if (err < 0)
323                         goto out_bad;
324         }
325
326         /* extra */
327         ceph_decode_32_safe(&p, end, len, bad);
328         if (len > 0) {
329                 ceph_decode_need(&p, end, len, bad);
330                 err = parse_reply_info_extra(&p, p+len, info, features);
331                 if (err < 0)
332                         goto out_bad;
333         }
334
335         /* snap blob */
336         ceph_decode_32_safe(&p, end, len, bad);
337         info->snapblob_len = len;
338         info->snapblob = p;
339         p += len;
340
341         if (p != end)
342                 goto bad;
343         return 0;
344
345 bad:
346         err = -EIO;
347 out_bad:
348         pr_err("mds parse_reply err %d\n", err);
349         return err;
350 }
351
352 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
353 {
354         if (!info->dir_entries)
355                 return;
356         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
357 }
358
359
360 /*
361  * sessions
362  */
363 const char *ceph_session_state_name(int s)
364 {
365         switch (s) {
366         case CEPH_MDS_SESSION_NEW: return "new";
367         case CEPH_MDS_SESSION_OPENING: return "opening";
368         case CEPH_MDS_SESSION_OPEN: return "open";
369         case CEPH_MDS_SESSION_HUNG: return "hung";
370         case CEPH_MDS_SESSION_CLOSING: return "closing";
371         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
372         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
373         default: return "???";
374         }
375 }
376
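/*
 * get_session() only succeeds while the session is still live (s_ref
 * non-zero); ceph_put_mds_session() destroys the authorizer and frees
 * the session once the last reference is dropped.
 */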
377 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
378 {
379         if (atomic_inc_not_zero(&s->s_ref)) {
380                 dout("mdsc get_session %p %d -> %d\n", s,
381                      atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
382                 return s;
383         } else {
384                 dout("mdsc get_session %p 0 -- FAIL", s);
385                 return NULL;
386         }
387 }
388
389 void ceph_put_mds_session(struct ceph_mds_session *s)
390 {
391         dout("mdsc put_session %p %d -> %d\n", s,
392              atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
393         if (atomic_dec_and_test(&s->s_ref)) {
394                 if (s->s_auth.authorizer)
395                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
396                 kfree(s);
397         }
398 }
399
400 /*
401  * called under mdsc->mutex
402  */
403 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
404                                                    int mds)
405 {
406         struct ceph_mds_session *session;
407
408         if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
409                 return NULL;
410         session = mdsc->sessions[mds];
411         dout("lookup_mds_session %p %d\n", session,
412              atomic_read(&session->s_ref));
413         get_session(session);
414         return session;
415 }
416
417 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
418 {
419         if (mds >= mdsc->max_sessions)
420                 return false;
421         return mdsc->sessions[mds];
422 }
423
424 static int __verify_registered_session(struct ceph_mds_client *mdsc,
425                                        struct ceph_mds_session *s)
426 {
427         if (s->s_mds >= mdsc->max_sessions ||
428             mdsc->sessions[s->s_mds] != s)
429                 return -ENOENT;
430         return 0;
431 }
432
433 /*
434  * create+register a new session for given mds.
435  * called under mdsc->mutex.
436  */
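/*
 * The sessions[] array is grown in power-of-two steps as higher mds
 * ranks appear.  A new session starts with two references: one held by
 * sessions[] and one returned to the caller.
 */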
437 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
438                                                  int mds)
439 {
440         struct ceph_mds_session *s;
441
442         if (mds >= mdsc->mdsmap->m_max_mds)
443                 return ERR_PTR(-EINVAL);
444
445         s = kzalloc(sizeof(*s), GFP_NOFS);
446         if (!s)
447                 return ERR_PTR(-ENOMEM);
448         s->s_mdsc = mdsc;
449         s->s_mds = mds;
450         s->s_state = CEPH_MDS_SESSION_NEW;
451         s->s_ttl = 0;
452         s->s_seq = 0;
453         mutex_init(&s->s_mutex);
454
455         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
456
457         spin_lock_init(&s->s_gen_ttl_lock);
458         s->s_cap_gen = 0;
459         s->s_cap_ttl = jiffies - 1;
460
461         spin_lock_init(&s->s_cap_lock);
462         s->s_renew_requested = 0;
463         s->s_renew_seq = 0;
464         INIT_LIST_HEAD(&s->s_caps);
465         s->s_nr_caps = 0;
466         s->s_trim_caps = 0;
467         atomic_set(&s->s_ref, 1);
468         INIT_LIST_HEAD(&s->s_waiting);
469         INIT_LIST_HEAD(&s->s_unsafe);
470         s->s_num_cap_releases = 0;
471         s->s_cap_reconnect = 0;
472         s->s_cap_iterator = NULL;
473         INIT_LIST_HEAD(&s->s_cap_releases);
474         INIT_LIST_HEAD(&s->s_cap_flushing);
475
476         dout("register_session mds%d\n", mds);
477         if (mds >= mdsc->max_sessions) {
478                 int newmax = 1 << get_count_order(mds+1);
479                 struct ceph_mds_session **sa;
480
481                 dout("register_session realloc to %d\n", newmax);
482                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
483                 if (sa == NULL)
484                         goto fail_realloc;
485                 if (mdsc->sessions) {
486                         memcpy(sa, mdsc->sessions,
487                                mdsc->max_sessions * sizeof(void *));
488                         kfree(mdsc->sessions);
489                 }
490                 mdsc->sessions = sa;
491                 mdsc->max_sessions = newmax;
492         }
493         mdsc->sessions[mds] = s;
494         atomic_inc(&mdsc->num_sessions);
495         atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
496
497         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
498                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
499
500         return s;
501
502 fail_realloc:
503         kfree(s);
504         return ERR_PTR(-ENOMEM);
505 }
506
507 /*
508  * called under mdsc->mutex
509  */
510 static void __unregister_session(struct ceph_mds_client *mdsc,
511                                struct ceph_mds_session *s)
512 {
513         dout("__unregister_session mds%d %p\n", s->s_mds, s);
514         BUG_ON(mdsc->sessions[s->s_mds] != s);
515         mdsc->sessions[s->s_mds] = NULL;
516         ceph_con_close(&s->s_con);
517         ceph_put_mds_session(s);
518         atomic_dec(&mdsc->num_sessions);
519 }
520
521 /*
522  * drop session refs in request.
523  *
524  * should be last request ref, or hold mdsc->mutex
525  */
526 static void put_request_session(struct ceph_mds_request *req)
527 {
528         if (req->r_session) {
529                 ceph_put_mds_session(req->r_session);
530                 req->r_session = NULL;
531         }
532 }
533
534 void ceph_mdsc_release_request(struct kref *kref)
535 {
536         struct ceph_mds_request *req = container_of(kref,
537                                                     struct ceph_mds_request,
538                                                     r_kref);
539         destroy_reply_info(&req->r_reply_info);
540         if (req->r_request)
541                 ceph_msg_put(req->r_request);
542         if (req->r_reply)
543                 ceph_msg_put(req->r_reply);
544         if (req->r_inode) {
545                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
546                 iput(req->r_inode);
547         }
548         if (req->r_locked_dir)
549                 ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
550         iput(req->r_target_inode);
551         if (req->r_dentry)
552                 dput(req->r_dentry);
553         if (req->r_old_dentry)
554                 dput(req->r_old_dentry);
555         if (req->r_old_dentry_dir) {
556                 /*
557                  * track (and drop pins for) r_old_dentry_dir
558                  * separately, since r_old_dentry's d_parent may have
559                  * changed between the dir mutex being dropped and
560                  * this request being freed.
561                  */
562                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
563                                   CEPH_CAP_PIN);
564                 iput(req->r_old_dentry_dir);
565         }
566         kfree(req->r_path1);
567         kfree(req->r_path2);
568         if (req->r_pagelist)
569                 ceph_pagelist_release(req->r_pagelist);
570         put_request_session(req);
571         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
572         kfree(req);
573 }
574
575 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
576
577 /*
578  * lookup request, bump ref if found.
579  *
580  * called under mdsc->mutex.
581  */
582 static struct ceph_mds_request *
583 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
584 {
585         struct ceph_mds_request *req;
586
587         req = lookup_request(&mdsc->request_tree, tid);
588         if (req)
589                 ceph_mdsc_get_request(req);
590
591         return req;
592 }
593
594 /*
595  * Register an in-flight request, and assign a tid.  Link to the
596  * directory we are modifying (if any).
597  *
598  * Called under mdsc->mutex.
599  */
600 static void __register_request(struct ceph_mds_client *mdsc,
601                                struct ceph_mds_request *req,
602                                struct inode *dir)
603 {
604         req->r_tid = ++mdsc->last_tid;
605         if (req->r_num_caps)
606                 ceph_reserve_caps(mdsc, &req->r_caps_reservation,
607                                   req->r_num_caps);
608         dout("__register_request %p tid %lld\n", req, req->r_tid);
609         ceph_mdsc_get_request(req);
610         insert_request(&mdsc->request_tree, req);
611
612         req->r_uid = current_fsuid();
613         req->r_gid = current_fsgid();
614
615         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
616                 mdsc->oldest_tid = req->r_tid;
617
618         if (dir) {
619                 ihold(dir);
620                 req->r_unsafe_dir = dir;
621         }
622 }
623
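/*
 * Drop a request from the tree: advance oldest_tid past it if needed,
 * unlink it from any per-inode unsafe lists, release the unsafe dir
 * reference, and drop the registration ref.
 *
 * Called under mdsc->mutex.
 */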
624 static void __unregister_request(struct ceph_mds_client *mdsc,
625                                  struct ceph_mds_request *req)
626 {
627         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
628
629         if (req->r_tid == mdsc->oldest_tid) {
630                 struct rb_node *p = rb_next(&req->r_node);
631                 mdsc->oldest_tid = 0;
632                 while (p) {
633                         struct ceph_mds_request *next_req =
634                                 rb_entry(p, struct ceph_mds_request, r_node);
635                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
636                                 mdsc->oldest_tid = next_req->r_tid;
637                                 break;
638                         }
639                         p = rb_next(p);
640                 }
641         }
642
643         erase_request(&mdsc->request_tree, req);
644
645         if (req->r_unsafe_dir && req->r_got_unsafe) {
646                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
647                 spin_lock(&ci->i_unsafe_lock);
648                 list_del_init(&req->r_unsafe_dir_item);
649                 spin_unlock(&ci->i_unsafe_lock);
650         }
651         if (req->r_target_inode && req->r_got_unsafe) {
652                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
653                 spin_lock(&ci->i_unsafe_lock);
654                 list_del_init(&req->r_unsafe_target_item);
655                 spin_unlock(&ci->i_unsafe_lock);
656         }
657
658         if (req->r_unsafe_dir) {
659                 iput(req->r_unsafe_dir);
660                 req->r_unsafe_dir = NULL;
661         }
662
663         complete_all(&req->r_safe_completion);
664
665         ceph_mdsc_put_request(req);
666 }
667
668 /*
669  * Choose mds to send request to next.  If there is a hint set in the
670  * request (e.g., due to a prior forward hint from the mds), use that.
671  * Otherwise, consult frag tree and/or caps to identify the
672  * appropriate mds.  If all else fails, choose randomly.
673  *
674  * Called under mdsc->mutex.
675  */
676 static struct dentry *get_nonsnap_parent(struct dentry *dentry)
677 {
678         /*
679          * we don't need to worry about protecting the d_parent access
680          * here because we never rename inside the snapped namespace
681          * except to resplice to another snapdir, and either the old or new
682          * result is a valid result.
683          */
684         while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
685                 dentry = dentry->d_parent;
686         return dentry;
687 }
688
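/*
 * Selection order: an explicit resend/forward hint wins; then a frag
 * tree lookup on the relevant inode (random replica for USE_ANY_MDS,
 * otherwise the frag's auth mds); then the session holding an existing
 * cap (the auth cap for USE_AUTH_MDS); and finally a random mds from
 * the mdsmap.
 */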
689 static int __choose_mds(struct ceph_mds_client *mdsc,
690                         struct ceph_mds_request *req)
691 {
692         struct inode *inode;
693         struct ceph_inode_info *ci;
694         struct ceph_cap *cap;
695         int mode = req->r_direct_mode;
696         int mds = -1;
697         u32 hash = req->r_direct_hash;
698         bool is_hash = req->r_direct_is_hash;
699
700         /*
701          * is there a specific mds we should try?  ignore hint if we have
702          * no session and the mds is not up (active or recovering).
703          */
704         if (req->r_resend_mds >= 0 &&
705             (__have_session(mdsc, req->r_resend_mds) ||
706              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
707                 dout("choose_mds using resend_mds mds%d\n",
708                      req->r_resend_mds);
709                 return req->r_resend_mds;
710         }
711
712         if (mode == USE_RANDOM_MDS)
713                 goto random;
714
715         inode = NULL;
716         if (req->r_inode) {
717                 inode = req->r_inode;
718         } else if (req->r_dentry) {
719                 /* ignore race with rename; old or new d_parent is okay */
720                 struct dentry *parent = req->r_dentry->d_parent;
721                 struct inode *dir = d_inode(parent);
722
723                 if (dir->i_sb != mdsc->fsc->sb) {
724                         /* not this fs! */
725                         inode = d_inode(req->r_dentry);
726                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
727                         /* direct snapped/virtual snapdir requests
728                          * based on parent dir inode */
729                         struct dentry *dn = get_nonsnap_parent(parent);
730                         inode = d_inode(dn);
731                         dout("__choose_mds using nonsnap parent %p\n", inode);
732                 } else {
733                         /* dentry target */
734                         inode = d_inode(req->r_dentry);
735                         if (!inode || mode == USE_AUTH_MDS) {
736                                 /* dir + name */
737                                 inode = dir;
738                                 hash = ceph_dentry_hash(dir, req->r_dentry);
739                                 is_hash = true;
740                         }
741                 }
742         }
743
744         dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
745              (int)hash, mode);
746         if (!inode)
747                 goto random;
748         ci = ceph_inode(inode);
749
750         if (is_hash && S_ISDIR(inode->i_mode)) {
751                 struct ceph_inode_frag frag;
752                 int found;
753
754                 ceph_choose_frag(ci, hash, &frag, &found);
755                 if (found) {
756                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
757                                 u8 r;
758
759                                 /* choose a random replica */
760                                 get_random_bytes(&r, 1);
761                                 r %= frag.ndist;
762                                 mds = frag.dist[r];
763                                 dout("choose_mds %p %llx.%llx "
764                                      "frag %u mds%d (%d/%d)\n",
765                                      inode, ceph_vinop(inode),
766                                      frag.frag, mds,
767                                      (int)r, frag.ndist);
768                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
769                                     CEPH_MDS_STATE_ACTIVE)
770                                         return mds;
771                         }
772
773                         /* since this file/dir wasn't known to be
774                          * replicated, then we want to look for the
775                          * authoritative mds. */
776                         mode = USE_AUTH_MDS;
777                         if (frag.mds >= 0) {
778                                 /* choose auth mds */
779                                 mds = frag.mds;
780                                 dout("choose_mds %p %llx.%llx "
781                                      "frag %u mds%d (auth)\n",
782                                      inode, ceph_vinop(inode), frag.frag, mds);
783                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
784                                     CEPH_MDS_STATE_ACTIVE)
785                                         return mds;
786                         }
787                 }
788         }
789
790         spin_lock(&ci->i_ceph_lock);
791         cap = NULL;
792         if (mode == USE_AUTH_MDS)
793                 cap = ci->i_auth_cap;
794         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
795                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
796         if (!cap) {
797                 spin_unlock(&ci->i_ceph_lock);
798                 goto random;
799         }
800         mds = cap->session->s_mds;
801         dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
802              inode, ceph_vinop(inode), mds,
803              cap == ci->i_auth_cap ? "auth " : "", cap);
804         spin_unlock(&ci->i_ceph_lock);
805         return mds;
806
807 random:
808         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
809         dout("choose_mds chose random mds%d\n", mds);
810         return mds;
811 }
812
813
814 /*
815  * session messages
816  */
817 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
818 {
819         struct ceph_msg *msg;
820         struct ceph_mds_session_head *h;
821
822         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
823                            false);
824         if (!msg) {
825                 pr_err("create_session_msg ENOMEM creating msg\n");
826                 return NULL;
827         }
828         h = msg->front.iov_base;
829         h->op = cpu_to_le32(op);
830         h->seq = cpu_to_le64(seq);
831
832         return msg;
833 }
834
835 /*
836  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
837  * to include additional client metadata fields.
838  */
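/*
 * The metadata map is encoded as a u32 entry count followed by
 * length-prefixed key/value string pairs, matching userspace's
 * map<string, string> encoding.  For example, a single entry such as
 * {"hostname": "client1"} (an illustrative value) would be encoded as:
 *
 *   u32 count   = 1
 *   u32 key_len = 8,  "hostname"
 *   u32 val_len = 7,  "client1"
 */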
839 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
840 {
841         struct ceph_msg *msg;
842         struct ceph_mds_session_head *h;
843         int i = -1;
844         int metadata_bytes = 0;
845         int metadata_key_count = 0;
846         struct ceph_options *opt = mdsc->fsc->client->options;
847         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
848         void *p;
849
850         const char* metadata[][2] = {
851                 {"hostname", utsname()->nodename},
852                 {"kernel_version", utsname()->release},
853                 {"entity_id", opt->name ? : ""},
854                 {"root", fsopt->server_path ? : "/"},
855                 {NULL, NULL}
856         };
857
858         /* Calculate serialized length of metadata */
859         metadata_bytes = 4;  /* map length */
860         for (i = 0; metadata[i][0] != NULL; ++i) {
861                 metadata_bytes += 8 + strlen(metadata[i][0]) +
862                         strlen(metadata[i][1]);
863                 metadata_key_count++;
864         }
865
866         /* Allocate the message */
867         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
868                            GFP_NOFS, false);
869         if (!msg) {
870                 pr_err("create_session_msg ENOMEM creating msg\n");
871                 return NULL;
872         }
873         h = msg->front.iov_base;
874         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
875         h->seq = cpu_to_le64(seq);
876
877         /*
878          * Serialize client metadata into waiting buffer space, using
879          * the format that userspace expects for map<string, string>
880          *
881          * ClientSession messages with metadata are v2
882          */
883         msg->hdr.version = cpu_to_le16(2);
884         msg->hdr.compat_version = cpu_to_le16(1);
885
886         /* The write pointer, following the session_head structure */
887         p = msg->front.iov_base + sizeof(*h);
888
889         /* Number of entries in the map */
890         ceph_encode_32(&p, metadata_key_count);
891
892         /* Two length-prefixed strings for each entry in the map */
893         for (i = 0; metadata[i][0] != NULL; ++i) {
894                 size_t const key_len = strlen(metadata[i][0]);
895                 size_t const val_len = strlen(metadata[i][1]);
896
897                 ceph_encode_32(&p, key_len);
898                 memcpy(p, metadata[i][0], key_len);
899                 p += key_len;
900                 ceph_encode_32(&p, val_len);
901                 memcpy(p, metadata[i][1], val_len);
902                 p += val_len;
903         }
904
905         return msg;
906 }
907
908 /*
909  * send session open request.
910  *
911  * called under mdsc->mutex
912  */
913 static int __open_session(struct ceph_mds_client *mdsc,
914                           struct ceph_mds_session *session)
915 {
916         struct ceph_msg *msg;
917         int mstate;
918         int mds = session->s_mds;
919
920         /* wait for mds to go active? */
921         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
922         dout("open_session to mds%d (%s)\n", mds,
923              ceph_mds_state_name(mstate));
924         session->s_state = CEPH_MDS_SESSION_OPENING;
925         session->s_renew_requested = jiffies;
926
927         /* send connect message */
928         msg = create_session_open_msg(mdsc, session->s_seq);
929         if (!msg)
930                 return -ENOMEM;
931         ceph_con_send(&session->s_con, msg);
932         return 0;
933 }
934
935 /*
936  * open sessions for any export targets for the given mds
937  *
938  * called under mdsc->mutex
939  */
940 static struct ceph_mds_session *
941 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
942 {
943         struct ceph_mds_session *session;
944
945         session = __ceph_lookup_mds_session(mdsc, target);
946         if (!session) {
947                 session = register_session(mdsc, target);
948                 if (IS_ERR(session))
949                         return session;
950         }
951         if (session->s_state == CEPH_MDS_SESSION_NEW ||
952             session->s_state == CEPH_MDS_SESSION_CLOSING)
953                 __open_session(mdsc, session);
954
955         return session;
956 }
957
958 struct ceph_mds_session *
959 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
960 {
961         struct ceph_mds_session *session;
962
963         dout("open_export_target_session to mds%d\n", target);
964
965         mutex_lock(&mdsc->mutex);
966         session = __open_export_target_session(mdsc, target);
967         mutex_unlock(&mdsc->mutex);
968
969         return session;
970 }
971
972 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
973                                           struct ceph_mds_session *session)
974 {
975         struct ceph_mds_info *mi;
976         struct ceph_mds_session *ts;
977         int i, mds = session->s_mds;
978
979         if (mds >= mdsc->mdsmap->m_max_mds)
980                 return;
981
982         mi = &mdsc->mdsmap->m_info[mds];
983         dout("open_export_target_sessions for mds%d (%d targets)\n",
984              session->s_mds, mi->num_export_targets);
985
986         for (i = 0; i < mi->num_export_targets; i++) {
987                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
988                 if (!IS_ERR(ts))
989                         ceph_put_mds_session(ts);
990         }
991 }
992
993 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
994                                            struct ceph_mds_session *session)
995 {
996         mutex_lock(&mdsc->mutex);
997         __open_export_target_sessions(mdsc, session);
998         mutex_unlock(&mdsc->mutex);
999 }
1000
1001 /*
1002  * session caps
1003  */
1004
1005 /* caller holds s_cap_lock, we drop it */
1006 static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
1007                                  struct ceph_mds_session *session)
1008         __releases(session->s_cap_lock)
1009 {
1010         LIST_HEAD(tmp_list);
1011         list_splice_init(&session->s_cap_releases, &tmp_list);
1012         session->s_num_cap_releases = 0;
1013         spin_unlock(&session->s_cap_lock);
1014
1015         dout("cleanup_cap_releases mds%d\n", session->s_mds);
1016         while (!list_empty(&tmp_list)) {
1017                 struct ceph_cap *cap;
1018                 /* zero out the in-progress message */
1019                 cap = list_first_entry(&tmp_list,
1020                                         struct ceph_cap, session_caps);
1021                 list_del(&cap->session_caps);
1022                 ceph_put_cap(mdsc, cap);
1023         }
1024 }
1025
1026 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1027                                      struct ceph_mds_session *session)
1028 {
1029         struct ceph_mds_request *req;
1030         struct rb_node *p;
1031
1032         dout("cleanup_session_requests mds%d\n", session->s_mds);
1033         mutex_lock(&mdsc->mutex);
1034         while (!list_empty(&session->s_unsafe)) {
1035                 req = list_first_entry(&session->s_unsafe,
1036                                        struct ceph_mds_request, r_unsafe_item);
1037                 list_del_init(&req->r_unsafe_item);
1038                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1039                                     req->r_tid);
1040                 __unregister_request(mdsc, req);
1041         }
1042         /* zero r_attempts, so kick_requests() will re-send requests */
1043         p = rb_first(&mdsc->request_tree);
1044         while (p) {
1045                 req = rb_entry(p, struct ceph_mds_request, r_node);
1046                 p = rb_next(p);
1047                 if (req->r_session &&
1048                     req->r_session->s_mds == session->s_mds)
1049                         req->r_attempts = 0;
1050         }
1051         mutex_unlock(&mdsc->mutex);
1052 }
1053
1054 /*
1055  * Helper to safely iterate over all caps associated with a session, with
1056  * special care taken to handle a racing __ceph_remove_cap().
1057  *
1058  * Caller must hold session s_mutex.
1059  */
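/*
 * While the callback runs, s_cap_iterator pins the current cap so a
 * racing __ceph_remove_cap() leaves its list linkage in place; if we
 * retake s_cap_lock and find cap->ci cleared, we finish the removal
 * here.  The previous inode reference and any removed cap are dropped
 * only after s_cap_lock has been released.
 */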
1060 static int iterate_session_caps(struct ceph_mds_session *session,
1061                                  int (*cb)(struct inode *, struct ceph_cap *,
1062                                             void *), void *arg)
1063 {
1064         struct list_head *p;
1065         struct ceph_cap *cap;
1066         struct inode *inode, *last_inode = NULL;
1067         struct ceph_cap *old_cap = NULL;
1068         int ret;
1069
1070         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1071         spin_lock(&session->s_cap_lock);
1072         p = session->s_caps.next;
1073         while (p != &session->s_caps) {
1074                 cap = list_entry(p, struct ceph_cap, session_caps);
1075                 inode = igrab(&cap->ci->vfs_inode);
1076                 if (!inode) {
1077                         p = p->next;
1078                         continue;
1079                 }
1080                 session->s_cap_iterator = cap;
1081                 spin_unlock(&session->s_cap_lock);
1082
1083                 if (last_inode) {
1084                         iput(last_inode);
1085                         last_inode = NULL;
1086                 }
1087                 if (old_cap) {
1088                         ceph_put_cap(session->s_mdsc, old_cap);
1089                         old_cap = NULL;
1090                 }
1091
1092                 ret = cb(inode, cap, arg);
1093                 last_inode = inode;
1094
1095                 spin_lock(&session->s_cap_lock);
1096                 p = p->next;
1097                 if (cap->ci == NULL) {
1098                         dout("iterate_session_caps  finishing cap %p removal\n",
1099                              cap);
1100                         BUG_ON(cap->session != session);
1101                         cap->session = NULL;
1102                         list_del_init(&cap->session_caps);
1103                         session->s_nr_caps--;
1104                         if (cap->queue_release) {
1105                                 list_add_tail(&cap->session_caps,
1106                                               &session->s_cap_releases);
1107                                 session->s_num_cap_releases++;
1108                         } else {
1109                                 old_cap = cap;  /* put_cap it w/o locks held */
1110                         }
1111                 }
1112                 if (ret < 0)
1113                         goto out;
1114         }
1115         ret = 0;
1116 out:
1117         session->s_cap_iterator = NULL;
1118         spin_unlock(&session->s_cap_lock);
1119
1120         iput(last_inode);
1121         if (old_cap)
1122                 ceph_put_cap(session->s_mdsc, old_cap);
1123
1124         return ret;
1125 }
1126
1127 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1128                                   void *arg)
1129 {
1130         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1131         struct ceph_inode_info *ci = ceph_inode(inode);
1132         LIST_HEAD(to_remove);
1133         bool drop = false;
1134         bool invalidate = false;
1135
1136         dout("removing cap %p, ci is %p, inode is %p\n",
1137              cap, ci, &ci->vfs_inode);
1138         spin_lock(&ci->i_ceph_lock);
1139         __ceph_remove_cap(cap, false);
1140         if (!ci->i_auth_cap) {
1141                 struct ceph_cap_flush *cf;
1142                 struct ceph_mds_client *mdsc = fsc->mdsc;
1143
1144                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1145
1146                 if (ci->i_wrbuffer_ref > 0 &&
1147                     ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
1148                         invalidate = true;
1149
1150                 while (!list_empty(&ci->i_cap_flush_list)) {
1151                         cf = list_first_entry(&ci->i_cap_flush_list,
1152                                               struct ceph_cap_flush, i_list);
1153                         list_del(&cf->i_list);
1154                         list_add(&cf->i_list, &to_remove);
1155                 }
1156
1157                 spin_lock(&mdsc->cap_dirty_lock);
1158
1159                 list_for_each_entry(cf, &to_remove, i_list)
1160                         list_del(&cf->g_list);
1161
1162                 if (!list_empty(&ci->i_dirty_item)) {
1163                         pr_warn_ratelimited(
1164                                 " dropping dirty %s state for %p %lld\n",
1165                                 ceph_cap_string(ci->i_dirty_caps),
1166                                 inode, ceph_ino(inode));
1167                         ci->i_dirty_caps = 0;
1168                         list_del_init(&ci->i_dirty_item);
1169                         drop = true;
1170                 }
1171                 if (!list_empty(&ci->i_flushing_item)) {
1172                         pr_warn_ratelimited(
1173                                 " dropping dirty+flushing %s state for %p %lld\n",
1174                                 ceph_cap_string(ci->i_flushing_caps),
1175                                 inode, ceph_ino(inode));
1176                         ci->i_flushing_caps = 0;
1177                         list_del_init(&ci->i_flushing_item);
1178                         mdsc->num_cap_flushing--;
1179                         drop = true;
1180                 }
1181                 spin_unlock(&mdsc->cap_dirty_lock);
1182
1183                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1184                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1185                         ci->i_prealloc_cap_flush = NULL;
1186                 }
1187         }
1188         spin_unlock(&ci->i_ceph_lock);
1189         while (!list_empty(&to_remove)) {
1190                 struct ceph_cap_flush *cf;
1191                 cf = list_first_entry(&to_remove,
1192                                       struct ceph_cap_flush, i_list);
1193                 list_del(&cf->i_list);
1194                 ceph_free_cap_flush(cf);
1195         }
1196
1197         wake_up_all(&ci->i_cap_wq);
1198         if (invalidate)
1199                 ceph_queue_invalidate(inode);
1200         if (drop)
1201                 iput(inode);
1202         return 0;
1203 }
1204
1205 /*
1206  * caller must hold session s_mutex
1207  */
1208 static void remove_session_caps(struct ceph_mds_session *session)
1209 {
1210         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1211         struct super_block *sb = fsc->sb;
1212         dout("remove_session_caps on %p\n", session);
1213         iterate_session_caps(session, remove_session_caps_cb, fsc);
1214
1215         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1216
1217         spin_lock(&session->s_cap_lock);
1218         if (session->s_nr_caps > 0) {
1219                 struct inode *inode;
1220                 struct ceph_cap *cap, *prev = NULL;
1221                 struct ceph_vino vino;
1222                 /*
1223                  * iterate_session_caps() skips inodes that are being
1224                  * deleted, so we need to wait until deletions are complete.
1225                  * __wait_on_freeing_inode() is designed for the job,
1226                  * but it is not exported, so use the inode lookup
1227                  * function to get the same wait.
1228                  */
1229                 while (!list_empty(&session->s_caps)) {
1230                         cap = list_entry(session->s_caps.next,
1231                                          struct ceph_cap, session_caps);
1232                         if (cap == prev)
1233                                 break;
1234                         prev = cap;
1235                         vino = cap->ci->i_vino;
1236                         spin_unlock(&session->s_cap_lock);
1237
1238                         inode = ceph_find_inode(sb, vino);
1239                         iput(inode);
1240
1241                         spin_lock(&session->s_cap_lock);
1242                 }
1243         }
1244
1245         /* drop pending cap releases and unlock s_cap_lock */
1246         cleanup_cap_releases(session->s_mdsc, session);
1247
1248         BUG_ON(session->s_nr_caps > 0);
1249         BUG_ON(!list_empty(&session->s_cap_flushing));
1250 }
1251
1252 /*
1253  * wake up any threads waiting on this session's caps.  if the cap is
1254  * old (didn't get renewed on the client reconnect), remove it now.
1255  *
1256  * caller must hold s_mutex.
1257  */
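/*
 * A non-NULL arg indicates a post-reconnect wakeup: clear the wanted
 * and requested max size so writers waiting to extend the file will
 * re-request a new max size from the recovered mds.
 */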
1258 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1259                               void *arg)
1260 {
1261         struct ceph_inode_info *ci = ceph_inode(inode);
1262
1263         if (arg) {
1264                 spin_lock(&ci->i_ceph_lock);
1265                 ci->i_wanted_max_size = 0;
1266                 ci->i_requested_max_size = 0;
1267                 spin_unlock(&ci->i_ceph_lock);
1268         }
1269         wake_up_all(&ci->i_cap_wq);
1270         return 0;
1271 }
1272
1273 static void wake_up_session_caps(struct ceph_mds_session *session,
1274                                  int reconnect)
1275 {
1276         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1277         iterate_session_caps(session, wake_up_session_cb,
1278                              (void *)(unsigned long)reconnect);
1279 }
1280
1281 /*
1282  * Send periodic message to MDS renewing all currently held caps.  The
1283  * ack will reset the expiration for all caps from this session.
1284  *
1285  * caller holds s_mutex
1286  */
1287 static int send_renew_caps(struct ceph_mds_client *mdsc,
1288                            struct ceph_mds_session *session)
1289 {
1290         struct ceph_msg *msg;
1291         int state;
1292
1293         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1294             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1295                 pr_info("mds%d caps stale\n", session->s_mds);
1296         session->s_renew_requested = jiffies;
1297
1298         /* do not try to renew caps until a recovering mds has reconnected
1299          * with its clients. */
1300         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1301         if (state < CEPH_MDS_STATE_RECONNECT) {
1302                 dout("send_renew_caps ignoring mds%d (%s)\n",
1303                      session->s_mds, ceph_mds_state_name(state));
1304                 return 0;
1305         }
1306
1307         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1308                 ceph_mds_state_name(state));
1309         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1310                                  ++session->s_renew_seq);
1311         if (!msg)
1312                 return -ENOMEM;
1313         ceph_con_send(&session->s_con, msg);
1314         return 0;
1315 }
1316
1317 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1318                              struct ceph_mds_session *session, u64 seq)
1319 {
1320         struct ceph_msg *msg;
1321
1322         dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
1323              session->s_mds, ceph_session_state_name(session->s_state), seq);
1324         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1325         if (!msg)
1326                 return -ENOMEM;
1327         ceph_con_send(&session->s_con, msg);
1328         return 0;
1329 }
1330
1331
1332 /*
1333  * Note new cap ttl, and any transition from stale -> fresh.
1334  *
1335  * Called under session->s_mutex
1336  */
1337 static void renewed_caps(struct ceph_mds_client *mdsc,
1338                          struct ceph_mds_session *session, int is_renew)
1339 {
1340         int was_stale;
1341         int wake = 0;
1342
1343         spin_lock(&session->s_cap_lock);
1344         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1345
1346         session->s_cap_ttl = session->s_renew_requested +
1347                 mdsc->mdsmap->m_session_timeout*HZ;
1348
1349         if (was_stale) {
1350                 if (time_before(jiffies, session->s_cap_ttl)) {
1351                         pr_info("mds%d caps renewed\n", session->s_mds);
1352                         wake = 1;
1353                 } else {
1354                         pr_info("mds%d caps still stale\n", session->s_mds);
1355                 }
1356         }
1357         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1358              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1359              time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
1360         spin_unlock(&session->s_cap_lock);
1361
1362         if (wake)
1363                 wake_up_session_caps(session, 0);
1364 }
1365
1366 /*
1367  * send a session close request
1368  */
1369 static int request_close_session(struct ceph_mds_client *mdsc,
1370                                  struct ceph_mds_session *session)
1371 {
1372         struct ceph_msg *msg;
1373
1374         dout("request_close_session mds%d state %s seq %lld\n",
1375              session->s_mds, ceph_session_state_name(session->s_state),
1376              session->s_seq);
1377         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1378         if (!msg)
1379                 return -ENOMEM;
1380         ceph_con_send(&session->s_con, msg);
1381         return 0;
1382 }
1383
1384 /*
1385  * Called with s_mutex held.
1386  */
1387 static int __close_session(struct ceph_mds_client *mdsc,
1388                          struct ceph_mds_session *session)
1389 {
1390         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1391                 return 0;
1392         session->s_state = CEPH_MDS_SESSION_CLOSING;
1393         return request_close_session(mdsc, session);
1394 }
1395
1396 /*
1397  * Trim old(er) caps.
1398  *
1399  * Because we can't cache an inode without one or more caps, we do
1400  * this indirectly: if a cap is unused, we prune its aliases, at which
1401  * point the inode will hopefully get dropped too.
1402  *
1403  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1404  * memory pressure from the MDS, though, so it needn't be perfect.
1405  */
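/*
 * Per-cap decision: never trim an auth cap while the inode has dirty
 * or flushing caps, cap snaps, or wanted/used write caps; keep any cap
 * whose issued bits are still used or wanted and not covered by
 * another mds.  Otherwise either drop just this cap (a replica exists
 * elsewhere) or prune dentry aliases so the inode itself can go away.
 */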
1406 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1407 {
1408         struct ceph_mds_session *session = arg;
1409         struct ceph_inode_info *ci = ceph_inode(inode);
1410         int used, wanted, oissued, mine;
1411
1412         if (session->s_trim_caps <= 0)
1413                 return -1;
1414
1415         spin_lock(&ci->i_ceph_lock);
1416         mine = cap->issued | cap->implemented;
1417         used = __ceph_caps_used(ci);
1418         wanted = __ceph_caps_file_wanted(ci);
1419         oissued = __ceph_caps_issued_other(ci, cap);
1420
1421         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1422              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1423              ceph_cap_string(used), ceph_cap_string(wanted));
1424         if (cap == ci->i_auth_cap) {
1425                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1426                     !list_empty(&ci->i_cap_snaps))
1427                         goto out;
1428                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1429                         goto out;
1430         }
1431         /* The inode has cached pages, but it's no longer used.
1432          * we can safely drop it */
1433         if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1434             !(oissued & CEPH_CAP_FILE_CACHE)) {
1435                 used = 0;
1436                 oissued = 0;
1437         }
1438         if ((used | wanted) & ~oissued & mine)
1439                 goto out;   /* we need these caps */
1440
1441         session->s_trim_caps--;
1442         if (oissued) {
1443                 /* we aren't the only cap.. just remove us */
1444                 __ceph_remove_cap(cap, true);
1445         } else {
1446                 /* try dropping referring dentries */
1447                 spin_unlock(&ci->i_ceph_lock);
1448                 d_prune_aliases(inode);
1449                 dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
1450                      inode, cap, atomic_read(&inode->i_count));
1451                 return 0;
1452         }
1453
1454 out:
1455         spin_unlock(&ci->i_ceph_lock);
1456         return 0;
1457 }
1458
1459 /*
1460  * Trim session cap count down to some max number.
1461  */
1462 static int trim_caps(struct ceph_mds_client *mdsc,
1463                      struct ceph_mds_session *session,
1464                      int max_caps)
1465 {
1466         int trim_caps = session->s_nr_caps - max_caps;
1467
1468         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1469              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1470         if (trim_caps > 0) {
1471                 session->s_trim_caps = trim_caps;
1472                 iterate_session_caps(session, trim_caps_cb, session);
1473                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1474                      session->s_mds, session->s_nr_caps, max_caps,
1475                         trim_caps - session->s_trim_caps);
1476                 session->s_trim_caps = 0;
1477         }
1478
1479         ceph_send_cap_releases(mdsc, session);
1480         return 0;
1481 }
1482
1483 static int check_caps_flush(struct ceph_mds_client *mdsc,
1484                             u64 want_flush_tid)
1485 {
1486         int ret = 1;
1487
1488         spin_lock(&mdsc->cap_dirty_lock);
1489         if (!list_empty(&mdsc->cap_flush_list)) {
1490                 struct ceph_cap_flush *cf =
1491                         list_first_entry(&mdsc->cap_flush_list,
1492                                          struct ceph_cap_flush, g_list);
1493                 if (cf->tid <= want_flush_tid) {
1494                         dout("check_caps_flush still flushing tid "
1495                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1496                         ret = 0;
1497                 }
1498         }
1499         spin_unlock(&mdsc->cap_dirty_lock);
1500         return ret;
1501 }
1502
1503 /*
1504  * wait until no cap flush with a tid <= want_flush_tid is still
1505  * pending, i.e. until all dirty caps up to want_flush_tid have been
1506  * flushed to the MDS
1507  */
1508 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1509                             u64 want_flush_tid)
1510 {
1511         dout("check_caps_flush want %llu\n", want_flush_tid);
1512
1513         wait_event(mdsc->cap_flushing_wq,
1514                    check_caps_flush(mdsc, want_flush_tid));
1515
1516         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1517 }
1518
1519 /*
1520  * called under s_mutex
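 *
 * Drain the session's queued cap releases into CEPH_MSG_CLIENT_CAPRELEASE
 * messages, batching up to CEPH_CAPS_PER_RELEASE cap items per message.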
1521  */
1522 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1523                             struct ceph_mds_session *session)
1524 {
1525         struct ceph_msg *msg = NULL;
1526         struct ceph_mds_cap_release *head;
1527         struct ceph_mds_cap_item *item;
1528         struct ceph_cap *cap;
1529         LIST_HEAD(tmp_list);
1530         int num_cap_releases;
1531
1532         spin_lock(&session->s_cap_lock);
1533 again:
1534         list_splice_init(&session->s_cap_releases, &tmp_list);
1535         num_cap_releases = session->s_num_cap_releases;
1536         session->s_num_cap_releases = 0;
1537         spin_unlock(&session->s_cap_lock);
1538
1539         while (!list_empty(&tmp_list)) {
1540                 if (!msg) {
1541                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1542                                         PAGE_SIZE, GFP_NOFS, false);
1543                         if (!msg)
1544                                 goto out_err;
1545                         head = msg->front.iov_base;
1546                         head->num = cpu_to_le32(0);
1547                         msg->front.iov_len = sizeof(*head);
1548                 }
1549                 cap = list_first_entry(&tmp_list, struct ceph_cap,
1550                                         session_caps);
1551                 list_del(&cap->session_caps);
1552                 num_cap_releases--;
1553
1554                 head = msg->front.iov_base;
1555                 le32_add_cpu(&head->num, 1);
1556                 item = msg->front.iov_base + msg->front.iov_len;
1557                 item->ino = cpu_to_le64(cap->cap_ino);
1558                 item->cap_id = cpu_to_le64(cap->cap_id);
1559                 item->migrate_seq = cpu_to_le32(cap->mseq);
1560                 item->seq = cpu_to_le32(cap->issue_seq);
1561                 msg->front.iov_len += sizeof(*item);
1562
1563                 ceph_put_cap(mdsc, cap);
1564
1565                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1566                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1567                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1568                         ceph_con_send(&session->s_con, msg);
1569                         msg = NULL;
1570                 }
1571         }
1572
1573         BUG_ON(num_cap_releases != 0);
1574
1575         spin_lock(&session->s_cap_lock);
1576         if (!list_empty(&session->s_cap_releases))
1577                 goto again;
1578         spin_unlock(&session->s_cap_lock);
1579
1580         if (msg) {
1581                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1582                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1583                 ceph_con_send(&session->s_con, msg);
1584         }
1585         return;
1586 out_err:
1587         pr_err("send_cap_releases mds%d, failed to allocate message\n",
1588                 session->s_mds);
1589         spin_lock(&session->s_cap_lock);
1590         list_splice(&tmp_list, &session->s_cap_releases);
1591         session->s_num_cap_releases += num_cap_releases;
1592         spin_unlock(&session->s_cap_lock);
1593 }
1594
1595 /*
1596  * requests
1597  */
1598
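/*
 * Size the buffer that will hold the parsed readdir reply.  The initial
 * estimate is the directory's i_files + i_subdirs count, clamped to the
 * max_readdir mount option; if the allocation fails, fall back to
 * progressively smaller page orders.
 */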
1599 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1600                                     struct inode *dir)
1601 {
1602         struct ceph_inode_info *ci = ceph_inode(dir);
1603         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1604         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1605         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
1606         int order, num_entries;
1607
1608         spin_lock(&ci->i_ceph_lock);
1609         num_entries = ci->i_files + ci->i_subdirs;
1610         spin_unlock(&ci->i_ceph_lock);
1611         num_entries = max(num_entries, 1);
1612         num_entries = min(num_entries, opt->max_readdir);
1613
1614         order = get_order(size * num_entries);
1615         while (order >= 0) {
1616                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
1617                                                              __GFP_NOWARN,
1618                                                              order);
1619                 if (rinfo->dir_entries)
1620                         break;
1621                 order--;
1622         }
1623         if (!rinfo->dir_entries)
1624                 return -ENOMEM;
1625
1626         num_entries = (PAGE_SIZE << order) / size;
1627         num_entries = min(num_entries, opt->max_readdir);
1628
1629         rinfo->dir_buf_size = PAGE_SIZE << order;
1630         req->r_num_caps = num_entries + 1;
1631         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1632         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1633         return 0;
1634 }
1635
1636 /*
1637  * Create an mds request.
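 *
 * A typical caller pattern (a sketch only; the fields set vary by
 * operation, see the users in fs/ceph/dir.c and fs/ceph/file.c):
 *
 *      req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      req->r_dentry = dget(dentry);
 *      req->r_num_caps = 2;
 *      err = ceph_mdsc_do_request(mdsc, dir, req);
 *      ceph_mdsc_put_request(req);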
1638  */
1639 struct ceph_mds_request *
1640 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1641 {
1642         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1643
1644         if (!req)
1645                 return ERR_PTR(-ENOMEM);
1646
1647         mutex_init(&req->r_fill_mutex);
1648         req->r_mdsc = mdsc;
1649         req->r_started = jiffies;
1650         req->r_resend_mds = -1;
1651         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1652         INIT_LIST_HEAD(&req->r_unsafe_target_item);
1653         req->r_fmode = -1;
1654         kref_init(&req->r_kref);
1655         RB_CLEAR_NODE(&req->r_node);
1656         INIT_LIST_HEAD(&req->r_wait);
1657         init_completion(&req->r_completion);
1658         init_completion(&req->r_safe_completion);
1659         INIT_LIST_HEAD(&req->r_unsafe_item);
1660
1661         req->r_stamp = current_fs_time(mdsc->fsc->sb);
1662
1663         req->r_op = op;
1664         req->r_direct_mode = mode;
1665         return req;
1666 }
1667
1668 /*
1669  * return the oldest (lowest tid) request in the request tree, or NULL if none.
1670  *
1671  * called under mdsc->mutex.
1672  */
1673 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1674 {
1675         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1676                 return NULL;
1677         return rb_entry(rb_first(&mdsc->request_tree),
1678                         struct ceph_mds_request, r_node);
1679 }
1680
1681 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1682 {
1683         return mdsc->oldest_tid;
1684 }
1685
1686 /*
1687  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1688  * on build_path_from_dentry in fs/cifs/dir.c.
1689  *
1690  * If @stop_on_nosnap, generate path relative to the first non-snapped
1691  * inode.
1692  *
1693  * Encode hidden .snap dirs as a double /, i.e.
1694  *   foo/.snap/bar -> foo//bar
1695  */
1696 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1697                            int stop_on_nosnap)
1698 {
1699         struct dentry *temp;
1700         char *path;
1701         int len, pos;
1702         unsigned seq;
1703
1704         if (dentry == NULL)
1705                 return ERR_PTR(-EINVAL);
1706
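        /*
         * Walk from the dentry toward the root twice: first to measure the
         * path length, then again to fill the buffer back-to-front.  If a
         * concurrent rename changes the tree under us (detected via
         * rename_lock), start over.
         */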
1707 retry:
1708         len = 0;
1709         seq = read_seqbegin(&rename_lock);
1710         rcu_read_lock();
1711         for (temp = dentry; !IS_ROOT(temp);) {
1712                 struct inode *inode = d_inode(temp);
1713                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1714                         len++;  /* slash only */
1715                 else if (stop_on_nosnap && inode &&
1716                          ceph_snap(inode) == CEPH_NOSNAP)
1717                         break;
1718                 else
1719                         len += 1 + temp->d_name.len;
1720                 temp = temp->d_parent;
1721         }
1722         rcu_read_unlock();
1723         if (len)
1724                 len--;  /* no leading '/' */
1725
1726         path = kmalloc(len+1, GFP_NOFS);
1727         if (path == NULL)
1728                 return ERR_PTR(-ENOMEM);
1729         pos = len;
1730         path[pos] = 0;  /* trailing null */
1731         rcu_read_lock();
1732         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1733                 struct inode *inode;
1734
1735                 spin_lock(&temp->d_lock);
1736                 inode = d_inode(temp);
1737                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1738                         dout("build_path path+%d: %p SNAPDIR\n",
1739                              pos, temp);
1740                 } else if (stop_on_nosnap && inode &&
1741                            ceph_snap(inode) == CEPH_NOSNAP) {
1742                         spin_unlock(&temp->d_lock);
1743                         break;
1744                 } else {
1745                         pos -= temp->d_name.len;
1746                         if (pos < 0) {
1747                                 spin_unlock(&temp->d_lock);
1748                                 break;
1749                         }
1750                         strncpy(path + pos, temp->d_name.name,
1751                                 temp->d_name.len);
1752                 }
1753                 spin_unlock(&temp->d_lock);
1754                 if (pos)
1755                         path[--pos] = '/';
1756                 temp = temp->d_parent;
1757         }
1758         rcu_read_unlock();
1759         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1760                 pr_err("build_path did not end path lookup where "
1761                        "expected, namelen is %d, pos is %d\n", len, pos);
1762                 /* presumably this is only possible if racing with a
1763                    rename of one of the parent directories (we can not
1764                    lock the dentries above us to prevent this, but
1765                    retrying should be harmless) */
1766                 kfree(path);
1767                 goto retry;
1768         }
1769
1770         *base = ceph_ino(d_inode(temp));
1771         *plen = len;
1772         dout("build_path on %p %d built %llx '%.*s'\n",
1773              dentry, d_count(dentry), *base, len, path);
1774         return path;
1775 }
1776
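/*
 * Encode a dentry for an MDS request: if the parent directory is not
 * snapped, the parent ino + dentry name is enough; otherwise build the
 * full path.
 */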
1777 static int build_dentry_path(struct dentry *dentry,
1778                              const char **ppath, int *ppathlen, u64 *pino,
1779                              int *pfreepath)
1780 {
1781         char *path;
1782
1783         if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
1784                 *pino = ceph_ino(d_inode(dentry->d_parent));
1785                 *ppath = dentry->d_name.name;
1786                 *ppathlen = dentry->d_name.len;
1787                 return 0;
1788         }
1789         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1790         if (IS_ERR(path))
1791                 return PTR_ERR(path);
1792         *ppath = path;
1793         *pfreepath = 1;
1794         return 0;
1795 }
1796
1797 static int build_inode_path(struct inode *inode,
1798                             const char **ppath, int *ppathlen, u64 *pino,
1799                             int *pfreepath)
1800 {
1801         struct dentry *dentry;
1802         char *path;
1803
1804         if (ceph_snap(inode) == CEPH_NOSNAP) {
1805                 *pino = ceph_ino(inode);
1806                 *ppathlen = 0;
1807                 return 0;
1808         }
1809         dentry = d_find_alias(inode);
1810         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1811         dput(dentry);
1812         if (IS_ERR(path))
1813                 return PTR_ERR(path);
1814         *ppath = path;
1815         *pfreepath = 1;
1816         return 0;
1817 }
1818
1819 /*
1820  * request arguments may be specified via an inode *, a dentry *, or
1821  * an explicit ino+path.
1822  */
1823 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1824                                   const char *rpath, u64 rino,
1825                                   const char **ppath, int *pathlen,
1826                                   u64 *ino, int *freepath)
1827 {
1828         int r = 0;
1829
1830         if (rinode) {
1831                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1832                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1833                      ceph_snap(rinode));
1834         } else if (rdentry) {
1835                 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1836                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1837                      *ppath);
1838         } else if (rpath || rino) {
1839                 *ino = rino;
1840                 *ppath = rpath;
1841                 *pathlen = rpath ? strlen(rpath) : 0;
1842                 dout(" path %.*s\n", *pathlen, rpath);
1843         }
1844
1845         return r;
1846 }
1847
1848 /*
1849  * called under mdsc->mutex
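 *
 * The front of the request message is laid out as: the fixed
 * ceph_mds_request_head, two encoded filepaths (ino + path string),
 * any cap/dentry releases, and finally a ceph_timespec stamp.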
1850  */
1851 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1852                                                struct ceph_mds_request *req,
1853                                                int mds, bool drop_cap_releases)
1854 {
1855         struct ceph_msg *msg;
1856         struct ceph_mds_request_head *head;
1857         const char *path1 = NULL;
1858         const char *path2 = NULL;
1859         u64 ino1 = 0, ino2 = 0;
1860         int pathlen1 = 0, pathlen2 = 0;
1861         int freepath1 = 0, freepath2 = 0;
1862         int len;
1863         u16 releases;
1864         void *p, *end;
1865         int ret;
1866
1867         ret = set_request_path_attr(req->r_inode, req->r_dentry,
1868                               req->r_path1, req->r_ino1.ino,
1869                               &path1, &pathlen1, &ino1, &freepath1);
1870         if (ret < 0) {
1871                 msg = ERR_PTR(ret);
1872                 goto out;
1873         }
1874
1875         ret = set_request_path_attr(NULL, req->r_old_dentry,
1876                               req->r_path2, req->r_ino2.ino,
1877                               &path2, &pathlen2, &ino2, &freepath2);
1878         if (ret < 0) {
1879                 msg = ERR_PTR(ret);
1880                 goto out_free1;
1881         }
1882
1883         len = sizeof(*head) +
1884                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1885                 sizeof(struct ceph_timespec);
1886
1887         /* calculate (max) length for cap releases */
1888         len += sizeof(struct ceph_mds_request_release) *
1889                 (!!req->r_inode_drop + !!req->r_dentry_drop +
1890                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1891         if (req->r_dentry_drop)
1892                 len += req->r_dentry->d_name.len;
1893         if (req->r_old_dentry_drop)
1894                 len += req->r_old_dentry->d_name.len;
1895
1896         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
1897         if (!msg) {
1898                 msg = ERR_PTR(-ENOMEM);
1899                 goto out_free2;
1900         }
1901
1902         msg->hdr.version = cpu_to_le16(2);
1903         msg->hdr.tid = cpu_to_le64(req->r_tid);
1904
1905         head = msg->front.iov_base;
1906         p = msg->front.iov_base + sizeof(*head);
1907         end = msg->front.iov_base + msg->front.iov_len;
1908
1909         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1910         head->op = cpu_to_le32(req->r_op);
1911         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
1912         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
1913         head->args = req->r_args;
1914
1915         ceph_encode_filepath(&p, end, ino1, path1);
1916         ceph_encode_filepath(&p, end, ino2, path2);
1917
1918         /* make note of release offset, in case we need to replay */
1919         req->r_request_release_offset = p - msg->front.iov_base;
1920
1921         /* cap releases */
1922         releases = 0;
1923         if (req->r_inode_drop)
1924                 releases += ceph_encode_inode_release(&p,
1925                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
1926                       mds, req->r_inode_drop, req->r_inode_unless, 0);
1927         if (req->r_dentry_drop)
1928                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1929                        mds, req->r_dentry_drop, req->r_dentry_unless);
1930         if (req->r_old_dentry_drop)
1931                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1932                        mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1933         if (req->r_old_inode_drop)
1934                 releases += ceph_encode_inode_release(&p,
1935                       d_inode(req->r_old_dentry),
1936                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1937
1938         if (drop_cap_releases) {
1939                 releases = 0;
1940                 p = msg->front.iov_base + req->r_request_release_offset;
1941         }
1942
1943         head->num_releases = cpu_to_le16(releases);
1944
1945         /* time stamp */
1946         {
1947                 struct ceph_timespec ts;
1948                 ceph_encode_timespec(&ts, &req->r_stamp);
1949                 ceph_encode_copy(&p, &ts, sizeof(ts));
1950         }
1951
1952         BUG_ON(p > end);
1953         msg->front.iov_len = p - msg->front.iov_base;
1954         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1955
1956         if (req->r_pagelist) {
1957                 struct ceph_pagelist *pagelist = req->r_pagelist;
1958                 atomic_inc(&pagelist->refcnt);
1959                 ceph_msg_data_add_pagelist(msg, pagelist);
1960                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
1961         } else {
1962                 msg->hdr.data_len = 0;
1963         }
1964
1965         msg->hdr.data_off = cpu_to_le16(0);
1966
1967 out_free2:
1968         if (freepath2)
1969                 kfree((char *)path2);
1970 out_free1:
1971         if (freepath1)
1972                 kfree((char *)path1);
1973 out:
1974         return msg;
1975 }
1976
1977 /*
1978  * called under mdsc->mutex if error, under no mutex if
1979  * success.
1980  */
1981 static void complete_request(struct ceph_mds_client *mdsc,
1982                              struct ceph_mds_request *req)
1983 {
1984         if (req->r_callback)
1985                 req->r_callback(mdsc, req);
1986         else
1987                 complete_all(&req->r_completion);
1988 }
1989
1990 /*
1991  * called under mdsc->mutex
1992  */
1993 static int __prepare_send_request(struct ceph_mds_client *mdsc,
1994                                   struct ceph_mds_request *req,
1995                                   int mds, bool drop_cap_releases)
1996 {
1997         struct ceph_mds_request_head *rhead;
1998         struct ceph_msg *msg;
1999         int flags = 0;
2000
2001         req->r_attempts++;
2002         if (req->r_inode) {
2003                 struct ceph_cap *cap =
2004                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2005
2006                 if (cap)
2007                         req->r_sent_on_mseq = cap->mseq;
2008                 else
2009                         req->r_sent_on_mseq = -1;
2010         }
2011         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2012              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2013
2014         if (req->r_got_unsafe) {
2015                 void *p;
2016                 /*
2017                  * Replay.  Do not regenerate message (and rebuild
2018                  * paths, etc.); just use the original message.
2019                  * Rebuilding paths will break for renames because
2020                  * d_move mangles the src name.
2021                  */
2022                 msg = req->r_request;
2023                 rhead = msg->front.iov_base;
2024
2025                 flags = le32_to_cpu(rhead->flags);
2026                 flags |= CEPH_MDS_FLAG_REPLAY;
2027                 rhead->flags = cpu_to_le32(flags);
2028
2029                 if (req->r_target_inode)
2030                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2031
2032                 rhead->num_retry = req->r_attempts - 1;
2033
2034                 /* remove cap/dentry releases from message */
2035                 rhead->num_releases = 0;
2036
2037                 /* time stamp */
2038                 p = msg->front.iov_base + req->r_request_release_offset;
2039                 {
2040                         struct ceph_timespec ts;
2041                         ceph_encode_timespec(&ts, &req->r_stamp);
2042                         ceph_encode_copy(&p, &ts, sizeof(ts));
2043                 }
2044
2045                 msg->front.iov_len = p - msg->front.iov_base;
2046                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2047                 return 0;
2048         }
2049
2050         if (req->r_request) {
2051                 ceph_msg_put(req->r_request);
2052                 req->r_request = NULL;
2053         }
2054         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2055         if (IS_ERR(msg)) {
2056                 req->r_err = PTR_ERR(msg);
2057                 return PTR_ERR(msg);
2058         }
2059         req->r_request = msg;
2060
2061         rhead = msg->front.iov_base;
2062         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2063         if (req->r_got_unsafe)
2064                 flags |= CEPH_MDS_FLAG_REPLAY;
2065         if (req->r_locked_dir)
2066                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2067         rhead->flags = cpu_to_le32(flags);
2068         rhead->num_fwd = req->r_num_fwd;
2069         rhead->num_retry = req->r_attempts - 1;
2070         rhead->ino = 0;
2071
2072         dout(" r_locked_dir = %p\n", req->r_locked_dir);
2073         return 0;
2074 }
2075
2076 /*
2077  * send request, or put it on the appropriate wait list.
2078  */
2079 static int __do_request(struct ceph_mds_client *mdsc,
2080                         struct ceph_mds_request *req)
2081 {
2082         struct ceph_mds_session *session = NULL;
2083         int mds = -1;
2084         int err = 0;
2085
2086         if (req->r_err || req->r_got_result) {
2087                 if (req->r_aborted)
2088                         __unregister_request(mdsc, req);
2089                 goto out;
2090         }
2091
2092         if (req->r_timeout &&
2093             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2094                 dout("do_request timed out\n");
2095                 err = -EIO;
2096                 goto finish;
2097         }
2098         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2099                 dout("do_request forced umount\n");
2100                 err = -EIO;
2101                 goto finish;
2102         }
2103
2104         put_request_session(req);
2105
2106         mds = __choose_mds(mdsc, req);
2107         if (mds < 0 ||
2108             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2109                 if (mdsc->mdsmap_err) {
2110                         err = mdsc->mdsmap_err;
2111                         dout("do_request mdsmap err %d\n", err);
2112                         goto finish;
2113                 }
2114                 dout("do_request no mds or not active, waiting for map\n");
2115                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2116                 goto out;
2117         }
2118
2119         /* get, open session */
2120         session = __ceph_lookup_mds_session(mdsc, mds);
2121         if (!session) {
2122                 session = register_session(mdsc, mds);
2123                 if (IS_ERR(session)) {
2124                         err = PTR_ERR(session);
2125                         goto finish;
2126                 }
2127         }
2128         req->r_session = get_session(session);
2129
2130         dout("do_request mds%d session %p state %s\n", mds, session,
2131              ceph_session_state_name(session->s_state));
2132         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2133             session->s_state != CEPH_MDS_SESSION_HUNG) {
2134                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2135                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2136                         __open_session(mdsc, session);
2137                 list_add(&req->r_wait, &session->s_waiting);
2138                 goto out_session;
2139         }
2140
2141         /* send request */
2142         req->r_resend_mds = -1;   /* forget any previous mds hint */
2143
2144         if (req->r_request_started == 0)   /* note request start time */
2145                 req->r_request_started = jiffies;
2146
2147         err = __prepare_send_request(mdsc, req, mds, false);
2148         if (!err) {
2149                 ceph_msg_get(req->r_request);
2150                 ceph_con_send(&session->s_con, req->r_request);
2151         }
2152
2153 out_session:
2154         ceph_put_mds_session(session);
2155 finish:
2156         if (err) {
2157                 dout("__do_request early error %d\n", err);
2158                 req->r_err = err;
2159                 complete_request(mdsc, req);
2160                 __unregister_request(mdsc, req);
2161         }
2162 out:
2163         return err;
2164 }
2165
2166 /*
2167  * called under mdsc->mutex
2168  */
2169 static void __wake_requests(struct ceph_mds_client *mdsc,
2170                             struct list_head *head)
2171 {
2172         struct ceph_mds_request *req;
2173         LIST_HEAD(tmp_list);
2174
2175         list_splice_init(head, &tmp_list);
2176
2177         while (!list_empty(&tmp_list)) {
2178                 req = list_entry(tmp_list.next,
2179                                  struct ceph_mds_request, r_wait);
2180                 list_del_init(&req->r_wait);
2181                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2182                 __do_request(mdsc, req);
2183         }
2184 }
2185
2186 /*
2187  * Wake up threads with requests pending for @mds, so that they can
2188  * resubmit their requests to a possibly different mds.
2189  */
2190 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2191 {
2192         struct ceph_mds_request *req;
2193         struct rb_node *p = rb_first(&mdsc->request_tree);
2194
2195         dout("kick_requests mds%d\n", mds);
2196         while (p) {
2197                 req = rb_entry(p, struct ceph_mds_request, r_node);
2198                 p = rb_next(p);
2199                 if (req->r_got_unsafe)
2200                         continue;
2201                 if (req->r_attempts > 0)
2202                         continue; /* only new requests */
2203                 if (req->r_session &&
2204                     req->r_session->s_mds == mds) {
2205                         dout(" kicking tid %llu\n", req->r_tid);
2206                         list_del_init(&req->r_wait);
2207                         __do_request(mdsc, req);
2208                 }
2209         }
2210 }
2211
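/*
 * Register and kick off a request without waiting for the reply; the
 * caller is notified via r_callback / r_completion.
 */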
2212 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2213                               struct ceph_mds_request *req)
2214 {
2215         dout("submit_request on %p\n", req);
2216         mutex_lock(&mdsc->mutex);
2217         __register_request(mdsc, req, NULL);
2218         __do_request(mdsc, req);
2219         mutex_unlock(&mdsc->mutex);
2220 }
2221
2222 /*
2223  * Synchronously perform an mds request.  Take care of all of the
2224  * session setup, forwarding, and retry details.
2225  */
2226 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2227                          struct inode *dir,
2228                          struct ceph_mds_request *req)
2229 {
2230         int err;
2231
2232         dout("do_request on %p\n", req);
2233
2234         /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
2235         if (req->r_inode)
2236                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2237         if (req->r_locked_dir)
2238                 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
2239         if (req->r_old_dentry_dir)
2240                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2241                                   CEPH_CAP_PIN);
2242
2243         /* issue */
2244         mutex_lock(&mdsc->mutex);
2245         __register_request(mdsc, req, dir);
2246         __do_request(mdsc, req);
2247
2248         if (req->r_err) {
2249                 err = req->r_err;
2250                 goto out;
2251         }
2252
2253         /* wait */
2254         mutex_unlock(&mdsc->mutex);
2255         dout("do_request waiting\n");
2256         if (!req->r_timeout && req->r_wait_for_completion) {
2257                 err = req->r_wait_for_completion(mdsc, req);
2258         } else {
2259                 long timeleft = wait_for_completion_killable_timeout(
2260                                         &req->r_completion,
2261                                         ceph_timeout_jiffies(req->r_timeout));
2262                 if (timeleft > 0)
2263                         err = 0;
2264                 else if (!timeleft)
2265                         err = -EIO;  /* timed out */
2266                 else
2267                         err = timeleft;  /* killed */
2268         }
2269         dout("do_request waited, got %d\n", err);
2270         mutex_lock(&mdsc->mutex);
2271
2272         /* only abort if we didn't race with a real reply */
2273         if (req->r_got_result) {
2274                 err = le32_to_cpu(req->r_reply_info.head->result);
2275         } else if (err < 0) {
2276                 dout("aborted request %lld with %d\n", req->r_tid, err);
2277
2278                 /*
2279                  * ensure we aren't running concurrently with
2280                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2281                  * rely on locks (dir mutex) held by our caller.
2282                  */
2283                 mutex_lock(&req->r_fill_mutex);
2284                 req->r_err = err;
2285                 req->r_aborted = true;
2286                 mutex_unlock(&req->r_fill_mutex);
2287
2288                 if (req->r_locked_dir &&
2289                     (req->r_op & CEPH_MDS_OP_WRITE))
2290                         ceph_invalidate_dir_request(req);
2291         } else {
2292                 err = req->r_err;
2293         }
2294
2295 out:
2296         mutex_unlock(&mdsc->mutex);
2297         dout("do_request %p done, result %d\n", req, err);
2298         return err;
2299 }
2300
2301 /*
2302  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2303  * namespace request.
2304  */
2305 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2306 {
2307         struct inode *inode = req->r_locked_dir;
2308
2309         dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
2310
2311         ceph_dir_clear_complete(inode);
2312         if (req->r_dentry)
2313                 ceph_invalidate_dentry_lease(req->r_dentry);
2314         if (req->r_old_dentry)
2315                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2316 }
2317
2318 /*
2319  * Handle mds reply.
2320  *
2321  * We take the session mutex and parse and process the reply immediately.
2322  * This preserves the logical ordering of replies, capabilities, etc., sent
2323  * by the MDS as they are applied to our local cache.
2324  */
2325 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2326 {
2327         struct ceph_mds_client *mdsc = session->s_mdsc;
2328         struct ceph_mds_request *req;
2329         struct ceph_mds_reply_head *head = msg->front.iov_base;
2330         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2331         struct ceph_snap_realm *realm;
2332         u64 tid;
2333         int err, result;
2334         int mds = session->s_mds;
2335
2336         if (msg->front.iov_len < sizeof(*head)) {
2337                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2338                 ceph_msg_dump(msg);
2339                 return;
2340         }
2341
2342         /* get request, session */
2343         tid = le64_to_cpu(msg->hdr.tid);
2344         mutex_lock(&mdsc->mutex);
2345         req = lookup_get_request(mdsc, tid);
2346         if (!req) {
2347                 dout("handle_reply on unknown tid %llu\n", tid);
2348                 mutex_unlock(&mdsc->mutex);
2349                 return;
2350         }
2351         dout("handle_reply %p\n", req);
2352
2353         /* correct session? */
2354         if (req->r_session != session) {
2355                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2356                        " not mds%d\n", tid, session->s_mds,
2357                        req->r_session ? req->r_session->s_mds : -1);
2358                 mutex_unlock(&mdsc->mutex);
2359                 goto out;
2360         }
2361
2362         /* dup? */
2363         if ((req->r_got_unsafe && !head->safe) ||
2364             (req->r_got_safe && head->safe)) {
2365                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2366                            head->safe ? "safe" : "unsafe", tid, mds);
2367                 mutex_unlock(&mdsc->mutex);
2368                 goto out;
2369         }
2370         if (req->r_got_safe) {
2371                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2372                            tid, mds);
2373                 mutex_unlock(&mdsc->mutex);
2374                 goto out;
2375         }
2376
2377         result = le32_to_cpu(head->result);
2378
2379         /*
2380          * Handle an ESTALE:
2381          * - if we're not talking to the authority, resend to it
2382          * - if the authority has changed while we weren't looking,
2383          *   resend to the new authority
2384          * - otherwise we just have to return the ESTALE to the caller
2385          */
2386         if (result == -ESTALE) {
2387                 dout("got ESTALE on request %llu", req->r_tid);
2388                 req->r_resend_mds = -1;
2389                 if (req->r_direct_mode != USE_AUTH_MDS) {
2390                         dout("not using auth, setting for that now");
2391                         req->r_direct_mode = USE_AUTH_MDS;
2392                         __do_request(mdsc, req);
2393                         mutex_unlock(&mdsc->mutex);
2394                         goto out;
2395                 } else  {
2396                         int mds = __choose_mds(mdsc, req);
2397                         if (mds >= 0 && mds != req->r_session->s_mds) {
2398                                 dout("but auth changed, so resending");
2399                                 __do_request(mdsc, req);
2400                                 mutex_unlock(&mdsc->mutex);
2401                                 goto out;
2402                         }
2403                 }
2404                 dout("have to return ESTALE on request %llu", req->r_tid);
2405         }
2406
2407
2408         if (head->safe) {
2409                 req->r_got_safe = true;
2410                 __unregister_request(mdsc, req);
2411
2412                 if (req->r_got_unsafe) {
2413                         /*
2414                          * We already handled the unsafe response, now do the
2415                          * cleanup.  No need to examine the response; the MDS
2416                          * doesn't include any result info in the safe
2417                          * response.  And even if it did, there is nothing
2418                          * useful we could do with a revised return value.
2419                          */
2420                         dout("got safe reply %llu, mds%d\n", tid, mds);
2421                         list_del_init(&req->r_unsafe_item);
2422
2423                         /* last unsafe request during umount? */
2424                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2425                                 complete_all(&mdsc->safe_umount_waiters);
2426                         mutex_unlock(&mdsc->mutex);
2427                         goto out;
2428                 }
2429         } else {
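                /*
                 * An unsafe reply lets the caller proceed before the MDS has
                 * committed the operation to its journal.  Keep the request
                 * registered so it can be replayed if the MDS restarts; the
                 * matching safe reply will clean it up later.
                 */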
2430                 req->r_got_unsafe = true;
2431                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2432                 if (req->r_unsafe_dir) {
2433                         struct ceph_inode_info *ci =
2434                                         ceph_inode(req->r_unsafe_dir);
2435                         spin_lock(&ci->i_unsafe_lock);
2436                         list_add_tail(&req->r_unsafe_dir_item,
2437                                       &ci->i_unsafe_dirops);
2438                         spin_unlock(&ci->i_unsafe_lock);
2439                 }
2440         }
2441
2442         dout("handle_reply tid %lld result %d\n", tid, result);
2443         rinfo = &req->r_reply_info;
2444         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2445         mutex_unlock(&mdsc->mutex);
2446
2447         mutex_lock(&session->s_mutex);
2448         if (err < 0) {
2449                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2450                 ceph_msg_dump(msg);
2451                 goto out_err;
2452         }
2453
2454         /* snap trace */
2455         realm = NULL;
2456         if (rinfo->snapblob_len) {
2457                 down_write(&mdsc->snap_rwsem);
2458                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2459                                 rinfo->snapblob + rinfo->snapblob_len,
2460                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2461                                 &realm);
2462                 downgrade_write(&mdsc->snap_rwsem);
2463         } else {
2464                 down_read(&mdsc->snap_rwsem);
2465         }
2466
2467         /* insert trace into our cache */
2468         mutex_lock(&req->r_fill_mutex);
2469         current->journal_info = req;
2470         err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2471         if (err == 0) {
2472                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2473                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2474                         ceph_readdir_prepopulate(req, req->r_session);
2475                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2476         }
2477         current->journal_info = NULL;
2478         mutex_unlock(&req->r_fill_mutex);
2479
2480         up_read(&mdsc->snap_rwsem);
2481         if (realm)
2482                 ceph_put_snap_realm(mdsc, realm);
2483
2484         if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
2485                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
2486                 spin_lock(&ci->i_unsafe_lock);
2487                 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
2488                 spin_unlock(&ci->i_unsafe_lock);
2489         }
2490 out_err:
2491         mutex_lock(&mdsc->mutex);
2492         if (!req->r_aborted) {
2493                 if (err) {
2494                         req->r_err = err;
2495                 } else {
2496                         req->r_reply =  ceph_msg_get(msg);
2497                         req->r_got_result = true;
2498                 }
2499         } else {
2500                 dout("reply arrived after request %lld was aborted\n", tid);
2501         }
2502         mutex_unlock(&mdsc->mutex);
2503
2504         mutex_unlock(&session->s_mutex);
2505
2506         /* kick calling process */
2507         complete_request(mdsc, req);
2508 out:
2509         ceph_mdsc_put_request(req);
2510         return;
2511 }
2512
2513
2514
2515 /*
2516  * handle mds notification that our request has been forwarded.
2517  */
2518 static void handle_forward(struct ceph_mds_client *mdsc,
2519                            struct ceph_mds_session *session,
2520                            struct ceph_msg *msg)
2521 {
2522         struct ceph_mds_request *req;
2523         u64 tid = le64_to_cpu(msg->hdr.tid);
2524         u32 next_mds;
2525         u32 fwd_seq;
2526         int err = -EINVAL;
2527         void *p = msg->front.iov_base;
2528         void *end = p + msg->front.iov_len;
2529
2530         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2531         next_mds = ceph_decode_32(&p);
2532         fwd_seq = ceph_decode_32(&p);
2533
2534         mutex_lock(&mdsc->mutex);
2535         req = lookup_get_request(mdsc, tid);
2536         if (!req) {
2537                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2538                 goto out;  /* dup reply? */
2539         }
2540
2541         if (req->r_aborted) {
2542                 dout("forward tid %llu aborted, unregistering\n", tid);
2543                 __unregister_request(mdsc, req);
2544         } else if (fwd_seq <= req->r_num_fwd) {
2545                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2546                      tid, next_mds, req->r_num_fwd, fwd_seq);
2547         } else {
2548                 /* resend. forward race not possible; mds would drop */
2549                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2550                 BUG_ON(req->r_err);
2551                 BUG_ON(req->r_got_result);
2552                 req->r_attempts = 0;
2553                 req->r_num_fwd = fwd_seq;
2554                 req->r_resend_mds = next_mds;
2555                 put_request_session(req);
2556                 __do_request(mdsc, req);
2557         }
2558         ceph_mdsc_put_request(req);
2559 out:
2560         mutex_unlock(&mdsc->mutex);
2561         return;
2562
2563 bad:
2564         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2565 }
2566
2567 /*
2568  * handle a mds session control message
2569  */
2570 static void handle_session(struct ceph_mds_session *session,
2571                            struct ceph_msg *msg)
2572 {
2573         struct ceph_mds_client *mdsc = session->s_mdsc;
2574         u32 op;
2575         u64 seq;
2576         int mds = session->s_mds;
2577         struct ceph_mds_session_head *h = msg->front.iov_base;
2578         int wake = 0;
2579
2580         /* decode */
2581         if (msg->front.iov_len != sizeof(*h))
2582                 goto bad;
2583         op = le32_to_cpu(h->op);
2584         seq = le64_to_cpu(h->seq);
2585
2586         mutex_lock(&mdsc->mutex);
2587         if (op == CEPH_SESSION_CLOSE)
2588                 __unregister_session(mdsc, session);
2589         /* FIXME: this ttl calculation is generous */
2590         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2591         mutex_unlock(&mdsc->mutex);
2592
2593         mutex_lock(&session->s_mutex);
2594
2595         dout("handle_session mds%d %s %p state %s seq %llu\n",
2596              mds, ceph_session_op_name(op), session,
2597              ceph_session_state_name(session->s_state), seq);
2598
2599         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2600                 session->s_state = CEPH_MDS_SESSION_OPEN;
2601                 pr_info("mds%d came back\n", session->s_mds);
2602         }
2603
2604         switch (op) {
2605         case CEPH_SESSION_OPEN:
2606                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2607                         pr_info("mds%d reconnect success\n", session->s_mds);
2608                 session->s_state = CEPH_MDS_SESSION_OPEN;
2609                 renewed_caps(mdsc, session, 0);
2610                 wake = 1;
2611                 if (mdsc->stopping)
2612                         __close_session(mdsc, session);
2613                 break;
2614
2615         case CEPH_SESSION_RENEWCAPS:
2616                 if (session->s_renew_seq == seq)
2617                         renewed_caps(mdsc, session, 1);
2618                 break;
2619
2620         case CEPH_SESSION_CLOSE:
2621                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2622                         pr_info("mds%d reconnect denied\n", session->s_mds);
2623                 cleanup_session_requests(mdsc, session);
2624                 remove_session_caps(session);
2625                 wake = 2; /* for good measure */
2626                 wake_up_all(&mdsc->session_close_wq);
2627                 break;
2628
2629         case CEPH_SESSION_STALE:
2630                 pr_info("mds%d caps went stale, renewing\n",
2631                         session->s_mds);
2632                 spin_lock(&session->s_gen_ttl_lock);
2633                 session->s_cap_gen++;
2634                 session->s_cap_ttl = jiffies - 1;
2635                 spin_unlock(&session->s_gen_ttl_lock);
2636                 send_renew_caps(mdsc, session);
2637                 break;
2638
2639         case CEPH_SESSION_RECALL_STATE:
2640                 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2641                 break;
2642
2643         case CEPH_SESSION_FLUSHMSG:
2644                 send_flushmsg_ack(mdsc, session, seq);
2645                 break;
2646
2647         case CEPH_SESSION_FORCE_RO:
2648                 dout("force_session_readonly %p\n", session);
2649                 spin_lock(&session->s_cap_lock);
2650                 session->s_readonly = true;
2651                 spin_unlock(&session->s_cap_lock);
2652                 wake_up_session_caps(session, 0);
2653                 break;
2654
2655         default:
2656                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2657                 WARN_ON(1);
2658         }
2659
2660         mutex_unlock(&session->s_mutex);
2661         if (wake) {
2662                 mutex_lock(&mdsc->mutex);
2663                 __wake_requests(mdsc, &session->s_waiting);
2664                 if (wake == 2)
2665                         kick_requests(mdsc, mds);
2666                 mutex_unlock(&mdsc->mutex);
2667         }
2668         return;
2669
2670 bad:
2671         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2672                (int)msg->front.iov_len);
2673         ceph_msg_dump(msg);
2674         return;
2675 }
2676
2677
2678 /*
2679  * called under session->s_mutex.
2680  */
2681 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2682                                    struct ceph_mds_session *session)
2683 {
2684         struct ceph_mds_request *req, *nreq;
2685         struct rb_node *p;
2686         int err;
2687
2688         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2689
2690         mutex_lock(&mdsc->mutex);
2691         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2692                 err = __prepare_send_request(mdsc, req, session->s_mds, true);
2693                 if (!err) {
2694                         ceph_msg_get(req->r_request);
2695                         ceph_con_send(&session->s_con, req->r_request);
2696                 }
2697         }
2698
2699         /*
2700          * Also re-send old requests when the MDS enters the reconnect stage,
2701          * so that it can process completed requests during clientreplay.
2702          */
2703         p = rb_first(&mdsc->request_tree);
2704         while (p) {
2705                 req = rb_entry(p, struct ceph_mds_request, r_node);
2706                 p = rb_next(p);
2707                 if (req->r_got_unsafe)
2708                         continue;
2709                 if (req->r_attempts == 0)
2710                         continue; /* only old requests */
2711                 if (req->r_session &&
2712                     req->r_session->s_mds == session->s_mds) {
2713                         err = __prepare_send_request(mdsc, req,
2714                                                      session->s_mds, true);
2715                         if (!err) {
2716                                 ceph_msg_get(req->r_request);
2717                                 ceph_con_send(&session->s_con, req->r_request);
2718                         }
2719                 }
2720         }
2721         mutex_unlock(&mdsc->mutex);
2722 }
2723
2724 /*
2725  * Encode information about a cap for a reconnect with the MDS.
2726  */
2727 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2728                           void *arg)
2729 {
2730         union {
2731                 struct ceph_mds_cap_reconnect v2;
2732                 struct ceph_mds_cap_reconnect_v1 v1;
2733         } rec;
2734         struct ceph_inode_info *ci;
2735         struct ceph_reconnect_state *recon_state = arg;
2736         struct ceph_pagelist *pagelist = recon_state->pagelist;
2737         char *path;
2738         int pathlen, err;
2739         u64 pathbase;
2740         u64 snap_follows;
2741         struct dentry *dentry;
2742
2743         ci = cap->ci;
2744
2745         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2746              inode, ceph_vinop(inode), cap, cap->cap_id,
2747              ceph_cap_string(cap->issued));
2748         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2749         if (err)
2750                 return err;
2751
2752         dentry = d_find_alias(inode);
2753         if (dentry) {
2754                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2755                 if (IS_ERR(path)) {
2756                         err = PTR_ERR(path);
2757                         goto out_dput;
2758                 }
2759         } else {
2760                 path = NULL;
2761                 pathlen = 0;
2762         }
2763
2764         spin_lock(&ci->i_ceph_lock);
2765         cap->seq = 0;        /* reset cap seq */
2766         cap->issue_seq = 0;  /* and issue_seq */
2767         cap->mseq = 0;       /* and migrate_seq */
2768         cap->cap_gen = cap->session->s_cap_gen;
2769
2770         if (recon_state->msg_version >= 2) {
2771                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2772                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2773                 rec.v2.issued = cpu_to_le32(cap->issued);
2774                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2775                 rec.v2.pathbase = cpu_to_le64(pathbase);
2776                 rec.v2.flock_len = 0;
2777         } else {
2778                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2779                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2780                 rec.v1.issued = cpu_to_le32(cap->issued);
2781                 rec.v1.size = cpu_to_le64(inode->i_size);
2782                 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2783                 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2784                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2785                 rec.v1.pathbase = cpu_to_le64(pathbase);
2786         }
2787
2788         if (list_empty(&ci->i_cap_snaps)) {
2789                 snap_follows = 0;
2790         } else {
2791                 struct ceph_cap_snap *capsnap =
2792                         list_first_entry(&ci->i_cap_snaps,
2793                                          struct ceph_cap_snap, ci_item);
2794                 snap_follows = capsnap->follows;
2795         }
2796         spin_unlock(&ci->i_ceph_lock);
2797
2798         if (recon_state->msg_version >= 2) {
2799                 int num_fcntl_locks, num_flock_locks;
2800                 struct ceph_filelock *flocks;
2801                 size_t struct_len, total_len = 0;
2802                 u8 struct_v = 0;
2803
2804 encode_again:
2805                 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2806                 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2807                                  sizeof(struct ceph_filelock), GFP_NOFS);
2808                 if (!flocks) {
2809                         err = -ENOMEM;
2810                         goto out_free;
2811                 }
2812                 err = ceph_encode_locks_to_buffer(inode, flocks,
2813                                                   num_fcntl_locks,
2814                                                   num_flock_locks);
2815                 if (err) {
2816                         kfree(flocks);
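                        /*
                         * The lock count changed between ceph_count_locks()
                         * and encoding; recount and retry.
                         */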
2817                         if (err == -ENOSPC)
2818                                 goto encode_again;
2819                         goto out_free;
2820                 }
2821
2822                 if (recon_state->msg_version >= 3) {
2823                         /* version, compat_version and struct_len */
2824                         total_len = 2 * sizeof(u8) + sizeof(u32);
2825                         struct_v = 2;
2826                 }
2827                 /*
2828                  * number of encoded locks is stable, so copy to pagelist
2829                  */
2830                 struct_len = 2 * sizeof(u32) +
2831                             (num_fcntl_locks + num_flock_locks) *
2832                             sizeof(struct ceph_filelock);
2833                 rec.v2.flock_len = cpu_to_le32(struct_len);
2834
2835                 struct_len += sizeof(rec.v2);
2836                 struct_len += sizeof(u32) + pathlen;
2837
2838                 if (struct_v >= 2)
2839                         struct_len += sizeof(u64); /* snap_follows */
2840
2841                 total_len += struct_len;
2842                 err = ceph_pagelist_reserve(pagelist, total_len);
2843
2844                 if (!err) {
2845                         if (recon_state->msg_version >= 3) {
2846                                 ceph_pagelist_encode_8(pagelist, struct_v);
2847                                 ceph_pagelist_encode_8(pagelist, 1);
2848                                 ceph_pagelist_encode_32(pagelist, struct_len);
2849                         }
2850                         ceph_pagelist_encode_string(pagelist, path, pathlen);
2851                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
2852                         ceph_locks_to_pagelist(flocks, pagelist,
2853                                                num_fcntl_locks,
2854                                                num_flock_locks);
2855                         if (struct_v >= 2)
2856                                 ceph_pagelist_encode_64(pagelist, snap_follows);
2857                 }
2858                 kfree(flocks);
2859         } else {
2860                 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
2861                 err = ceph_pagelist_reserve(pagelist, size);
2862                 if (!err) {
2863                         ceph_pagelist_encode_string(pagelist, path, pathlen);
2864                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
2865                 }
2866         }
2867
2868         recon_state->nr_caps++;
2869 out_free:
2870         kfree(path);
2871 out_dput:
2872         dput(dentry);
2873         return err;
2874 }
2875
2876
2877 /*
2878  * If an MDS fails and recovers, clients need to reconnect in order to
2879  * reestablish shared state.  This includes all caps issued through
2880  * this session _and_ the snap_realm hierarchy.  Because it's not
2881  * clear which snap realms the mds cares about, we send everything we
2882  * know about.. that ensures we'll then get any new info the
2883  * recovering MDS might have.
2884  *
2885  * This is a relatively heavyweight operation, but it's rare.
2886  *
2887  * called with mdsc->mutex held.
2888  */
2889 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2890                                struct ceph_mds_session *session)
2891 {
2892         struct ceph_msg *reply;
2893         struct rb_node *p;
2894         int mds = session->s_mds;
2895         int err = -ENOMEM;
2896         int s_nr_caps;
2897         struct ceph_pagelist *pagelist;
2898         struct ceph_reconnect_state recon_state;
2899
2900         pr_info("mds%d reconnect start\n", mds);
2901
2902         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2903         if (!pagelist)
2904                 goto fail_nopagelist;
2905         ceph_pagelist_init(pagelist);
2906
2907         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
2908         if (!reply)
2909                 goto fail_nomsg;
2910
2911         mutex_lock(&session->s_mutex);
2912         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2913         session->s_seq = 0;
2914
2915         dout("session %p state %s\n", session,
2916              ceph_session_state_name(session->s_state));
2917
2918         spin_lock(&session->s_gen_ttl_lock);
2919         session->s_cap_gen++;
2920         spin_unlock(&session->s_gen_ttl_lock);
2921
2922         spin_lock(&session->s_cap_lock);
2923         /* don't know if session is readonly */
2924         session->s_readonly = 0;
2925         /*
2926          * notify __ceph_remove_cap() that we are composing cap reconnect.
2927          * If a cap gets released before being added to the cap reconnect,
2928          * __ceph_remove_cap() should skip queuing the cap release.
2929          */
2930         session->s_cap_reconnect = 1;
2931         /* drop old cap expires; we're about to reestablish that state */
2932         cleanup_cap_releases(mdsc, session);
2933
2934         /* trim unused caps to reduce MDS's cache rejoin time */
2935         if (mdsc->fsc->sb->s_root)
2936                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
2937
2938         ceph_con_close(&session->s_con);
2939         ceph_con_open(&session->s_con,
2940                       CEPH_ENTITY_TYPE_MDS, mds,
2941                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2942
2943         /* replay unsafe requests */
2944         replay_unsafe_requests(mdsc, session);
2945
2946         down_read(&mdsc->snap_rwsem);
2947
2948         /* traverse this session's caps */
2949         s_nr_caps = session->s_nr_caps;
2950         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2951         if (err)
2952                 goto fail;
2953
2954         recon_state.nr_caps = 0;
2955         recon_state.pagelist = pagelist;
2956         if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
2957                 recon_state.msg_version = 3;
2958         else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
2959                 recon_state.msg_version = 2;
2960         else
2961                 recon_state.msg_version = 1;
2962         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2963         if (err < 0)
2964                 goto fail;
2965
2966         spin_lock(&session->s_cap_lock);
2967         session->s_cap_reconnect = 0;
2968         spin_unlock(&session->s_cap_lock);
2969
2970         /*
2971          * snaprealms.  we provide the mds with the ino, seq (version), and
2972          * parent for all of our realms.  If the mds has any newer info,
2973          * it will tell us.
2974          */
2975         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
2976                 struct ceph_snap_realm *realm =
2977                         rb_entry(p, struct ceph_snap_realm, node);
2978                 struct ceph_mds_snaprealm_reconnect sr_rec;
2979
2980                 dout(" adding snap realm %llx seq %lld parent %llx\n",
2981                      realm->ino, realm->seq, realm->parent_ino);
2982                 sr_rec.ino = cpu_to_le64(realm->ino);
2983                 sr_rec.seq = cpu_to_le64(realm->seq);
2984                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
2985                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
2986                 if (err)
2987                         goto fail;
2988         }
2989
2990         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
2991
2992         /* raced with cap release? */
2993         if (s_nr_caps != recon_state.nr_caps) {
2994                 struct page *page = list_first_entry(&pagelist->head,
2995                                                      struct page, lru);
2996                 __le32 *addr = kmap_atomic(page);
2997                 *addr = cpu_to_le32(recon_state.nr_caps);
2998                 kunmap_atomic(addr);
2999         }
3000
3001         reply->hdr.data_len = cpu_to_le32(pagelist->length);
3002         ceph_msg_data_add_pagelist(reply, pagelist);
3003
3004         ceph_early_kick_flushing_caps(mdsc, session);
3005
3006         ceph_con_send(&session->s_con, reply);
3007
3008         mutex_unlock(&session->s_mutex);
3009
3010         mutex_lock(&mdsc->mutex);
3011         __wake_requests(mdsc, &session->s_waiting);
3012         mutex_unlock(&mdsc->mutex);
3013
3014         up_read(&mdsc->snap_rwsem);
3015         return;
3016
3017 fail:
3018         ceph_msg_put(reply);
3019         up_read(&mdsc->snap_rwsem);
3020         mutex_unlock(&session->s_mutex);
3021 fail_nomsg:
3022         ceph_pagelist_release(pagelist);
3023 fail_nopagelist:
3024         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3025         return;
3026 }
3027
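/*
 * Taken together, the CEPH_MSG_CLIENT_RECONNECT payload built above is
 * roughly:
 *
 *	u32 nr_caps
 *	per-cap records from encode_caps_cb()
 *	one struct ceph_mds_snaprealm_reconnect per known snap realm
 *
 * nr_caps is written up front from s_nr_caps; if caps were released
 * while the list was being composed, the count in the first pagelist
 * page is patched to recon_state.nr_caps before sending.
 */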
3028
3029 /*
3030  * compare old and new mdsmaps, kicking requests
3031  * and closing out old connections as necessary
3032  *
3033  * called under mdsc->mutex.
3034  */
3035 static void check_new_map(struct ceph_mds_client *mdsc,
3036                           struct ceph_mdsmap *newmap,
3037                           struct ceph_mdsmap *oldmap)
3038 {
3039         int i;
3040         int oldstate, newstate;
3041         struct ceph_mds_session *s;
3042
3043         dout("check_new_map new %u old %u\n",
3044              newmap->m_epoch, oldmap->m_epoch);
3045
3046         for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
3047                 if (mdsc->sessions[i] == NULL)
3048                         continue;
3049                 s = mdsc->sessions[i];
3050                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3051                 newstate = ceph_mdsmap_get_state(newmap, i);
3052
3053                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3054                      i, ceph_mds_state_name(oldstate),
3055                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3056                      ceph_mds_state_name(newstate),
3057                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3058                      ceph_session_state_name(s->s_state));
3059
3060                 if (i >= newmap->m_max_mds ||
3061                     memcmp(ceph_mdsmap_get_addr(oldmap, i),
3062                            ceph_mdsmap_get_addr(newmap, i),
3063                            sizeof(struct ceph_entity_addr))) {
3064                         if (s->s_state == CEPH_MDS_SESSION_OPENING) {
3065                                 /* the session never opened, just close it
3066                                  * out now */
3067                                 __wake_requests(mdsc, &s->s_waiting);
3068                                 __unregister_session(mdsc, s);
3069                         } else {
3070                                 /* just close it */
3071                                 mutex_unlock(&mdsc->mutex);
3072                                 mutex_lock(&s->s_mutex);
3073                                 mutex_lock(&mdsc->mutex);
3074                                 ceph_con_close(&s->s_con);
3075                                 mutex_unlock(&s->s_mutex);
3076                                 s->s_state = CEPH_MDS_SESSION_RESTARTING;
3077                         }
3078                 } else if (oldstate == newstate) {
3079                         continue;  /* nothing new with this mds */
3080                 }
3081
3082                 /*
3083                  * send reconnect?
3084                  */
3085                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3086                     newstate >= CEPH_MDS_STATE_RECONNECT) {
3087                         mutex_unlock(&mdsc->mutex);
3088                         send_mds_reconnect(mdsc, s);
3089                         mutex_lock(&mdsc->mutex);
3090                 }
3091
3092                 /*
3093          * kick requests on any mds that has gone active.
3094                  */
3095                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3096                     newstate >= CEPH_MDS_STATE_ACTIVE) {
3097                         if (oldstate != CEPH_MDS_STATE_CREATING &&
3098                             oldstate != CEPH_MDS_STATE_STARTING)
3099                                 pr_info("mds%d recovery completed\n", s->s_mds);
3100                         kick_requests(mdsc, i);
3101                         ceph_kick_flushing_caps(mdsc, s);
3102                         wake_up_session_caps(s, 1);
3103                 }
3104         }
3105
3106         for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
3107                 s = mdsc->sessions[i];
3108                 if (!s)
3109                         continue;
3110                 if (!ceph_mdsmap_is_laggy(newmap, i))
3111                         continue;
3112                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3113                     s->s_state == CEPH_MDS_SESSION_HUNG ||
3114                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
3115                         dout(" connecting to export targets of laggy mds%d\n",
3116                              i);
3117                         __open_export_target_sessions(mdsc, s);
3118                 }
3119         }
3120 }
3121
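/*
 * Condensed view of the transitions handled above, per mds rank
 * (simplified; see the code for the exact conditions):
 *
 *	rank gone or addr changed -> close the session
 *	                             (OPENING: unregister, else RESTARTING)
 *	RESTARTING and new state >= RECONNECT -> send_mds_reconnect()
 *	went ACTIVE -> kick requests, kick flushing caps, wake cap waiters
 *	laggy in new map with a usable session -> open export target sessions
 */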
3122
3123
3124 /*
3125  * leases
3126  */
3127
3128 /*
3129  * caller must hold session s_mutex, dentry->d_lock
3130  */
3131 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3132 {
3133         struct ceph_dentry_info *di = ceph_dentry(dentry);
3134
3135         ceph_put_mds_session(di->lease_session);
3136         di->lease_session = NULL;
3137 }
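
/*
 * Hypothetical call site, only to illustrate the locking contract stated
 * above (session s_mutex and dentry->d_lock both held):
 *
 *	mutex_lock(&session->s_mutex);
 *	spin_lock(&dentry->d_lock);
 *	__ceph_mdsc_drop_dentry_lease(dentry);
 *	spin_unlock(&dentry->d_lock);
 *	mutex_unlock(&session->s_mutex);
 */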
3138
3139 static void handle_lease(struct ceph_mds_client *mdsc,
3140                          struct ceph_mds_session *session,
3141                          struct ceph_msg *msg)
3142 {
3143         struct super_block *sb = mdsc->fsc->sb;
3144         struct inode *inode;
3145         struct dentry *parent, *dentry;
3146         struct ceph_dentry_info *di;
3147         int mds = session->s_mds;
3148         struct ceph_mds_lease *h = msg->front.iov_base;
3149         u32 seq;
3150         struct ceph_vino vino;
3151         struct qstr dname;
3152         int release = 0;
3153
3154         dout("handle_lease from mds%d\n", mds);
3155
3156         /* decode */
3157         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3158                 goto bad;
3159         vino.ino = le64_to_cpu(h->ino);
3160         vino.snap = CEPH_NOSNAP;
3161         seq = le32_to_cpu(h->seq);
3162         dname.name = (void *)h + sizeof(*h) + sizeof(u32);
3163         dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
3164         if (dname.len != get_unaligned_le32(h+1))
3165                 goto bad;
3166
3167         /* lookup inode */
3168         inode = ceph_find_inode(sb, vino);
3169         dout("handle_lease %s, ino %llx %p %.*s\n",
3170              ceph_lease_op_name(h->action), vino.ino, inode,
3171              dname.len, dname.name);
3172
3173         mutex_lock(&session->s_mutex);
3174         session->s_seq++;
3175
3176         if (inode == NULL) {
3177                 dout("handle_lease no inode %llx\n", vino.ino);
3178                 goto release;
3179         }
3180
3181         /* dentry */
3182         parent = d_find_alias(inode);
3183         if (!parent) {
3184                 dout("no parent dentry on inode %p\n", inode);
3185                 WARN_ON(1);
3186                 goto release;  /* hrm... */
3187         }
3188         dname.hash = full_name_hash(parent, dname.name, dname.len);
3189         dentry = d_lookup(parent, &dname);
3190         dput(parent);
3191         if (!dentry)
3192                 goto release;
3193
3194         spin_lock(&dentry->d_lock);
3195         di = ceph_dentry(dentry);
3196         switch (h->action) {
3197         case CEPH_MDS_LEASE_REVOKE:
3198                 if (di->lease_session == session) {
3199                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3200                                 h->seq = cpu_to_le32(di->lease_seq);
3201                         __ceph_mdsc_drop_dentry_lease(dentry);
3202                 }
3203                 release = 1;
3204                 break;
3205
3206         case CEPH_MDS_LEASE_RENEW:
3207                 if (di->lease_session == session &&
3208                     di->lease_gen == session->s_cap_gen &&
3209                     di->lease_renew_from &&
3210                     di->lease_renew_after == 0) {
3211                         unsigned long duration =
3212                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
3213
3214                         di->lease_seq = seq;
3215                         di->time = di->lease_renew_from + duration;
3216                         di->lease_renew_after = di->lease_renew_from +
3217                                 (duration >> 1);
3218                         di->lease_renew_from = 0;
3219                 }
3220                 break;
3221         }
3222         spin_unlock(&dentry->d_lock);
3223         dput(dentry);
3224
3225         if (!release)
3226                 goto out;
3227
3228 release:
3229         /* let's just reuse the same message */
3230         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
3231         ceph_msg_get(msg);
3232         ceph_con_send(&session->s_con, msg);
3233
3234 out:
3235         iput(inode);
3236         mutex_unlock(&session->s_mutex);
3237         return;
3238
3239 bad:
3240         pr_err("corrupt lease message\n");
3241         ceph_msg_dump(msg);
3242 }
3243
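/*
 * Both handle_lease() above and ceph_mdsc_lease_send_msg() below assume
 * the same CLIENT_LEASE front layout (per the fields referenced here):
 *
 *	struct ceph_mds_lease h   (action, seq, duration_ms, ino, first, last)
 *	u32  dname_len
 *	char dname[dname_len]
 */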
3244 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3245                               struct inode *inode,
3246                               struct dentry *dentry, char action,
3247                               u32 seq)
3248 {
3249         struct ceph_msg *msg;
3250         struct ceph_mds_lease *lease;
3251         int len = sizeof(*lease) + sizeof(u32);
3252         int dnamelen = 0;
3253
3254         dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3255              inode, dentry, ceph_lease_op_name(action), session->s_mds);
3256         dnamelen = dentry->d_name.len;
3257         len += dnamelen;
3258
3259         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3260         if (!msg)
3261                 return;
3262         lease = msg->front.iov_base;
3263         lease->action = action;
3264         lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3265         lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3266         lease->seq = cpu_to_le32(seq);
3267         put_unaligned_le32(dnamelen, lease + 1);
3268         memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
3269
3270         /*
3271          * if this is a preemptive lease RELEASE, no need to
3272          * flush the request stream, since the actual request will
3273          * soon follow.
3274          */
3275         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3276
3277         ceph_con_send(&session->s_con, msg);
3278 }
3279
3280 /*
3281  * drop all leases (and dentry refs) in preparation for umount
3282  */
3283 static void drop_leases(struct ceph_mds_client *mdsc)
3284 {
3285         int i;
3286
3287         dout("drop_leases\n");
3288         mutex_lock(&mdsc->mutex);
3289         for (i = 0; i < mdsc->max_sessions; i++) {
3290                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3291                 if (!s)
3292                         continue;
3293                 mutex_unlock(&mdsc->mutex);
3294                 mutex_lock(&s->s_mutex);
3295                 mutex_unlock(&s->s_mutex);
3296                 ceph_put_mds_session(s);
3297                 mutex_lock(&mdsc->mutex);
3298         }
3299         mutex_unlock(&mdsc->mutex);
3300 }
3301
3302
3303
3304 /*
3305  * delayed work -- periodically trim expired leases, renew caps with mds
3306  */
3307 static void schedule_delayed(struct ceph_mds_client *mdsc)
3308 {
3309         int delay = 5;
3310         unsigned hz = round_jiffies_relative(HZ * delay);
3311         schedule_delayed_work(&mdsc->delayed_work, hz);
3312 }
3313
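/*
 * Each run of the delayed work below (rescheduled roughly every 5s by
 * schedule_delayed() above) does, in outline:
 *
 *	- flush delayed/dirty caps via ceph_check_delayed_caps()
 *	- once per mdsmap session_timeout/4, renew caps on each session;
 *	  otherwise just send a keepalive
 *	- resend close requests for sessions stuck in CLOSING
 *	- mark OPEN sessions whose ttl has expired as HUNG
 *	- send queued cap releases on OPEN/HUNG sessions
 */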
3314 static void delayed_work(struct work_struct *work)
3315 {
3316         int i;
3317         struct ceph_mds_client *mdsc =
3318                 container_of(work, struct ceph_mds_client, delayed_work.work);
3319         int renew_interval;
3320         int renew_caps;
3321
3322         dout("mdsc delayed_work\n");
3323         ceph_check_delayed_caps(mdsc);
3324
3325         mutex_lock(&mdsc->mutex);
3326         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
3327         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3328                                    mdsc->last_renew_caps);
3329         if (renew_caps)
3330                 mdsc->last_renew_caps = jiffies;
3331
3332         for (i = 0; i < mdsc->max_sessions; i++) {
3333                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3334                 if (s == NULL)
3335                         continue;
3336                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3337                         dout("resending session close request for mds%d\n",
3338                              s->s_mds);
3339                         request_close_session(mdsc, s);
3340                         ceph_put_mds_session(s);
3341                         continue;
3342                 }
3343                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3344                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3345                                 s->s_state = CEPH_MDS_SESSION_HUNG;
3346                                 pr_info("mds%d hung\n", s->s_mds);
3347                         }
3348                 }
3349                 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3350                         /* this mds has failed or is recovering, just wait */
3351                         ceph_put_mds_session(s);
3352                         continue;
3353                 }
3354                 mutex_unlock(&mdsc->mutex);
3355
3356                 mutex_lock(&s->s_mutex);
3357                 if (renew_caps)
3358                         send_renew_caps(mdsc, s);
3359                 else
3360                         ceph_con_keepalive(&s->s_con);
3361                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3362                     s->s_state == CEPH_MDS_SESSION_HUNG)
3363                         ceph_send_cap_releases(mdsc, s);
3364                 mutex_unlock(&s->s_mutex);
3365                 ceph_put_mds_session(s);
3366
3367                 mutex_lock(&mdsc->mutex);
3368         }
3369         mutex_unlock(&mdsc->mutex);
3370
3371         schedule_delayed(mdsc);
3372 }
3373
3374 int ceph_mdsc_init(struct ceph_fs_client *fsc)
3376 {
3377         struct ceph_mds_client *mdsc;
3378
3379         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3380         if (!mdsc)
3381                 return -ENOMEM;
3382         mdsc->fsc = fsc;
3383         fsc->mdsc = mdsc;
3384         mutex_init(&mdsc->mutex);
3385         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3386         if (mdsc->mdsmap == NULL) {
3387                 kfree(mdsc);
3388                 return -ENOMEM;
3389         }
3390
3391         init_completion(&mdsc->safe_umount_waiters);
3392         init_waitqueue_head(&mdsc->session_close_wq);
3393         INIT_LIST_HEAD(&mdsc->waiting_for_map);
3394         mdsc->sessions = NULL;
3395         atomic_set(&mdsc->num_sessions, 0);
3396         mdsc->max_sessions = 0;
3397         mdsc->stopping = 0;
3398         mdsc->last_snap_seq = 0;
3399         init_rwsem(&mdsc->snap_rwsem);
3400         mdsc->snap_realms = RB_ROOT;
3401         INIT_LIST_HEAD(&mdsc->snap_empty);
3402         spin_lock_init(&mdsc->snap_empty_lock);
3403         mdsc->last_tid = 0;
3404         mdsc->oldest_tid = 0;
3405         mdsc->request_tree = RB_ROOT;
3406         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3407         mdsc->last_renew_caps = jiffies;
3408         INIT_LIST_HEAD(&mdsc->cap_delay_list);
3409         spin_lock_init(&mdsc->cap_delay_lock);
3410         INIT_LIST_HEAD(&mdsc->snap_flush_list);
3411         spin_lock_init(&mdsc->snap_flush_lock);
3412         mdsc->last_cap_flush_tid = 1;
3413         INIT_LIST_HEAD(&mdsc->cap_flush_list);
3414         INIT_LIST_HEAD(&mdsc->cap_dirty);
3415         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3416         mdsc->num_cap_flushing = 0;
3417         spin_lock_init(&mdsc->cap_dirty_lock);
3418         init_waitqueue_head(&mdsc->cap_flushing_wq);
3419         spin_lock_init(&mdsc->dentry_lru_lock);
3420         INIT_LIST_HEAD(&mdsc->dentry_lru);
3421
3422         ceph_caps_init(mdsc);
3423         ceph_adjust_min_caps(mdsc, fsc->min_caps);
3424
3425         init_rwsem(&mdsc->pool_perm_rwsem);
3426         mdsc->pool_perm_tree = RB_ROOT;
3427
3428         return 0;
3429 }
3430
3431 /*
3432  * Wait for safe replies on open mds requests.  If we time out, drop
3433  * all requests from the tree to avoid dangling dentry refs.
3434  */
3435 static void wait_requests(struct ceph_mds_client *mdsc)
3436 {
3437         struct ceph_options *opts = mdsc->fsc->client->options;
3438         struct ceph_mds_request *req;
3439
3440         mutex_lock(&mdsc->mutex);
3441         if (__get_oldest_req(mdsc)) {
3442                 mutex_unlock(&mdsc->mutex);
3443
3444                 dout("wait_requests waiting for requests\n");
3445                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3446                                     ceph_timeout_jiffies(opts->mount_timeout));
3447
3448                 /* tear down remaining requests */
3449                 mutex_lock(&mdsc->mutex);
3450                 while ((req = __get_oldest_req(mdsc))) {
3451                         dout("wait_requests timed out on tid %llu\n",
3452                              req->r_tid);
3453                         __unregister_request(mdsc, req);
3454                 }
3455         }
3456         mutex_unlock(&mdsc->mutex);
3457         dout("wait_requests done\n");
3458 }
3459
3460 /*
3461  * called before mount is ro, and before dentries are torn down.
3462  * (hmm, does this still race with new lookups?)
3463  */
3464 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3465 {
3466         dout("pre_umount\n");
3467         mdsc->stopping = 1;
3468
3469         drop_leases(mdsc);
3470         ceph_flush_dirty_caps(mdsc);
3471         wait_requests(mdsc);
3472
3473         /*
3474          * wait for reply handlers to drop their request refs and
3475          * their inode/dcache refs
3476          */
3477         ceph_msgr_flush();
3478 }
3479
3480 /*
3481  * wait for all write mds requests to flush.
3482  */
3483 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3484 {
3485         struct ceph_mds_request *req = NULL, *nextreq;
3486         struct rb_node *n;
3487
3488         mutex_lock(&mdsc->mutex);
3489         dout("wait_unsafe_requests want %lld\n", want_tid);
3490 restart:
3491         req = __get_oldest_req(mdsc);
3492         while (req && req->r_tid <= want_tid) {
3493                 /* find next request */
3494                 n = rb_next(&req->r_node);
3495                 if (n)
3496                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3497                 else
3498                         nextreq = NULL;
3499                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
3500                     (req->r_op & CEPH_MDS_OP_WRITE)) {
3501                         /* write op */
3502                         ceph_mdsc_get_request(req);
3503                         if (nextreq)
3504                                 ceph_mdsc_get_request(nextreq);
3505                         mutex_unlock(&mdsc->mutex);
3506                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3507                              req->r_tid, want_tid);
3508                         wait_for_completion(&req->r_safe_completion);
3509                         mutex_lock(&mdsc->mutex);
3510                         ceph_mdsc_put_request(req);
3511                         if (!nextreq)
3512                                 break;  /* no next request existed, so we're done */
3513                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
3514                                 /* next request was removed from tree */
3515                                 ceph_mdsc_put_request(nextreq);
3516                                 goto restart;
3517                         }
3518                         ceph_mdsc_put_request(nextreq);  /* won't go away */
3519                 }
3520                 req = nextreq;
3521         }
3522         mutex_unlock(&mdsc->mutex);
3523         dout("wait_unsafe_requests done\n");
3524 }
3525
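/*
 * ceph_mdsc_sync() below orders a sync as: snapshot last_tid, flush
 * dirty caps, then wait for all write MDS requests up to that tid
 * (wait_unsafe_requests) and finally for cap flushes up to
 * last_cap_flush_tid (wait_caps_flush).
 */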
3526 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3527 {
3528         u64 want_tid, want_flush;
3529
3530         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3531                 return;
3532
3533         dout("sync\n");
3534         mutex_lock(&mdsc->mutex);
3535         want_tid = mdsc->last_tid;
3536         mutex_unlock(&mdsc->mutex);
3537
3538         ceph_flush_dirty_caps(mdsc);
3539         spin_lock(&mdsc->cap_dirty_lock);
3540         want_flush = mdsc->last_cap_flush_tid;
3541         if (!list_empty(&mdsc->cap_flush_list)) {
3542                 struct ceph_cap_flush *cf =
3543                         list_last_entry(&mdsc->cap_flush_list,
3544                                         struct ceph_cap_flush, g_list);
3545                 cf->wake = true;
3546         }
3547         spin_unlock(&mdsc->cap_dirty_lock);
3548
3549         dout("sync want tid %lld flush_seq %lld\n",
3550              want_tid, want_flush);
3551
3552         wait_unsafe_requests(mdsc, want_tid);
3553         wait_caps_flush(mdsc, want_flush);
3554 }
3555
3556 /*
3557  * true if all sessions are closed, or we force unmount
3558  */
3559 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
3560 {
3561         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3562                 return true;
3563         return atomic_read(&mdsc->num_sessions) == 0;
3564 }
3565
3566 /*
3567  * called after sb is ro.
3568  */
3569 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3570 {
3571         struct ceph_options *opts = mdsc->fsc->client->options;
3572         struct ceph_mds_session *session;
3573         int i;
3574
3575         dout("close_sessions\n");
3576
3577         /* close sessions */
3578         mutex_lock(&mdsc->mutex);
3579         for (i = 0; i < mdsc->max_sessions; i++) {
3580                 session = __ceph_lookup_mds_session(mdsc, i);
3581                 if (!session)
3582                         continue;
3583                 mutex_unlock(&mdsc->mutex);
3584                 mutex_lock(&session->s_mutex);
3585                 __close_session(mdsc, session);
3586                 mutex_unlock(&session->s_mutex);
3587                 ceph_put_mds_session(session);
3588                 mutex_lock(&mdsc->mutex);
3589         }
3590         mutex_unlock(&mdsc->mutex);
3591
3592         dout("waiting for sessions to close\n");
3593         wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
3594                            ceph_timeout_jiffies(opts->mount_timeout));
3595
3596         /* tear down remaining sessions */
3597         mutex_lock(&mdsc->mutex);
3598         for (i = 0; i < mdsc->max_sessions; i++) {
3599                 if (mdsc->sessions[i]) {
3600                         session = get_session(mdsc->sessions[i]);
3601                         __unregister_session(mdsc, session);
3602                         mutex_unlock(&mdsc->mutex);
3603                         mutex_lock(&session->s_mutex);
3604                         remove_session_caps(session);
3605                         mutex_unlock(&session->s_mutex);
3606                         ceph_put_mds_session(session);
3607                         mutex_lock(&mdsc->mutex);
3608                 }
3609         }
3610         WARN_ON(!list_empty(&mdsc->cap_delay_list));
3611         mutex_unlock(&mdsc->mutex);
3612
3613         ceph_cleanup_empty_realms(mdsc);
3614
3615         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3616
3617         dout("stopped\n");
3618 }
3619
3620 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
3621 {
3622         struct ceph_mds_session *session;
3623         int mds;
3624
3625         dout("force umount\n");
3626
3627         mutex_lock(&mdsc->mutex);
3628         for (mds = 0; mds < mdsc->max_sessions; mds++) {
3629                 session = __ceph_lookup_mds_session(mdsc, mds);
3630                 if (!session)
3631                         continue;
3632                 mutex_unlock(&mdsc->mutex);
3633                 mutex_lock(&session->s_mutex);
3634                 __close_session(mdsc, session);
3635                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
3636                         cleanup_session_requests(mdsc, session);
3637                         remove_session_caps(session);
3638                 }
3639                 mutex_unlock(&session->s_mutex);
3640                 ceph_put_mds_session(session);
3641                 mutex_lock(&mdsc->mutex);
3642                 kick_requests(mdsc, mds);
3643         }
3644         __wake_requests(mdsc, &mdsc->waiting_for_map);
3645         mutex_unlock(&mdsc->mutex);
3646 }
3647
3648 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3649 {
3650         dout("stop\n");
3651         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3652         if (mdsc->mdsmap)
3653                 ceph_mdsmap_destroy(mdsc->mdsmap);
3654         kfree(mdsc->sessions);
3655         ceph_caps_finalize(mdsc);
3656         ceph_pool_perm_destroy(mdsc);
3657 }
3658
3659 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3660 {
3661         struct ceph_mds_client *mdsc = fsc->mdsc;
3662
3663         dout("mdsc_destroy %p\n", mdsc);
3664         ceph_mdsc_stop(mdsc);
3665
3666         /* flush out any connection work with references to us */
3667         ceph_msgr_flush();
3668
3669         fsc->mdsc = NULL;
3670         kfree(mdsc);
3671         dout("mdsc_destroy %p done\n", mdsc);
3672 }
3673
3674 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3675 {
3676         struct ceph_fs_client *fsc = mdsc->fsc;
3677         const char *mds_namespace = fsc->mount_options->mds_namespace;
3678         void *p = msg->front.iov_base;
3679         void *end = p + msg->front.iov_len;
3680         u32 epoch;
3681         u32 map_len;
3682         u32 num_fs;
3683         u32 mount_fscid = (u32)-1;
3684         u8 struct_v, struct_cv;
3685         int err = -EINVAL;
3686
3687         ceph_decode_need(&p, end, sizeof(u32), bad);
3688         epoch = ceph_decode_32(&p);
3689
3690         dout("handle_fsmap epoch %u\n", epoch);
3691
3692         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3693         struct_v = ceph_decode_8(&p);
3694         struct_cv = ceph_decode_8(&p);
3695         map_len = ceph_decode_32(&p);
3696
3697         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
3698         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
3699
3700         num_fs = ceph_decode_32(&p);
3701         while (num_fs-- > 0) {
3702                 void *info_p, *info_end;
3703                 u32 info_len;
3704                 u8 info_v, info_cv;
3705                 u32 fscid, namelen;
3706
3707                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3708                 info_v = ceph_decode_8(&p);
3709                 info_cv = ceph_decode_8(&p);
3710                 info_len = ceph_decode_32(&p);
3711                 ceph_decode_need(&p, end, info_len, bad);
3712                 info_p = p;
3713                 info_end = p + info_len;
3714                 p = info_end;
3715
3716                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
3717                 fscid = ceph_decode_32(&info_p);
3718                 namelen = ceph_decode_32(&info_p);
3719                 ceph_decode_need(&info_p, info_end, namelen, bad);
3720
3721                 if (mds_namespace &&
3722                     strlen(mds_namespace) == namelen &&
3723                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
3724                         mount_fscid = fscid;
3725                         break;
3726                 }
3727         }
3728
3729         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
3730         if (mount_fscid != (u32)-1) {
3731                 fsc->client->monc.fs_cluster_id = mount_fscid;
3732                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
3733                                    0, true);
3734                 ceph_monc_renew_subs(&fsc->client->monc);
3735         } else {
3736                 err = -ENOENT;
3737                 goto err_out;
3738         }
3739         return;
3740 bad:
3741         pr_err("error decoding fsmap\n");
3742 err_out:
3743         mutex_lock(&mdsc->mutex);
3744         mdsc->mdsmap_err = -ENOENT;
3745         __wake_requests(mdsc, &mdsc->waiting_for_map);
3746         mutex_unlock(&mdsc->mutex);
3747         return;
3748 }
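
/*
 * The FSMapUser payload decoded above is, roughly:
 *
 *	u32 epoch
 *	u8 struct_v, u8 struct_cv, u32 map_len
 *	u32 epoch (again), u32 legacy_client_fscid
 *	u32 num_fs, then for each filesystem:
 *	    u8 info_v, u8 info_cv, u32 info_len
 *	    u32 fscid, string name, ...   (remaining per-fs info is skipped)
 *
 * We only look for the entry whose name matches the mds_namespace mount
 * option and record its fscid.
 */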
3749
3750 /*
3751  * handle mds map update.
3752  */
3753 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3754 {
3755         u32 epoch;
3756         u32 maplen;
3757         void *p = msg->front.iov_base;
3758         void *end = p + msg->front.iov_len;
3759         struct ceph_mdsmap *newmap, *oldmap;
3760         struct ceph_fsid fsid;
3761         int err = -EINVAL;
3762
3763         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3764         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3765         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3766                 return;
3767         epoch = ceph_decode_32(&p);
3768         maplen = ceph_decode_32(&p);
3769         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3770
3771         /* do we need it? */
3772         mutex_lock(&mdsc->mutex);
3773         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3774                 dout("handle_map epoch %u <= our %u\n",
3775                      epoch, mdsc->mdsmap->m_epoch);
3776                 mutex_unlock(&mdsc->mutex);
3777                 return;
3778         }
3779
3780         newmap = ceph_mdsmap_decode(&p, end);
3781         if (IS_ERR(newmap)) {
3782                 err = PTR_ERR(newmap);
3783                 goto bad_unlock;
3784         }
3785
3786         /* swap into place */
3787         if (mdsc->mdsmap) {
3788                 oldmap = mdsc->mdsmap;
3789                 mdsc->mdsmap = newmap;
3790                 check_new_map(mdsc, newmap, oldmap);
3791                 ceph_mdsmap_destroy(oldmap);
3792         } else {
3793                 mdsc->mdsmap = newmap;  /* first mds map */
3794         }
3795         mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3796
3797         __wake_requests(mdsc, &mdsc->waiting_for_map);
3798         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
3799                           mdsc->mdsmap->m_epoch);
3800
3801         mutex_unlock(&mdsc->mutex);
3802         schedule_delayed(mdsc);
3803         return;
3804
3805 bad_unlock:
3806         mutex_unlock(&mdsc->mutex);
3807 bad:
3808         pr_err("error decoding mdsmap %d\n", err);
3809         return;
3810 }
3811
3812 static struct ceph_connection *con_get(struct ceph_connection *con)
3813 {
3814         struct ceph_mds_session *s = con->private;
3815
3816         if (get_session(s)) {
3817                 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3818                 return con;
3819         }
3820         dout("mdsc con_get %p FAIL\n", s);
3821         return NULL;
3822 }
3823
3824 static void con_put(struct ceph_connection *con)
3825 {
3826         struct ceph_mds_session *s = con->private;
3827
3828         dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3829         ceph_put_mds_session(s);
3830 }
3831
3832 /*
3833  * if the client is unresponsive for long enough, the mds will kill
3834  * the session entirely.
3835  */
3836 static void peer_reset(struct ceph_connection *con)
3837 {
3838         struct ceph_mds_session *s = con->private;
3839         struct ceph_mds_client *mdsc = s->s_mdsc;
3840
3841         pr_warn("mds%d closed our session\n", s->s_mds);
3842         send_mds_reconnect(mdsc, s);
3843 }
3844
3845 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3846 {
3847         struct ceph_mds_session *s = con->private;
3848         struct ceph_mds_client *mdsc = s->s_mdsc;
3849         int type = le16_to_cpu(msg->hdr.type);
3850
3851         mutex_lock(&mdsc->mutex);
3852         if (__verify_registered_session(mdsc, s) < 0) {
3853                 mutex_unlock(&mdsc->mutex);
3854                 goto out;
3855         }
3856         mutex_unlock(&mdsc->mutex);
3857
3858         switch (type) {
3859         case CEPH_MSG_MDS_MAP:
3860                 ceph_mdsc_handle_mdsmap(mdsc, msg);
3861                 break;
3862         case CEPH_MSG_FS_MAP_USER:
3863                 ceph_mdsc_handle_fsmap(mdsc, msg);
3864                 break;
3865         case CEPH_MSG_CLIENT_SESSION:
3866                 handle_session(s, msg);
3867                 break;
3868         case CEPH_MSG_CLIENT_REPLY:
3869                 handle_reply(s, msg);
3870                 break;
3871         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3872                 handle_forward(mdsc, s, msg);
3873                 break;
3874         case CEPH_MSG_CLIENT_CAPS:
3875                 ceph_handle_caps(s, msg);
3876                 break;
3877         case CEPH_MSG_CLIENT_SNAP:
3878                 ceph_handle_snap(mdsc, s, msg);
3879                 break;
3880         case CEPH_MSG_CLIENT_LEASE:
3881                 handle_lease(mdsc, s, msg);
3882                 break;
3883
3884         default:
3885                 pr_err("received unknown message type %d %s\n", type,
3886                        ceph_msg_type_name(type));
3887         }
3888 out:
3889         ceph_msg_put(msg);
3890 }
3891
3892 /*
3893  * authentication
3894  */
3895
3896 /*
3897  * Note: returned pointer is the address of a structure that's
3898  * managed separately.  Caller must *not* attempt to free it.
3899  */
3900 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3901                                         int *proto, int force_new)
3902 {
3903         struct ceph_mds_session *s = con->private;
3904         struct ceph_mds_client *mdsc = s->s_mdsc;
3905         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3906         struct ceph_auth_handshake *auth = &s->s_auth;
3907
3908         if (force_new && auth->authorizer) {
3909                 ceph_auth_destroy_authorizer(auth->authorizer);
3910                 auth->authorizer = NULL;
3911         }
3912         if (!auth->authorizer) {
3913                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3914                                                       auth);
3915                 if (ret)
3916                         return ERR_PTR(ret);
3917         } else {
3918                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3919                                                       auth);
3920                 if (ret)
3921                         return ERR_PTR(ret);
3922         }
3923         *proto = ac->protocol;
3924
3925         return auth;
3926 }
3927
3928
3929 static int verify_authorizer_reply(struct ceph_connection *con, int len)
3930 {
3931         struct ceph_mds_session *s = con->private;
3932         struct ceph_mds_client *mdsc = s->s_mdsc;
3933         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3934
3935         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
3936 }
3937
3938 static int invalidate_authorizer(struct ceph_connection *con)
3939 {
3940         struct ceph_mds_session *s = con->private;
3941         struct ceph_mds_client *mdsc = s->s_mdsc;
3942         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3943
3944         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3945
3946         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3947 }
3948
3949 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
3950                                 struct ceph_msg_header *hdr, int *skip)
3951 {
3952         struct ceph_msg *msg;
3953         int type = (int) le16_to_cpu(hdr->type);
3954         int front_len = (int) le32_to_cpu(hdr->front_len);
3955
3956         if (con->in_msg)
3957                 return con->in_msg;
3958
3959         *skip = 0;
3960         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
3961         if (!msg) {
3962                 pr_err("unable to allocate msg type %d len %d\n",
3963                        type, front_len);
3964                 return NULL;
3965         }
3966
3967         return msg;
3968 }
3969
3970 static int mds_sign_message(struct ceph_msg *msg)
3971 {
3972        struct ceph_mds_session *s = msg->con->private;
3973        struct ceph_auth_handshake *auth = &s->s_auth;
3974
3975        return ceph_auth_sign_message(auth, msg);
3976 }
3977
3978 static int mds_check_message_signature(struct ceph_msg *msg)
3979 {
3980        struct ceph_mds_session *s = msg->con->private;
3981        struct ceph_auth_handshake *auth = &s->s_auth;
3982
3983        return ceph_auth_check_message_signature(auth, msg);
3984 }
3985
3986 static const struct ceph_connection_operations mds_con_ops = {
3987         .get = con_get,
3988         .put = con_put,
3989         .dispatch = dispatch,
3990         .get_authorizer = get_authorizer,
3991         .verify_authorizer_reply = verify_authorizer_reply,
3992         .invalidate_authorizer = invalidate_authorizer,
3993         .peer_reset = peer_reset,
3994         .alloc_msg = mds_alloc_msg,
3995         .sign_message = mds_sign_message,
3996         .check_message_signature = mds_check_message_signature,
3997 };
3998
3999 /* eof */