/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "filecache.h"
#include "trace.h"
#define NFSDDBG_FACILITY	NFSDDBG_FH
/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS	12
#define NFSD_FILE_HASH_SIZE	(1 << NFSD_FILE_HASH_BITS)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)
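
/*
 * Only the NFSD_MAY_READ and NFSD_MAY_WRITE bits are kept in nf->nf_may, so
 * an entry that was opened for both read and write can satisfy a request for
 * either (see nfsd_file_find_locked below).
 */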
struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;
	spinlock_t		nfb_lock;
	unsigned int		nfb_count;
	unsigned int		nfb_maxcount;
};
static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
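
/*
 * Overview: cached nfsd_files live on a fixed-size hash table, keyed on the
 * inode number and the NFSD_MAY_READ/WRITE bits. The hash table holds one
 * reference to each entry; users take extra references via nfsd_file_get().
 * Entries also sit on a list_lru so that a shrinker can unhash unused ones
 * under memory pressure, and each entry pins an fsnotify mark so the cached
 * open can be closed when the file is unlinked or a lease is handed out.
 */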
static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	kmem_cache_free(nfsd_file_slab, nf);
}
static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}
static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!atomic_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}
static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (atomic_dec_and_test(&nfm->nfm_ref))
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
}
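
/*
 * Find the nfsd_file_mark for this inode, or create and attach one if there
 * isn't one yet. If fsnotify_add_mark() returns -EEXIST, another task won the
 * race to attach a mark to the inode, so retry the lookup until we either
 * find a live mark or succeed in attaching our own.
 */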
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
{
	int			err;
	struct fsnotify_mark	*mark;
	struct nfsd_file_mark	*nfm = NULL, *new = NULL;

	do {
		mark = fsnotify_find_inode_mark(nfsd_file_fsnotify_group,
						inode);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			fsnotify_put_mark(mark);
			if (likely(nfm))
				break;
		}

		/* allocate a new nfm */
		if (!new) {
			new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
			if (!new)
				return NULL;
			fsnotify_init_mark(&new->nfm_mark, nfsd_file_mark_free);
			/* assumed mask: the events the handler below acts on */
			new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
			atomic_set(&new->nfm_ref, 1);
		}

		err = fsnotify_add_mark(&new->nfm_mark,
					nfsd_file_fsnotify_group, nf->nf_inode,
					NULL, false);
		if (likely(!err)) {
			nfm = new;
			new = NULL;
		}
	} while (unlikely(err == -EEXIST));	/* kernel errors are negative */

	if (new)
		kmem_cache_free(nfsd_file_mark_slab, new);
	return nfm;
}
static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (nf) {
		INIT_HLIST_NODE(&nf->nf_node);
		INIT_LIST_HEAD(&nf->nf_lru);
		nf->nf_file = NULL;
		nf->nf_flags = 0;
		nf->nf_mark = NULL;
		nf->nf_inode = inode;
		nf->nf_hashval = hashval;
		atomic_set(&nf->nf_ref, 1);
		nf->nf_may = NFSD_FILE_MAY_MASK & may;
		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may & NFSD_MAY_WRITE)
				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may & NFSD_MAY_READ)
				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}
		trace_nfsd_file_alloc(nf);
	}
	return nf;
}
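
/*
 * Tear down an nfsd_file once its last reference has been put. The free of
 * the nfsd_file itself is RCU-deferred, since lookups walk the hash chains
 * under rcu_read_lock().
 */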
static void
nfsd_file_put_final(struct nfsd_file *nf)
{
	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file)
		fput(nf->nf_file);
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
}
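
/*
 * Like nfsd_file_put_final, but uses fput_global() (from the companion
 * patches in this series) so that the caller can flush any delayed __fput
 * work afterward. Returns true if the caller needs to do such a flush; see
 * nfsd_file_dispose_list_sync().
 */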
static bool
nfsd_file_put_final_delayed(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file)
		flush = fput_global(nf->nf_file);
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}
static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);
	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
		clear_bit(NFSD_FILE_HASHED, &nf->nf_flags);
		hlist_del_rcu(&nf->nf_node);
		list_lru_del(&nfsd_file_lru, &nf->nf_lru);
		return true;
	}
	return false;
}
static void
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return;
	if (!atomic_dec_and_test(&nf->nf_ref))
		return;

	list_add(&nf->nf_lru, dispose);
}
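
/*
 * Note the ordering here: we set NFSD_FILE_REFERENCED and only then drop the
 * reference. nfsd_file_lru_cb() reads the refcount first and tests the flag
 * second, which is what lets the shrinker do its aging check locklessly.
 */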
void
nfsd_file_put(struct nfsd_file *nf)
{
	trace_nfsd_file_put(nf);
	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	smp_mb__after_atomic();
	if (atomic_dec_and_test(&nf->nf_ref)) {
		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
		nfsd_file_put_final(nf);
	}
}
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
		return nf;
	return NULL;
}
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		nfsd_file_put_final(nf);
	}
}
static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		if (nfsd_file_put_final_delayed(nf))
			flush = true;
	}
	if (flush)
		fput_global_flush();	/* assumed companion to fput_global() */
}
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
{
	bool unhashed;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (atomic_read(&nf->nf_ref) > 1 ||
	    test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		return LRU_SKIP;

	/*
	 * Drop the LRU lock before taking the bucket lock: nfsd_file_unhash()
	 * removes the entry from the LRU itself, which would deadlock if the
	 * LRU lock were still held.
	 */
	spin_unlock(lock);
	spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
	unhashed = nfsd_file_unhash(nf);
	spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
	if (unhashed)
		nfsd_file_put(nf);
	spin_lock(lock);
	return unhashed ? LRU_REMOVED_RETRY : LRU_RETRY;
}
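
/*
 * The shrinker hooks below let the VM prune the cache under memory pressure:
 * "count" reports how many entries sit on the LRU, and "scan" walks it with
 * nfsd_file_lru_cb to unhash entries that have not been used recently.
 */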
static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, NULL);
}
static struct shrinker	nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};
static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file	*nf;
	struct hlist_node	*tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}
/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}
/**
 * nfsd_file_close_inode - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Unlike the _sync variant,
 * this does not wait for the final __fput of the files to complete.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list(&dispose);
}
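
/*
 * A lease being set on a file would conflict with any opens that nfsd is
 * holding cached for it. Hook the lease notifier chain so that cached opens
 * are closed synchronously before an F_SETLEASE lease is handed out.
 */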
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			      void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};
static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				struct fsnotify_mark *inode_mark,
				struct fsnotify_mark *vfsmount_mark,
				u32 mask, void *data, int data_type,
				const unsigned char *file_name, u32 cookie)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* ...and we don't do anything with vfsmount marks */
	BUG_ON(vfsmount_mark);

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_event = nfsd_file_fsnotify_handle_event,
};
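
/*
 * Bring up the cache: allocate the hash table and slabs, then wire up the
 * LRU, shrinker, lease notifier and fsnotify group, in that order. The error
 * labels below unwind in reverse order, falling through so that a failure at
 * any step tears down everything set up before it.
 */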
int
nfsd_file_cache_init(void)
{
	int		ret = -ENOMEM;
	unsigned int	i;

	if (nfsd_file_hashtbl)
		return 0;

	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
				sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = srcu_notifier_chain_register(&lease_notifier_chain,
				&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		ret = PTR_ERR(nfsd_file_fsnotify_group);
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}
out:
	return ret;
out_notifier:
	srcu_notifier_chain_unregister(&lease_notifier_chain,
				&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	goto out;
}
void
nfsd_file_cache_purge(void)
{
	unsigned int		i;
	struct nfsd_file	*nf;
	LIST_HEAD(dispose);

	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		spin_lock(&nfsd_file_hashtbl[i].nfb_lock);
		while (!hlist_empty(&nfsd_file_hashtbl[i].nfb_head)) {
			nf = hlist_entry(nfsd_file_hashtbl[i].nfb_head.first,
					 struct nfsd_file, nf_node);
			nfsd_file_unhash_and_release_locked(nf, &dispose);
		}
		spin_unlock(&nfsd_file_hashtbl[i].nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}
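
/*
 * Tear the cache down: unhook the lease notifier and shrinker first so that
 * nothing new can enter the cache, purge what is left, and only then release
 * the fsnotify group, slabs and hash table.
 */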
void
nfsd_file_cache_shutdown(void)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain,
				&nfsd_file_lease_notifier);
	unregister_shrinker(&nfsd_file_shrinker);
	nfsd_file_cache_purge();
	list_lru_destroy(&nfsd_file_lru);
	rcu_barrier();	/* wait for outstanding nfsd_file_slab_free callbacks */
	fsnotify_put_group(nfsd_file_fsnotify_group);
	nfsd_file_fsnotify_group = NULL;
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
}
/*
 * Search nfsd_file_hashtbl[] for file. We hash on the inode number and also
 * on the NFSD_MAY_READ/WRITE flags. If the file is open for r/w, then it's
 * usable for either.
 */
static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
		      unsigned int hashval)
{
	struct nfsd_file *nf;
	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if ((need & nf->nf_may) != need)
			continue;
		if (nf->nf_inode == inode)
			return nfsd_file_get(nf);
	}
	return NULL;
}
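
/*
 * Acquire an open file for the given filehandle, from the cache if possible.
 * A new entry is inserted in the hash with NFSD_FILE_PENDING set before the
 * actual open is attempted; anyone who finds the entry in the meantime waits
 * on that bit. If construction fails (nf->nf_file stays NULL), a waiter with
 * identical MAY flags can take over construction; otherwise it drops its
 * reference and retries the lookup from scratch.
 */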
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  unsigned int may_flags, struct nfsd_file **pnf)
{
	__be32	status;
	struct nfsd_file *nf, *new = NULL;
	struct inode *inode;
	unsigned int hashval;

	/* FIXME: skip this if fh_dentry is already set? */
	status = fh_verify(rqstp, fhp, S_IFREG,
			may_flags|NFSD_MAY_OWNER_OVERRIDE);
	if (status != nfs_ok)
		return status;

	inode = d_inode(fhp->fh_dentry);
	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
	rcu_read_lock();
	nf = nfsd_file_find_locked(inode, may_flags, hashval);
	rcu_read_unlock();
	if (nf)
		goto wait_for_construction;

	if (!new) {
		new = nfsd_file_alloc(inode, may_flags, hashval);
		if (!new) {
			trace_nfsd_file_acquire(hashval, inode, may_flags, NULL,
						nfserr_jukebox);
			return nfserr_jukebox;
		}
	}

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nf = nfsd_file_find_locked(inode, may_flags, hashval);
	if (likely(nf == NULL)) {
		/* Take reference for the hashtable */
		atomic_inc(&new->nf_ref);
		__set_bit(NFSD_FILE_HASHED, &new->nf_flags);
		__set_bit(NFSD_FILE_PENDING, &new->nf_flags);
		list_lru_add(&nfsd_file_lru, &new->nf_lru);
		hlist_add_head_rcu(&new->nf_node,
				&nfsd_file_hashtbl[hashval].nfb_head);
		++nfsd_file_hashtbl[hashval].nfb_count;
		nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
				nfsd_file_hashtbl[hashval].nfb_count);
		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
		nf = new;
		new = NULL;
		goto open_file;
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);

wait_for_construction:
	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

	/* Did construction of this file fail? */
	if (!nf->nf_file) {
		/*
		 * We can only take over construction for this nfsd_file if the
		 * MAY flags are equal. Otherwise, we put the reference and try
		 * the find again.
		 */
		if ((may_flags & NFSD_FILE_MAY_MASK) != nf->nf_may) {
			nfsd_file_put(nf);
			goto retry;
		}

		/* try to take over construction for this file */
		if (test_and_set_bit(NFSD_FILE_PENDING, &nf->nf_flags))
			goto wait_for_construction;

		/* sync up the BREAK_* flags with our may_flags */
		if (may_flags & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may_flags & NFSD_MAY_WRITE)
				set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may_flags & NFSD_MAY_READ)
				set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		} else {
			clear_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}

		goto open_file;
	}

	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
		bool write = (may_flags & NFSD_MAY_WRITE);

		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
			status = nfserrno(nfsd_open_break_lease(
					file_inode(nf->nf_file), may_flags));
			if (status == nfs_ok) {
				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
				if (write)
					clear_bit(NFSD_FILE_BREAK_WRITE,
						  &nf->nf_flags);
			}
		}
	}
out:
	if (status == nfs_ok) {
		*pnf = nf;
	} else {
		nfsd_file_put(nf);
		nf = NULL;
	}

	if (new)
		nfsd_file_put(new);

	trace_nfsd_file_acquire(hashval, inode, may_flags, nf, status);
	return status;

open_file:
	nf->nf_mark = nfsd_file_mark_find_or_create(nf, inode);
	if (!nf->nf_mark)
		status = nfserr_jukebox;
	/* FIXME: should we abort opening if the link count goes to 0? */
	if (status == nfs_ok)
		status = nfsd_open_verified(rqstp, fhp, S_IFREG, may_flags,
						&nf->nf_file);
	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
	smp_mb__after_atomic();
	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
	goto out;
}
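
/*
 * Typical caller pattern (a sketch, not taken verbatim from this series):
 *
 *	struct nfsd_file *nf;
 *	__be32 status;
 *
 *	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_WRITE, &nf);
 *	if (status != nfs_ok)
 *		return status;
 *	... do I/O via nf->nf_file ...
 *	nfsd_file_put(nf);
 */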
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
	unsigned int i, count = 0, longest = 0;

	/*
	 * No need for spinlocks here since we're not terribly interested in
	 * accuracy. We do take the nfsd_mutex simply to ensure that we
	 * don't end up racing with server shutdown.
	 */
	mutex_lock(&nfsd_mutex);
	if (nfsd_file_hashtbl) {
		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
			count += nfsd_file_hashtbl[i].nfb_count;
			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
		}
	}
	mutex_unlock(&nfsd_mutex);

	seq_printf(m, "total entries: %u\n", count);
	seq_printf(m, "longest chain: %u\n", longest);
	return 0;
}
int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_file_cache_stats_show, NULL);
}