/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;
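/*
 * Worked example (illustrative, not part of the original file): on a 64-bit
 * build ~(size_t)0/sizeof(void *) is roughly 2^61, so the constant min above
 * picks INT_MAX, and masking with -BITS_PER_LONG rounds that down to a
 * multiple of 64:
 *
 *	0x7fffffff & ~63 == 0x7fffffc0 (2147483584)
 *
 * so sysctl_nr_open can never be raised past a BITS_PER_LONG-aligned INT_MAX.
 */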
static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
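/*
 * Illustrative sizing (not from the original source): full_fds_bits keeps one
 * bit per word of the open_fds bitmap. For a 1024-slot table on a 64-bit
 * kernel, open_fds is BITS_TO_LONGS(1024) = 16 longs, so BITBIT_NR(1024) =
 * BITS_TO_LONGS(16) = 1 and BITBIT_SIZE(1024) = 8 bytes: a single long whose
 * bit k says "all 64 descriptors covered by open_fds[k] are in use".
 */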
/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);

	cpy = BITBIT_SIZE(ofdt->max_fds);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)(nfdt->full_fds_bits) + cpy, 0, set);
}
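/*
 * Size check for the copy above (illustrative numbers, 64-bit): growing a
 * 256-slot table to 512 slots copies 256 * 8 = 2048 bytes of fd pointers and
 * zeroes the next 2048; the open_fds/close_on_exec bitmaps copy 256/8 = 32
 * bytes and clear 32 more; full_fds_bits stays at BITBIT_SIZE(256) ==
 * BITBIT_SIZE(512) == 8 bytes, so nothing extra needs clearing there.
 */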
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
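	/*
	 * Worked example of the sizing above (illustrative, 64-bit): a request
	 * sized for fd 300 gives nr /= 128 -> 2, roundup_pow_of_two(3) -> 4,
	 * nr = 4 * 128 = 512 slots, i.e. a one-page (4096-byte) fd array. If
	 * sysctl_nr_open had meanwhile dropped to 400, the clamp yields
	 * ((400 - 1) | 63) + 1 = 448, still a multiple of BITS_PER_LONG as the
	 * bitmap code requires.
	 */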
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
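/*
 * Layout sketch (illustrative): the second alloc_fdmem() call above grabs a
 * single block for all three bitmaps and carves it up:
 *
 *	open_fds	nr / 8 bytes
 *	close_on_exec	nr / 8 bytes
 *	full_fds_bits	BITBIT_SIZE(nr) bytes
 *
 * e.g. nr = 512 gives 64 + 64 + 8 = 136 bytes in one allocation; the max_t()
 * with L1_CACHE_BYTES only matters for very small tables (nr = 64 would need
 * just 24 bytes).
 */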
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_sched();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}
/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}
static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table
		 * who knows it may have a new bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
	memcpy(new_fdt->full_fds_bits, old_fdt->full_fds_bits, BITBIT_SIZE(open_files));

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
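/*
 * Usage sketch (not part of this file): fork's copy_files() in kernel/fork.c
 * is the main caller. Roughly, glossing over details of the real helper:
 *
 *	if (clone_flags & CLONE_FILES) {
 *		atomic_inc(&oldf->count);	// threads share one table
 *	} else {
 *		newf = dup_fd(oldf, &error);	// private copy for the child
 *		if (!newf)
 *			return error;
 *		tsk->files = newf;
 *	}
 */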
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched_rcu_qs();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}
void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
{
	unsigned long maxfd = fdt->max_fds;
	unsigned long maxbit = maxfd / BITS_PER_LONG;
	unsigned long bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
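/*
 * Worked example (illustrative, 64-bit): with fds 0-127 all open, open_fds
 * words 0 and 1 are saturated, so bits 0 and 1 of full_fds_bits are set.
 * find_next_fd(fdt, 0) skips both full words via full_fds_bits, computes
 * bitbit = 2 * 64 = 128, bumps start to 128, and only then scans open_fds,
 * instead of walking 128 occupied bits one word at a time.
 */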
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;

	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}

out:
	spin_unlock(&files->file_lock);
	return error;
}
static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */
void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;

	might_sleep();
	rcu_read_lock_sched();

	while (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		wait_event(files->resize_wait, !files->resize_in_progress);
		rcu_read_lock_sched();
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}
void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
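/*
 * Typical consumer pattern (illustrative sketch; my_create_file() is a
 * hypothetical constructor, not a real API): reserve the descriptor first,
 * and publish it with fd_install() only once nothing can fail any more.
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = my_create_file();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */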
/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
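/*
 * Callers normally reach this through the fdget()/fdput() wrappers declared
 * in <linux/file.h>, which decode the FDPUT_FPUT bit packed into the value
 * returned above. A minimal sketch:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	... use f.file for the duration of the syscall ...
 *	fdput(f);
 */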
unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 *
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}
int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}
int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
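/*
 * Usage sketch (illustrative; match_file() is a hypothetical callback): the
 * walk stops at the first nonzero return value, which iterate_fd() then
 * hands back to the caller.
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;
 *	}
 *
 *	n = iterate_fd(files, 0, match_file, filp);
 *	if (n)
 *		... filp is installed at descriptor n - 1 ...
 */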