/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}
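
/*
 * Illustrative only (not part of this file): the limits initialized above
 * are per ipc namespace and are exposed as sysctls, e.g.
 *
 *	sysctl kernel.shmmax	- max segment size in bytes
 *	sysctl kernel.shmall	- system-wide limit in pages
 *	sysctl kernel.shmmni	- max number of segments
 *
 * kernel.shm_rmid_forced starts at 0 and can be toggled at runtime.
 */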
/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
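
/*
 * Illustrative only: every live segment shows up as one line of
 * /proc/sysvipc/shm, matching the header registered above, e.g.
 *
 *	$ cat /proc/sysvipc/shm
 *	       key      shmid perms       size  cpid  lpid nattch ...
 *	         0      32768   600    4194304  1234  1234      2 ...
 *
 * (field values are made up; column widths depend on BITS_PER_LONG)
 */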
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void __shm_free(struct shmid_kernel *shp)
{
	kvfree(shp);
}
static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(shp);
	__shm_free(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}
/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
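
/*
 * Illustrative userspace sketch (not part of this file): shmctl(IPC_RMID)
 * on a segment that still has attachments only marks it SHM_DEST (see
 * do_shm_rmid()); the segment is actually freed on the last detach, once
 * shm_may_destroy() returns true.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	- nattch == 1, so only SHM_DEST is set
 *	shmdt(p);			- nattch drops to 0, segment is freed
 */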
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}
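
/*
 * Illustrative only (assumption: the sysctl handler invokes
 * shm_destroy_orphaned() when the flag is set): this is what makes a
 * runtime toggle such as
 *
 *	# sysctl kernel.shm_rmid_forced=1
 *
 * retroactive, reaping segments whose creator already exited rather
 * than only affecting segments created afterwards.
 */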
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}
static int shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent a
	 * removed IPC ID: propagate the shm_lock() error to the caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
static struct shmid_kernel *shm_alloc(void)
{
	struct shmid_kernel *shp;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return NULL;

	atomic_set(&shp->shm_perm.refcount, 1);

	return shp;
}
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = shm_alloc();
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		__shm_free(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not skip accounting (SHM_NORESERVE) under
		 * OVERCOMMIT_NEVER, even if the caller asked for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
	return error;
}
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
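
/*
 * Illustrative userspace sketch (not part of this file): the same syscall
 * creates regular and hugetlb-backed segments; a huge page size can be
 * encoded in shmflg via SHM_HUGE_SHIFT, e.g.
 *
 *	int id  = shmget(ftok("/tmp/f", 1), 1 << 20, IPC_CREAT | 0600);
 *	int hid = shmget(IPC_PRIVATE, 2 * 1024 * 1024,
 *			 IPC_CREAT | SHM_HUGETLB | 0600);
 *
 * Sizes are checked against shm_ctlmax/shm_ctlall and rounded up to whole
 * (huge) pages by newseg() above.
 */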
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}
/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();

			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
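
/*
 * Illustrative userspace sketch (not part of this file):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	- fills ds via the shmid64 copy above
 *	shmctl(id, SHM_LOCK, NULL);	- needs CAP_IPC_LOCK or RLIMIT_MEMLOCK
 *	shmctl(id, SHM_UNLOCK, NULL);
 */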
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr = (unsigned long)shmaddr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags = MAP_SHARED;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;

	if (addr) {
		if (addr & (shmlba - 1)) {
			/*
			 * Round down to the nearest multiple of shmlba
			 * (e.g. SHM_RND with shmlba 0x4000 turns 0x12345
			 * into 0x10000). For sane do_mmap_pgoff()
			 * parameters, avoid round downs that trigger
			 * nil-page and MAP_FIXED.
			 */
			if ((shmflg & SHM_RND) && addr >= shmlba)
				addr &= ~(shmlba - 1);
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags |= MAP_FIXED;
	} else if ((shmflg & SHM_REMAP))
		goto out;

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
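
/*
 * Illustrative userspace sketch (not part of this file):
 *
 *	void *p  = shmat(id, NULL, 0);		- kernel picks the address
 *	void *q  = shmat(id, hint, SHM_RND);	- hint rounded down to SHMLBA
 *	void *ro = shmat(id, NULL, SHM_RDONLY);	- read-only mapping
 */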
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		vma = next;
	}

#else	/* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must
	 * be given.
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		retval = 0;
	}
#endif

	up_write(&mm->mmap_sem);
	return retval;
}
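
/*
 * Illustrative userspace sketch (not part of this file): shmdt() takes the
 * attach address, not the segment id; the matching is done against the
 * vmas walked above, so a single attach is unmapped even if later
 * mprotect()/munmap() calls split it into several vmas.
 *
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 */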
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),