/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#define DEBUGP(fmt , a...)

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
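/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096):
 * MOD_NUMBER_OF_PAGES(0x1000, 4097) covers bytes 0x1000..0x2000, so
 * PFN_DOWN(0x2000) - PFN_DOWN(0x1000) + 1 = 2 pages; SIZE == 0 yields 0.
 */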
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
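/*
 * Usage sketch: layout_sections() and layout_symtab() below store each
 * section's allocation offset in sh_entsize and OR in INIT_OFFSET_MASK
 * when the section lives in the init region, e.g.
 *
 *	s->sh_entsize = get_offset(mod, &mod->init_size, s, i)
 *			| INIT_OFFSET_MASK;
 *
 * Consumers then test (sh_entsize & INIT_OFFSET_MASK) to tell init
 * sections from core sections.
 */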
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
/* Block module loading/unloading? */
int modules_disabled = 0;

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
int register_module_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
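/*
 * Illustrative (hypothetical) notifier user: a callback registered here
 * is invoked with a MODULE_STATE_* action and the affected struct module
 * as data, e.g.
 *
 *	static int my_module_event(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_module_event,
 *	};
 *	register_module_notifier(&my_nb);
 *
 * (my_module_event/my_nb are placeholder names, not part of this file.)
 */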
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};
/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag)
{
	add_taint(flag);
	mod->taints |= (1U << flag);
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}
193 /* Find a module section, or NULL. */
194 static void *section_addr(const struct load_info *info, const char *name)
196 /* Section 0 has sh_addr 0. */
197 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
200 /* Find a module section, or NULL. Fill in number of "objects" in section. */
201 static void *section_objs(const struct load_info *info,
206 unsigned int sec = find_sec(info, name);
208 /* Section 0 has sh_addr 0 and sh_size 0. */
209 *num = info->sechdrs[sec].sh_size / object_size;
210 return (void *)info->sechdrs[sec].sh_addr;
213 /* Provided by the linker */
214 extern const struct kernel_symbol __start___ksymtab[];
215 extern const struct kernel_symbol __stop___ksymtab[];
216 extern const struct kernel_symbol __start___ksymtab_gpl[];
217 extern const struct kernel_symbol __stop___ksymtab_gpl[];
218 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
219 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
220 extern const unsigned long __start___kcrctab[];
221 extern const unsigned long __start___kcrctab_gpl[];
222 extern const unsigned long __start___kcrctab_gpl_future[];
223 #ifdef CONFIG_UNUSED_SYMBOLS
224 extern const struct kernel_symbol __start___ksymtab_unused[];
225 extern const struct kernel_symbol __stop___ksymtab_unused[];
226 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
227 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
228 extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
238 static bool each_symbol_in_section(const struct symsearch *arr,
239 unsigned int arrsize,
240 struct module *owner,
241 bool (*fn)(const struct symsearch *syms,
242 struct module *owner,
248 for (j = 0; j < arrsize; j++) {
249 if (fn(&arr[j], owner, data))
256 /* Returns true as soon as fn returns true, otherwise false. */
257 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
258 struct module *owner,
263 static const struct symsearch arr[] = {
264 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
265 NOT_GPL_ONLY, false },
266 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
267 __start___kcrctab_gpl,
269 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
270 __start___kcrctab_gpl_future,
271 WILL_BE_GPL_ONLY, false },
272 #ifdef CONFIG_UNUSED_SYMBOLS
273 { __start___ksymtab_unused, __stop___ksymtab_unused,
274 __start___kcrctab_unused,
275 NOT_GPL_ONLY, true },
276 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
277 __start___kcrctab_unused_gpl,
282 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
285 list_for_each_entry_rcu(mod, &modules, list) {
286 struct symsearch arr[] = {
287 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
288 NOT_GPL_ONLY, false },
289 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
292 { mod->gpl_future_syms,
293 mod->gpl_future_syms + mod->num_gpl_future_syms,
294 mod->gpl_future_crcs,
295 WILL_BE_GPL_ONLY, false },
296 #ifdef CONFIG_UNUSED_SYMBOLS
298 mod->unused_syms + mod->num_unused_syms,
300 NOT_GPL_ONLY, true },
301 { mod->unused_gpl_syms,
302 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
303 mod->unused_gpl_crcs,
308 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
313 EXPORT_SYMBOL_GPL(each_symbol_section);
315 struct find_symbol_arg {
322 struct module *owner;
323 const unsigned long *crc;
324 const struct kernel_symbol *sym;
327 static bool check_symbol(const struct symsearch *syms,
328 struct module *owner,
329 unsigned int symnum, void *data)
331 struct find_symbol_arg *fsa = data;
334 if (syms->licence == GPL_ONLY)
336 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
337 printk(KERN_WARNING "Symbol %s is being used "
338 "by a non-GPL module, which will not "
339 "be allowed in the future\n", fsa->name);
340 printk(KERN_WARNING "Please see the file "
341 "Documentation/feature-removal-schedule.txt "
342 "in the kernel source tree for more details.\n");
346 #ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate if this is the right api to use and "
		       "if it really is, submit a report to the linux kernel "
		       "mailing list together with submitting your code for "
		       "inclusion.\n");
	}
#endif
361 fsa->crc = symversion(syms->crcs, symnum);
362 fsa->sym = &syms->start[symnum];
366 static int cmp_name(const void *va, const void *vb)
369 const struct kernel_symbol *b;
371 return strcmp(a, b->name);
374 static bool find_symbol_in_section(const struct symsearch *syms,
375 struct module *owner,
378 struct find_symbol_arg *fsa = data;
379 struct kernel_symbol *sym;
381 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
382 sizeof(struct kernel_symbol), cmp_name);
384 if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
390 /* Find a symbol and return it, along with, (optional) crc and
391 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
392 const struct kernel_symbol *find_symbol(const char *name,
393 struct module **owner,
394 const unsigned long **crc,
398 struct find_symbol_arg fsa;
404 if (each_symbol_section(find_symbol_in_section, &fsa)) {
412 DEBUGP("Failed to find symbol %s\n", name);
415 EXPORT_SYMBOL_GPL(find_symbol);
417 /* Search for module by name: must hold module_mutex. */
418 struct module *find_module(const char *name)
422 list_for_each_entry(mod, &modules, list) {
423 if (strcmp(mod->name, name) == 0)
428 EXPORT_SYMBOL_GPL(find_module);
432 static inline void __percpu *mod_percpu(struct module *mod)
437 static int percpu_modalloc(struct module *mod,
438 unsigned long size, unsigned long align)
440 if (align > PAGE_SIZE) {
441 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
442 mod->name, align, PAGE_SIZE);
446 mod->percpu = __alloc_reserved_percpu(size, align);
449 "%s: Could not allocate %lu bytes percpu data\n",
453 mod->percpu_size = size;
457 static void percpu_modfree(struct module *mod)
459 free_percpu(mod->percpu);
462 static unsigned int find_pcpusec(struct load_info *info)
464 return find_sec(info, ".data..percpu");
467 static void percpu_modcopy(struct module *mod,
468 const void *from, unsigned long size)
472 for_each_possible_cpu(cpu)
473 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
477 * is_module_percpu_address - test whether address is from module static percpu
478 * @addr: address to test
480 * Test whether @addr belongs to module static percpu area.
483 * %true if @addr is from module static percpu area
485 bool is_module_percpu_address(unsigned long addr)
492 list_for_each_entry_rcu(mod, &modules, list) {
493 if (!mod->percpu_size)
495 for_each_possible_cpu(cpu) {
496 void *start = per_cpu_ptr(mod->percpu, cpu);
498 if ((void *)addr >= start &&
499 (void *)addr < start + mod->percpu_size) {
510 #else /* ... !CONFIG_SMP */
512 static inline void __percpu *mod_percpu(struct module *mod)
516 static inline int percpu_modalloc(struct module *mod,
517 unsigned long size, unsigned long align)
521 static inline void percpu_modfree(struct module *mod)
524 static unsigned int find_pcpusec(struct load_info *info)
528 static inline void percpu_modcopy(struct module *mod,
529 const void *from, unsigned long size)
531 /* pcpusec should be 0, and size of that section should be 0. */
534 bool is_module_percpu_address(unsigned long addr)
539 #endif /* CONFIG_SMP */
541 #define MODINFO_ATTR(field) \
542 static void setup_modinfo_##field(struct module *mod, const char *s) \
544 mod->field = kstrdup(s, GFP_KERNEL); \
546 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
547 struct module_kobject *mk, char *buffer) \
549 return sprintf(buffer, "%s\n", mk->mod->field); \
551 static int modinfo_##field##_exists(struct module *mod) \
553 return mod->field != NULL; \
555 static void free_modinfo_##field(struct module *mod) \
560 static struct module_attribute modinfo_##field = { \
561 .attr = { .name = __stringify(field), .mode = 0444 }, \
562 .show = show_modinfo_##field, \
563 .setup = setup_modinfo_##field, \
564 .test = modinfo_##field##_exists, \
565 .free = free_modinfo_##field, \
568 MODINFO_ATTR(version);
569 MODINFO_ATTR(srcversion);
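/*
 * For reference, MODINFO_ATTR(version) above expands (roughly) to
 * setup_modinfo_version(), show_modinfo_version(),
 * modinfo_version_exists(), free_modinfo_version() and a
 * struct module_attribute modinfo_version whose sysfs name is "version";
 * likewise for srcversion.
 */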
571 static char last_unloaded_module[MODULE_NAME_LEN+1];
573 #ifdef CONFIG_MODULE_UNLOAD
575 EXPORT_TRACEPOINT_SYMBOL(module_get);
577 /* Init the unload section of the module. */
578 static int module_unload_init(struct module *mod)
580 mod->refptr = alloc_percpu(struct module_ref);
584 INIT_LIST_HEAD(&mod->source_list);
585 INIT_LIST_HEAD(&mod->target_list);
587 /* Hold reference count during initialization. */
588 __this_cpu_write(mod->refptr->incs, 1);
589 /* Backwards compatibility macros put refcount during init. */
590 mod->waiter = current;
595 /* Does a already use b? */
596 static int already_uses(struct module *a, struct module *b)
598 struct module_use *use;
600 list_for_each_entry(use, &b->source_list, source_list) {
601 if (use->source == a) {
602 DEBUGP("%s uses %s!\n", a->name, b->name);
606 DEBUGP("%s does not use %s!\n", a->name, b->name);
612 * - we add 'a' as a "source", 'b' as a "target" of module use
613 * - the module_use is added to the list of 'b' sources (so
614 * 'b' can walk the list to see who sourced them), and of 'a'
615 * targets (so 'a' can see what modules it targets).
617 static int add_module_usage(struct module *a, struct module *b)
619 struct module_use *use;
621 DEBUGP("Allocating new usage for %s.\n", a->name);
622 use = kmalloc(sizeof(*use), GFP_ATOMIC);
624 printk(KERN_WARNING "%s: out of memory loading\n", a->name);
630 list_add(&use->source_list, &b->source_list);
631 list_add(&use->target_list, &a->target_list);
635 /* Module a uses b: caller needs module_mutex() */
636 int ref_module(struct module *a, struct module *b)
640 if (b == NULL || already_uses(a, b))
643 /* If module isn't available, we fail. */
644 err = strong_try_module_get(b);
648 err = add_module_usage(a, b);
655 EXPORT_SYMBOL_GPL(ref_module);
657 /* Clear the unload stuff of the module. */
658 static void module_unload_free(struct module *mod)
660 struct module_use *use, *tmp;
662 mutex_lock(&module_mutex);
663 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
664 struct module *i = use->target;
665 DEBUGP("%s unusing %s\n", mod->name, i->name);
667 list_del(&use->source_list);
668 list_del(&use->target_list);
671 mutex_unlock(&module_mutex);
673 free_percpu(mod->refptr);
676 #ifdef CONFIG_MODULE_FORCE_UNLOAD
677 static inline int try_force_unload(unsigned int flags)
679 int ret = (flags & O_TRUNC);
681 add_taint(TAINT_FORCED_RMMOD);
685 static inline int try_force_unload(unsigned int flags)
689 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
698 /* Whole machine is stopped with interrupts off when this runs. */
699 static int __try_stop_module(void *_sref)
701 struct stopref *sref = _sref;
703 /* If it's not unused, quit unless we're forcing. */
704 if (module_refcount(sref->mod) != 0) {
705 if (!(*sref->forced = try_force_unload(sref->flags)))
709 /* Mark it as dying. */
710 sref->mod->state = MODULE_STATE_GOING;
714 static int try_stop_module(struct module *mod, int flags, int *forced)
716 if (flags & O_NONBLOCK) {
717 struct stopref sref = { mod, flags, forced };
719 return stop_machine(__try_stop_module, &sref, NULL);
721 /* We don't need to stop the machine for this. */
722 mod->state = MODULE_STATE_GOING;
728 unsigned int module_refcount(struct module *mod)
730 unsigned int incs = 0, decs = 0;
733 for_each_possible_cpu(cpu)
734 decs += per_cpu_ptr(mod->refptr, cpu)->decs;
736 * ensure the incs are added up after the decs.
737 * module_put ensures incs are visible before decs with smp_wmb.
739 * This 2-count scheme avoids the situation where the refcount
740 * for CPU0 is read, then CPU0 increments the module refcount,
741 * then CPU1 drops that refcount, then the refcount for CPU1 is
742 * read. We would record a decrement but not its corresponding
743 * increment so we would see a low count (disaster).
745 * Rare situation? But module_refcount can be preempted, and we
746 * might be tallying up 4096+ CPUs. So it is not impossible.
749 for_each_possible_cpu(cpu)
750 incs += per_cpu_ptr(mod->refptr, cpu)->incs;
753 EXPORT_SYMBOL(module_refcount);
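/*
 * Concrete interleaving of the race described above (illustrative):
 *
 *	reader: reads CPU0's counters            (inc not there yet)
 *	CPU0:   try_module_get()  -> CPU0.incs++
 *	CPU1:   module_put()      -> CPU1.decs++ (drops that reference)
 *	reader: reads CPU1's counters            (sees the dec)
 *
 * A single-pass sum would count the decrement but not its matching
 * increment, so a live module could appear to have refcount zero.
 * Summing all decs first and all incs afterwards (with module_put()'s
 * smp_wmb() making the inc visible before the dec) means any counted
 * dec has its inc counted too: the result can only err high, never low.
 */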
755 /* This exists whether we can unload or not */
756 static void free_module(struct module *mod);
758 static void wait_for_zero_refcount(struct module *mod)
760 /* Since we might sleep for some time, release the mutex first */
761 mutex_unlock(&module_mutex);
763 DEBUGP("Looking at refcount...\n");
764 set_current_state(TASK_UNINTERRUPTIBLE);
765 if (module_refcount(mod) == 0)
769 current->state = TASK_RUNNING;
770 mutex_lock(&module_mutex);
773 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
777 char name[MODULE_NAME_LEN];
780 if (!capable(CAP_SYS_MODULE) || modules_disabled)
783 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
785 name[MODULE_NAME_LEN-1] = '\0';
787 if (mutex_lock_interruptible(&module_mutex) != 0)
790 mod = find_module(name);
796 if (!list_empty(&mod->source_list)) {
797 /* Other modules depend on us: get rid of them first. */
802 /* Doing init or already dying? */
803 if (mod->state != MODULE_STATE_LIVE) {
804 /* FIXME: if (force), slam module count and wake up
806 DEBUGP("%s already dying\n", mod->name);
811 /* If it has an init func, it must have an exit func to unload */
812 if (mod->init && !mod->exit) {
813 forced = try_force_unload(flags);
815 /* This module can't be removed */
821 /* Set this up before setting mod->state */
822 mod->waiter = current;
824 /* Stop the machine so refcounts can't move and disable module. */
825 ret = try_stop_module(mod, flags, &forced);
829 /* Never wait if forced. */
830 if (!forced && module_refcount(mod) != 0)
831 wait_for_zero_refcount(mod);
833 mutex_unlock(&module_mutex);
834 /* Final destruction now no one is using it. */
835 if (mod->exit != NULL)
837 blocking_notifier_call_chain(&module_notify_list,
838 MODULE_STATE_GOING, mod);
839 async_synchronize_full();
841 /* Store the name of the last unloaded module for diagnostic purposes */
842 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
847 mutex_unlock(&module_mutex);
851 static inline void print_unload_info(struct seq_file *m, struct module *mod)
853 struct module_use *use;
854 int printed_something = 0;
856 seq_printf(m, " %u ", module_refcount(mod));
858 /* Always include a trailing , so userspace can differentiate
859 between this and the old multi-field proc format. */
860 list_for_each_entry(use, &mod->source_list, source_list) {
861 printed_something = 1;
862 seq_printf(m, "%s,", use->source->name);
865 if (mod->init != NULL && mod->exit == NULL) {
866 printed_something = 1;
867 seq_printf(m, "[permanent],");
870 if (!printed_something)
874 void __symbol_put(const char *symbol)
876 struct module *owner;
879 if (!find_symbol(symbol, &owner, NULL, true, false))
884 EXPORT_SYMBOL(__symbol_put);
886 /* Note this assumes addr is a function, which it currently always is. */
887 void symbol_put_addr(void *addr)
889 struct module *modaddr;
890 unsigned long a = (unsigned long)dereference_function_descriptor(addr);
892 if (core_kernel_text(a))
895 /* module_text_address is safe here: we're supposed to have reference
896 * to module from symbol_get, so it can't go away. */
897 modaddr = __module_text_address(a);
901 EXPORT_SYMBOL_GPL(symbol_put_addr);
903 static ssize_t show_refcnt(struct module_attribute *mattr,
904 struct module_kobject *mk, char *buffer)
906 return sprintf(buffer, "%u\n", module_refcount(mk->mod));
909 static struct module_attribute refcnt = {
910 .attr = { .name = "refcnt", .mode = 0444 },
914 void module_put(struct module *module)
918 smp_wmb(); /* see comment in module_refcount */
919 __this_cpu_inc(module->refptr->decs);
921 trace_module_put(module, _RET_IP_);
922 /* Maybe they're waiting for us to drop reference? */
923 if (unlikely(!module_is_live(module)))
924 wake_up_process(module->waiter);
928 EXPORT_SYMBOL(module_put);
930 #else /* !CONFIG_MODULE_UNLOAD */
931 static inline void print_unload_info(struct seq_file *m, struct module *mod)
933 /* We don't know the usage count, or what modules are using. */
934 seq_printf(m, " - -");
937 static inline void module_unload_free(struct module *mod)
941 int ref_module(struct module *a, struct module *b)
943 return strong_try_module_get(b);
945 EXPORT_SYMBOL_GPL(ref_module);
947 static inline int module_unload_init(struct module *mod)
951 #endif /* CONFIG_MODULE_UNLOAD */
953 static ssize_t show_initstate(struct module_attribute *mattr,
954 struct module_kobject *mk, char *buffer)
956 const char *state = "unknown";
958 switch (mk->mod->state) {
959 case MODULE_STATE_LIVE:
962 case MODULE_STATE_COMING:
965 case MODULE_STATE_GOING:
969 return sprintf(buffer, "%s\n", state);
972 static struct module_attribute initstate = {
973 .attr = { .name = "initstate", .mode = 0444 },
974 .show = show_initstate,
977 static ssize_t store_uevent(struct module_attribute *mattr,
978 struct module_kobject *mk,
979 const char *buffer, size_t count)
981 enum kobject_action action;
983 if (kobject_action_type(buffer, count, &action) == 0)
984 kobject_uevent(&mk->kobj, action);
988 struct module_attribute module_uevent = {
989 .attr = { .name = "uevent", .mode = 0200 },
990 .store = store_uevent,
993 static struct module_attribute *modinfo_attrs[] = {
998 #ifdef CONFIG_MODULE_UNLOAD
1004 static const char vermagic[] = VERMAGIC_STRING;
1006 static int try_to_force_load(struct module *mod, const char *reason)
1008 #ifdef CONFIG_MODULE_FORCE_LOAD
1009 if (!test_taint(TAINT_FORCED_MODULE))
1010 printk(KERN_WARNING "%s: %s: kernel tainted.\n",
1012 add_taint_module(mod, TAINT_FORCED_MODULE);
1019 #ifdef CONFIG_MODVERSIONS
1020 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1021 static unsigned long maybe_relocated(unsigned long crc,
1022 const struct module *crc_owner)
1024 #ifdef ARCH_RELOCATES_KCRCTAB
1025 if (crc_owner == NULL)
1026 return crc - (unsigned long)reloc_start;
1031 static int check_version(Elf_Shdr *sechdrs,
1032 unsigned int versindex,
1033 const char *symname,
1035 const unsigned long *crc,
1036 const struct module *crc_owner)
1038 unsigned int i, num_versions;
1039 struct modversion_info *versions;
1041 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1045 /* No versions at all? modprobe --force does this. */
1047 return try_to_force_load(mod, symname) == 0;
1049 versions = (void *) sechdrs[versindex].sh_addr;
1050 num_versions = sechdrs[versindex].sh_size
1051 / sizeof(struct modversion_info);
1053 for (i = 0; i < num_versions; i++) {
1054 if (strcmp(versions[i].name, symname) != 0)
1057 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1059 DEBUGP("Found checksum %lX vs module %lX\n",
1060 maybe_relocated(*crc, crc_owner), versions[i].crc);
1064 printk(KERN_WARNING "%s: no symbol version for %s\n",
1065 mod->name, symname);
1069 printk("%s: disagrees about version of symbol %s\n",
1070 mod->name, symname);
1074 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1075 unsigned int versindex,
1078 const unsigned long *crc;
1080 /* Since this should be found in kernel (which can't be removed),
1081 * no locking is necessary. */
1082 if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
1085 return check_version(sechdrs, versindex, "module_layout", mod, crc,
1089 /* First part is kernel version, which we ignore if module has crcs. */
1090 static inline int same_magic(const char *amagic, const char *bmagic,
1094 amagic += strcspn(amagic, " ");
1095 bmagic += strcspn(bmagic, " ");
1097 return strcmp(amagic, bmagic) == 0;
1100 static inline int check_version(Elf_Shdr *sechdrs,
1101 unsigned int versindex,
1102 const char *symname,
1104 const unsigned long *crc,
1105 const struct module *crc_owner)
1110 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1111 unsigned int versindex,
1117 static inline int same_magic(const char *amagic, const char *bmagic,
1120 return strcmp(amagic, bmagic) == 0;
1122 #endif /* CONFIG_MODVERSIONS */
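/*
 * Example (hypothetical string): a vermagic such as
 * "3.2.0 SMP mod_unload modversions " starts with the kernel release;
 * when both sides carry CRCs, same_magic() skips that first token and
 * only the remaining flags must match.
 */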
1124 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1125 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1126 const struct load_info *info,
1130 struct module *owner;
1131 const struct kernel_symbol *sym;
1132 const unsigned long *crc;
1135 mutex_lock(&module_mutex);
1136 sym = find_symbol(name, &owner, &crc,
1137 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1141 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
1143 sym = ERR_PTR(-EINVAL);
1147 err = ref_module(mod, owner);
1154 /* We must make copy under the lock if we failed to get ref. */
1155 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1157 mutex_unlock(&module_mutex);
1161 static const struct kernel_symbol *
1162 resolve_symbol_wait(struct module *mod,
1163 const struct load_info *info,
1166 const struct kernel_symbol *ksym;
1167 char owner[MODULE_NAME_LEN];
1169 if (wait_event_interruptible_timeout(module_wq,
1170 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1171 || PTR_ERR(ksym) != -EBUSY,
1173 printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
1180 * /sys/module/foo/sections stuff
1181 * J. Corbet <corbet@lwn.net>
1185 #ifdef CONFIG_KALLSYMS
1186 static inline bool sect_empty(const Elf_Shdr *sect)
1188 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1191 struct module_sect_attr
1193 struct module_attribute mattr;
1195 unsigned long address;
1198 struct module_sect_attrs
1200 struct attribute_group grp;
1201 unsigned int nsections;
1202 struct module_sect_attr attrs[0];
1205 static ssize_t module_sect_show(struct module_attribute *mattr,
1206 struct module_kobject *mk, char *buf)
1208 struct module_sect_attr *sattr =
1209 container_of(mattr, struct module_sect_attr, mattr);
1210 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1213 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1215 unsigned int section;
1217 for (section = 0; section < sect_attrs->nsections; section++)
1218 kfree(sect_attrs->attrs[section].name);
1222 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1224 unsigned int nloaded = 0, i, size[2];
1225 struct module_sect_attrs *sect_attrs;
1226 struct module_sect_attr *sattr;
1227 struct attribute **gattr;
1229 /* Count loaded sections and allocate structures */
1230 for (i = 0; i < info->hdr->e_shnum; i++)
1231 if (!sect_empty(&info->sechdrs[i]))
1233 size[0] = ALIGN(sizeof(*sect_attrs)
1234 + nloaded * sizeof(sect_attrs->attrs[0]),
1235 sizeof(sect_attrs->grp.attrs[0]));
1236 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1237 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1238 if (sect_attrs == NULL)
1241 /* Setup section attributes. */
1242 sect_attrs->grp.name = "sections";
1243 sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1245 sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
1248 for (i = 0; i < info->hdr->e_shnum; i++) {
1249 Elf_Shdr *sec = &info->sechdrs[i];
1250 if (sect_empty(sec))
1252 sattr->address = sec->sh_addr;
1253 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1255 if (sattr->name == NULL)
1257 sect_attrs->nsections++;
1258 sysfs_attr_init(&sattr->mattr.attr);
1259 sattr->mattr.show = module_sect_show;
1260 sattr->mattr.store = NULL;
1261 sattr->mattr.attr.name = sattr->name;
1262 sattr->mattr.attr.mode = S_IRUGO;
1263 *(gattr++) = &(sattr++)->mattr.attr;
	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1270 mod->sect_attrs = sect_attrs;
1273 free_sect_attrs(sect_attrs);
1276 static void remove_sect_attrs(struct module *mod)
1278 if (mod->sect_attrs) {
1279 sysfs_remove_group(&mod->mkobj.kobj,
1280 &mod->sect_attrs->grp);
1281 /* We are positive that no one is using any sect attrs
1282 * at this point. Deallocate immediately. */
1283 free_sect_attrs(mod->sect_attrs);
1284 mod->sect_attrs = NULL;
1289 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1292 struct module_notes_attrs {
1293 struct kobject *dir;
1295 struct bin_attribute attrs[0];
1298 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1299 struct bin_attribute *bin_attr,
1300 char *buf, loff_t pos, size_t count)
1303 * The caller checked the pos and count against our size.
1305 memcpy(buf, bin_attr->private + pos, count);
1309 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1312 if (notes_attrs->dir) {
1314 sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
1316 kobject_put(notes_attrs->dir);
1321 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1323 unsigned int notes, loaded, i;
1324 struct module_notes_attrs *notes_attrs;
1325 struct bin_attribute *nattr;
1327 /* failed to create section attributes, so can't create notes */
1328 if (!mod->sect_attrs)
1331 /* Count notes sections and allocate structures. */
1333 for (i = 0; i < info->hdr->e_shnum; i++)
1334 if (!sect_empty(&info->sechdrs[i]) &&
1335 (info->sechdrs[i].sh_type == SHT_NOTE))
1341 notes_attrs = kzalloc(sizeof(*notes_attrs)
1342 + notes * sizeof(notes_attrs->attrs[0]),
1344 if (notes_attrs == NULL)
1347 notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
1349 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1350 if (sect_empty(&info->sechdrs[i]))
1352 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1353 sysfs_bin_attr_init(nattr);
1354 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1355 nattr->attr.mode = S_IRUGO;
1356 nattr->size = info->sechdrs[i].sh_size;
1357 nattr->private = (void *) info->sechdrs[i].sh_addr;
1358 nattr->read = module_notes_read;
1364 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1365 if (!notes_attrs->dir)
1368 for (i = 0; i < notes; ++i)
1369 if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
1373 mod->notes_attrs = notes_attrs;
1377 free_notes_attrs(notes_attrs, i);
1380 static void remove_notes_attrs(struct module *mod)
1382 if (mod->notes_attrs)
1383 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1388 static inline void add_sect_attrs(struct module *mod,
1389 const struct load_info *info)
1393 static inline void remove_sect_attrs(struct module *mod)
1397 static inline void add_notes_attrs(struct module *mod,
1398 const struct load_info *info)
1402 static inline void remove_notes_attrs(struct module *mod)
1405 #endif /* CONFIG_KALLSYMS */
1407 static void add_usage_links(struct module *mod)
1409 #ifdef CONFIG_MODULE_UNLOAD
1410 struct module_use *use;
1413 mutex_lock(&module_mutex);
1414 list_for_each_entry(use, &mod->target_list, target_list) {
1415 nowarn = sysfs_create_link(use->target->holders_dir,
1416 &mod->mkobj.kobj, mod->name);
1418 mutex_unlock(&module_mutex);
1422 static void del_usage_links(struct module *mod)
1424 #ifdef CONFIG_MODULE_UNLOAD
1425 struct module_use *use;
1427 mutex_lock(&module_mutex);
1428 list_for_each_entry(use, &mod->target_list, target_list)
1429 sysfs_remove_link(use->target->holders_dir, mod->name);
1430 mutex_unlock(&module_mutex);
1434 static int module_add_modinfo_attrs(struct module *mod)
1436 struct module_attribute *attr;
1437 struct module_attribute *temp_attr;
1441 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1442 (ARRAY_SIZE(modinfo_attrs) + 1)),
1444 if (!mod->modinfo_attrs)
1447 temp_attr = mod->modinfo_attrs;
1448 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1450 (attr->test && attr->test(mod))) {
1451 memcpy(temp_attr, attr, sizeof(*temp_attr));
1452 sysfs_attr_init(&temp_attr->attr);
1453 error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
1460 static void module_remove_modinfo_attrs(struct module *mod)
1462 struct module_attribute *attr;
1465 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1466 /* pick a field to test for end of list */
1467 if (!attr->attr.name)
1469 sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
1473 kfree(mod->modinfo_attrs);
1476 static int mod_sysfs_init(struct module *mod)
1479 struct kobject *kobj;
1481 if (!module_sysfs_initialized) {
1482 printk(KERN_ERR "%s: module sysfs not initialized\n",
1488 kobj = kset_find_obj(module_kset, mod->name);
1490 printk(KERN_ERR "%s: module is already loaded\n", mod->name);
1496 mod->mkobj.mod = mod;
1498 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1499 mod->mkobj.kobj.kset = module_kset;
1500 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1503 kobject_put(&mod->mkobj.kobj);
1505 /* delay uevent until full sysfs population */
1510 static int mod_sysfs_setup(struct module *mod,
1511 const struct load_info *info,
1512 struct kernel_param *kparam,
1513 unsigned int num_params)
1517 err = mod_sysfs_init(mod);
1521 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1522 if (!mod->holders_dir) {
1527 err = module_param_sysfs_setup(mod, kparam, num_params);
1529 goto out_unreg_holders;
1531 err = module_add_modinfo_attrs(mod);
1533 goto out_unreg_param;
1535 add_usage_links(mod);
1536 add_sect_attrs(mod, info);
1537 add_notes_attrs(mod, info);
1539 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1543 module_param_sysfs_remove(mod);
1545 kobject_put(mod->holders_dir);
1547 kobject_put(&mod->mkobj.kobj);
1552 static void mod_sysfs_fini(struct module *mod)
1554 remove_notes_attrs(mod);
1555 remove_sect_attrs(mod);
1556 kobject_put(&mod->mkobj.kobj);
1559 #else /* !CONFIG_SYSFS */
1561 static int mod_sysfs_setup(struct module *mod,
1562 const struct load_info *info,
1563 struct kernel_param *kparam,
1564 unsigned int num_params)
1569 static void mod_sysfs_fini(struct module *mod)
1573 static void module_remove_modinfo_attrs(struct module *mod)
1577 static void del_usage_links(struct module *mod)
1581 #endif /* CONFIG_SYSFS */
1583 static void mod_sysfs_teardown(struct module *mod)
1585 del_usage_links(mod);
1586 module_remove_modinfo_attrs(mod);
1587 module_param_sysfs_remove(mod);
1588 kobject_put(mod->mkobj.drivers_dir);
1589 kobject_put(mod->holders_dir);
1590 mod_sysfs_fini(mod);
1594 * unlink the module with the whole machine is stopped with interrupts off
1595 * - this defends against kallsyms not taking locks
1597 static int __unlink_module(void *_mod)
1599 struct module *mod = _mod;
1600 list_del(&mod->list);
1601 module_bug_cleanup(mod);
1605 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1607 * LKM RO/NX protection: protect module's text/ro-data
1608 * from modification and any data from execution.
1610 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
1612 unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
1613 unsigned long end_pfn = PFN_DOWN((unsigned long)end);
1615 if (end_pfn > begin_pfn)
1616 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1619 static void set_section_ro_nx(void *base,
1620 unsigned long text_size,
1621 unsigned long ro_size,
1622 unsigned long total_size)
1624 /* begin and end PFNs of the current subsection */
1625 unsigned long begin_pfn;
1626 unsigned long end_pfn;
1629 * Set RO for module text and RO-data:
1630 * - Always protect first page.
1631 * - Do not protect last partial page.
1634 set_page_attributes(base, base + ro_size, set_memory_ro);
1637 * Set NX permissions for module data:
1638 * - Do not protect first partial page.
1639 * - Always protect last page.
1641 if (total_size > text_size) {
1642 begin_pfn = PFN_UP((unsigned long)base + text_size);
1643 end_pfn = PFN_UP((unsigned long)base + total_size);
1644 if (end_pfn > begin_pfn)
1645 set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1649 static void unset_module_core_ro_nx(struct module *mod)
1651 set_page_attributes(mod->module_core + mod->core_text_size,
1652 mod->module_core + mod->core_size,
1654 set_page_attributes(mod->module_core,
1655 mod->module_core + mod->core_ro_size,
1659 static void unset_module_init_ro_nx(struct module *mod)
1661 set_page_attributes(mod->module_init + mod->init_text_size,
1662 mod->module_init + mod->init_size,
1664 set_page_attributes(mod->module_init,
1665 mod->module_init + mod->init_ro_size,
1669 /* Iterate through all modules and set each module's text as RW */
1670 void set_all_modules_text_rw(void)
1674 mutex_lock(&module_mutex);
1675 list_for_each_entry_rcu(mod, &modules, list) {
1676 if ((mod->module_core) && (mod->core_text_size)) {
1677 set_page_attributes(mod->module_core,
1678 mod->module_core + mod->core_text_size,
1681 if ((mod->module_init) && (mod->init_text_size)) {
1682 set_page_attributes(mod->module_init,
1683 mod->module_init + mod->init_text_size,
1687 mutex_unlock(&module_mutex);
1690 /* Iterate through all modules and set each module's text as RO */
1691 void set_all_modules_text_ro(void)
1695 mutex_lock(&module_mutex);
1696 list_for_each_entry_rcu(mod, &modules, list) {
1697 if ((mod->module_core) && (mod->core_text_size)) {
1698 set_page_attributes(mod->module_core,
1699 mod->module_core + mod->core_text_size,
1702 if ((mod->module_init) && (mod->init_text_size)) {
1703 set_page_attributes(mod->module_init,
1704 mod->module_init + mod->init_text_size,
1708 mutex_unlock(&module_mutex);
1711 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
1712 static void unset_module_core_ro_nx(struct module *mod) { }
1713 static void unset_module_init_ro_nx(struct module *mod) { }
1716 void __weak module_free(struct module *mod, void *module_region)
1718 vfree(module_region);
1721 void __weak module_arch_cleanup(struct module *mod)
1725 /* Free a module, remove from lists, etc. */
1726 static void free_module(struct module *mod)
1728 trace_module_free(mod);
1730 /* Delete from various lists */
1731 mutex_lock(&module_mutex);
1732 stop_machine(__unlink_module, mod, NULL);
1733 mutex_unlock(&module_mutex);
1734 mod_sysfs_teardown(mod);
1736 /* Remove dynamic debug info */
1737 ddebug_remove_module(mod->name);
1739 /* Arch-specific cleanup. */
1740 module_arch_cleanup(mod);
1742 /* Module unload stuff */
1743 module_unload_free(mod);
1745 /* Free any allocated parameters. */
1746 destroy_params(mod->kp, mod->num_kp);
1748 /* This may be NULL, but that's OK */
1749 unset_module_init_ro_nx(mod);
1750 module_free(mod, mod->module_init);
1752 percpu_modfree(mod);
1754 /* Free lock-classes: */
1755 lockdep_free_key_range(mod->module_core, mod->core_size);
1757 /* Finally, free the core (containing the module structure) */
1758 unset_module_core_ro_nx(mod);
1759 module_free(mod, mod->module_core);
1762 update_protections(current->mm);
1766 void *__symbol_get(const char *symbol)
1768 struct module *owner;
1769 const struct kernel_symbol *sym;
1772 sym = find_symbol(symbol, &owner, NULL, true, true);
1773 if (sym && strong_try_module_get(owner))
1777 return sym ? (void *)sym->value : NULL;
1779 EXPORT_SYMBOL_GPL(__symbol_get);
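/*
 * Usage sketch (not from this file): callers normally go through the
 * symbol_get()/symbol_put() wrappers, e.g.
 *
 *	int (*fn)(void) = symbol_get(some_exported_fn);
 *	if (fn) {
 *		fn();
 *		symbol_put(some_exported_fn);
 *	}
 *
 * where "some_exported_fn" is a placeholder for a symbol exported with
 * EXPORT_SYMBOL*().  __symbol_get() takes a strong module reference, so
 * the owner cannot unload until the matching symbol_put().
 */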
1782 * Ensure that an exported symbol [global namespace] does not already exist
1783 * in the kernel or in some other module's exported symbol table.
1785 * You must hold the module_mutex.
1787 static int verify_export_symbols(struct module *mod)
1790 struct module *owner;
1791 const struct kernel_symbol *s;
1793 const struct kernel_symbol *sym;
1796 { mod->syms, mod->num_syms },
1797 { mod->gpl_syms, mod->num_gpl_syms },
1798 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1799 #ifdef CONFIG_UNUSED_SYMBOLS
1800 { mod->unused_syms, mod->num_unused_syms },
1801 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1805 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1806 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1807 if (find_symbol(s->name, &owner, NULL, true, false)) {
1809 "%s: exports duplicate symbol %s"
1811 mod->name, s->name, module_name(owner));
1819 /* Change all symbols so that st_value encodes the pointer directly. */
1820 static int simplify_symbols(struct module *mod, const struct load_info *info)
1822 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1823 Elf_Sym *sym = (void *)symsec->sh_addr;
1824 unsigned long secbase;
1827 const struct kernel_symbol *ksym;
1829 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1830 const char *name = info->strtab + sym[i].st_name;
1832 switch (sym[i].st_shndx) {
1834 /* We compiled with -fno-common. These are not
1835 supposed to happen. */
1836 DEBUGP("Common symbol: %s\n", name);
1837 printk("%s: please compile with -fno-common\n",
1843 /* Don't need to do anything */
1844 DEBUGP("Absolute symbol: 0x%08lx\n",
1845 (long)sym[i].st_value);
1849 ksym = resolve_symbol_wait(mod, info, name);
1850 /* Ok if resolved. */
1851 if (ksym && !IS_ERR(ksym)) {
1852 sym[i].st_value = ksym->value;
1857 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
1860 printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
1861 mod->name, name, PTR_ERR(ksym));
1862 ret = PTR_ERR(ksym) ?: -ENOENT;
1866 /* Divert to percpu allocation if a percpu var. */
1867 if (sym[i].st_shndx == info->index.pcpu)
1868 secbase = (unsigned long)mod_percpu(mod);
1870 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1871 sym[i].st_value += secbase;
1879 int __weak apply_relocate(Elf_Shdr *sechdrs,
1881 unsigned int symindex,
1882 unsigned int relsec,
1885 pr_err("module %s: REL relocation unsupported\n", me->name);
1889 int __weak apply_relocate_add(Elf_Shdr *sechdrs,
1891 unsigned int symindex,
1892 unsigned int relsec,
1895 pr_err("module %s: RELA relocation unsupported\n", me->name);
1899 static int apply_relocations(struct module *mod, const struct load_info *info)
1904 /* Now do relocations. */
1905 for (i = 1; i < info->hdr->e_shnum; i++) {
1906 unsigned int infosec = info->sechdrs[i].sh_info;
1908 /* Not a valid relocation section? */
1909 if (infosec >= info->hdr->e_shnum)
1912 /* Don't bother with non-allocated sections */
1913 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
1916 if (info->sechdrs[i].sh_type == SHT_REL)
1917 err = apply_relocate(info->sechdrs, info->strtab,
1918 info->index.sym, i, mod);
1919 else if (info->sechdrs[i].sh_type == SHT_RELA)
1920 err = apply_relocate_add(info->sechdrs, info->strtab,
1921 info->index.sym, i, mod);
1928 /* Additional bytes needed by arch in front of individual sections */
1929 unsigned int __weak arch_mod_section_prepend(struct module *mod,
1930 unsigned int section)
1932 /* default implementation just returns zero */
1936 /* Update size with this section: return offset. */
1937 static long get_offset(struct module *mod, unsigned int *size,
1938 Elf_Shdr *sechdr, unsigned int section)
1942 *size += arch_mod_section_prepend(mod, section);
1943 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
1944 *size = ret + sechdr->sh_size;
1948 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
1949 might -- code, read-only data, read-write data, small data. Tally
1950 sizes, and place the offsets into sh_entsize fields: high bit means it
1952 static void layout_sections(struct module *mod, struct load_info *info)
1954 static unsigned long const masks[][2] = {
1955 /* NOTE: all executable code must be the first section
1956 * in this array; otherwise modify the text_size
1957 * finder in the two loops below */
1958 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
1959 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
1960 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
1961 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
1965 for (i = 0; i < info->hdr->e_shnum; i++)
1966 info->sechdrs[i].sh_entsize = ~0UL;
1968 DEBUGP("Core section allocation order:\n");
1969 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
1970 for (i = 0; i < info->hdr->e_shnum; ++i) {
1971 Elf_Shdr *s = &info->sechdrs[i];
1972 const char *sname = info->secstrings + s->sh_name;
1974 if ((s->sh_flags & masks[m][0]) != masks[m][0]
1975 || (s->sh_flags & masks[m][1])
1976 || s->sh_entsize != ~0UL
1977 || strstarts(sname, ".init"))
1979 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			DEBUGP("\t%s\n", sname);
1983 case 0: /* executable */
1984 mod->core_size = debug_align(mod->core_size);
1985 mod->core_text_size = mod->core_size;
1987 case 1: /* RO: text and ro-data */
1988 mod->core_size = debug_align(mod->core_size);
1989 mod->core_ro_size = mod->core_size;
1991 case 3: /* whole core */
1992 mod->core_size = debug_align(mod->core_size);
1997 DEBUGP("Init section allocation order:\n");
1998 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
1999 for (i = 0; i < info->hdr->e_shnum; ++i) {
2000 Elf_Shdr *s = &info->sechdrs[i];
2001 const char *sname = info->secstrings + s->sh_name;
2003 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2004 || (s->sh_flags & masks[m][1])
2005 || s->sh_entsize != ~0UL
2006 || !strstarts(sname, ".init"))
2008 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2009 | INIT_OFFSET_MASK);
2010 DEBUGP("\t%s\n", sname);
2013 case 0: /* executable */
2014 mod->init_size = debug_align(mod->init_size);
2015 mod->init_text_size = mod->init_size;
2017 case 1: /* RO: text and ro-data */
2018 mod->init_size = debug_align(mod->init_size);
2019 mod->init_ro_size = mod->init_size;
2021 case 3: /* whole init */
2022 mod->init_size = debug_align(mod->init_size);
2028 static void set_license(struct module *mod, const char *license)
2031 license = "unspecified";
2033 if (!license_is_gpl_compatible(license)) {
2034 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2035 printk(KERN_WARNING "%s: module license '%s' taints "
2036 "kernel.\n", mod->name, license);
2037 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2041 /* Parse tag=value strings from .modinfo section */
2042 static char *next_string(char *string, unsigned long *secsize)
2044 /* Skip non-zero chars */
2047 if ((*secsize)-- <= 1)
2051 /* Skip any zero padding. */
2052 while (!string[0]) {
2054 if ((*secsize)-- <= 1)
2060 static char *get_modinfo(struct load_info *info, const char *tag)
2063 unsigned int taglen = strlen(tag);
2064 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2065 unsigned long size = infosec->sh_size;
2067 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2068 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2069 return p + taglen + 1;
2074 static void setup_modinfo(struct module *mod, struct load_info *info)
2076 struct module_attribute *attr;
2079 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2081 attr->setup(mod, get_modinfo(info, attr->attr.name));
2085 static void free_modinfo(struct module *mod)
2087 struct module_attribute *attr;
2090 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2096 #ifdef CONFIG_KALLSYMS
2098 /* lookup symbol in given range of kernel_symbols */
2099 static const struct kernel_symbol *lookup_symbol(const char *name,
2100 const struct kernel_symbol *start,
2101 const struct kernel_symbol *stop)
2103 return bsearch(name, start, stop - start,
2104 sizeof(struct kernel_symbol), cmp_name);
2107 static int is_exported(const char *name, unsigned long value,
2108 const struct module *mod)
2110 const struct kernel_symbol *ks;
2112 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2114 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2115 return ks != NULL && ks->value == value;
2119 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2121 const Elf_Shdr *sechdrs = info->sechdrs;
2123 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2124 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2129 if (sym->st_shndx == SHN_UNDEF)
2131 if (sym->st_shndx == SHN_ABS)
2133 if (sym->st_shndx >= SHN_LORESERVE)
2135 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2137 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2138 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2139 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2141 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2146 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2147 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2152 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2159 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2162 const Elf_Shdr *sec;
2164 if (src->st_shndx == SHN_UNDEF
2165 || src->st_shndx >= shnum
2169 sec = sechdrs + src->st_shndx;
2170 if (!(sec->sh_flags & SHF_ALLOC)
2171 #ifndef CONFIG_KALLSYMS_ALL
2172 || !(sec->sh_flags & SHF_EXECINSTR)
2174 || (sec->sh_entsize & INIT_OFFSET_MASK))
2181 * We only allocate and copy the strings needed by the parts of symtab
2182 * we keep. This is simple, but has the effect of making multiple
2183 * copies of duplicates. We could be more sophisticated, see
2184 * linux-kernel thread starting with
2185 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2187 static void layout_symtab(struct module *mod, struct load_info *info)
2189 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2190 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2192 unsigned int i, nsrc, ndst, strtab_size;
2194 /* Put symbol section at end of init part of module. */
2195 symsect->sh_flags |= SHF_ALLOC;
2196 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2197 info->index.sym) | INIT_OFFSET_MASK;
2198 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
2200 src = (void *)info->hdr + symsect->sh_offset;
2201 nsrc = symsect->sh_size / sizeof(*src);
2203 /* Compute total space required for the core symbols' strtab. */
2204 for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src)
2205 if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
2206 strtab_size += strlen(&info->strtab[src->st_name]) + 1;
2210 /* Append room for core symbols at end of core part. */
2211 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2212 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2213 mod->core_size += strtab_size;
2215 /* Put string table section at end of init part of module. */
2216 strsect->sh_flags |= SHF_ALLOC;
2217 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2218 info->index.str) | INIT_OFFSET_MASK;
2219 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
2222 static void add_kallsyms(struct module *mod, const struct load_info *info)
2224 unsigned int i, ndst;
2228 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2230 mod->symtab = (void *)symsec->sh_addr;
2231 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2232 /* Make sure we get permanent strtab: don't use info->strtab. */
2233 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2235 /* Set types up while we still have access to sections. */
2236 for (i = 0; i < mod->num_symtab; i++)
2237 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2239 mod->core_symtab = dst = mod->module_core + info->symoffs;
2240 mod->core_strtab = s = mod->module_core + info->stroffs;
2244 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
2245 if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
2249 dst[ndst++].st_name = s - mod->core_strtab;
2250 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
2252 mod->core_num_syms = ndst;
2255 static inline void layout_symtab(struct module *mod, struct load_info *info)
2259 static void add_kallsyms(struct module *mod, const struct load_info *info)
2262 #endif /* CONFIG_KALLSYMS */
2264 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2268 #ifdef CONFIG_DYNAMIC_DEBUG
2269 if (ddebug_add_module(debug, num, debug->modname))
2270 printk(KERN_ERR "dynamic debug error adding module: %s\n",
2275 static void dynamic_debug_remove(struct _ddebug *debug)
2278 ddebug_remove_module(debug->modname);
2281 void * __weak module_alloc(unsigned long size)
2283 return size == 0 ? NULL : vmalloc_exec(size);
2286 static void *module_alloc_update_bounds(unsigned long size)
2288 void *ret = module_alloc(size);
2291 mutex_lock(&module_mutex);
2292 /* Update module bounds. */
2293 if ((unsigned long)ret < module_addr_min)
2294 module_addr_min = (unsigned long)ret;
2295 if ((unsigned long)ret + size > module_addr_max)
2296 module_addr_max = (unsigned long)ret + size;
2297 mutex_unlock(&module_mutex);
2302 #ifdef CONFIG_DEBUG_KMEMLEAK
2303 static void kmemleak_load_module(const struct module *mod,
2304 const struct load_info *info)
2308 /* only scan the sections containing data */
2309 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2311 for (i = 1; i < info->hdr->e_shnum; i++) {
2312 const char *name = info->secstrings + info->sechdrs[i].sh_name;
2313 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
2315 if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
2318 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2319 info->sechdrs[i].sh_size, GFP_KERNEL);
2323 static inline void kmemleak_load_module(const struct module *mod,
2324 const struct load_info *info)
2329 /* Sets info->hdr and info->len. */
2330 static int copy_and_check(struct load_info *info,
2331 const void __user *umod, unsigned long len,
2332 const char __user *uargs)
2337 if (len < sizeof(*hdr))
2340 /* Suck in entire file: we'll want most of it. */
2341 /* vmalloc barfs on "unusual" numbers. Check here */
2342 if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
2345 if (copy_from_user(hdr, umod, len) != 0) {
2350 /* Sanity checks against insmoding binaries or wrong arch,
2351 weird elf version */
2352 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
2353 || hdr->e_type != ET_REL
2354 || !elf_check_arch(hdr)
2355 || hdr->e_shentsize != sizeof(Elf_Shdr)) {
2360 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
2374 static void free_copy(struct load_info *info)
2379 static int rewrite_section_headers(struct load_info *info)
2383 /* This should always be true, but let's be sure. */
2384 info->sechdrs[0].sh_addr = 0;
2386 for (i = 1; i < info->hdr->e_shnum; i++) {
2387 Elf_Shdr *shdr = &info->sechdrs[i];
2388 if (shdr->sh_type != SHT_NOBITS
2389 && info->len < shdr->sh_offset + shdr->sh_size) {
2390 printk(KERN_ERR "Module len %lu truncated\n",
2395 /* Mark all sections sh_addr with their address in the
2397 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2399 #ifndef CONFIG_MODULE_UNLOAD
2400 /* Don't load .exit sections */
2401 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2402 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2406 /* Track but don't keep modinfo and version sections. */
2407 info->index.vers = find_sec(info, "__versions");
2408 info->index.info = find_sec(info, ".modinfo");
2409 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2410 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2415 * Set up our basic convenience variables (pointers to section headers,
2416 * search for module section index etc), and do some basic section
2419 * Return the temporary module pointer (we'll replace it with the final
2420 * one when we move the module sections around).
2422 static struct module *setup_load_info(struct load_info *info)
2428 /* Set up the convenience variables */
2429 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2430 info->secstrings = (void *)info->hdr
2431 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2433 err = rewrite_section_headers(info);
2435 return ERR_PTR(err);
2437 /* Find internal symbols and strings. */
2438 for (i = 1; i < info->hdr->e_shnum; i++) {
2439 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2440 info->index.sym = i;
2441 info->index.str = info->sechdrs[i].sh_link;
2442 info->strtab = (char *)info->hdr
2443 + info->sechdrs[info->index.str].sh_offset;
2448 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2449 if (!info->index.mod) {
2450 printk(KERN_WARNING "No module found in object\n");
2451 return ERR_PTR(-ENOEXEC);
2453 /* This is temporary: point mod into copy of data. */
2454 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2456 if (info->index.sym == 0) {
2457 printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
2459 return ERR_PTR(-ENOEXEC);
2462 info->index.pcpu = find_pcpusec(info);
2464 /* Check module struct version now, before we try to use module. */
2465 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2466 return ERR_PTR(-ENOEXEC);
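/*
 * Check the .modinfo contents: verify vermagic against the running
 * kernel, apply the out-of-tree/staging taints and record the license.
 */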
2471 static int check_modinfo(struct module *mod, struct load_info *info)
2473 const char *modmagic = get_modinfo(info, "vermagic");
2476 /* This is allowed: modprobe --force will invalidate it. */
2478 err = try_to_force_load(mod, "bad vermagic");
2481 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2482 printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
2483 mod->name, modmagic, vermagic);
2487 if (!get_modinfo(info, "intree"))
2488 add_taint_module(mod, TAINT_OOT_MODULE);
2490 if (get_modinfo(info, "staging")) {
2491 add_taint_module(mod, TAINT_CRAP);
2492 printk(KERN_WARNING "%s: module is from the staging directory,"
2493 " the quality is unknown, you have been warned.\n",
2497 /* Set up license info based on the info section */
2498 set_license(mod, get_modinfo(info, "license"));
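/*
 * Locate the optional sections (exported symbol tables and CRCs,
 * tracing data, exception table, parameters, ...) and record their
 * addresses and element counts in the module structure.  A missing
 * section is harmless: it resolves to section 0, whose size is zero.
 */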
2503 static void find_module_sections(struct module *mod, struct load_info *info)
2505 mod->kp = section_objs(info, "__param",
2506 sizeof(*mod->kp), &mod->num_kp);
2507 mod->syms = section_objs(info, "__ksymtab",
2508 sizeof(*mod->syms), &mod->num_syms);
2509 mod->crcs = section_addr(info, "__kcrctab");
2510 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2511 sizeof(*mod->gpl_syms),
2512 &mod->num_gpl_syms);
2513 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2514 mod->gpl_future_syms = section_objs(info,
2515 "__ksymtab_gpl_future",
2516 sizeof(*mod->gpl_future_syms),
2517 &mod->num_gpl_future_syms);
2518 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2520 #ifdef CONFIG_UNUSED_SYMBOLS
2521 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2522 sizeof(*mod->unused_syms),
2523 &mod->num_unused_syms);
2524 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2525 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2526 sizeof(*mod->unused_gpl_syms),
2527 &mod->num_unused_gpl_syms);
2528 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2530 #ifdef CONFIG_CONSTRUCTORS
2531 mod->ctors = section_objs(info, ".ctors",
2532 sizeof(*mod->ctors), &mod->num_ctors);
2535 #ifdef CONFIG_TRACEPOINTS
2536 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2537 sizeof(*mod->tracepoints_ptrs),
2538 &mod->num_tracepoints);
2540 #ifdef HAVE_JUMP_LABEL
2541 mod->jump_entries = section_objs(info, "__jump_table",
2542 sizeof(*mod->jump_entries),
2543 &mod->num_jump_entries);
2545 #ifdef CONFIG_EVENT_TRACING
2546 mod->trace_events = section_objs(info, "_ftrace_events",
2547 sizeof(*mod->trace_events),
2548 &mod->num_trace_events);
2550 * This section contains pointers to allocated objects in the trace
2551 * code and not scanning it leads to false positives.
2553 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2554 mod->num_trace_events, GFP_KERNEL);
2556 #ifdef CONFIG_TRACING
2557 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2558 sizeof(*mod->trace_bprintk_fmt_start),
2559 &mod->num_trace_bprintk_fmt);
2561 * This section contains pointers to allocated objects in the trace
2562 * code and not scanning it leads to false positives.
2564 kmemleak_scan_area(mod->trace_bprintk_fmt_start,
2565 sizeof(*mod->trace_bprintk_fmt_start) *
2566 mod->num_trace_bprintk_fmt, GFP_KERNEL);
2568 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2569 /* sechdrs[0].sh_size is always zero */
2570 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2571 sizeof(*mod->ftrace_callsites),
2572 &mod->num_ftrace_callsites);
2575 mod->extable = section_objs(info, "__ex_table",
2576 sizeof(*mod->extable), &mod->num_exentries);
2578 if (section_addr(info, "__obsparm"))
2579 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2582 info->debug = section_objs(info, "__verbose",
2583 sizeof(*info->debug), &info->num_debug);
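/*
 * Allocate the final core and init regions, then copy every SHF_ALLOC
 * section into place and update sh_addr to its new location.
 */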
2586 static int move_module(struct module *mod, struct load_info *info)
2591 /* Do the allocs. */
2592 ptr = module_alloc_update_bounds(mod->core_size);
2594 * The pointer to this block is stored in the module structure
2595 * which is inside the block. Just mark it as not being a leak.
2598 kmemleak_not_leak(ptr);
2602 memset(ptr, 0, mod->core_size);
2603 mod->module_core = ptr;
2605 ptr = module_alloc_update_bounds(mod->init_size);
2607 * The pointer to this block is stored in the module structure
2608 * which is inside the block. This block doesn't need to be
2609 * scanned as it contains data and code that will be freed
2610 * after the module is initialized.
2612 kmemleak_ignore(ptr);
2613 if (!ptr && mod->init_size) {
2614 module_free(mod, mod->module_core);
2617 memset(ptr, 0, mod->init_size);
2618 mod->module_init = ptr;
2620 /* Transfer each section which specifies SHF_ALLOC */
2621 DEBUGP("final section addresses:\n");
2622 for (i = 0; i < info->hdr->e_shnum; i++) {
2624 Elf_Shdr *shdr = &info->sechdrs[i];
2626 if (!(shdr->sh_flags & SHF_ALLOC))
2629 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2630 dest = mod->module_init
2631 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2633 dest = mod->module_core + shdr->sh_entsize;
2635 if (shdr->sh_type != SHT_NOBITS)
2636 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2637 /* Update sh_addr to point to copy in image. */
2638 shdr->sh_addr = (unsigned long)dest;
2639 DEBUGP("\t0x%lx %s\n",
2640 shdr->sh_addr, info->secstrings + shdr->sh_name);
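/*
 * Apply license-related taints for known offenders and, when
 * CONFIG_MODVERSIONS is enabled, insist on a CRC table for every
 * exported symbol table unless the load is forced.
 */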
2646 static int check_module_license_and_versions(struct module *mod)
2649 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2650 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2651 * using GPL-only symbols it needs.
2653 if (strcmp(mod->name, "ndiswrapper") == 0)
2654 add_taint(TAINT_PROPRIETARY_MODULE);
2656 /* driverloader was caught wrongly pretending to be under GPL */
2657 if (strcmp(mod->name, "driverloader") == 0)
2658 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2660 #ifdef CONFIG_MODVERSIONS
2661 if ((mod->num_syms && !mod->crcs)
2662 || (mod->num_gpl_syms && !mod->gpl_crcs)
2663 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2664 #ifdef CONFIG_UNUSED_SYMBOLS
2665 || (mod->num_unused_syms && !mod->unused_crcs)
2666 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2669 return try_to_force_load(mod,
2670 "no versions for exported symbols");
2676 static void flush_module_icache(const struct module *mod)
2678 mm_segment_t old_fs;
2680 /* flush the icache in correct context */
2685 * Flush the instruction cache, since we've played with text.
2686 * Do it before processing of module parameters, so the module
2687 * can provide parameter accessor functions of its own.
2689 if (mod->module_init)
2690 flush_icache_range((unsigned long)mod->module_init,
2691 (unsigned long)mod->module_init
2693 flush_icache_range((unsigned long)mod->module_core,
2694 (unsigned long)mod->module_core + mod->core_size);
2699 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
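/*
 * Drive the layout phase: parse the section headers, validate modinfo,
 * let the architecture adjust sections, set up the per-cpu area, lay
 * out the core/init regions and finally move the module into them.
 */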
2707 static struct module *layout_and_allocate(struct load_info *info)
2709 /* Module within temporary copy. */
2714 mod = setup_load_info(info);
2718 err = check_modinfo(mod, info);
2720 return ERR_PTR(err);
2722 /* Allow arches to frob section contents and sizes. */
2723 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2724 info->secstrings, mod);
2728 pcpusec = &info->sechdrs[info->index.pcpu];
2729 if (pcpusec->sh_size) {
2730 /* We have a special allocation for this section. */
2731 err = percpu_modalloc(mod,
2732 pcpusec->sh_size, pcpusec->sh_addralign);
2735 pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
2738 /* Determine total sizes, and put offsets in sh_entsize. For now
2739 this is done generically; there don't appear to be any
2740 special cases for the architectures. */
2741 layout_sections(mod, info);
2742 layout_symtab(mod, info);
2744 /* Allocate and move to the final place */
2745 err = move_module(mod, info);
2749 /* Module has been copied to its final place now: return it. */
2750 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2751 kmemleak_load_module(mod, info);
2755 percpu_modfree(mod);
2757 return ERR_PTR(err);
2760 /* mod is no longer valid after this! */
2761 static void module_deallocate(struct module *mod, struct load_info *info)
2763 percpu_modfree(mod);
2764 module_free(mod, mod->module_init);
2765 module_free(mod, mod->module_core);
2768 int __weak module_finalize(const Elf_Ehdr *hdr,
2769 const Elf_Shdr *sechdrs,
2775 static int post_relocation(struct module *mod, const struct load_info *info)
2777 /* Sort the exception table now that relocations are done. */
2778 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2780 /* Copy relocated percpu area over. */
2781 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2782 info->sechdrs[info->index.pcpu].sh_size);
2784 /* Setup kallsyms-specific fields. */
2785 add_kallsyms(mod, info);
2787 /* Arch-specific module finalizing. */
2788 return module_finalize(info->hdr, info->sechdrs, mod);
2791 /* Allocate and load the module: note that size of section 0 is always
2792 zero, and we rely on this for optional sections. */
2793 static struct module *load_module(void __user *umod,
2795 const char __user *uargs)
2797 struct load_info info = { NULL, };
2801 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
2804 /* Copy in the blobs from userspace, check they are vaguely sane. */
2805 err = copy_and_check(&info, umod, len, uargs);
2807 return ERR_PTR(err);
2809 /* Figure out module layout, and allocate all the memory. */
2810 mod = layout_and_allocate(&info);
2816 /* Now module is in final location, initialize linked lists, etc. */
2817 err = module_unload_init(mod);
2821 /* Now that everything is in its final location, we can
2822 * find the optional sections. */
2823 find_module_sections(mod, &info);
2825 err = check_module_license_and_versions(mod);
2829 /* Set up MODINFO_ATTR fields */
2830 setup_modinfo(mod, &info);
2832 /* Fix up syms, so that st_value is a pointer to location. */
2833 err = simplify_symbols(mod, &info);
2837 err = apply_relocations(mod, &info);
2841 err = post_relocation(mod, &info);
2845 flush_module_icache(mod);
2847 /* Now copy in args */
2848 mod->args = strndup_user(uargs, ~0UL >> 1);
2849 if (IS_ERR(mod->args)) {
2850 err = PTR_ERR(mod->args);
2851 goto free_arch_cleanup;
2854 /* Mark state as coming so strong_try_module_get() ignores us. */
2855 mod->state = MODULE_STATE_COMING;
2857 /* Now sew it into the lists so we can get lockdep and oops
2858 * info during argument parsing. No one should access us, since
2859 * strong_try_module_get() will fail.
2860 * lockdep/oops can run asynchronously, so use the RCU list insertion
2861 * function to insert in a way that is safe for concurrent readers.
2862 * The mutex protects against concurrent writers.
2864 mutex_lock(&module_mutex);
2865 if (find_module(mod->name)) {
2870 /* This has to be done once we're sure module name is unique. */
2871 dynamic_debug_setup(info.debug, info.num_debug);
2873 /* Find duplicate symbols */
2874 err = verify_export_symbols(mod);
2878 module_bug_finalize(info.hdr, info.sechdrs, mod);
2879 list_add_rcu(&mod->list, &modules);
2880 mutex_unlock(&module_mutex);
2882 /* Module is ready to execute: parsing args may do that. */
2883 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
2887 /* Link in to sysfs. */
2888 err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp);
2892 /* Get rid of temporary copy. */
2896 trace_module_load(mod);
2900 mutex_lock(&module_mutex);
2901 /* Unlink carefully: kallsyms could be walking list. */
2902 list_del_rcu(&mod->list);
2903 module_bug_cleanup(mod);
2906 dynamic_debug_remove(info.debug);
2908 mutex_unlock(&module_mutex);
2909 synchronize_sched();
2912 module_arch_cleanup(mod);
2916 module_unload_free(mod);
2918 module_deallocate(mod, &info);
2921 return ERR_PTR(err);
2924 /* Call module constructors. */
2925 static void do_mod_ctors(struct module *mod)
2927 #ifdef CONFIG_CONSTRUCTORS
2930 for (i = 0; i < mod->num_ctors; i++)
2935 /* This is where the real work happens */
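/*
 * A minimal, purely illustrative sketch of how userspace reaches this
 * entry point (assuming the whole .ko file has already been read into
 * "image", "len" bytes long):
 *
 *	syscall(__NR_init_module, image, len, "param=value");
 *
 * insmod and modprobe do essentially this after reading the file.
 */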
2936 SYSCALL_DEFINE3(init_module, void __user *, umod,
2937 unsigned long, len, const char __user *, uargs)
2942 /* Must have permission */
2943 if (!capable(CAP_SYS_MODULE) || modules_disabled)
2946 /* Do all the hard work */
2947 mod = load_module(umod, len, uargs);
2949 return PTR_ERR(mod);
2951 blocking_notifier_call_chain(&module_notify_list,
2952 MODULE_STATE_COMING, mod);
2954 /* Set RO and NX regions for core */
2955 set_section_ro_nx(mod->module_core,
2956 mod->core_text_size,
2960 /* Set RO and NX regions for init */
2961 set_section_ro_nx(mod->module_init,
2962 mod->init_text_size,
2967 /* Start the module */
2968 if (mod->init != NULL)
2969 ret = do_one_initcall(mod->init);
2971 /* Init routine failed: abort. Try to protect us from
2972 buggy refcounters. */
2973 mod->state = MODULE_STATE_GOING;
2974 synchronize_sched();
2976 blocking_notifier_call_chain(&module_notify_list,
2977 MODULE_STATE_GOING, mod);
2979 wake_up(&module_wq);
2984 "%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
2985 "%s: loading module anyway...\n",
2986 __func__, mod->name, ret,
2991 /* Now it's a first class citizen! Wake up anyone waiting for it. */
2992 mod->state = MODULE_STATE_LIVE;
2993 wake_up(&module_wq);
2994 blocking_notifier_call_chain(&module_notify_list,
2995 MODULE_STATE_LIVE, mod);
2997 /* We need to finish all async code before the module init sequence is done */
2998 async_synchronize_full();
3000 mutex_lock(&module_mutex);
3001 /* Drop initial reference. */
3003 trim_init_extable(mod);
3004 #ifdef CONFIG_KALLSYMS
3005 mod->num_symtab = mod->core_num_syms;
3006 mod->symtab = mod->core_symtab;
3007 mod->strtab = mod->core_strtab;
3009 unset_module_init_ro_nx(mod);
3010 module_free(mod, mod->module_init);
3011 mod->module_init = NULL;
3013 mod->init_ro_size = 0;
3014 mod->init_text_size = 0;
3015 mutex_unlock(&module_mutex);
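/* Is addr inside the half-open range [start, start + size)? */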
3020 static inline int within(unsigned long addr, void *start, unsigned long size)
3022 return ((void *)addr >= start && (void *)addr < start + size);
3025 #ifdef CONFIG_KALLSYMS
3027 * This ignores the intensely annoying "mapping symbols" found
3028 * in ARM ELF files: $a, $t and $d.
3030 static inline int is_arm_mapping_symbol(const char *str)
3032 return str[0] == '$' && strchr("atd", str[1])
3033 && (str[2] == '\0' || str[2] == '.');
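/*
 * Find the symbol in mod's symbol table that best covers addr,
 * optionally returning the symbol's size and addr's offset within it.
 */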
3036 static const char *get_ksymbol(struct module *mod,
3038 unsigned long *size,
3039 unsigned long *offset)
3041 unsigned int i, best = 0;
3042 unsigned long nextval;
3044 /* At worst, the next value is at the end of the module */
3045 if (within_module_init(addr, mod))
3046 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3048 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3050 /* Scan for closest preceding symbol, and next symbol. (ELF
3051 starts real symbols at 1). */
3052 for (i = 1; i < mod->num_symtab; i++) {
3053 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3056 /* We ignore unnamed symbols: they're uninformative
3057 * and inserted at a whim. */
3058 if (mod->symtab[i].st_value <= addr
3059 && mod->symtab[i].st_value > mod->symtab[best].st_value
3060 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3061 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3063 if (mod->symtab[i].st_value > addr
3064 && mod->symtab[i].st_value < nextval
3065 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3066 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3067 nextval = mod->symtab[i].st_value;
3074 *size = nextval - mod->symtab[best].st_value;
3076 *offset = addr - mod->symtab[best].st_value;
3077 return mod->strtab + mod->symtab[best].st_name;
3080 /* For kallsyms to ask for address resolution. NULL means not found. Be careful
3081 * not to take locks (that could deadlock on an oops); simply disable preemption. */
3082 const char *module_address_lookup(unsigned long addr,
3083 unsigned long *size,
3084 unsigned long *offset,
3089 const char *ret = NULL;
3092 list_for_each_entry_rcu(mod, &modules, list) {
3093 if (within_module_init(addr, mod) ||
3094 within_module_core(addr, mod)) {
3096 *modname = mod->name;
3097 ret = get_ksymbol(mod, addr, size, offset);
3101 /* Make a copy in here where it's safe */
3103 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
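/*
 * Resolve an address inside a module to a bare symbol name, copied
 * into symname (which must hold KSYM_NAME_LEN bytes).
 */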
3110 int lookup_module_symbol_name(unsigned long addr, char *symname)
3115 list_for_each_entry_rcu(mod, &modules, list) {
3116 if (within_module_init(addr, mod) ||
3117 within_module_core(addr, mod)) {
3120 sym = get_ksymbol(mod, addr, NULL, NULL);
3123 strlcpy(symname, sym, KSYM_NAME_LEN);
3133 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3134 unsigned long *offset, char *modname, char *name)
3139 list_for_each_entry_rcu(mod, &modules, list) {
3140 if (within_module_init(addr, mod) ||
3141 within_module_core(addr, mod)) {
3144 sym = get_ksymbol(mod, addr, size, offset);
3148 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3150 strlcpy(name, sym, KSYM_NAME_LEN);
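/*
 * Look up the symnum'th symbol across all loaded modules: symnum is
 * decremented by each module's symbol count until it lands inside one.
 */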
3160 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3161 char *name, char *module_name, int *exported)
3166 list_for_each_entry_rcu(mod, &modules, list) {
3167 if (symnum < mod->num_symtab) {
3168 *value = mod->symtab[symnum].st_value;
3169 *type = mod->symtab[symnum].st_info;
3170 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3172 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3173 *exported = is_exported(name, *value, mod);
3177 symnum -= mod->num_symtab;
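/*
 * Search one module's symbol table for an exact name match; returns
 * the symbol value, or 0 if the name is not defined in this module.
 */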
3183 static unsigned long mod_find_symname(struct module *mod, const char *name)
3187 for (i = 0; i < mod->num_symtab; i++)
3188 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3189 mod->symtab[i].st_info != 'U')
3190 return mod->symtab[i].st_value;
3194 /* Look for this name: can be of form module:name. */
3195 unsigned long module_kallsyms_lookup_name(const char *name)
3199 unsigned long ret = 0;
3201 /* Don't lock: we're in enough trouble already. */
3203 if ((colon = strchr(name, ':')) != NULL) {
3205 if ((mod = find_module(name)) != NULL)
3206 ret = mod_find_symname(mod, colon+1);
3209 list_for_each_entry_rcu(mod, &modules, list)
3210 if ((ret = mod_find_symname(mod, name)) != 0)
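/*
 * Walk every symbol of every loaded module, calling fn() with the
 * symbol name, its owning module and its value.
 */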
3217 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3218 struct module *, unsigned long),
3225 list_for_each_entry(mod, &modules, list) {
3226 for (i = 0; i < mod->num_symtab; i++) {
3227 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3228 mod, mod->symtab[i].st_value);
3235 #endif /* CONFIG_KALLSYMS */
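/*
 * Build the short flag string that follows a module name in oops
 * output and /proc/modules: taint characters plus '+' while the module
 * is coming and '-' while it is going, e.g. "(PF+)" (illustrative).
 */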
3237 static char *module_flags(struct module *mod, char *buf)
3242 mod->state == MODULE_STATE_GOING ||
3243 mod->state == MODULE_STATE_COMING) {
3245 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
3247 else if (mod->taints & (1 << TAINT_OOT_MODULE))
3249 if (mod->taints & (1 << TAINT_FORCED_MODULE))
3251 if (mod->taints & (1 << TAINT_CRAP))
3254 * TAINT_FORCED_RMMOD: could be added.
3255 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't apply to modules. */
3259 /* Show a - for module-is-being-unloaded */
3260 if (mod->state == MODULE_STATE_GOING)
3262 /* Show a + for module-is-being-loaded */
3263 if (mod->state == MODULE_STATE_COMING)
3272 #ifdef CONFIG_PROC_FS
3273 /* Called by the /proc file system to return a list of modules. */
3274 static void *m_start(struct seq_file *m, loff_t *pos)
3276 mutex_lock(&module_mutex);
3277 return seq_list_start(&modules, *pos);
3280 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3282 return seq_list_next(p, &modules, pos);
3285 static void m_stop(struct seq_file *m, void *p)
3287 mutex_unlock(&module_mutex);
3290 static int m_show(struct seq_file *m, void *p)
3292 struct module *mod = list_entry(p, struct module, list);
3295 seq_printf(m, "%s %u",
3296 mod->name, mod->init_size + mod->core_size);
3297 print_unload_info(m, mod);
3299 /* Informative for users. */
3300 seq_printf(m, " %s",
3301 mod->state == MODULE_STATE_GOING ? "Unloading":
3302 mod->state == MODULE_STATE_COMING ? "Loading":
3304 /* Used by oprofile and other similar tools. */
3305 seq_printf(m, " 0x%pK", mod->module_core);
3309 seq_printf(m, " %s", module_flags(mod, buf));
3311 seq_printf(m, "\n");
3315 /* Format: modulename size refcount deps address
3317 Where refcount is a number or -, and deps is a comma-separated list of dependencies or "-". */
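/*
 * An illustrative /proc/modules line (field values are made up):
 *
 *	usb_storage 43690 2 uas,ums_realtek, Live 0xffffffffa01e0000
 */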
3320 static const struct seq_operations modules_op = {
3327 static int modules_open(struct inode *inode, struct file *file)
3329 return seq_open(file, &modules_op);
3332 static const struct file_operations proc_modules_operations = {
3333 .open = modules_open,
3335 .llseek = seq_lseek,
3336 .release = seq_release,
3339 static int __init proc_modules_init(void)
3341 proc_create("modules", 0, NULL, &proc_modules_operations);
3344 module_init(proc_modules_init);
3347 /* Given an address, look for it in the module exception tables. */
3348 const struct exception_table_entry *search_module_extables(unsigned long addr)
3350 const struct exception_table_entry *e = NULL;
3354 list_for_each_entry_rcu(mod, &modules, list) {
3355 if (mod->num_exentries == 0)
3358 e = search_extable(mod->extable,
3359 mod->extable + mod->num_exentries - 1,
3366 /* If we found one, we are currently running inside it, so the
3367 module cannot be unloaded out from under us; no refcnt needed. */
3372 * is_module_address - is this address inside a module?
3373 * @addr: the address to check.
3375 * See is_module_text_address() if you simply want to see if the address
3376 * is code (not data).
3378 bool is_module_address(unsigned long addr)
3383 ret = __module_address(addr) != NULL;
3390 * __module_address - get the module which contains an address.
3391 * @addr: the address.
3393 * Must be called with preempt disabled or module mutex held so that
3394 * module doesn't get freed during this.
3396 struct module *__module_address(unsigned long addr)
3400 if (addr < module_addr_min || addr > module_addr_max)
3403 list_for_each_entry_rcu(mod, &modules, list)
3404 if (within_module_core(addr, mod)
3405 || within_module_init(addr, mod))
3409 EXPORT_SYMBOL_GPL(__module_address);
3412 * is_module_text_address - is this address inside module code?
3413 * @addr: the address to check.
3415 * See is_module_address() if you simply want to see if the address is
3416 * anywhere in a module. See kernel_text_address() for testing if an
3417 * address corresponds to kernel or module code.
3419 bool is_module_text_address(unsigned long addr)
3424 ret = __module_text_address(addr) != NULL;
3431 * __module_text_address - get the module whose code contains an address.
3432 * @addr: the address.
3434 * Must be called with preempt disabled or module mutex held so that
3435 * module doesn't get freed during this.
3437 struct module *__module_text_address(unsigned long addr)
3439 struct module *mod = __module_address(addr);
3441 /* Make sure it's within the text section. */
3442 if (!within(addr, mod->module_init, mod->init_text_size)
3443 && !within(addr, mod->module_core, mod->core_text_size))
3448 EXPORT_SYMBOL_GPL(__module_text_address);
3450 /* Don't grab the lock: we're oopsing. */
3451 void print_modules(void)
3456 printk(KERN_DEFAULT "Modules linked in:");
3457 /* Most callers should already have preempt disabled, but make sure */
3459 list_for_each_entry_rcu(mod, &modules, list)
3460 printk(" %s%s", mod->name, module_flags(mod, buf));
3462 if (last_unloaded_module[0])
3463 printk(" [last unloaded: %s]", last_unloaded_module);
3467 #ifdef CONFIG_MODVERSIONS
3468 /* Generate the signature for all relevant module structures here.
3469 * If these change, we don't want to try to parse the module. */
3470 void module_layout(struct module *mod,
3471 struct modversion_info *ver,
3472 struct kernel_param *kp,
3473 struct kernel_symbol *ks,
3474 struct tracepoint * const *tp)
3477 EXPORT_SYMBOL(module_layout);