2 * firmware_class.c - Multi purpose firmware loading support
4 * Copyright (c) 2003 Manuel Estrada Sainz
6 * Please see Documentation/firmware_class/ for more information.
10 #include <linux/capability.h>
11 #include <linux/device.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/timer.h>
15 #include <linux/vmalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/mutex.h>
19 #include <linux/workqueue.h>
20 #include <linux/highmem.h>
21 #include <linux/firmware.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/file.h>
25 #include <linux/list.h>
27 #include <linux/async.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/reboot.h>
32 #include <linux/security.h>
34 #include <generated/utsrelease.h>
38 MODULE_AUTHOR("Manuel Estrada Sainz");
39 MODULE_DESCRIPTION("Multi purpose firmware loading support");
40 MODULE_LICENSE("GPL");
42 /* Builtin firmware support */
44 #ifdef CONFIG_FW_LOADER
46 extern struct builtin_fw __start_builtin_fw[];
47 extern struct builtin_fw __end_builtin_fw[];
/*
 * NOTE(review): this extraction has gaps (the embedded original line
 * numbering skips lines), so bodies throughout are incomplete; verify
 * every block against the full file before relying on it.
 */
/*
 * Scan the linker-provided __start_builtin_fw..__end_builtin_fw table for
 * @name.  On a match, point @fw at the builtin image and, when the caller
 * supplied a preallocated @buf large enough to hold it, copy the data in.
 * Presumably returns true on a match and false otherwise — TODO confirm,
 * the return statements are missing from this view.
 */
49 static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
50 void *buf, size_t size)
52 struct builtin_fw *b_fw;
54 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
55 if (strcmp(name, b_fw->name) == 0) {
56 fw->size = b_fw->size;
57 fw->data = b_fw->data;
/* copy only when the destination buffer can hold the whole image */
59 if (buf && fw->size <= size)
60 memcpy(buf, fw->data, fw->size);
/*
 * True when @fw->data points into the builtin firmware table, i.e. the
 * image was not allocated by this loader and must not be freed.
 */
68 static bool fw_is_builtin_firmware(const struct firmware *fw)
70 struct builtin_fw *b_fw;
72 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
73 if (fw->data == b_fw->data)
79 #else /* Module case - no builtin firmware support */
/* Module build: no builtin table exists, so these are no-op stubs. */
81 static inline bool fw_get_builtin_firmware(struct firmware *fw,
82 const char *name, void *buf,
88 static inline bool fw_is_builtin_firmware(const struct firmware *fw)
/* Seconds to wait for the user-mode helper; 0 means wait forever
 * (see the timeout_store() documentation further down). */
101 static int loading_timeout = 60; /* In seconds */
/* Convert loading_timeout to jiffies; a non-positive value becomes
 * MAX_JIFFY_OFFSET, i.e. effectively no timeout. */
103 static inline long firmware_loading_timeout(void)
105 return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
109 * Concurrent request_firmware() for the same firmware need to be
110 * serialized. struct fw_state is simple state machine which hold the
111 * state of the firmware loading.
/* completion is signalled once the load reaches a terminal state */
114 struct completion completion;
115 enum fw_status status;
/* Initialize a fw_state to FW_STATUS_UNKNOWN with an unsignalled completion. */
118 static void fw_state_init(struct fw_state *fw_st)
120 init_completion(&fw_st->completion);
121 fw_st->status = FW_STATUS_UNKNOWN;
/* Terminal states: DONE (success) or ABORTED (failure). */
124 static inline bool __fw_state_is_done(enum fw_status status)
126 return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
/*
 * Wait (interruptibly, up to @timeout jiffies) for the load to reach a
 * terminal state.  Returns 0 on success, a negative error otherwise —
 * the ABORTED branch body is missing from this view; presumably it maps
 * an aborted load to an error code — TODO confirm.
 */
129 static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
133 ret = wait_for_completion_interruptible_timeout(&fw_st->completion,
135 if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
140 return ret < 0 ? ret : 0;
/*
 * Publish a new status; WRITE_ONCE() because the status is read without
 * fw_lock in some paths.  Reaching a terminal state wakes all waiters.
 */
143 static void __fw_state_set(struct fw_state *fw_st,
144 enum fw_status status)
146 WRITE_ONCE(fw_st->status, status);
148 if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
149 complete_all(&fw_st->completion);
/* Convenience wrappers over __fw_state_set()/__fw_state_wait_common(). */
152 #define fw_state_start(fw_st) \
153 __fw_state_set(fw_st, FW_STATUS_LOADING)
154 #define fw_state_done(fw_st) \
155 __fw_state_set(fw_st, FW_STATUS_DONE)
156 #define fw_state_aborted(fw_st) \
157 __fw_state_set(fw_st, FW_STATUS_ABORTED)
158 #define fw_state_wait(fw_st) \
159 __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
/* Plain (unlocked) status comparison. */
161 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
163 return fw_st->status == status;
166 #define fw_state_is_aborted(fw_st) \
167 __fw_state_check(fw_st, FW_STATUS_ABORTED)
/* Extra state helpers used only by the user-mode helper (sysfs) path. */
169 #ifdef CONFIG_FW_LOADER_USER_HELPER
171 #define fw_state_aborted(fw_st) \
172 __fw_state_set(fw_st, FW_STATUS_ABORTED)
173 #define fw_state_is_done(fw_st) \
174 __fw_state_check(fw_st, FW_STATUS_DONE)
175 #define fw_state_is_loading(fw_st) \
176 __fw_state_check(fw_st, FW_STATUS_LOADING)
177 #define fw_state_wait_timeout(fw_st, timeout) \
178 __fw_state_wait_common(fw_st, timeout)
180 #endif /* CONFIG_FW_LOADER_USER_HELPER */
182 /* firmware behavior options */
/* send a uevent so udev can react to the request */
183 #define FW_OPT_UEVENT (1U << 0)
/* asynchronous request (request_firmware_nowait) */
184 #define FW_OPT_NOWAIT (1U << 1)
185 #ifdef CONFIG_FW_LOADER_USER_HELPER
/* allow falling back to the sysfs user-mode helper */
186 #define FW_OPT_USERHELPER (1U << 2)
188 #define FW_OPT_USERHELPER 0
/* FW_OPT_FALLBACK enables the user helper only when the kernel is
 * configured to use it as a fallback for direct-load failures. */
190 #ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
191 #define FW_OPT_FALLBACK FW_OPT_USERHELPER
193 #define FW_OPT_FALLBACK 0
/* suppress the warning on direct-load failure */
195 #define FW_OPT_NO_WARN (1U << 3)
/* do not add this image to the firmware cache */
196 #define FW_OPT_NOCACHE (1U << 4)
/*
 * Global cache of in-flight and cached firmware buffers.  NOTE(review):
 * several members (e.g. the lock guarding 'head') are missing from this
 * extraction — confirm against the full file.
 */
198 struct firmware_cache {
199 /* firmware_buf instance will be added into the below list */
201 struct list_head head;
204 #ifdef CONFIG_PM_SLEEP
206 * Names of firmware images which have been cached successfully
207 * will be added into the below list so that device uncache
208 * helper can trace which firmware images have been cached
/* protects the fw_names list below */
211 spinlock_t name_lock;
212 struct list_head fw_names;
/* delayed work used by the PM_SLEEP cache machinery */
214 struct delayed_work work;
216 struct notifier_block pm_notify;
/*
 * One firmware image, shared by all concurrent requests for the same
 * name.  Refcounted; freed via __fw_free_buf() on last put.
 */
220 struct firmware_buf {
222 struct list_head list;
223 struct firmware_cache *fwc;
224 struct fw_state fw_st;
/* non-zero when loading into a caller-preallocated buffer */
227 size_t allocated_size;
228 #ifdef CONFIG_FW_LOADER_USER_HELPER
/* linked on pending_fw_head while a user-helper load is in flight */
234 struct list_head pending_list;
/* a cached firmware name, kept on firmware_cache.fw_names */
239 struct fw_cache_entry {
240 struct list_head list;
/* devres payload recording a firmware name against a device */
244 struct fw_name_devm {
249 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
/* firmware_cache.state values: caching not started / started */
251 #define FW_LOADER_NO_CACHE 0
252 #define FW_LOADER_START_CACHE 1
254 static int fw_cache_piggyback_on_request(const char *name);
256 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
257 * guarding for corner cases a global lock should be OK */
258 static DEFINE_MUTEX(fw_lock);
/* Gate for filesystem firmware lookups; toggled around suspend/shutdown
 * (see the comment before firmware_enabled() below). */
260 static bool __enable_firmware = false;
262 static void enable_firmware(void)
264 mutex_lock(&fw_lock);
265 __enable_firmware = true;
266 mutex_unlock(&fw_lock);
269 static void disable_firmware(void)
271 mutex_lock(&fw_lock);
272 __enable_firmware = false;
273 mutex_unlock(&fw_lock);
277 * When disabled only the built-in firmware and the firmware cache will be
278 * used to look for firmware.
/* Read __enable_firmware under fw_lock; returns the snapshot. */
280 static bool firmware_enabled(void)
282 bool enabled = false;
284 mutex_lock(&fw_lock);
285 if (__enable_firmware)
287 mutex_unlock(&fw_lock);
292 static struct firmware_cache fw_cache;
/*
 * Allocate and initialize a firmware_buf for @fw_name.  GFP_ATOMIC because
 * callers may hold fwc->lock.  NOTE(review): error-handling lines after the
 * allocations are missing from this extraction — confirm NULL checks.
 */
294 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
295 struct firmware_cache *fwc,
296 void *dbuf, size_t size)
298 struct firmware_buf *buf;
300 buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
304 buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
310 kref_init(&buf->ref);
313 buf->allocated_size = size;
314 fw_state_init(&buf->fw_st);
315 #ifdef CONFIG_FW_LOADER_USER_HELPER
316 INIT_LIST_HEAD(&buf->pending_list);
319 pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
/* Find an existing buf by name in the global cache; caller must hold
 * fwc->lock. */
324 static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
326 struct firmware_buf *tmp;
327 struct firmware_cache *fwc = &fw_cache;
329 list_for_each_entry(tmp, &fwc->head, list)
330 if (!strcmp(tmp->fw_id, fw_name))
/*
 * Look up @fw_name in the cache, or allocate a fresh buf and link it in.
 * Returns 0 on success, -ENOMEM on allocation failure.  Presumably a
 * positive return distinguishes "found existing buf" for request batching
 * — TODO confirm, the found-path lines are missing here.
 */
335 static int fw_lookup_and_allocate_buf(const char *fw_name,
336 struct firmware_cache *fwc,
337 struct firmware_buf **buf, void *dbuf,
340 struct firmware_buf *tmp;
342 spin_lock(&fwc->lock);
343 tmp = __fw_lookup_buf(fw_name);
346 spin_unlock(&fwc->lock);
350 tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
/* publish the new buf while still holding the cache lock */
352 list_add(&tmp->list, &fwc->head);
353 spin_unlock(&fwc->lock);
357 return tmp ? 0 : -ENOMEM;
/*
 * kref release callback: unlink the buf from the cache (dropping fwc->lock,
 * which the caller acquired — see fw_free_buf()), free any user-helper
 * pages, and free the buf unless the data lives in a caller-preallocated
 * buffer (allocated_size != 0 means we don't own buf->data — TODO confirm,
 * the vfree path is missing from this view).
 */
360 static void __fw_free_buf(struct kref *ref)
361 __releases(&fwc->lock)
363 struct firmware_buf *buf = to_fwbuf(ref);
364 struct firmware_cache *fwc = buf->fwc;
366 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
367 __func__, buf->fw_id, buf, buf->data,
368 (unsigned int)buf->size);
370 list_del(&buf->list);
371 spin_unlock(&fwc->lock);
373 #ifdef CONFIG_FW_LOADER_USER_HELPER
374 if (buf->is_paged_buf) {
377 for (i = 0; i < buf->nr_pages; i++)
378 __free_page(buf->pages[i]);
382 if (!buf->allocated_size)
384 kfree_const(buf->fw_id);
/*
 * Drop a reference under fwc->lock.  If this was not the last reference,
 * unlock here; otherwise __fw_free_buf() releases the lock itself.
 */
388 static void fw_free_buf(struct firmware_buf *buf)
390 struct firmware_cache *fwc = buf->fwc;
391 spin_lock(&fwc->lock);
392 if (!kref_put(&buf->ref, __fw_free_buf))
393 spin_unlock(&fwc->lock);
396 /* direct firmware loading support */
/* optional user-supplied search path, set via the 'path' module param */
397 static char fw_path_para[256];
/* default search paths, tried in order after fw_path_para */
398 static const char * const fw_path[] = {
400 "/lib/firmware/updates/" UTS_RELEASE,
401 "/lib/firmware/updates",
402 "/lib/firmware/" UTS_RELEASE,
407 * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
408 * from kernel command line because firmware_class is generally built in
409 * kernel instead of module.
411 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
412 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
/*
 * Try to read the firmware directly from the filesystem, walking fw_path[].
 * When buf->data is already set we load into that preallocated buffer
 * (capped at allocated_size) and tell the security layer via the
 * READING_FIRMWARE_PREALLOC_BUFFER read id.  Marks the buf DONE on success.
 */
415 fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
421 enum kernel_read_file_id id = READING_FIRMWARE;
422 size_t msize = INT_MAX;
424 /* Already populated data member means we're loading into a buffer */
426 id = READING_FIRMWARE_PREALLOC_BUFFER;
427 msize = buf->allocated_size;
434 for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
435 /* skip the unset customized path */
/* reject paths that would be truncated by snprintf */
439 len = snprintf(path, PATH_MAX, "%s/%s",
440 fw_path[i], buf->fw_id);
441 if (len >= PATH_MAX) {
447 rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
/* -ENOENT is expected while probing paths (dbg); other errors warn */
451 dev_dbg(device, "loading %s failed with error %d\n",
454 dev_warn(device, "loading %s failed with error %d\n",
458 dev_dbg(device, "direct-loading %s\n", buf->fw_id);
460 fw_state_done(&buf->fw_st);
468 /* firmware holds the ownership of pages */
/* Release the firmware_buf backing @fw (no-op for direct loads with no
 * priv — the guard line is missing from this view). */
469 static void firmware_free_data(const struct firmware *fw)
471 /* Loaded directly? */
476 fw_free_buf(fw->priv);
479 /* store the pages buffer info firmware from buf */
/* Copy the loaded image description (pages, size, data) from the shared
 * buf into the caller-visible struct firmware. */
480 static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
483 #ifdef CONFIG_FW_LOADER_USER_HELPER
484 fw->pages = buf->pages;
486 fw->size = buf->size;
487 fw->data = buf->data;
489 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
490 __func__, buf->fw_id, buf, buf->data,
491 (unsigned int)buf->size);
494 #ifdef CONFIG_PM_SLEEP
/* devres release callback: frees the recorded firmware name.  The magic
 * check (address of fw_cache) guards against foreign devres entries. */
495 static void fw_name_devm_release(struct device *dev, void *res)
497 struct fw_name_devm *fwn = res;
499 if (fwn->magic == (unsigned long)&fw_cache)
500 pr_debug("%s: fw_name-%s devm-%p released\n",
501 __func__, fwn->name, res);
502 kfree_const(fwn->name);
/* devres match callback: same magic and same firmware name. */
505 static int fw_devm_match(struct device *dev, void *res,
508 struct fw_name_devm *fwn = res;
510 return (fwn->magic == (unsigned long)&fw_cache) &&
511 !strcmp(fwn->name, match_data);
/* Find an existing devres entry for @name on @dev, if any. */
514 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
517 struct fw_name_devm *fwn;
519 fwn = devres_find(dev, fw_name_devm_release,
520 fw_devm_match, (void *)name);
524 /* add firmware name into devres list */
/* Record @name against @dev (idempotent: an existing entry is reused)
 * so the cache machinery can re-cache it around suspend.  NOTE(review):
 * allocation-failure returns are missing from this view. */
525 static int fw_add_devm_name(struct device *dev, const char *name)
527 struct fw_name_devm *fwn;
529 fwn = fw_find_devm_name(dev, name);
533 fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
537 fwn->name = kstrdup_const(name, GFP_KERNEL);
543 fwn->magic = (unsigned long)&fw_cache;
544 devres_add(dev, fwn);
/* !CONFIG_PM_SLEEP stub: nothing to record. */
549 static int fw_add_devm_name(struct device *dev, const char *name)
/*
 * Final hand-off of a loaded buf to the caller's struct firmware, under
 * fw_lock.  Fails when the load produced no data or was aborted
 * (presumably returning -ENOENT — the return line is missing here).
 * Also hooks the image into the devres-based cache unless the request
 * opted out via FW_OPT_NOCACHE or skipped the uevent.
 */
555 static int assign_firmware_buf(struct firmware *fw, struct device *device,
556 unsigned int opt_flags)
558 struct firmware_buf *buf = fw->priv;
560 mutex_lock(&fw_lock);
561 if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
562 mutex_unlock(&fw_lock);
567 * add firmware name into devres list so that we can auto cache
568 * and uncache firmware for device.
570 * device may has been deleted already, but the problem
571 * should be fixed in devres or driver core.
573 /* don't cache firmware handled without uevent */
574 if (device && (opt_flags & FW_OPT_UEVENT) &&
575 !(opt_flags & FW_OPT_NOCACHE))
576 fw_add_devm_name(device, buf->fw_id);
579 * After caching firmware image is started, let it piggyback
580 * on request firmware.
582 if (!(opt_flags & FW_OPT_NOCACHE) &&
583 buf->fwc->state == FW_LOADER_START_CACHE) {
584 if (fw_cache_piggyback_on_request(buf->fw_id))
588 /* pass the pages buffer to driver at the last minute */
589 fw_set_page_data(buf, fw);
590 mutex_unlock(&fw_lock);
595 * user-mode helper code
597 #ifdef CONFIG_FW_LOADER_USER_HELPER
/* Per-request sysfs device state for the user-mode helper interface. */
598 struct firmware_priv {
601 struct firmware_buf *buf;
605 static struct firmware_priv *to_firmware_priv(struct device *dev)
607 return container_of(dev, struct firmware_priv, dev);
/*
 * Abort a pending user-helper load: unlink from pending_fw_head and mark
 * ABORTED.  Skipped when already DONE.  Caller must hold fw_lock —
 * presumably; TODO confirm, the lockdep assertion (if any) is not visible.
 */
610 static void __fw_load_abort(struct firmware_buf *buf)
613 * There is a small window in which user can write to 'loading'
614 * between loading done and disappearance of 'loading'
616 if (fw_state_is_done(&buf->fw_st))
619 list_del_init(&buf->pending_list);
620 fw_state_aborted(&buf->fw_st);
623 static void fw_load_abort(struct firmware_priv *fw_priv)
625 struct firmware_buf *buf = fw_priv->buf;
627 __fw_load_abort(buf);
/* All user-helper loads currently waiting for userspace, under fw_lock. */
630 static LIST_HEAD(pending_fw_head);
/* Abort pending fallback requests; when @only_kill_custom, spare the
 * uevent-driven (udev-handled) ones. */
632 static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
634 struct firmware_buf *buf;
635 struct firmware_buf *next;
637 mutex_lock(&fw_lock);
638 list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
639 if (!buf->need_uevent || !only_kill_custom)
640 __fw_load_abort(buf);
642 mutex_unlock(&fw_lock);
/* sysfs 'timeout' class attribute: show current loading_timeout. */
645 static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
648 return sprintf(buf, "%d\n", loading_timeout);
652 * firmware_timeout_store - set number of seconds to wait for firmware
653 * @class: device class pointer
654 * @attr: device attribute pointer
655 * @buf: buffer to scan for timeout value
656 * @count: number of bytes in @buf
658 * Sets the number of seconds to wait for the firmware. Once
659 * this expires an error will be returned to the driver and no
660 * firmware will be provided.
662 * Note: zero means 'wait forever'.
664 static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
665 const char *buf, size_t count)
667 loading_timeout = simple_strtol(buf, NULL, 10);
/* negative values are clamped — presumably to 0; the assignment line
 * is missing from this view */
668 if (loading_timeout < 0)
673 static CLASS_ATTR_RW(timeout);
675 static struct attribute *firmware_class_attrs[] = {
676 &class_attr_timeout.attr,
679 ATTRIBUTE_GROUPS(firmware_class);
/* Device release callback: frees the firmware_priv wrapper. */
681 static void fw_dev_release(struct device *dev)
683 struct firmware_priv *fw_priv = to_firmware_priv(dev);
/* Populate the uevent environment (FIRMWARE name, TIMEOUT, ASYNC flag)
 * for the user-mode helper; returns non-zero on env overflow. */
688 static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
690 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
692 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
694 if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
/* uevent hook; fw_lock guards against the buf disappearing mid-uevent. */
700 static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
702 struct firmware_priv *fw_priv = to_firmware_priv(dev);
705 mutex_lock(&fw_lock);
707 err = do_firmware_uevent(fw_priv, env);
708 mutex_unlock(&fw_lock);
712 static struct class firmware_class = {
714 .class_groups = firmware_class_groups,
715 .dev_uevent = firmware_uevent,
716 .dev_release = fw_dev_release,
/* sysfs 'loading' show: 1 while the buf is in FW_STATUS_LOADING. */
719 static ssize_t firmware_loading_show(struct device *dev,
720 struct device_attribute *attr, char *buf)
722 struct firmware_priv *fw_priv = to_firmware_priv(dev);
725 mutex_lock(&fw_lock);
727 loading = fw_state_is_loading(&fw_priv->buf->fw_st);
728 mutex_unlock(&fw_lock);
730 return sprintf(buf, "%d\n", loading);
733 /* Some architectures don't have PAGE_KERNEL_RO */
734 #ifndef PAGE_KERNEL_RO
735 #define PAGE_KERNEL_RO PAGE_KERNEL
738 /* one pages buffer should be mapped/unmapped only once */
/* vmap the user-helper page array read-only into buf->data; no-op for
 * non-paged bufs.  NOTE(review): the vunmap-of-previous-mapping line is
 * missing from this view. */
739 static int fw_map_pages_buf(struct firmware_buf *buf)
741 if (!buf->is_paged_buf)
745 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
752 * firmware_loading_store - set value in the 'loading' control file
753 * @dev: device pointer
754 * @attr: device attribute pointer
755 * @buf: buffer to scan for loading control value
756 * @count: number of bytes in @buf
758 * The relevant values are:
760 * 1: Start a load, discarding any previous partial load.
761 * 0: Conclude the load and hand the data to the driver code.
762 * -1: Conclude the load with an error and discard any written data.
764 static ssize_t firmware_loading_store(struct device *dev,
765 struct device_attribute *attr,
766 const char *buf, size_t count)
768 struct firmware_priv *fw_priv = to_firmware_priv(dev);
769 struct firmware_buf *fw_buf;
770 ssize_t written = count;
771 int loading = simple_strtol(buf, NULL, 10);
774 mutex_lock(&fw_lock);
775 fw_buf = fw_priv->buf;
/* a request aborted elsewhere while we raced for fw_lock */
776 if (fw_state_is_aborted(&fw_buf->fw_st))
/* case 1: (re)start — throw away any partially written pages */
781 /* discarding any previous partial load */
782 if (!fw_state_is_done(&fw_buf->fw_st)) {
783 for (i = 0; i < fw_buf->nr_pages; i++)
784 __free_page(fw_buf->pages[i]);
785 vfree(fw_buf->pages);
786 fw_buf->pages = NULL;
787 fw_buf->page_array_size = 0;
788 fw_buf->nr_pages = 0;
789 fw_state_start(&fw_buf->fw_st);
/* case 0: conclude — map the pages and vet the image with the LSM */
793 if (fw_state_is_loading(&fw_buf->fw_st)) {
797 * Several loading requests may be pending on
798 * one same firmware buf, so let all requests
799 * see the mapped 'buf->data' once the loading
802 rc = fw_map_pages_buf(fw_buf);
804 dev_err(dev, "%s: map pages failed\n",
807 rc = security_kernel_post_read_file(NULL,
808 fw_buf->data, fw_buf->size,
812 * Same logic as fw_load_abort, only the DONE bit
813 * is ignored and we set ABORT only on failure.
815 list_del_init(&fw_buf->pending_list);
/* rc != 0 presumably selects ABORTED over DONE — TODO confirm,
 * the branch condition is missing from this view */
817 fw_state_aborted(&fw_buf->fw_st);
820 fw_state_done(&fw_buf->fw_st);
/* any other value (including -1): abort the load */
826 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
829 fw_load_abort(fw_priv);
833 mutex_unlock(&fw_lock);
839 static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
840 loff_t offset, size_t count, bool read)
843 memcpy(buffer, buf->data + offset, count);
845 memcpy(buf->data + offset, buffer, count);
848 static void firmware_rw(struct firmware_buf *buf, char *buffer,
849 loff_t offset, size_t count, bool read)
853 int page_nr = offset >> PAGE_SHIFT;
854 int page_ofs = offset & (PAGE_SIZE-1);
855 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
857 page_data = kmap(buf->pages[page_nr]);
860 memcpy(buffer, page_data + page_ofs, page_cnt);
862 memcpy(page_data + page_ofs, buffer, page_cnt);
864 kunmap(buf->pages[page_nr]);
871 static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
872 struct bin_attribute *bin_attr,
873 char *buffer, loff_t offset, size_t count)
875 struct device *dev = kobj_to_dev(kobj);
876 struct firmware_priv *fw_priv = to_firmware_priv(dev);
877 struct firmware_buf *buf;
880 mutex_lock(&fw_lock);
882 if (!buf || fw_state_is_done(&buf->fw_st)) {
886 if (offset > buf->size) {
890 if (count > buf->size - offset)
891 count = buf->size - offset;
896 firmware_rw_buf(buf, buffer, offset, count, true);
898 firmware_rw(buf, buffer, offset, count, true);
901 mutex_unlock(&fw_lock);
/*
 * Grow the paged buffer to hold at least @min_size bytes: enlarge the
 * page-pointer array (doubling) if needed, then allocate the missing
 * highmem pages.  Aborts the whole load on allocation failure.
 * NOTE(review): the vfree of the old array and the nr_pages increment
 * are missing from this view.
 */
905 static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
907 struct firmware_buf *buf = fw_priv->buf;
908 int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
910 /* If the array of pages is too small, grow it... */
911 if (buf->page_array_size < pages_needed) {
912 int new_array_size = max(pages_needed,
913 buf->page_array_size * 2);
914 struct page **new_pages;
916 new_pages = vmalloc(new_array_size * sizeof(void *));
918 fw_load_abort(fw_priv);
/* keep existing page pointers, zero the newly added slots */
921 memcpy(new_pages, buf->pages,
922 buf->page_array_size * sizeof(void *));
923 memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
924 (new_array_size - buf->page_array_size));
926 buf->pages = new_pages;
927 buf->page_array_size = new_array_size;
930 while (buf->nr_pages < pages_needed) {
931 buf->pages[buf->nr_pages] =
932 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
934 if (!buf->pages[buf->nr_pages]) {
935 fw_load_abort(fw_priv);
944 * firmware_data_write - write method for firmware
945 * @filp: open sysfs file
946 * @kobj: kobject for the device
947 * @bin_attr: bin_attr structure
948 * @buffer: buffer being written
949 * @offset: buffer offset for write in total data store area
950 * @count: buffer size
952 * Data written to the 'data' attribute will be later handed to
953 * the driver as a firmware image.
955 static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
956 struct bin_attribute *bin_attr,
957 char *buffer, loff_t offset, size_t count)
959 struct device *dev = kobj_to_dev(kobj);
960 struct firmware_priv *fw_priv = to_firmware_priv(dev);
961 struct firmware_buf *buf;
/* feeding firmware to the kernel requires raw-I/O capability */
964 if (!capable(CAP_SYS_RAWIO))
967 mutex_lock(&fw_lock);
969 if (!buf || fw_state_is_done(&buf->fw_st)) {
/* preallocated buffer: writes must fit; no growing possible */
975 if (offset + count > buf->allocated_size) {
979 firmware_rw_buf(buf, buffer, offset, count, false);
/* paged buffer: grow on demand, then copy */
982 retval = fw_realloc_buffer(fw_priv, offset + count);
987 firmware_rw(buf, buffer, offset, count, false);
/* track the high-water mark as the image size */
990 buf->size = max_t(size_t, offset + count, buf->size);
992 mutex_unlock(&fw_lock);
/* sysfs attribute wiring for the per-request firmware device */
996 static struct bin_attribute firmware_attr_data = {
997 .attr = { .name = "data", .mode = 0644 },
999 .read = firmware_data_read,
1000 .write = firmware_data_write,
1003 static struct attribute *fw_dev_attrs[] = {
1004 &dev_attr_loading.attr,
1008 static struct bin_attribute *fw_dev_bin_attrs[] = {
1009 &firmware_attr_data,
1013 static const struct attribute_group fw_dev_attr_group = {
1014 .attrs = fw_dev_attrs,
1015 .bin_attrs = fw_dev_bin_attrs,
1018 static const struct attribute_group *fw_dev_attr_groups[] = {
/*
 * Allocate and initialize the per-request sysfs device used by the
 * user-mode helper.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 * The device is only initialized here; device_add() happens later in
 * _request_firmware_load().
 */
1023 static struct firmware_priv *
1024 fw_create_instance(struct firmware *firmware, const char *fw_name,
1025 struct device *device, unsigned int opt_flags)
1027 struct firmware_priv *fw_priv;
1028 struct device *f_dev;
1030 fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
1032 fw_priv = ERR_PTR(-ENOMEM);
1036 fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
1037 fw_priv->fw = firmware;
1038 f_dev = &fw_priv->dev;
1040 device_initialize(f_dev);
/* the sysfs node is named after the firmware image */
1041 dev_set_name(f_dev, "%s", fw_name);
1042 f_dev->parent = device;
1043 f_dev->class = &firmware_class;
1044 f_dev->groups = fw_dev_attr_groups;
1049 /* load a firmware via user helper */
/*
 * Register the sysfs loader device, queue the buf on pending_fw_head,
 * optionally emit the uevent (otherwise wait indefinitely for a custom
 * helper), then block until userspace concludes the load or @timeout
 * expires.  On wait failure the request is aborted under fw_lock.
 */
1050 static int _request_firmware_load(struct firmware_priv *fw_priv,
1051 unsigned int opt_flags, long timeout)
1054 struct device *f_dev = &fw_priv->dev;
1055 struct firmware_buf *buf = fw_priv->buf;
1057 /* fall back on userspace loading */
1059 buf->is_paged_buf = true;
/* suppress the ADD uevent until we're ready to handle the request */
1061 dev_set_uevent_suppress(f_dev, true);
1063 retval = device_add(f_dev);
1065 dev_err(f_dev, "%s: device_register failed\n", __func__);
1069 mutex_lock(&fw_lock);
1070 list_add(&buf->pending_list, &pending_fw_head);
1071 mutex_unlock(&fw_lock);
1073 if (opt_flags & FW_OPT_UEVENT) {
1074 buf->need_uevent = true;
1075 dev_set_uevent_suppress(f_dev, false);
1076 dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
1077 kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
/* no uevent: a custom helper drives the load, so no timeout */
1079 timeout = MAX_JIFFY_OFFSET;
1082 retval = fw_state_wait_timeout(&buf->fw_st, timeout);
1084 mutex_lock(&fw_lock);
1085 fw_load_abort(fw_priv);
1086 mutex_unlock(&fw_lock);
/* map the wait outcome to an error: aborted, or no data mapped */
1089 if (fw_state_is_aborted(&buf->fw_st))
1091 else if (buf->is_paged_buf && !buf->data)
/*
 * Entry point for the user-mode helper fallback: take the usermodehelper
 * read lock (waiting for it on async requests, trylock on sync ones so we
 * never block during suspend), create the sysfs loader instance, run the
 * load, and hand the result to the caller's struct firmware.
 */
1100 static int fw_load_from_user_helper(struct firmware *firmware,
1101 const char *name, struct device *device,
1102 unsigned int opt_flags)
1104 struct firmware_priv *fw_priv;
1108 timeout = firmware_loading_timeout();
1109 if (opt_flags & FW_OPT_NOWAIT) {
/* async path may sleep while usermodehelper is disabled */
1110 timeout = usermodehelper_read_lock_wait(timeout);
1112 dev_dbg(device, "firmware: %s loading timed out\n",
1117 ret = usermodehelper_read_trylock();
1119 dev_err(device, "firmware: %s will not be loaded\n",
1125 fw_priv = fw_create_instance(firmware, name, device, opt_flags);
1126 if (IS_ERR(fw_priv)) {
1127 ret = PTR_ERR(fw_priv);
1131 fw_priv->buf = firmware->priv;
1132 ret = _request_firmware_load(fw_priv, opt_flags, timeout);
1135 ret = assign_firmware_buf(firmware, device, opt_flags);
1138 usermodehelper_read_unlock();
1143 #else /* CONFIG_FW_LOADER_USER_HELPER */
/* Stubs when the user-mode helper is compiled out. */
1145 fw_load_from_user_helper(struct firmware *firmware, const char *name,
1146 struct device *device, unsigned int opt_flags)
1151 static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
1153 #endif /* CONFIG_FW_LOADER_USER_HELPER */
1155 /* prepare firmware and firmware_buf structs;
1156 * return 0 if a firmware is already assigned, 1 if need to load one,
1157 * or a negative error code
1160 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
1161 struct device *device, void *dbuf, size_t size)
1163 struct firmware *firmware;
1164 struct firmware_buf *buf;
1167 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
1169 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
/* builtin images short-circuit the whole loading machinery */
1174 if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
1175 dev_dbg(device, "using built-in %s\n", name);
1176 return 0; /* assigned */
1179 ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
1182 * bind with 'buf' now to avoid warning in failure path
1183 * of requesting firmware.
1185 firmware->priv = buf;
/* a concurrent request for the same name is already loading it:
 * wait for that load and share its result (batching) */
1188 ret = fw_state_wait(&buf->fw_st);
1190 fw_set_page_data(buf, firmware);
1191 return 0; /* assigned */
1197 return 1; /* need to load */
1201 * Batched requests need only one wake, we need to do this step last due to the
1202 * fallback mechanism. The buf is protected with kref_get(), and it won't be
1203 * released until the last user calls release_firmware().
1205 * Failed batched requests are possible as well, in such cases we just share
1206 * the struct firmware_buf and won't release it until all requests are woken
1207 * and have gone through this same path.
/* Mark a failed request's buf ABORTED (once) so batched waiters wake. */
1209 static void fw_abort_batch_reqs(struct firmware *fw)
1211 struct firmware_buf *buf;
1213 /* Loaded directly? */
1214 if (!fw || !fw->priv)
1218 if (!fw_state_is_aborted(&buf->fw_st))
1219 fw_state_aborted(&buf->fw_st);
1222 /* called from request_firmware() and request_firmware_work_func() */
/*
 * Core request path: prepare/batch the buf, try the direct filesystem
 * load, optionally fall back to the user-mode helper, then assign the
 * result.  On failure the buf is aborted (waking batched waiters) and
 * the struct firmware released.
 */
1224 _request_firmware(const struct firmware **firmware_p, const char *name,
1225 struct device *device, void *buf, size_t size,
1226 unsigned int opt_flags)
1228 struct firmware *fw = NULL;
1234 if (!name || name[0] == '\0') {
1239 ret = _request_firmware_prepare(&fw, name, device, buf, size);
1240 if (ret <= 0) /* error or already assigned */
/* loading is gated off around suspend/shutdown; see firmware_enabled() */
1243 if (!firmware_enabled()) {
1244 WARN(1, "firmware request while host is not available\n");
1249 ret = fw_get_filesystem_firmware(device, fw->priv);
1251 if (!(opt_flags & FW_OPT_NO_WARN))
1253 "Direct firmware load for %s failed with error %d\n",
1255 if (opt_flags & FW_OPT_USERHELPER) {
1256 dev_warn(device, "Falling back to user helper\n");
1257 ret = fw_load_from_user_helper(fw, name, device,
1261 ret = assign_firmware_buf(fw, device, opt_flags);
1265 fw_abort_batch_reqs(fw);
1266 release_firmware(fw);
1275 * request_firmware: - send firmware request and wait for it
1276 * @firmware_p: pointer to firmware image
1277 * @name: name of firmware file
1278 * @device: device for which firmware is being loaded
1280 * @firmware_p will be used to return a firmware image by the name
1281 * of @name for device @device.
1283 * Should be called from user context where sleeping is allowed.
1285 * @name will be used as $FIRMWARE in the uevent environment and
1286 * should be distinctive enough not to be confused with any other
1287 * firmware image for this or any other device.
1289 * Caller must hold the reference count of @device.
1291 * The function can be called safely inside device's suspend and
1295 request_firmware(const struct firmware **firmware_p, const char *name,
1296 struct device *device)
1300 /* Need to pin this module until return */
1301 __module_get(THIS_MODULE);
/* synchronous, uevent-driven, with user-helper fallback if configured */
1302 ret = _request_firmware(firmware_p, name, device, NULL, 0,
1303 FW_OPT_UEVENT | FW_OPT_FALLBACK);
1304 module_put(THIS_MODULE);
1307 EXPORT_SYMBOL(request_firmware);
1310 * request_firmware_direct: - load firmware directly without usermode helper
1311 * @firmware_p: pointer to firmware image
1312 * @name: name of firmware file
1313 * @device: device for which firmware is being loaded
1315 * This function works pretty much like request_firmware(), but this doesn't
1316 * fall back to usermode helper even if the firmware couldn't be loaded
1317 * directly from fs. Hence it's useful for loading optional firmwares, which
1318 * aren't always present, without extra long timeouts of udev.
1320 int request_firmware_direct(const struct firmware **firmware_p,
1321 const char *name, struct device *device)
1325 __module_get(THIS_MODULE);
/* no FW_OPT_FALLBACK, and NO_WARN keeps missing-image noise down */
1326 ret = _request_firmware(firmware_p, name, device, NULL, 0,
1327 FW_OPT_UEVENT | FW_OPT_NO_WARN);
1328 module_put(THIS_MODULE);
1331 EXPORT_SYMBOL_GPL(request_firmware_direct);
1334 * request_firmware_into_buf - load firmware into a previously allocated buffer
1335 * @firmware_p: pointer to firmware image
1336 * @name: name of firmware file
1337 * @device: device for which firmware is being loaded and DMA region allocated
1338 * @buf: address of buffer to load firmware into
1339 * @size: size of buffer
1341 * This function works pretty much like request_firmware(), but it doesn't
1342 * allocate a buffer to hold the firmware data. Instead, the firmware
1343 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
1344 * data member is pointed at @buf.
1346 * This function doesn't cache firmware either.
1349 request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1350 struct device *device, void *buf, size_t size)
1354 __module_get(THIS_MODULE);
/* FW_OPT_NOCACHE presumably accompanies these flags given the
 * "doesn't cache" contract above — the flag line is cut off here */
1355 ret = _request_firmware(firmware_p, name, device, buf, size,
1356 FW_OPT_UEVENT | FW_OPT_FALLBACK |
1358 module_put(THIS_MODULE);
1361 EXPORT_SYMBOL(request_firmware_into_buf);
1364 * release_firmware: - release the resource associated with a firmware image
1365 * @fw: firmware resource to release
1367 void release_firmware(const struct firmware *fw)
/* builtin images are static data and must never be freed */
1370 if (!fw_is_builtin_firmware(fw))
1371 firmware_free_data(fw);
1375 EXPORT_SYMBOL(release_firmware);
/* Deferred-request context for request_firmware_nowait(). */
1378 struct firmware_work {
1379 struct work_struct work;
1380 struct module *module;
1382 struct device *device;
/* completion callback; @fw may be NULL on failure */
1384 void (*cont)(const struct firmware *fw, void *context);
1385 unsigned int opt_flags;
/* Workqueue body: perform the request, invoke the callback, then drop
 * the device and module references taken in request_firmware_nowait(). */
1388 static void request_firmware_work_func(struct work_struct *work)
1390 struct firmware_work *fw_work;
1391 const struct firmware *fw;
1393 fw_work = container_of(work, struct firmware_work, work);
1395 _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
1396 fw_work->opt_flags);
1397 fw_work->cont(fw, fw_work->context);
1398 put_device(fw_work->device); /* taken in request_firmware_nowait() */
1400 module_put(fw_work->module);
1401 kfree_const(fw_work->name);
1406 * request_firmware_nowait - asynchronous version of request_firmware
1407 * @module: module requesting the firmware
1408 * @uevent: sends uevent to copy the firmware image if this flag
1409 * is non-zero else the firmware copy must be done manually.
1410 * @name: name of firmware file
1411 * @device: device for which firmware is being loaded
1412 * @gfp: allocation flags
1413 * @context: will be passed over to @cont, and
1414 * @fw may be %NULL if firmware request fails.
1415 * @cont: function will be called asynchronously when the firmware
1418 * Caller must hold the reference count of @device.
1420 * Asynchronous variant of request_firmware() for user contexts:
1421 * - sleep for as small periods as possible since it may
1422 * increase kernel boot time of built-in device drivers
1423 * requesting firmware in their ->probe() methods, if
1424 * @gfp is GFP_KERNEL.
1426 * - can't sleep at all if @gfp is GFP_ATOMIC.
1429 request_firmware_nowait(
1430 struct module *module, bool uevent,
1431 const char *name, struct device *device, gfp_t gfp, void *context,
1432 void (*cont)(const struct firmware *fw, void *context))
1434 struct firmware_work *fw_work;
1436 fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1440 fw_work->module = module;
/* kstrdup_const: @name may outlive the caller's copy */
1441 fw_work->name = kstrdup_const(name, gfp);
1442 if (!fw_work->name) {
1446 fw_work->device = device;
1447 fw_work->context = context;
1448 fw_work->cont = cont;
/* !uevent means the caller drives the load through the sysfs helper */
1449 fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
1450 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
/* pin the requesting module for the lifetime of the work item */
1452 if (!try_module_get(module)) {
1453 kfree_const(fw_work->name);
/* device ref dropped in request_firmware_work_func() */
1458 get_device(fw_work->device);
1459 INIT_WORK(&fw_work->work, request_firmware_work_func);
1460 schedule_work(&fw_work->work);
1463 EXPORT_SYMBOL(request_firmware_nowait);
1465 #ifdef CONFIG_PM_SLEEP
1466 static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1469 * cache_firmware - cache one firmware image in kernel memory space
1470 * @fw_name: the firmware image name
1472 * Cache firmware in kernel memory so that drivers can use it when
1473 * system isn't ready for them to request firmware image from userspace.
1474 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace.
1478 * Return 0 if the firmware image has been cached successfully
1479 * Return !0 otherwise
1482 static int cache_firmware(const char *fw_name)
1485 const struct firmware *fw;
1487 pr_debug("%s: %s\n", __func__, fw_name);
1489 ret = request_firmware(&fw, fw_name, NULL);
1493 pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
1498 static struct firmware_buf *fw_lookup_buf(const char *fw_name)
1500 struct firmware_buf *tmp;
1501 struct firmware_cache *fwc = &fw_cache;
1503 spin_lock(&fwc->lock);
1504 tmp = __fw_lookup_buf(fw_name);
1505 spin_unlock(&fwc->lock);
1511 * uncache_firmware - remove one cached firmware image
1512 * @fw_name: the firmware image name
1514 * Uncache one firmware image which has been cached successfully
1517 * Return 0 if the firmware cache has been removed successfully
1518 * Return !0 otherwise
1521 static int uncache_firmware(const char *fw_name)
1523 struct firmware_buf *buf;
1526 pr_debug("%s: %s\n", __func__, fw_name);
1528 if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
1531 buf = fw_lookup_buf(fw_name);
1540 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1542 struct fw_cache_entry *fce;
1544 fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1548 fce->name = kstrdup_const(name, GFP_ATOMIC);
1558 static int __fw_entry_found(const char *name)
1560 struct firmware_cache *fwc = &fw_cache;
1561 struct fw_cache_entry *fce;
1563 list_for_each_entry(fce, &fwc->fw_names, list) {
1564 if (!strcmp(fce->name, name))
1570 static int fw_cache_piggyback_on_request(const char *name)
1572 struct firmware_cache *fwc = &fw_cache;
1573 struct fw_cache_entry *fce;
1576 spin_lock(&fwc->name_lock);
1577 if (__fw_entry_found(name))
1580 fce = alloc_fw_cache_entry(name);
1583 list_add(&fce->list, &fwc->fw_names);
1584 pr_debug("%s: fw: %s\n", __func__, name);
1587 spin_unlock(&fwc->name_lock);
1591 static void free_fw_cache_entry(struct fw_cache_entry *fce)
1593 kfree_const(fce->name);
1597 static void __async_dev_cache_fw_image(void *fw_entry,
1598 async_cookie_t cookie)
1600 struct fw_cache_entry *fce = fw_entry;
1601 struct firmware_cache *fwc = &fw_cache;
1604 ret = cache_firmware(fce->name);
1606 spin_lock(&fwc->name_lock);
1607 list_del(&fce->list);
1608 spin_unlock(&fwc->name_lock);
1610 free_fw_cache_entry(fce);
1614 /* called with dev->devres_lock held */
1615 static void dev_create_fw_entry(struct device *dev, void *res,
1618 struct fw_name_devm *fwn = res;
1619 const char *fw_name = fwn->name;
1620 struct list_head *head = data;
1621 struct fw_cache_entry *fce;
1623 fce = alloc_fw_cache_entry(fw_name);
1625 list_add(&fce->list, head);
1628 static int devm_name_match(struct device *dev, void *res,
1631 struct fw_name_devm *fwn = res;
1632 return (fwn->magic == (unsigned long)match_data);
1635 static void dev_cache_fw_image(struct device *dev, void *data)
1638 struct fw_cache_entry *fce;
1639 struct fw_cache_entry *fce_next;
1640 struct firmware_cache *fwc = &fw_cache;
1642 devres_for_each_res(dev, fw_name_devm_release,
1643 devm_name_match, &fw_cache,
1644 dev_create_fw_entry, &todo);
1646 list_for_each_entry_safe(fce, fce_next, &todo, list) {
1647 list_del(&fce->list);
1649 spin_lock(&fwc->name_lock);
1650 /* only one cache entry for one firmware */
1651 if (!__fw_entry_found(fce->name)) {
1652 list_add(&fce->list, &fwc->fw_names);
1654 free_fw_cache_entry(fce);
1657 spin_unlock(&fwc->name_lock);
1660 async_schedule_domain(__async_dev_cache_fw_image,
1666 static void __device_uncache_fw_images(void)
1668 struct firmware_cache *fwc = &fw_cache;
1669 struct fw_cache_entry *fce;
1671 spin_lock(&fwc->name_lock);
1672 while (!list_empty(&fwc->fw_names)) {
1673 fce = list_entry(fwc->fw_names.next,
1674 struct fw_cache_entry, list);
1675 list_del(&fce->list);
1676 spin_unlock(&fwc->name_lock);
1678 uncache_firmware(fce->name);
1679 free_fw_cache_entry(fce);
1681 spin_lock(&fwc->name_lock);
1683 spin_unlock(&fwc->name_lock);
1687 * device_cache_fw_images - cache devices' firmware
1689 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
1691 * device's devres link list, so device_cache_fw_images can call
1692 * cache_firmware() to cache these firmwares for the device,
1693 * then the device driver can load its firmwares easily at
1694 * time when system is not ready to complete loading firmware.
1696 static void device_cache_fw_images(void)
1698 struct firmware_cache *fwc = &fw_cache;
1702 pr_debug("%s\n", __func__);
1704 /* cancel uncache work */
1705 cancel_delayed_work_sync(&fwc->work);
1708 * use small loading timeout for caching devices' firmware
1709 * because all these firmware images have been loaded
1710 * successfully at lease once, also system is ready for
1711 * completing firmware loading now. The maximum size of
1712 * firmware in current distributions is about 2M bytes,
1713 * so 10 secs should be enough.
1715 old_timeout = loading_timeout;
1716 loading_timeout = 10;
1718 mutex_lock(&fw_lock);
1719 fwc->state = FW_LOADER_START_CACHE;
1720 dpm_for_each_dev(NULL, dev_cache_fw_image);
1721 mutex_unlock(&fw_lock);
1723 /* wait for completion of caching firmware for all devices */
1724 async_synchronize_full_domain(&fw_cache_domain);
1726 loading_timeout = old_timeout;
1730 * device_uncache_fw_images - uncache devices' firmware
1732 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
/* Delayed-work shim: runs the uncache pass from fw_cache.work. */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1747 * device_uncache_fw_images_delay - uncache devices firmwares
1748 * @delay: number of milliseconds to delay uncache device firmwares
 * uncache all devices' firmware images which have been cached successfully
1751 * by device_cache_fw_images after @delay milliseconds.
1753 static void device_uncache_fw_images_delay(unsigned long delay)
1755 queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
1756 msecs_to_jiffies(delay));
1760 * fw_pm_notify - notifier for suspend/resume
1761 * @notify_block: unused
1762 * @mode: mode we are switching to
1765 * Used to modify the firmware_class state as we move in between states.
1766 * The firmware_class implements a firmware cache to enable device driver
1767 * to fetch firmware upon resume before the root filesystem is ready. We
1768 * disable API calls which do not use the built-in firmware or the firmware
1769 * cache when we know these calls will not work.
1771 * The inner logic behind all this is a bit complex so it is worth summarizing
1772 * the kernel's own suspend/resume process with context and focus on how this
1773 * can impact the firmware API.
1775 * First a review on how we go to suspend::
1777 * pm_suspend() --> enter_state() -->
1779 * suspend_prepare() -->
1780 * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
1781 * suspend_freeze_processes() -->
1782 * freeze_processes() -->
1783 * __usermodehelper_set_disable_depth(UMH_DISABLED);
1784 * freeze all tasks ...
1785 * freeze_kernel_threads()
1786 * suspend_devices_and_enter() -->
1787 * dpm_suspend_start() -->
1790 * suspend_enter() -->
1791 * platform_suspend_prepare()
1792 * dpm_suspend_late()
1796 * When we resume we bail out of a loop from suspend_devices_and_enter() and
1797 * unwind back out to the caller enter_state() where we were before as follows::
1800 * suspend_devices_and_enter() --> (bail from loop)
1801 * dpm_resume_end() -->
1804 * suspend_finish() -->
1805 * suspend_thaw_processes() -->
1806 * thaw_processes() -->
1807 * __usermodehelper_set_disable_depth(UMH_FREEZING);
1808 * thaw_workqueues();
1809 * thaw all processes ...
1810 * usermodehelper_enable();
1811 * pm_notifier_call_chain(PM_POST_SUSPEND);
1813 * fw_pm_notify() works through pm_notifier_call_chain().
1815 static int fw_pm_notify(struct notifier_block *notify_block,
1816 unsigned long mode, void *unused)
1819 case PM_HIBERNATION_PREPARE:
1820 case PM_SUSPEND_PREPARE:
1821 case PM_RESTORE_PREPARE:
1823 * kill pending fallback requests with a custom fallback
1824 * to avoid stalling suspend.
1826 kill_pending_fw_fallback_reqs(true);
1827 device_cache_fw_images();
1831 case PM_POST_SUSPEND:
1832 case PM_POST_HIBERNATION:
1833 case PM_POST_RESTORE:
1835 * In case that system sleep failed and syscore_suspend is
1838 mutex_lock(&fw_lock);
1839 fw_cache.state = FW_LOADER_NO_CACHE;
1840 mutex_unlock(&fw_lock);
1843 device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
1850 /* stop caching firmware once syscore_suspend is reached */
1851 static int fw_suspend(void)
1853 fw_cache.state = FW_LOADER_NO_CACHE;
1857 static struct syscore_ops fw_syscore_ops = {
1858 .suspend = fw_suspend,
/* Without CONFIG_PM_SLEEP there is no suspend cache to piggyback on. */
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
1867 static void __init fw_cache_init(void)
1869 spin_lock_init(&fw_cache.lock);
1870 INIT_LIST_HEAD(&fw_cache.head);
1871 fw_cache.state = FW_LOADER_NO_CACHE;
1873 #ifdef CONFIG_PM_SLEEP
1874 spin_lock_init(&fw_cache.name_lock);
1875 INIT_LIST_HEAD(&fw_cache.fw_names);
1877 INIT_DELAYED_WORK(&fw_cache.work,
1878 device_uncache_fw_images_work);
1880 fw_cache.pm_notify.notifier_call = fw_pm_notify;
1881 register_pm_notifier(&fw_cache.pm_notify);
1883 register_syscore_ops(&fw_syscore_ops);
1887 static int fw_shutdown_notify(struct notifier_block *unused1,
1888 unsigned long unused2, void *unused3)
1892 * Kill all pending fallback requests to avoid both stalling shutdown,
1893 * and avoid a deadlock with the usermode_lock.
1895 kill_pending_fw_fallback_reqs(false);
1900 static struct notifier_block fw_shutdown_nb = {
1901 .notifier_call = fw_shutdown_notify,
1904 static int __init firmware_class_init(void)
1908 register_reboot_notifier(&fw_shutdown_nb);
1909 #ifdef CONFIG_FW_LOADER_USER_HELPER
1910 return class_register(&firmware_class);
1916 static void __exit firmware_class_exit(void)
1919 #ifdef CONFIG_PM_SLEEP
1920 unregister_syscore_ops(&fw_syscore_ops);
1921 unregister_pm_notifier(&fw_cache.pm_notify);
1923 unregister_reboot_notifier(&fw_shutdown_nb);
1924 #ifdef CONFIG_FW_LOADER_USER_HELPER
1925 class_unregister(&firmware_class);
1929 fs_initcall(firmware_class_init);
1930 module_exit(firmware_class_exit);