/*
 * Imported from the karo-tx-linux.git tree (driver-core-4.13-rc5 merge):
 * drivers/base/firmware_class.c
 */
1 /*
2  * firmware_class.c - Multi purpose firmware loading support
3  *
4  * Copyright (c) 2003 Manuel Estrada Sainz
5  *
6  * Please see Documentation/firmware_class/ for more information.
7  *
8  */
9
10 #include <linux/capability.h>
11 #include <linux/device.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/timer.h>
15 #include <linux/vmalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/mutex.h>
19 #include <linux/workqueue.h>
20 #include <linux/highmem.h>
21 #include <linux/firmware.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/file.h>
25 #include <linux/list.h>
26 #include <linux/fs.h>
27 #include <linux/async.h>
28 #include <linux/pm.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/reboot.h>
32 #include <linux/security.h>
33
34 #include <generated/utsrelease.h>
35
36 #include "base.h"
37
38 MODULE_AUTHOR("Manuel Estrada Sainz");
39 MODULE_DESCRIPTION("Multi purpose firmware loading support");
40 MODULE_LICENSE("GPL");
41
42 /* Builtin firmware support */
43
44 #ifdef CONFIG_FW_LOADER
45
46 extern struct builtin_fw __start_builtin_fw[];
47 extern struct builtin_fw __end_builtin_fw[];
48
49 static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
50                                     void *buf, size_t size)
51 {
52         struct builtin_fw *b_fw;
53
54         for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
55                 if (strcmp(name, b_fw->name) == 0) {
56                         fw->size = b_fw->size;
57                         fw->data = b_fw->data;
58
59                         if (buf && fw->size <= size)
60                                 memcpy(buf, fw->data, fw->size);
61                         return true;
62                 }
63         }
64
65         return false;
66 }
67
68 static bool fw_is_builtin_firmware(const struct firmware *fw)
69 {
70         struct builtin_fw *b_fw;
71
72         for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
73                 if (fw->data == b_fw->data)
74                         return true;
75
76         return false;
77 }
78
79 #else /* Module case - no builtin firmware support */
80
/* Modular build: no firmware can be linked in, so lookups always miss. */
static inline bool fw_get_builtin_firmware(struct firmware *fw,
                                           const char *name, void *buf,
                                           size_t size)
{
        return false;
}
87
/* Modular build: nothing is built in, so no image can match. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
        return false;
}
92 #endif
93
/*
 * Loading state of one firmware image; transitions are driven through
 * __fw_state_set() and inspected via the fw_state_*() helpers below.
 */
enum fw_status {
        FW_STATUS_UNKNOWN,      /* no load started yet */
        FW_STATUS_LOADING,      /* load in progress */
        FW_STATUS_DONE,         /* image complete and usable */
        FW_STATUS_ABORTED,      /* load failed or was cancelled */
};
100
101 static int loading_timeout = 60;        /* In seconds */
102
103 static inline long firmware_loading_timeout(void)
104 {
105         return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
106 }
107
108 /*
109  * Concurrent request_firmware() for the same firmware need to be
110  * serialized.  struct fw_state is simple state machine which hold the
111  * state of the firmware loading.
112  */
struct fw_state {
        struct completion completion;   /* completed on DONE or ABORTED */
        enum fw_status status;          /* written via WRITE_ONCE in __fw_state_set */
};
117
118 static void fw_state_init(struct fw_state *fw_st)
119 {
120         init_completion(&fw_st->completion);
121         fw_st->status = FW_STATUS_UNKNOWN;
122 }
123
124 static inline bool __fw_state_is_done(enum fw_status status)
125 {
126         return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
127 }
128
129 static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
130 {
131         long ret;
132
133         ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
134         if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
135                 return -ENOENT;
136         if (!ret)
137                 return -ETIMEDOUT;
138
139         return ret < 0 ? ret : 0;
140 }
141
142 static void __fw_state_set(struct fw_state *fw_st,
143                            enum fw_status status)
144 {
145         WRITE_ONCE(fw_st->status, status);
146
147         if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
148                 complete_all(&fw_st->completion);
149 }
150
/* Convenience wrappers over __fw_state_set()/__fw_state_wait_common(). */
#define fw_state_start(fw_st)                                   \
        __fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st)                                    \
        __fw_state_set(fw_st, FW_STATUS_DONE)
#define fw_state_aborted(fw_st)                                 \
        __fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st)                                    \
        __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
159
160 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
161 {
162         return fw_st->status == status;
163 }
164
165 #define fw_state_is_aborted(fw_st)                              \
166         __fw_state_check(fw_st, FW_STATUS_ABORTED)
167
#ifdef CONFIG_FW_LOADER_USER_HELPER

/*
 * Extra state helpers used only by the user-mode-helper fallback path.
 * fw_state_aborted() is already defined unconditionally above, so the
 * previous duplicate definition here has been dropped.
 */
#define fw_state_is_done(fw_st)                                 \
        __fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st)                              \
        __fw_state_check(fw_st, FW_STATUS_LOADING)
#define fw_state_wait_timeout(fw_st, timeout)                   \
        __fw_state_wait_common(fw_st, timeout)

#endif /* CONFIG_FW_LOADER_USER_HELPER */
180
/* firmware behavior options */
#define FW_OPT_UEVENT   (1U << 0)       /* announce the load via uevent */
#define FW_OPT_NOWAIT   (1U << 1)       /* asynchronous request */
#ifdef CONFIG_FW_LOADER_USER_HELPER
#define FW_OPT_USERHELPER       (1U << 2)       /* use the sysfs fallback loader */
#else
#define FW_OPT_USERHELPER       0
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
#define FW_OPT_FALLBACK         FW_OPT_USERHELPER
#else
#define FW_OPT_FALLBACK         0
#endif
#define FW_OPT_NO_WARN  (1U << 3)       /* suppress load-failure warnings */
#define FW_OPT_NOCACHE  (1U << 4)       /* do not cache the image for resume */
196
/* Global registry of in-flight and cached firmware buffers. */
struct firmware_cache {
        /* firmware_buf instance will be added into the below list */
        spinlock_t lock;                /* protects 'head' */
        struct list_head head;
        int state;                      /* FW_LOADER_NO_CACHE or FW_LOADER_START_CACHE */

#ifdef CONFIG_PM_SLEEP
        /*
         * Names of firmware images which have been cached successfully
         * will be added into the below list so that device uncache
         * helper can trace which firmware images have been cached
         * before.
         */
        spinlock_t name_lock;           /* protects 'fw_names' */
        struct list_head fw_names;

        struct delayed_work work;       /* deferred uncache after resume */

        struct notifier_block   pm_notify;
#endif
};
218
/* One refcounted firmware image, shared by concurrent requests. */
struct firmware_buf {
        struct kref ref;                /* lifetime; released via __fw_free_buf */
        struct list_head list;          /* link on firmware_cache.head */
        struct firmware_cache *fwc;     /* owning cache */
        struct fw_state fw_st;          /* loading state machine */
        void *data;                     /* image contents */
        size_t size;                    /* bytes actually loaded */
        size_t allocated_size;          /* nonzero when loading into a caller buffer */
#ifdef CONFIG_FW_LOADER_USER_HELPER
        bool is_paged_buf;              /* data is a vmap of 'pages' */
        bool need_uevent;
        struct page **pages;            /* backing pages for the sysfs loader */
        int nr_pages;
        int page_array_size;            /* capacity of 'pages' */
        struct list_head pending_list;  /* link on pending_fw_head */
#endif
        const char *fw_id;              /* firmware name, kstrdup_const'd */
};
237
/* Name of one successfully cached firmware image. */
struct fw_cache_entry {
        struct list_head list;          /* link on firmware_cache.fw_names */
        const char *name;
};
242
/* devres payload recording a firmware name used by a device. */
struct fw_name_devm {
        unsigned long magic;            /* &fw_cache, marks our devres entries */
        const char *name;
};
247
248 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
249
250 #define FW_LOADER_NO_CACHE      0
251 #define FW_LOADER_START_CACHE   1
252
253 static int fw_cache_piggyback_on_request(const char *name);
254
255 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
256  * guarding for corner cases a global lock should be OK */
257 static DEFINE_MUTEX(fw_lock);
258
259 static bool __enable_firmware = false;
260
/* Re-allow firmware lookups beyond the built-ins and the cache. */
static void enable_firmware(void)
{
        mutex_lock(&fw_lock);
        __enable_firmware = true;
        mutex_unlock(&fw_lock);
}
267
/* Restrict firmware lookups to the built-ins and the cache. */
static void disable_firmware(void)
{
        mutex_lock(&fw_lock);
        __enable_firmware = false;
        mutex_unlock(&fw_lock);
}
274
275 /*
276  * When disabled only the built-in firmware and the firmware cache will be
277  * used to look for firmware.
278  */
279 static bool firmware_enabled(void)
280 {
281         bool enabled = false;
282
283         mutex_lock(&fw_lock);
284         if (__enable_firmware)
285                 enabled = true;
286         mutex_unlock(&fw_lock);
287
288         return enabled;
289 }
290
291 static struct firmware_cache fw_cache;
292
293 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
294                                               struct firmware_cache *fwc,
295                                               void *dbuf, size_t size)
296 {
297         struct firmware_buf *buf;
298
299         buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
300         if (!buf)
301                 return NULL;
302
303         buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
304         if (!buf->fw_id) {
305                 kfree(buf);
306                 return NULL;
307         }
308
309         kref_init(&buf->ref);
310         buf->fwc = fwc;
311         buf->data = dbuf;
312         buf->allocated_size = size;
313         fw_state_init(&buf->fw_st);
314 #ifdef CONFIG_FW_LOADER_USER_HELPER
315         INIT_LIST_HEAD(&buf->pending_list);
316 #endif
317
318         pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
319
320         return buf;
321 }
322
323 static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
324 {
325         struct firmware_buf *tmp;
326         struct firmware_cache *fwc = &fw_cache;
327
328         list_for_each_entry(tmp, &fwc->head, list)
329                 if (!strcmp(tmp->fw_id, fw_name))
330                         return tmp;
331         return NULL;
332 }
333
/*
 * Look up @fw_name in @fwc, taking a reference if it already exists,
 * otherwise allocate and insert a new buf (with optional preallocated
 * destination @dbuf/@size).  All under fwc->lock so concurrent requests
 * for the same name share one buf.
 *
 * Returns 1 if an existing buf was found (ref taken), 0 if a new buf
 * was allocated, -ENOMEM on allocation failure.  *buf receives the
 * resulting buf (or NULL on failure).
 */
static int fw_lookup_and_allocate_buf(const char *fw_name,
                                      struct firmware_cache *fwc,
                                      struct firmware_buf **buf, void *dbuf,
                                      size_t size)
{
        struct firmware_buf *tmp;

        spin_lock(&fwc->lock);
        tmp = __fw_lookup_buf(fw_name);
        if (tmp) {
                kref_get(&tmp->ref);
                spin_unlock(&fwc->lock);
                *buf = tmp;
                return 1;
        }
        /* not found: allocate while still holding the lock (GFP_ATOMIC) */
        tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
        if (tmp)
                list_add(&tmp->list, &fwc->head);
        spin_unlock(&fwc->lock);

        *buf = tmp;

        return tmp ? 0 : -ENOMEM;
}
358
/*
 * kref release callback: unlink the buf from its cache and free all of
 * its storage.  Called with fwc->lock held (by fw_free_buf) and drops
 * it, as annotated by __releases.
 */
static void __fw_free_buf(struct kref *ref)
        __releases(&fwc->lock)
{
        struct firmware_buf *buf = to_fwbuf(ref);
        struct firmware_cache *fwc = buf->fwc;

        pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
                 __func__, buf->fw_id, buf, buf->data,
                 (unsigned int)buf->size);

        list_del(&buf->list);
        spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
        if (buf->is_paged_buf) {
                /* sysfs-loaded image: unmap and release the backing pages */
                int i;
                vunmap(buf->data);
                for (i = 0; i < buf->nr_pages; i++)
                        __free_page(buf->pages[i]);
                vfree(buf->pages);
        } else
#endif
        /* a nonzero allocated_size means the caller owns buf->data */
        if (!buf->allocated_size)
                vfree(buf->data);
        kfree_const(buf->fw_id);
        kfree(buf);
}
386
/*
 * Drop one reference to @buf.  The lock is taken here and either
 * released by __fw_free_buf() on the final put, or explicitly below
 * when other references remain (kref_put returns 0).
 */
static void fw_free_buf(struct firmware_buf *buf)
{
        struct firmware_cache *fwc = buf->fwc;
        spin_lock(&fwc->lock);
        if (!kref_put(&buf->ref, __fw_free_buf))
                spin_unlock(&fwc->lock);
}
394
395 /* direct firmware loading support */
396 static char fw_path_para[256];
397 static const char * const fw_path[] = {
398         fw_path_para,
399         "/lib/firmware/updates/" UTS_RELEASE,
400         "/lib/firmware/updates",
401         "/lib/firmware/" UTS_RELEASE,
402         "/lib/firmware"
403 };
404
405 /*
406  * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
407  * from kernel command line because firmware_class is generally built in
408  * kernel instead of module.
409  */
410 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
411 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
412
/*
 * Try to load buf->fw_id directly from the filesystem, probing each
 * entry of fw_path[] in order.  On success buf->data/buf->size are
 * filled and the state machine is marked DONE.
 *
 * Returns 0 on success, -ENOENT if no path matched, -ENAMETOOLONG if a
 * candidate path exceeds PATH_MAX, -ENOMEM if the path buffer cannot
 * be allocated, or the last read error otherwise.
 */
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
        loff_t size;
        int i, len;
        int rc = -ENOENT;
        char *path;
        enum kernel_read_file_id id = READING_FIRMWARE;
        size_t msize = INT_MAX;

        /* Already populated data member means we're loading into a buffer */
        if (buf->data) {
                id = READING_FIRMWARE_PREALLOC_BUFFER;
                msize = buf->allocated_size;
        }

        path = __getname();
        if (!path)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
                /* skip the unset customized path */
                if (!fw_path[i][0])
                        continue;

                len = snprintf(path, PATH_MAX, "%s/%s",
                               fw_path[i], buf->fw_id);
                if (len >= PATH_MAX) {
                        rc = -ENAMETOOLONG;
                        break;
                }

                buf->size = 0;
                rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
                                                id);
                if (rc) {
                        /* a plain miss is expected; other errors are noisy */
                        if (rc == -ENOENT)
                                dev_dbg(device, "loading %s failed with error %d\n",
                                         path, rc);
                        else
                                dev_warn(device, "loading %s failed with error %d\n",
                                         path, rc);
                        continue;
                }
                dev_dbg(device, "direct-loading %s\n", buf->fw_id);
                buf->size = size;
                fw_state_done(&buf->fw_st);
                break;
        }
        __putname(path);

        return rc;
}
466
467 /* firmware holds the ownership of pages */
468 static void firmware_free_data(const struct firmware *fw)
469 {
470         /* Loaded directly? */
471         if (!fw->priv) {
472                 vfree(fw->data);
473                 return;
474         }
475         fw_free_buf(fw->priv);
476 }
477
/*
 * Hand the loaded buffer over to the struct firmware seen by the
 * driver: priv, pages (user-helper builds), size and data all come
 * from @buf.
 */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
        fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
        fw->pages = buf->pages;
#endif
        fw->size = buf->size;
        fw->data = buf->data;

        pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
                 __func__, buf->fw_id, buf, buf->data,
                 (unsigned int)buf->size);
}
492
493 #ifdef CONFIG_PM_SLEEP
494 static void fw_name_devm_release(struct device *dev, void *res)
495 {
496         struct fw_name_devm *fwn = res;
497
498         if (fwn->magic == (unsigned long)&fw_cache)
499                 pr_debug("%s: fw_name-%s devm-%p released\n",
500                                 __func__, fwn->name, res);
501         kfree_const(fwn->name);
502 }
503
504 static int fw_devm_match(struct device *dev, void *res,
505                 void *match_data)
506 {
507         struct fw_name_devm *fwn = res;
508
509         return (fwn->magic == (unsigned long)&fw_cache) &&
510                 !strcmp(fwn->name, match_data);
511 }
512
513 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
514                 const char *name)
515 {
516         struct fw_name_devm *fwn;
517
518         fwn = devres_find(dev, fw_name_devm_release,
519                           fw_devm_match, (void *)name);
520         return fwn;
521 }
522
523 /* add firmware name into devres list */
524 static int fw_add_devm_name(struct device *dev, const char *name)
525 {
526         struct fw_name_devm *fwn;
527
528         fwn = fw_find_devm_name(dev, name);
529         if (fwn)
530                 return 1;
531
532         fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
533                            GFP_KERNEL);
534         if (!fwn)
535                 return -ENOMEM;
536         fwn->name = kstrdup_const(name, GFP_KERNEL);
537         if (!fwn->name) {
538                 devres_free(fwn);
539                 return -ENOMEM;
540         }
541
542         fwn->magic = (unsigned long)&fw_cache;
543         devres_add(dev, fwn);
544
545         return 0;
546 }
547 #else
/* No PM-sleep caching support: nothing to record, report success. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
        return 0;
}
552 #endif
553
/*
 * Finalize a completed load: register the image for caching as
 * appropriate and expose the buffer through @fw.  Returns -ENOENT when
 * the load produced no data or was aborted.
 */
static int assign_firmware_buf(struct firmware *fw, struct device *device,
                               unsigned int opt_flags)
{
        struct firmware_buf *buf = fw->priv;

        mutex_lock(&fw_lock);
        if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
                mutex_unlock(&fw_lock);
                return -ENOENT;
        }

        /*
         * add firmware name into devres list so that we can auto cache
         * and uncache firmware for device.
         *
         * The device may have been deleted already, but the problem
         * should be fixed in devres or driver core.
         */
        /* don't cache firmware handled without uevent */
        if (device && (opt_flags & FW_OPT_UEVENT) &&
            !(opt_flags & FW_OPT_NOCACHE))
                fw_add_devm_name(device, buf->fw_id);

        /*
         * After caching firmware image is started, let it piggyback
         * on request firmware.
         */
        if (!(opt_flags & FW_OPT_NOCACHE) &&
            buf->fwc->state == FW_LOADER_START_CACHE) {
                /* the cache takes its own reference on the buf */
                if (fw_cache_piggyback_on_request(buf->fw_id))
                        kref_get(&buf->ref);
        }

        /* pass the pages buffer to driver at the last minute */
        fw_set_page_data(buf, fw);
        mutex_unlock(&fw_lock);
        return 0;
}
592
593 /*
594  * user-mode helper code
595  */
596 #ifdef CONFIG_FW_LOADER_USER_HELPER
/* Per-request state for the sysfs (user-mode helper) loading interface. */
struct firmware_priv {
        bool nowait;                    /* request was asynchronous */
        struct device dev;              /* the transient /sys/class/firmware dev */
        struct firmware_buf *buf;       /* buffer being filled from userspace */
        struct firmware *fw;            /* the firmware handed back to the caller */
};
603
/* Map the embedded struct device back to its owning firmware_priv. */
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
        return container_of(dev, struct firmware_priv, dev);
}
608
/*
 * Abort an in-flight user-helper load: take it off the pending list and
 * mark it ABORTED, waking all waiters.  Callers hold fw_lock (see the
 * call sites in firmware_loading_store and kill_pending_fw_fallback_reqs).
 */
static void __fw_load_abort(struct firmware_buf *buf)
{
        /*
         * There is a small window in which user can write to 'loading'
         * between loading done and disappearance of 'loading'
         */
        if (fw_state_is_done(&buf->fw_st))
                return;

        list_del_init(&buf->pending_list);
        fw_state_aborted(&buf->fw_st);
}
621
622 static void fw_load_abort(struct firmware_priv *fw_priv)
623 {
624         struct firmware_buf *buf = fw_priv->buf;
625
626         __fw_load_abort(buf);
627 }
628
629 static LIST_HEAD(pending_fw_head);
630
631 static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
632 {
633         struct firmware_buf *buf;
634         struct firmware_buf *next;
635
636         mutex_lock(&fw_lock);
637         list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
638                 if (!buf->need_uevent || !only_kill_custom)
639                          __fw_load_abort(buf);
640         }
641         mutex_unlock(&fw_lock);
642 }
643
/* sysfs: show the global firmware loading timeout in seconds. */
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%d\n", loading_timeout);
}
649
/**
 * timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 *      Sets the number of seconds to wait for the firmware.  Once
 *      this expires an error will be returned to the driver and no
 *      firmware will be provided.
 *
 *      Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
                             const char *buf, size_t count)
{
        /* negative values are clamped to 0 ("wait forever") */
        loading_timeout = simple_strtol(buf, NULL, 10);
        if (loading_timeout < 0)
                loading_timeout = 0;

        return count;
}
672 static CLASS_ATTR_RW(timeout);
673
674 static struct attribute *firmware_class_attrs[] = {
675         &class_attr_timeout.attr,
676         NULL,
677 };
678 ATTRIBUTE_GROUPS(firmware_class);
679
/* device release callback: free the firmware_priv embedding this device. */
static void fw_dev_release(struct device *dev)
{
        struct firmware_priv *fw_priv = to_firmware_priv(dev);

        kfree(fw_priv);
}
686
687 static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
688 {
689         if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
690                 return -ENOMEM;
691         if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
692                 return -ENOMEM;
693         if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
694                 return -ENOMEM;
695
696         return 0;
697 }
698
699 static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
700 {
701         struct firmware_priv *fw_priv = to_firmware_priv(dev);
702         int err = 0;
703
704         mutex_lock(&fw_lock);
705         if (fw_priv->buf)
706                 err = do_firmware_uevent(fw_priv, env);
707         mutex_unlock(&fw_lock);
708         return err;
709 }
710
711 static struct class firmware_class = {
712         .name           = "firmware",
713         .class_groups   = firmware_class_groups,
714         .dev_uevent     = firmware_uevent,
715         .dev_release    = fw_dev_release,
716 };
717
718 static ssize_t firmware_loading_show(struct device *dev,
719                                      struct device_attribute *attr, char *buf)
720 {
721         struct firmware_priv *fw_priv = to_firmware_priv(dev);
722         int loading = 0;
723
724         mutex_lock(&fw_lock);
725         if (fw_priv->buf)
726                 loading = fw_state_is_loading(&fw_priv->buf->fw_st);
727         mutex_unlock(&fw_lock);
728
729         return sprintf(buf, "%d\n", loading);
730 }
731
732 /* Some architectures don't have PAGE_KERNEL_RO */
733 #ifndef PAGE_KERNEL_RO
734 #define PAGE_KERNEL_RO PAGE_KERNEL
735 #endif
736
737 /* one pages buffer should be mapped/unmapped only once */
738 static int fw_map_pages_buf(struct firmware_buf *buf)
739 {
740         if (!buf->is_paged_buf)
741                 return 0;
742
743         vunmap(buf->data);
744         buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
745         if (!buf->data)
746                 return -ENOMEM;
747         return 0;
748 }
749
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 *      The relevant values are:
 *
 *       1: Start a load, discarding any previous partial load.
 *       0: Conclude the load and hand the data to the driver code.
 *      -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        struct firmware_buf *fw_buf;
        ssize_t written = count;
        int loading = simple_strtol(buf, NULL, 10);
        int i;

        mutex_lock(&fw_lock);
        fw_buf = fw_priv->buf;
        /* an already-aborted load silently accepts (and ignores) writes */
        if (fw_state_is_aborted(&fw_buf->fw_st))
                goto out;

        switch (loading) {
        case 1:
                /* discarding any previous partial load */
                if (!fw_state_is_done(&fw_buf->fw_st)) {
                        for (i = 0; i < fw_buf->nr_pages; i++)
                                __free_page(fw_buf->pages[i]);
                        vfree(fw_buf->pages);
                        fw_buf->pages = NULL;
                        fw_buf->page_array_size = 0;
                        fw_buf->nr_pages = 0;
                        fw_state_start(&fw_buf->fw_st);
                }
                break;
        case 0:
                if (fw_state_is_loading(&fw_buf->fw_st)) {
                        int rc;

                        /*
                         * Several loading requests may be pending on
                         * one same firmware buf, so let all requests
                         * see the mapped 'buf->data' once the loading
                         * is completed.
                         */
                        rc = fw_map_pages_buf(fw_buf);
                        if (rc)
                                dev_err(dev, "%s: map pages failed\n",
                                        __func__);
                        else
                                rc = security_kernel_post_read_file(NULL,
                                                fw_buf->data, fw_buf->size,
                                                READING_FIRMWARE);

                        /*
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
                        list_del_init(&fw_buf->pending_list);
                        if (rc) {
                                fw_state_aborted(&fw_buf->fw_st);
                                written = rc;
                        } else {
                                fw_state_done(&fw_buf->fw_st);
                        }
                        break;
                }
                /* writing 0 while not loading is treated as an error */
                /* fallthrough */
        default:
                dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
                /* fallthrough */
        case -1:
                fw_load_abort(fw_priv);
                break;
        }
out:
        mutex_unlock(&fw_lock);
        return written;
}
835
836 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
837
838 static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
839                            loff_t offset, size_t count, bool read)
840 {
841         if (read)
842                 memcpy(buffer, buf->data + offset, count);
843         else
844                 memcpy(buf->data + offset, buffer, count);
845 }
846
/*
 * Copy @count bytes between @buffer and the paged firmware buffer at
 * @offset, mapping one backing page at a time with kmap/kunmap.
 */
static void firmware_rw(struct firmware_buf *buf, char *buffer,
                        loff_t offset, size_t count, bool read)
{
        while (count) {
                void *page_data;
                int page_nr = offset >> PAGE_SHIFT;
                int page_ofs = offset & (PAGE_SIZE-1);
                /* stay within the current page on each iteration */
                int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

                page_data = kmap(buf->pages[page_nr]);

                if (read)
                        memcpy(buffer, page_data + page_ofs, page_cnt);
                else
                        memcpy(page_data + page_ofs, buffer, page_cnt);

                kunmap(buf->pages[page_nr]);
                buffer += page_cnt;
                offset += page_cnt;
                count -= page_cnt;
        }
}
869
/*
 * sysfs 'data' read: return up to @count bytes of the partially loaded
 * image starting at @offset.  Fails with -ENODEV once the load has
 * concluded (the buf is then owned by the driver).
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
                                  struct bin_attribute *bin_attr,
                                  char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        struct firmware_buf *buf;
        ssize_t ret_count;

        mutex_lock(&fw_lock);
        buf = fw_priv->buf;
        if (!buf || fw_state_is_done(&buf->fw_st)) {
                ret_count = -ENODEV;
                goto out;
        }
        if (offset > buf->size) {
                ret_count = 0;
                goto out;
        }
        /* clamp the read to the bytes written so far */
        if (count > buf->size - offset)
                count = buf->size - offset;

        ret_count = count;

        /* contiguous preallocated buffer vs page array */
        if (buf->data)
                firmware_rw_buf(buf, buffer, offset, count, true);
        else
                firmware_rw(buf, buffer, offset, count, true);

out:
        mutex_unlock(&fw_lock);
        return ret_count;
}
903
/*
 * Grow the page-array backing store so it can hold at least @min_size
 * bytes.  On allocation failure the whole load is aborted and -ENOMEM
 * returned.
 */
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
        struct firmware_buf *buf = fw_priv->buf;
        int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

        /* If the array of pages is too small, grow it... */
        if (buf->page_array_size < pages_needed) {
                /* double the capacity to amortize repeated small writes */
                int new_array_size = max(pages_needed,
                                         buf->page_array_size * 2);
                struct page **new_pages;

                new_pages = vmalloc(new_array_size * sizeof(void *));
                if (!new_pages) {
                        fw_load_abort(fw_priv);
                        return -ENOMEM;
                }
                memcpy(new_pages, buf->pages,
                       buf->page_array_size * sizeof(void *));
                memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
                       (new_array_size - buf->page_array_size));
                vfree(buf->pages);
                buf->pages = new_pages;
                buf->page_array_size = new_array_size;
        }

        /* populate any newly-needed page slots */
        while (buf->nr_pages < pages_needed) {
                buf->pages[buf->nr_pages] =
                        alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

                if (!buf->pages[buf->nr_pages]) {
                        fw_load_abort(fw_priv);
                        return -ENOMEM;
                }
                buf->nr_pages++;
        }
        return 0;
}
941
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 *      Data written to the 'data' attribute will be later handed to
 *      the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *bin_attr,
                                   char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        struct firmware_buf *buf;
        ssize_t retval;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        mutex_lock(&fw_lock);
        buf = fw_priv->buf;
        if (!buf || fw_state_is_done(&buf->fw_st)) {
                retval = -ENODEV;
                goto out;
        }

        if (buf->data) {
                /* preallocated contiguous buffer: writes must fit inside it */
                if (offset + count > buf->allocated_size) {
                        retval = -ENOMEM;
                        goto out;
                }
                firmware_rw_buf(buf, buffer, offset, count, false);
                retval = count;
        } else {
                /* paged buffer: grow the page array as needed */
                retval = fw_realloc_buffer(fw_priv, offset + count);
                if (retval)
                        goto out;

                retval = count;
                firmware_rw(buf, buffer, offset, count, false);
        }

        /* track the high-water mark of bytes written */
        buf->size = max_t(size_t, offset + count, buf->size);
out:
        mutex_unlock(&fw_lock);
        return retval;
}
994
/* The "data" sysfs attribute: userspace streams the firmware image here. */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

/* Plain attributes of the fallback device ("loading"). */
static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

/* Binary attributes of the fallback device ("data"). */
static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

/* NULL-terminated group list wired into the fallback device on creation. */
static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
1021
1022 static struct firmware_priv *
1023 fw_create_instance(struct firmware *firmware, const char *fw_name,
1024                    struct device *device, unsigned int opt_flags)
1025 {
1026         struct firmware_priv *fw_priv;
1027         struct device *f_dev;
1028
1029         fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
1030         if (!fw_priv) {
1031                 fw_priv = ERR_PTR(-ENOMEM);
1032                 goto exit;
1033         }
1034
1035         fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
1036         fw_priv->fw = firmware;
1037         f_dev = &fw_priv->dev;
1038
1039         device_initialize(f_dev);
1040         dev_set_name(f_dev, "%s", fw_name);
1041         f_dev->parent = device;
1042         f_dev->class = &firmware_class;
1043         f_dev->groups = fw_dev_attr_groups;
1044 exit:
1045         return fw_priv;
1046 }
1047
/* load a firmware via user helper */
/*
 * Publish the fallback device under sysfs, optionally announce it with a
 * uevent, then wait for userspace to complete (or abort) the load.
 */
static int _request_firmware_load(struct firmware_priv *fw_priv,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	if (!buf->data)
		buf->is_paged_buf = true;

	/* Hold back uevents until we are ready to announce the device. */
	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	/* Make the request visible to the abort/kill machinery. */
	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	} else {
		/* No uevent: wait (nearly) forever for a manual load. */
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_state_wait_timeout(&buf->fw_st, timeout);
	if (retval < 0) {
		/* Timed out or interrupted: tear down the pending load. */
		mutex_lock(&fw_lock);
		fw_load_abort(fw_priv);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(&buf->fw_st))
		retval = -EAGAIN;
	else if (buf->is_paged_buf && !buf->data)
		retval = -ENOMEM;	/* finished but no data was produced */

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
1098
/*
 * Run the sysfs user-helper fallback for @name on behalf of @firmware,
 * holding the usermodehelper read lock for the duration.  On success the
 * loaded buf is handed to @device via assign_firmware_buf().
 */
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		/* Async caller: OK to sleep waiting for the UMH read lock. */
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		/* Sync caller: only try the lock; failing is a bug (WARN). */
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_priv)) {
		ret = PTR_ERR(fw_priv);
		goto out_unlock;
	}

	fw_priv->buf = firmware->priv;
	ret = _request_firmware_load(fw_priv, opt_flags, timeout);

	if (!ret)
		ret = assign_firmware_buf(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}
1141
#else /* CONFIG_FW_LOADER_USER_HELPER */
/* User-helper fallback disabled: report "no such file" unconditionally. */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, unsigned int opt_flags)
{
	return -ENOENT;
}

/* Without the fallback no pending requests can exist; nothing to kill. */
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }

#endif /* CONFIG_FW_LOADER_USER_HELPER */
1153
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	/* Builtin images short-circuit the whole loading machinery. */
	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	/*
	 * ret > 0: presumably an already-known buf (batched request) —
	 * wait for its owner to finish loading; verify against
	 * fw_lookup_and_allocate_buf().
	 */
	if (ret > 0) {
		ret = fw_state_wait(&buf->fw_st);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
1198
1199 /*
1200  * Batched requests need only one wake, we need to do this step last due to the
1201  * fallback mechanism. The buf is protected with kref_get(), and it won't be
1202  * released until the last user calls release_firmware().
1203  *
1204  * Failed batched requests are possible as well, in such cases we just share
1205  * the struct firmware_buf and won't release it until all requests are woken
1206  * and have gone through this same path.
1207  */
1208 static void fw_abort_batch_reqs(struct firmware *fw)
1209 {
1210         struct firmware_buf *buf;
1211
1212         /* Loaded directly? */
1213         if (!fw || !fw->priv)
1214                 return;
1215
1216         buf = fw->priv;
1217         if (!fw_state_is_aborted(&buf->fw_st))
1218                 fw_state_aborted(&buf->fw_st);
1219 }
1220
/* called from request_firmware() and request_firmware_work_func() */
/*
 * Common request path: try builtin, then an already-pending buf, then the
 * direct filesystem loader, and finally (if FW_OPT_USERHELPER) the sysfs
 * user-helper fallback.  On failure the partially-built request is torn
 * down and *@firmware_p is set to NULL.
 */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  unsigned int opt_flags)
{
	struct firmware *fw = NULL;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	/* An empty name can never match an image. */
	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size);
	if (ret <= 0) /* error or already assigned */
		goto out;

	/* Loading is disabled around suspend/shutdown; see fw_pm_notify(). */
	if (!firmware_enabled()) {
		WARN(1, "firmware request while host is not available\n");
		ret = -EHOSTDOWN;
		goto out;
	}

	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		/* Only fall back to the user helper when asked to. */
		if (opt_flags & FW_OPT_USERHELPER) {
			dev_warn(device, "Falling back to user helper\n");
			ret = fw_load_from_user_helper(fw, name, device,
						       opt_flags);
		}
	} else
		ret = assign_firmware_buf(fw, device, opt_flags);

 out:
	if (ret < 0) {
		/* Wake any batched waiters before tearing the request down. */
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
1272
/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *      @firmware_p will be used to return a firmware image by the name
 *      of @name for device @device.
 *
 *      Should be called from user context where sleeping is allowed.
 *
 *      @name will be used as $FIRMWARE in the uevent environment and
 *      should be distinctive enough not to be confused with any other
 *      firmware image for this or any other device.
 *
 *      Caller must hold the reference count of @device.
 *
 *      The function can be called safely inside device's suspend and
 *      resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	/*
	 * NOTE(review): FW_OPT_FALLBACK presumably enables the sysfs
	 * user-helper fallback depending on kernel config — confirm
	 * against the FW_OPT_* definitions.
	 */
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_FALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
1307
/**
 * request_firmware_direct: - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs.  Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	/* No fallback flag, and FW_OPT_NO_WARN silences the miss warning. */
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
1331
/**
 * request_firmware_into_buf - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	__module_get(THIS_MODULE);
	/* FW_OPT_NOCACHE: images loaded into a caller buffer are not cached. */
	ret = _request_firmware(firmware_p, name, device, buf, size,
				FW_OPT_UEVENT | FW_OPT_FALLBACK |
				FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
1361
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (!fw)
		return;

	/* Builtin images own no separate data; only free the wrapper. */
	if (!fw_is_builtin_firmware(fw))
		firmware_free_data(fw);
	kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
1375
/* Async support */
/* Context for one request_firmware_nowait() invocation. */
struct firmware_work {
	struct work_struct work;	/* queued via schedule_work() */
	struct module *module;		/* requester, pinned until completion */
	const char *name;		/* firmware image name (owned copy) */
	struct device *device;		/* target device, reference held */
	void *context;			/* opaque cookie passed to @cont */
	void (*cont)(const struct firmware *fw, void *context); /* completion */
	unsigned int opt_flags;		/* FW_OPT_* controlling the request */
};
1386
1387 static void request_firmware_work_func(struct work_struct *work)
1388 {
1389         struct firmware_work *fw_work;
1390         const struct firmware *fw;
1391
1392         fw_work = container_of(work, struct firmware_work, work);
1393
1394         _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
1395                           fw_work->opt_flags);
1396         fw_work->cont(fw, fw_work->context);
1397         put_device(fw_work->device); /* taken in request_firmware_nowait() */
1398
1399         module_put(fw_work->module);
1400         kfree_const(fw_work->name);
1401         kfree(fw_work);
1402 }
1403
1404 /**
1405  * request_firmware_nowait - asynchronous version of request_firmware
1406  * @module: module requesting the firmware
1407  * @uevent: sends uevent to copy the firmware image if this flag
1408  *      is non-zero else the firmware copy must be done manually.
1409  * @name: name of firmware file
1410  * @device: device for which firmware is being loaded
1411  * @gfp: allocation flags
1412  * @context: will be passed over to @cont, and
1413  *      @fw may be %NULL if firmware request fails.
1414  * @cont: function will be called asynchronously when the firmware
1415  *      request is over.
1416  *
1417  *      Caller must hold the reference count of @device.
1418  *
1419  *      Asynchronous variant of request_firmware() for user contexts:
1420  *              - sleep for as small periods as possible since it may
1421  *                increase kernel boot time of built-in device drivers
1422  *                requesting firmware in their ->probe() methods, if
1423  *                @gfp is GFP_KERNEL.
1424  *
1425  *              - can't sleep at all if @gfp is GFP_ATOMIC.
1426  **/
1427 int
1428 request_firmware_nowait(
1429         struct module *module, bool uevent,
1430         const char *name, struct device *device, gfp_t gfp, void *context,
1431         void (*cont)(const struct firmware *fw, void *context))
1432 {
1433         struct firmware_work *fw_work;
1434
1435         fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1436         if (!fw_work)
1437                 return -ENOMEM;
1438
1439         fw_work->module = module;
1440         fw_work->name = kstrdup_const(name, gfp);
1441         if (!fw_work->name) {
1442                 kfree(fw_work);
1443                 return -ENOMEM;
1444         }
1445         fw_work->device = device;
1446         fw_work->context = context;
1447         fw_work->cont = cont;
1448         fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
1449                 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1450
1451         if (!try_module_get(module)) {
1452                 kfree_const(fw_work->name);
1453                 kfree(fw_work);
1454                 return -EFAULT;
1455         }
1456
1457         get_device(fw_work->device);
1458         INIT_WORK(&fw_work->work, request_firmware_work_func);
1459         schedule_work(&fw_work->work);
1460         return 0;
1461 }
1462 EXPORT_SYMBOL(request_firmware_nowait);
1463
#ifdef CONFIG_PM_SLEEP
/* Async domain for the caching jobs scheduled by dev_cache_fw_image(). */
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);

/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		/*
		 * Deliberately kfree() only the wrapper instead of calling
		 * release_firmware(): the underlying data is not torn down
		 * here, so the image stays cached until uncache_firmware().
		 */
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
1496
1497 static struct firmware_buf *fw_lookup_buf(const char *fw_name)
1498 {
1499         struct firmware_buf *tmp;
1500         struct firmware_cache *fwc = &fw_cache;
1501
1502         spin_lock(&fwc->lock);
1503         tmp = __fw_lookup_buf(fw_name);
1504         spin_unlock(&fwc->lock);
1505
1506         return tmp;
1507 }
1508
1509 /**
1510  * uncache_firmware - remove one cached firmware image
1511  * @fw_name: the firmware image name
1512  *
1513  * Uncache one firmware image which has been cached successfully
1514  * before.
1515  *
1516  * Return 0 if the firmware cache has been removed successfully
1517  * Return !0 otherwise
1518  *
1519  */
1520 static int uncache_firmware(const char *fw_name)
1521 {
1522         struct firmware_buf *buf;
1523         struct firmware fw;
1524
1525         pr_debug("%s: %s\n", __func__, fw_name);
1526
1527         if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
1528                 return 0;
1529
1530         buf = fw_lookup_buf(fw_name);
1531         if (buf) {
1532                 fw_free_buf(buf);
1533                 return 0;
1534         }
1535
1536         return -EINVAL;
1537 }
1538
1539 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1540 {
1541         struct fw_cache_entry *fce;
1542
1543         fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1544         if (!fce)
1545                 goto exit;
1546
1547         fce->name = kstrdup_const(name, GFP_ATOMIC);
1548         if (!fce->name) {
1549                 kfree(fce);
1550                 fce = NULL;
1551                 goto exit;
1552         }
1553 exit:
1554         return fce;
1555 }
1556
1557 static int __fw_entry_found(const char *name)
1558 {
1559         struct firmware_cache *fwc = &fw_cache;
1560         struct fw_cache_entry *fce;
1561
1562         list_for_each_entry(fce, &fwc->fw_names, list) {
1563                 if (!strcmp(fce->name, name))
1564                         return 1;
1565         }
1566         return 0;
1567 }
1568
/*
 * Record @name on fw_cache.fw_names so the suspend-time caching machinery
 * picks it up.  Returns 1 if a new entry was added, 0 if it was already
 * present or the allocation failed.
 */
static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	/* Only one entry per firmware name. */
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}
1589
/* Free one cache entry; the name was duplicated with kstrdup_const(). */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
1595
1596 static void __async_dev_cache_fw_image(void *fw_entry,
1597                                        async_cookie_t cookie)
1598 {
1599         struct fw_cache_entry *fce = fw_entry;
1600         struct firmware_cache *fwc = &fw_cache;
1601         int ret;
1602
1603         ret = cache_firmware(fce->name);
1604         if (ret) {
1605                 spin_lock(&fwc->name_lock);
1606                 list_del(&fce->list);
1607                 spin_unlock(&fwc->name_lock);
1608
1609                 free_fw_cache_entry(fce);
1610         }
1611 }
1612
1613 /* called with dev->devres_lock held */
1614 static void dev_create_fw_entry(struct device *dev, void *res,
1615                                 void *data)
1616 {
1617         struct fw_name_devm *fwn = res;
1618         const char *fw_name = fwn->name;
1619         struct list_head *head = data;
1620         struct fw_cache_entry *fce;
1621
1622         fce = alloc_fw_cache_entry(fw_name);
1623         if (fce)
1624                 list_add(&fce->list, head);
1625 }
1626
1627 static int devm_name_match(struct device *dev, void *res,
1628                            void *match_data)
1629 {
1630         struct fw_name_devm *fwn = res;
1631         return (fwn->magic == (unsigned long)match_data);
1632 }
1633
/*
 * Gather the firmware names recorded in @dev's devres list and schedule an
 * async caching job for each name not already tracked in fw_cache.
 * Invoked per device via dpm_for_each_dev() from device_cache_fw_images().
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* Copy this device's recorded names onto the local todo list. */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	/* Move entries to the global list one at a time under name_lock. */
	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;	/* duplicate: nothing to schedule */
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
1664
/*
 * Uncache and free every entry on fw_cache.fw_names.  The name lock is
 * released around uncache_firmware()/free_fw_cache_entry() for each entry
 * (presumably because uncaching can block — confirm), then re-taken to
 * pick up the next one.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
1684
1685 /**
1686  * device_cache_fw_images - cache devices' firmware
1687  *
1688  * If one device called request_firmware or its nowait version
1689  * successfully before, the firmware names are recored into the
1690  * device's devres link list, so device_cache_fw_images can call
1691  * cache_firmware() to cache these firmwares for the device,
1692  * then the device driver can load its firmwares easily at
1693  * time when system is not ready to complete loading firmware.
1694  */
1695 static void device_cache_fw_images(void)
1696 {
1697         struct firmware_cache *fwc = &fw_cache;
1698         int old_timeout;
1699         DEFINE_WAIT(wait);
1700
1701         pr_debug("%s\n", __func__);
1702
1703         /* cancel uncache work */
1704         cancel_delayed_work_sync(&fwc->work);
1705
1706         /*
1707          * use small loading timeout for caching devices' firmware
1708          * because all these firmware images have been loaded
1709          * successfully at lease once, also system is ready for
1710          * completing firmware loading now. The maximum size of
1711          * firmware in current distributions is about 2M bytes,
1712          * so 10 secs should be enough.
1713          */
1714         old_timeout = loading_timeout;
1715         loading_timeout = 10;
1716
1717         mutex_lock(&fw_lock);
1718         fwc->state = FW_LOADER_START_CACHE;
1719         dpm_for_each_dev(NULL, dev_cache_fw_image);
1720         mutex_unlock(&fw_lock);
1721
1722         /* wait for completion of caching firmware for all devices */
1723         async_synchronize_full_domain(&fw_cache_domain);
1724
1725         loading_timeout = old_timeout;
1726 }
1727
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
1739
/* Delayed-work trampoline queued by device_uncache_fw_images_delay(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1744
/**
 * device_uncache_fw_images_delay - uncache devices firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	/* Runs device_uncache_fw_images_work() on the power-efficient wq. */
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
1757
/**
 * fw_pm_notify - notifier for suspend/resume
 * @notify_block: unused
 * @mode: mode we are switching to
 * @unused: unused
 *
 * Used to modify the firmware_class state as we move in between states.
 * The firmware_class implements a firmware cache to enable device driver
 * to fetch firmware upon resume before the root filesystem is ready. We
 * disable API calls which do not use the built-in firmware or the firmware
 * cache when we know these calls will not work.
 *
 * The inner logic behind all this is a bit complex so it is worth summarizing
 * the kernel's own suspend/resume process with context and focus on how this
 * can impact the firmware API.
 *
 * First a review on how we go to suspend::
 *
 *      pm_suspend() --> enter_state() -->
 *      sys_sync()
 *      suspend_prepare() -->
 *              __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
 *              suspend_freeze_processes() -->
 *                      freeze_processes() -->
 *                              __usermodehelper_set_disable_depth(UMH_DISABLED);
 *                              freeze all tasks ...
 *                      freeze_kernel_threads()
 *      suspend_devices_and_enter() -->
 *              dpm_suspend_start() -->
 *                              dpm_prepare()
 *                              dpm_suspend()
 *              suspend_enter()  -->
 *                      platform_suspend_prepare()
 *                      dpm_suspend_late()
 *                      freeze_enter()
 *                      syscore_suspend()
 *
 * When we resume we bail out of a loop from suspend_devices_and_enter() and
 * unwind back out to the caller enter_state() where we were before as follows::
 *
 *      enter_state() -->
 *      suspend_devices_and_enter() --> (bail from loop)
 *              dpm_resume_end() -->
 *                      dpm_resume()
 *                      dpm_complete()
 *      suspend_finish() -->
 *              suspend_thaw_processes() -->
 *                      thaw_processes() -->
 *                              __usermodehelper_set_disable_depth(UMH_FREEZING);
 *                              thaw_workqueues();
 *                              thaw all processes ...
 *                              usermodehelper_enable();
 *              pm_notifier_call_chain(PM_POST_SUSPEND);
 *
 * fw_pm_notify() works through pm_notifier_call_chain().
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	/* Only the PREPARE and POST transitions matter here. */
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		disable_firmware();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);
		enable_firmware();

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
1848
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	/*
	 * Syscore suspend hook (wired up via fw_syscore_ops below):
	 * from this point in the suspend sequence, firmware must no
	 * longer be added to the suspend cache.
	 */
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}
1855
/*
 * Only the ->suspend hook is needed: fw_suspend() flips the cache state
 * off when syscore_suspend() runs (the last stage of suspend_enter(),
 * per the sequence documented above fw_pm_notify()).
 */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
1859 #else
/*
 * Stub used when the suspend-cache support above is compiled out (the
 * matching #ifdef sits before this view — presumably CONFIG_PM_SLEEP,
 * as used inside fw_cache_init() below; verify against the full file).
 * Unconditionally returns 0: the request is never piggybacked.
 */
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
1864 #endif
1865
/*
 * One-time boot setup of the global fw_cache: an empty cache list with
 * caching disabled (FW_LOADER_NO_CACHE).  With CONFIG_PM_SLEEP, also
 * set up the cached-name list, the delayed uncache work, the PM
 * notifier (fw_pm_notify) and the syscore suspend hook (fw_syscore_ops).
 */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	/* deferred uncaching after resume; armed by fw_pm_notify() */
	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}
1885
/*
 * Reboot notifier (registered through fw_shutdown_nb in
 * firmware_class_init()): stop servicing firmware requests once a
 * shutdown/reboot is underway.  disable_firmware() is called before
 * killing the pending requests, so no new ones can slip in.
 */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	disable_firmware();
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}
1898
/* Hooked into the reboot notifier chain by firmware_class_init(). */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1902
/*
 * Boot-time init: allow firmware loading, set up the suspend cache, and
 * hook reboot so pending fallback requests are killed at shutdown.  The
 * sysfs class exists only for the user-mode-helper fallback path.
 *
 * NOTE(review): if class_register() fails, the reboot notifier (and the
 * PM notifier / syscore ops registered inside fw_cache_init()) remain
 * registered — confirm whether an unwind path is wanted here.
 */
static int __init firmware_class_init(void)
{
	enable_firmware();
	fw_cache_init();
	register_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	return class_register(&firmware_class);
#else
	return 0;
#endif
}
1914
/*
 * Module teardown: disable loading first, then unregister every hook
 * installed by firmware_class_init()/fw_cache_init() in roughly reverse
 * order of registration.
 */
static void __exit firmware_class_exit(void)
{
	disable_firmware();
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
	unregister_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	class_unregister(&firmware_class);
#endif
}
1927
/*
 * fs_initcall: run earlier than regular device/module initcalls so the
 * loader is ready before most drivers request firmware.
 */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);