]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/base/firmware_class.c
76f1b702bdd627ccc54fb2343405700a1719b261
[karo-tx-linux.git] / drivers / base / firmware_class.c
1 /*
2  * firmware_class.c - Multi purpose firmware loading support
3  *
4  * Copyright (c) 2003 Manuel Estrada Sainz
5  *
6  * Please see Documentation/firmware_class/ for more information.
7  *
8  */
9
10 #include <linux/capability.h>
11 #include <linux/device.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/timer.h>
15 #include <linux/vmalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/mutex.h>
19 #include <linux/workqueue.h>
20 #include <linux/highmem.h>
21 #include <linux/firmware.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/file.h>
25 #include <linux/list.h>
26 #include <linux/fs.h>
27 #include <linux/async.h>
28 #include <linux/pm.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/reboot.h>
32 #include <linux/security.h>
33
34 #include <generated/utsrelease.h>
35
36 #include "base.h"
37
38 MODULE_AUTHOR("Manuel Estrada Sainz");
39 MODULE_DESCRIPTION("Multi purpose firmware loading support");
40 MODULE_LICENSE("GPL");
41
42 /* Builtin firmware support */
43
44 #ifdef CONFIG_FW_LOADER
45
46 extern struct builtin_fw __start_builtin_fw[];
47 extern struct builtin_fw __end_builtin_fw[];
48
49 static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
50                                     void *buf, size_t size)
51 {
52         struct builtin_fw *b_fw;
53
54         for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
55                 if (strcmp(name, b_fw->name) == 0) {
56                         fw->size = b_fw->size;
57                         fw->data = b_fw->data;
58
59                         if (buf && fw->size <= size)
60                                 memcpy(buf, fw->data, fw->size);
61                         return true;
62                 }
63         }
64
65         return false;
66 }
67
68 static bool fw_is_builtin_firmware(const struct firmware *fw)
69 {
70         struct builtin_fw *b_fw;
71
72         for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
73                 if (fw->data == b_fw->data)
74                         return true;
75
76         return false;
77 }
78
79 #else /* Module case - no builtin firmware support */
80
/* Built-in firmware does not exist when the loader is a module. */
static inline bool fw_get_builtin_firmware(struct firmware *fw,
                                           const char *name, void *buf,
                                           size_t size)
{
        return false;
}
87
/* No built-in section in the module case, so nothing can match. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
        return false;
}
92 #endif
93
/* Lifecycle states of one firmware load (see struct fw_state). */
enum fw_status {
        FW_STATUS_UNKNOWN,      /* initial state set by fw_state_init() */
        FW_STATUS_LOADING,      /* load in progress (fw_state_start) */
        FW_STATUS_DONE,         /* load completed successfully */
        FW_STATUS_ABORTED,      /* load failed or was cancelled */
};
100
101 static int loading_timeout = 60;        /* In seconds */
102
103 static inline long firmware_loading_timeout(void)
104 {
105         return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
106 }
107
/*
 * Concurrent request_firmware() for the same firmware need to be
 * serialized.  struct fw_state is simple state machine which hold the
 * state of the firmware loading.
 *
 * Writers publish transitions with __fw_state_set(); waiters block on
 * @completion, which is completed once the state reaches DONE/ABORTED.
 */
struct fw_state {
        struct completion completion;   /* signalled on DONE or ABORTED */
        enum fw_status status;          /* current state, see enum fw_status */
};
117
118 static void fw_state_init(struct fw_state *fw_st)
119 {
120         init_completion(&fw_st->completion);
121         fw_st->status = FW_STATUS_UNKNOWN;
122 }
123
124 static inline bool __fw_state_is_done(enum fw_status status)
125 {
126         return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
127 }
128
129 static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
130 {
131         long ret;
132
133         ret = wait_for_completion_interruptible_timeout(&fw_st->completion,
134                                                         timeout);
135         if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
136                 return -ENOENT;
137         if (!ret)
138                 return -ETIMEDOUT;
139
140         return ret < 0 ? ret : 0;
141 }
142
143 static void __fw_state_set(struct fw_state *fw_st,
144                            enum fw_status status)
145 {
146         WRITE_ONCE(fw_st->status, status);
147
148         if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
149                 complete_all(&fw_st->completion);
150 }
151
/* Convenience wrappers for the common fw_state transitions/waits. */
#define fw_state_start(fw_st)                                   \
        __fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st)                                    \
        __fw_state_set(fw_st, FW_STATUS_DONE)
#define fw_state_aborted(fw_st)                                 \
        __fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st)                                    \
        __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
160
161 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
162 {
163         return fw_st->status == status;
164 }
165
#define fw_state_is_aborted(fw_st)				\
	__fw_state_check(fw_st, FW_STATUS_ABORTED)

#ifdef CONFIG_FW_LOADER_USER_HELPER

/*
 * Extra state helpers only needed by the user-mode helper path.
 * Note: fw_state_aborted() is already defined unconditionally above;
 * the previous duplicate (identical) redefinition here was removed.
 */
#define fw_state_is_done(fw_st)					\
	__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st)				\
	__fw_state_check(fw_st, FW_STATUS_LOADING)
#define fw_state_wait_timeout(fw_st, timeout)			\
	__fw_state_wait_common(fw_st, timeout)

#endif /* CONFIG_FW_LOADER_USER_HELPER */
181
/* firmware behavior options */
/* generate a uevent so udev/user space can react to the request */
#define FW_OPT_UEVENT   (1U << 0)
/* asynchronous (request_firmware_nowait) request */
#define FW_OPT_NOWAIT   (1U << 1)
#ifdef CONFIG_FW_LOADER_USER_HELPER
/* allow falling back to the user-mode helper */
#define FW_OPT_USERHELPER       (1U << 2)
#else
#define FW_OPT_USERHELPER       0
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
/* fall back to the user-mode helper by default when configured so */
#define FW_OPT_FALLBACK         FW_OPT_USERHELPER
#else
#define FW_OPT_FALLBACK         0
#endif
/* suppress the warning when direct loading fails */
#define FW_OPT_NO_WARN  (1U << 3)
/* do not add this firmware to the device/PM cache */
#define FW_OPT_NOCACHE  (1U << 4)
197
/* Global registry of in-flight/cached firmware_buf objects. */
struct firmware_cache {
        /* firmware_buf instance will be added into the below list */
        spinlock_t lock;                /* protects @head */
        struct list_head head;          /* list of struct firmware_buf */
        int state;                      /* FW_LOADER_NO_CACHE/START_CACHE */

#ifdef CONFIG_PM_SLEEP
        /*
         * Names of firmware images which have been cached successfully
         * will be added into the below list so that device uncache
         * helper can trace which firmware images have been cached
         * before.
         */
        spinlock_t name_lock;           /* protects @fw_names */
        struct list_head fw_names;      /* list of struct fw_cache_entry */

        struct delayed_work work;       /* deferred uncache after resume */

        struct notifier_block   pm_notify;      /* suspend/resume hook */
#endif
};
219
/*
 * Refcounted holder of one firmware image's data; shared between
 * concurrent requests for the same firmware name.
 */
struct firmware_buf {
        struct kref ref;                /* dropped via fw_free_buf() */
        struct list_head list;          /* link in firmware_cache.head */
        struct firmware_cache *fwc;     /* owning cache */
        struct fw_state fw_st;          /* load state machine */
        void *data;                     /* image data (vmalloc/vmap/prealloc) */
        size_t size;                    /* bytes of valid data */
        size_t allocated_size;          /* nonzero => caller-preallocated @data */
#ifdef CONFIG_FW_LOADER_USER_HELPER
        bool is_paged_buf;              /* @data is vmap of @pages */
        bool need_uevent;               /* request was announced via uevent */
        struct page **pages;            /* backing pages for helper writes */
        int nr_pages;                   /* pages currently allocated */
        int page_array_size;            /* capacity of @pages array */
        struct list_head pending_list;  /* link in pending_fw_head */
#endif
        const char *fw_id;              /* firmware name (kstrdup_const) */
};
238
/* One cached firmware name, kept on firmware_cache.fw_names. */
struct fw_cache_entry {
        struct list_head list;
        const char *name;       /* kstrdup_const'd firmware name */
};
243
/* devres payload tying a firmware name to a device for auto-caching. */
struct fw_name_devm {
        unsigned long magic;    /* &fw_cache, marks our devres entries */
        const char *name;       /* kstrdup_const'd firmware name */
};
248
249 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
250
251 #define FW_LOADER_NO_CACHE      0
252 #define FW_LOADER_START_CACHE   1
253
254 static int fw_cache_piggyback_on_request(const char *name);
255
256 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
257  * guarding for corner cases a global lock should be OK */
258 static DEFINE_MUTEX(fw_lock);
259
260 static bool __enable_firmware = false;
261
/* Allow firmware lookups beyond the built-in table and the cache. */
static void enable_firmware(void)
{
        mutex_lock(&fw_lock);
        __enable_firmware = true;
        mutex_unlock(&fw_lock);
}
268
/* Restrict lookups to built-in firmware and the cache (see below). */
static void disable_firmware(void)
{
        mutex_lock(&fw_lock);
        __enable_firmware = false;
        mutex_unlock(&fw_lock);
}
275
276 /*
277  * When disabled only the built-in firmware and the firmware cache will be
278  * used to look for firmware.
279  */
280 static bool firmware_enabled(void)
281 {
282         bool enabled = false;
283
284         mutex_lock(&fw_lock);
285         if (__enable_firmware)
286                 enabled = true;
287         mutex_unlock(&fw_lock);
288
289         return enabled;
290 }
291
292 static struct firmware_cache fw_cache;
293
294 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
295                                               struct firmware_cache *fwc,
296                                               void *dbuf, size_t size)
297 {
298         struct firmware_buf *buf;
299
300         buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
301         if (!buf)
302                 return NULL;
303
304         buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
305         if (!buf->fw_id) {
306                 kfree(buf);
307                 return NULL;
308         }
309
310         kref_init(&buf->ref);
311         buf->fwc = fwc;
312         buf->data = dbuf;
313         buf->allocated_size = size;
314         fw_state_init(&buf->fw_st);
315 #ifdef CONFIG_FW_LOADER_USER_HELPER
316         INIT_LIST_HEAD(&buf->pending_list);
317 #endif
318
319         pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
320
321         return buf;
322 }
323
324 static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
325 {
326         struct firmware_buf *tmp;
327         struct firmware_cache *fwc = &fw_cache;
328
329         list_for_each_entry(tmp, &fwc->head, list)
330                 if (!strcmp(tmp->fw_id, fw_name))
331                         return tmp;
332         return NULL;
333 }
334
335 static int fw_lookup_and_allocate_buf(const char *fw_name,
336                                       struct firmware_cache *fwc,
337                                       struct firmware_buf **buf, void *dbuf,
338                                       size_t size)
339 {
340         struct firmware_buf *tmp;
341
342         spin_lock(&fwc->lock);
343         tmp = __fw_lookup_buf(fw_name);
344         if (tmp) {
345                 kref_get(&tmp->ref);
346                 spin_unlock(&fwc->lock);
347                 *buf = tmp;
348                 return 1;
349         }
350         tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
351         if (tmp)
352                 list_add(&tmp->list, &fwc->head);
353         spin_unlock(&fwc->lock);
354
355         *buf = tmp;
356
357         return tmp ? 0 : -ENOMEM;
358 }
359
/*
 * kref release callback: unlink the buf and free all of its storage.
 * Called with fwc->lock held (by fw_free_buf) and drops it, hence the
 * __releases annotation.
 */
static void __fw_free_buf(struct kref *ref)
        __releases(&fwc->lock)
{
        struct firmware_buf *buf = to_fwbuf(ref);
        struct firmware_cache *fwc = buf->fwc;

        pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
                 __func__, buf->fw_id, buf, buf->data,
                 (unsigned int)buf->size);

        list_del(&buf->list);
        spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
        if (buf->is_paged_buf) {
                /* data is a vmap of individually-allocated pages */
                int i;
                vunmap(buf->data);
                for (i = 0; i < buf->nr_pages; i++)
                        __free_page(buf->pages[i]);
                vfree(buf->pages);
        } else
#endif
        /* allocated_size != 0 means @data is caller-owned; don't free it */
        if (!buf->allocated_size)
                vfree(buf->data);
        kfree_const(buf->fw_id);
        kfree(buf);
}
387
/*
 * Drop a reference on @buf.  The lock is taken here and released either
 * by __fw_free_buf() (last ref) or explicitly below, so the list and the
 * refcount stay consistent under fwc->lock.
 */
static void fw_free_buf(struct firmware_buf *buf)
{
        struct firmware_cache *fwc = buf->fwc;
        spin_lock(&fwc->lock);
        if (!kref_put(&buf->ref, __fw_free_buf))
                spin_unlock(&fwc->lock);
}
395
/* direct firmware loading support */
/* optional user-supplied path (module param), tried first when set */
static char fw_path_para[256];
static const char * const fw_path[] = {
        fw_path_para,
        "/lib/firmware/updates/" UTS_RELEASE,
        "/lib/firmware/updates",
        "/lib/firmware/" UTS_RELEASE,
        "/lib/firmware"
};
405
406 /*
407  * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
408  * from kernel command line because firmware_class is generally built in
409  * kernel instead of module.
410  */
411 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
412 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
413
/*
 * Try to load the firmware directly from the filesystem, walking the
 * fw_path[] search list in priority order.
 *
 * On success the buf's data/size are filled, its state is marked DONE,
 * and 0 is returned.  Otherwise the error of the last attempted path is
 * returned (-ENOENT if nothing was found, -ENAMETOOLONG if a candidate
 * path did not fit in PATH_MAX).
 */
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
        loff_t size;
        int i, len;
        int rc = -ENOENT;
        char *path;
        enum kernel_read_file_id id = READING_FIRMWARE;
        size_t msize = INT_MAX;

        /* Already populated data member means we're loading into a buffer */
        if (buf->data) {
                id = READING_FIRMWARE_PREALLOC_BUFFER;
                msize = buf->allocated_size;
        }

        /* scratch buffer for building candidate paths (PATH_MAX bytes) */
        path = __getname();
        if (!path)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
                /* skip the unset customized path */
                if (!fw_path[i][0])
                        continue;

                len = snprintf(path, PATH_MAX, "%s/%s",
                               fw_path[i], buf->fw_id);
                if (len >= PATH_MAX) {
                        rc = -ENAMETOOLONG;
                        break;
                }

                buf->size = 0;
                rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
                                                id);
                if (rc) {
                        /* a missing file is expected; other errors warrant a warning */
                        if (rc == -ENOENT)
                                dev_dbg(device, "loading %s failed with error %d\n",
                                         path, rc);
                        else
                                dev_warn(device, "loading %s failed with error %d\n",
                                         path, rc);
                        continue;
                }
                dev_dbg(device, "direct-loading %s\n", buf->fw_id);
                buf->size = size;
                /* mark terminal state so concurrent waiters are released */
                fw_state_done(&buf->fw_st);
                break;
        }
        __putname(path);

        return rc;
}
467
468 /* firmware holds the ownership of pages */
469 static void firmware_free_data(const struct firmware *fw)
470 {
471         /* Loaded directly? */
472         if (!fw->priv) {
473                 vfree(fw->data);
474                 return;
475         }
476         fw_free_buf(fw->priv);
477 }
478
479 /* store the pages buffer info firmware from buf */
/*
 * Hand the buf's data (and, with the user helper, its pages) to the
 * struct firmware the driver sees, and link fw->priv back to the buf.
 */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
        fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
        fw->pages = buf->pages;
#endif
        fw->size = buf->size;
        fw->data = buf->data;

        pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
                 __func__, buf->fw_id, buf, buf->data,
                 (unsigned int)buf->size);
}
493
494 #ifdef CONFIG_PM_SLEEP
495 static void fw_name_devm_release(struct device *dev, void *res)
496 {
497         struct fw_name_devm *fwn = res;
498
499         if (fwn->magic == (unsigned long)&fw_cache)
500                 pr_debug("%s: fw_name-%s devm-%p released\n",
501                                 __func__, fwn->name, res);
502         kfree_const(fwn->name);
503 }
504
505 static int fw_devm_match(struct device *dev, void *res,
506                 void *match_data)
507 {
508         struct fw_name_devm *fwn = res;
509
510         return (fwn->magic == (unsigned long)&fw_cache) &&
511                 !strcmp(fwn->name, match_data);
512 }
513
514 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
515                 const char *name)
516 {
517         struct fw_name_devm *fwn;
518
519         fwn = devres_find(dev, fw_name_devm_release,
520                           fw_devm_match, (void *)name);
521         return fwn;
522 }
523
524 /* add firmware name into devres list */
525 static int fw_add_devm_name(struct device *dev, const char *name)
526 {
527         struct fw_name_devm *fwn;
528
529         fwn = fw_find_devm_name(dev, name);
530         if (fwn)
531                 return 1;
532
533         fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
534                            GFP_KERNEL);
535         if (!fwn)
536                 return -ENOMEM;
537         fwn->name = kstrdup_const(name, GFP_KERNEL);
538         if (!fwn->name) {
539                 devres_free(fwn);
540                 return -ENOMEM;
541         }
542
543         fwn->magic = (unsigned long)&fw_cache;
544         devres_add(dev, fwn);
545
546         return 0;
547 }
548 #else
/* Without CONFIG_PM_SLEEP there is no cache; recording is a no-op. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
        return 0;
}
553 #endif
554
/*
 * Final step of a successful request: publish the loaded buf into the
 * caller-visible struct firmware, and hook the image into the caching
 * machinery unless the request opted out.
 *
 * Returns -ENOENT when the load produced no data or was aborted.
 */
static int assign_firmware_buf(struct firmware *fw, struct device *device,
                               unsigned int opt_flags)
{
        struct firmware_buf *buf = fw->priv;

        mutex_lock(&fw_lock);
        if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
                mutex_unlock(&fw_lock);
                return -ENOENT;
        }

        /*
         * add firmware name into devres list so that we can auto cache
         * and uncache firmware for device.
         *
         * device may has been deleted already, but the problem
         * should be fixed in devres or driver core.
         */
        /* don't cache firmware handled without uevent */
        if (device && (opt_flags & FW_OPT_UEVENT) &&
            !(opt_flags & FW_OPT_NOCACHE))
                fw_add_devm_name(device, buf->fw_id);

        /*
         * After caching firmware image is started, let it piggyback
         * on request firmware.
         */
        if (!(opt_flags & FW_OPT_NOCACHE) &&
            buf->fwc->state == FW_LOADER_START_CACHE) {
                /* extra ref: the cache now also owns this buf */
                if (fw_cache_piggyback_on_request(buf->fw_id))
                        kref_get(&buf->ref);
        }

        /* pass the pages buffer to driver at the last minute */
        fw_set_page_data(buf, fw);
        mutex_unlock(&fw_lock);
        return 0;
}
593
594 /*
595  * user-mode helper code
596  */
597 #ifdef CONFIG_FW_LOADER_USER_HELPER
/* Per-request state for the user-mode helper sysfs interface. */
struct firmware_priv {
        bool nowait;                    /* async request (affects uevent) */
        struct device dev;              /* sysfs device exposing loading/data */
        struct firmware_buf *buf;       /* buf being filled by user space */
        struct firmware *fw;            /* caller's firmware struct */
};
604
/* Map the embedded struct device back to its firmware_priv. */
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
        return container_of(dev, struct firmware_priv, dev);
}
609
/*
 * Abort a pending user-helper load: unlink it from pending_fw_head and
 * move its state to ABORTED (which wakes all waiters).  Caller holds
 * fw_lock.
 */
static void __fw_load_abort(struct firmware_buf *buf)
{
        /*
         * There is a small window in which user can write to 'loading'
         * between loading done and disappearance of 'loading'
         */
        if (fw_state_is_done(&buf->fw_st))
                return;

        list_del_init(&buf->pending_list);
        fw_state_aborted(&buf->fw_st);
}
622
623 static void fw_load_abort(struct firmware_priv *fw_priv)
624 {
625         struct firmware_buf *buf = fw_priv->buf;
626
627         __fw_load_abort(buf);
628 }
629
630 static LIST_HEAD(pending_fw_head);
631
632 static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
633 {
634         struct firmware_buf *buf;
635         struct firmware_buf *next;
636
637         mutex_lock(&fw_lock);
638         list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
639                 if (!buf->need_uevent || !only_kill_custom)
640                          __fw_load_abort(buf);
641         }
642         mutex_unlock(&fw_lock);
643 }
644
/* sysfs: report the current loading timeout in seconds. */
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%d\n", loading_timeout);
}
650
/**
 * timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 *      Sets the number of seconds to wait for the firmware.  Once
 *      this expires an error will be returned to the driver and no
 *      firmware will be provided.
 *
 *      Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
                             const char *buf, size_t count)
{
        /* NOTE(review): simple_strtol silently yields 0 on bad input,
         * which maps to 'wait forever' here — presumably intentional. */
        loading_timeout = simple_strtol(buf, NULL, 10);
        if (loading_timeout < 0)
                loading_timeout = 0;

        return count;
}
673 static CLASS_ATTR_RW(timeout);
674
675 static struct attribute *firmware_class_attrs[] = {
676         &class_attr_timeout.attr,
677         NULL,
678 };
679 ATTRIBUTE_GROUPS(firmware_class);
680
/* device release callback: free the per-request state. */
static void fw_dev_release(struct device *dev)
{
	kfree(to_firmware_priv(dev));
}
687
688 static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
689 {
690         if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
691                 return -ENOMEM;
692         if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
693                 return -ENOMEM;
694         if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
695                 return -ENOMEM;
696
697         return 0;
698 }
699
700 static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
701 {
702         struct firmware_priv *fw_priv = to_firmware_priv(dev);
703         int err = 0;
704
705         mutex_lock(&fw_lock);
706         if (fw_priv->buf)
707                 err = do_firmware_uevent(fw_priv, env);
708         mutex_unlock(&fw_lock);
709         return err;
710 }
711
/* The /sys/class/firmware class backing the user-mode helper. */
static struct class firmware_class = {
        .name           = "firmware",
        .class_groups   = firmware_class_groups,
        .dev_uevent     = firmware_uevent,
        .dev_release    = fw_dev_release,
};
718
/* sysfs 'loading' read: 1 while user space is writing data, else 0. */
static ssize_t firmware_loading_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        int loading = 0;

        /* fw_priv->buf may be cleared concurrently; sample under fw_lock */
        mutex_lock(&fw_lock);
        if (fw_priv->buf)
                loading = fw_state_is_loading(&fw_priv->buf->fw_st);
        mutex_unlock(&fw_lock);

        return sprintf(buf, "%d\n", loading);
}
732
733 /* Some architectures don't have PAGE_KERNEL_RO */
734 #ifndef PAGE_KERNEL_RO
735 #define PAGE_KERNEL_RO PAGE_KERNEL
736 #endif
737
738 /* one pages buffer should be mapped/unmapped only once */
739 static int fw_map_pages_buf(struct firmware_buf *buf)
740 {
741         if (!buf->is_paged_buf)
742                 return 0;
743
744         vunmap(buf->data);
745         buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
746         if (!buf->data)
747                 return -ENOMEM;
748         return 0;
749 }
750
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 *      The relevant values are:
 *
 *       1: Start a load, discarding any previous partial load.
 *       0: Conclude the load and hand the data to the driver code.
 *      -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        struct firmware_buf *fw_buf;
        ssize_t written = count;
        int loading = simple_strtol(buf, NULL, 10);
        int i;

        mutex_lock(&fw_lock);
        fw_buf = fw_priv->buf;
        /* an already-aborted load ignores further control writes */
        if (fw_state_is_aborted(&fw_buf->fw_st))
                goto out;

        switch (loading) {
        case 1:
                /* discarding any previous partial load */
                if (!fw_state_is_done(&fw_buf->fw_st)) {
                        for (i = 0; i < fw_buf->nr_pages; i++)
                                __free_page(fw_buf->pages[i]);
                        vfree(fw_buf->pages);
                        fw_buf->pages = NULL;
                        fw_buf->page_array_size = 0;
                        fw_buf->nr_pages = 0;
                        fw_state_start(&fw_buf->fw_st);
                }
                break;
        case 0:
                if (fw_state_is_loading(&fw_buf->fw_st)) {
                        int rc;

                        /*
                         * Several loading requests may be pending on
                         * one same firmware buf, so let all requests
                         * see the mapped 'buf->data' once the loading
                         * is completed.
                         * */
                        rc = fw_map_pages_buf(fw_buf);
                        if (rc)
                                dev_err(dev, "%s: map pages failed\n",
                                        __func__);
                        else
                                /* give the LSM a chance to veto the image */
                                rc = security_kernel_post_read_file(NULL,
                                                fw_buf->data, fw_buf->size,
                                                READING_FIRMWARE);

                        /*
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
                        list_del_init(&fw_buf->pending_list);
                        if (rc) {
                                fw_state_aborted(&fw_buf->fw_st);
                                written = rc;
                        } else {
                                fw_state_done(&fw_buf->fw_st);
                        }
                        break;
                }
                /* fallthrough */
        default:
                dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
                /* fallthrough */
        case -1:
                fw_load_abort(fw_priv);
                break;
        }
out:
        mutex_unlock(&fw_lock);
        return written;
}
836
837 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
838
839 static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
840                            loff_t offset, size_t count, bool read)
841 {
842         if (read)
843                 memcpy(buffer, buf->data + offset, count);
844         else
845                 memcpy(buf->data + offset, buffer, count);
846 }
847
/*
 * Copy between the user-helper I/O buffer and the buf's page array,
 * one page at a time (pages may be highmem, hence kmap/kunmap).
 */
static void firmware_rw(struct firmware_buf *buf, char *buffer,
                        loff_t offset, size_t count, bool read)
{
        while (count) {
                void *page_data;
                int page_nr = offset >> PAGE_SHIFT;
                int page_ofs = offset & (PAGE_SIZE-1);
                /* stay within the current page on this iteration */
                int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

                page_data = kmap(buf->pages[page_nr]);

                if (read)
                        memcpy(buffer, page_data + page_ofs, page_cnt);
                else
                        memcpy(page_data + page_ofs, buffer, page_cnt);

                kunmap(buf->pages[page_nr]);
                buffer += page_cnt;
                offset += page_cnt;
                count -= page_cnt;
        }
}
870
/*
 * sysfs 'data' read: let user space read back what has been written so
 * far.  Fails with -ENODEV once the load has reached a terminal state.
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
                                  struct bin_attribute *bin_attr,
                                  char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        struct firmware_buf *buf;
        ssize_t ret_count;

        mutex_lock(&fw_lock);
        buf = fw_priv->buf;
        if (!buf || fw_state_is_done(&buf->fw_st)) {
                ret_count = -ENODEV;
                goto out;
        }
        if (offset > buf->size) {
                ret_count = 0;
                goto out;
        }
        /* clamp to the valid data currently in the buf */
        if (count > buf->size - offset)
                count = buf->size - offset;

        ret_count = count;

        /* flat preallocated buffer vs. page-array backed buf */
        if (buf->data)
                firmware_rw_buf(buf, buffer, offset, count, true);
        else
                firmware_rw(buf, buffer, offset, count, true);

out:
        mutex_unlock(&fw_lock);
        return ret_count;
}
904
/*
 * Grow the buf's page array and page set so it can hold at least
 * @min_size bytes.  On allocation failure the whole load is aborted and
 * -ENOMEM returned.  Caller holds fw_lock.
 */
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
        struct firmware_buf *buf = fw_priv->buf;
        int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

        /* If the array of pages is too small, grow it... */
        if (buf->page_array_size < pages_needed) {
                /* double to amortize repeated small extensions */
                int new_array_size = max(pages_needed,
                                         buf->page_array_size * 2);
                struct page **new_pages;

                new_pages = vmalloc(new_array_size * sizeof(void *));
                if (!new_pages) {
                        fw_load_abort(fw_priv);
                        return -ENOMEM;
                }
                memcpy(new_pages, buf->pages,
                       buf->page_array_size * sizeof(void *));
                memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
                       (new_array_size - buf->page_array_size));
                vfree(buf->pages);
                buf->pages = new_pages;
                buf->page_array_size = new_array_size;
        }

        /* ...then allocate any missing pages */
        while (buf->nr_pages < pages_needed) {
                buf->pages[buf->nr_pages] =
                        alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

                if (!buf->pages[buf->nr_pages]) {
                        fw_load_abort(fw_priv);
                        return -ENOMEM;
                }
                buf->nr_pages++;
        }
        return 0;
}
942
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 *      Data written to the 'data' attribute will be later handed to
 *      the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *bin_attr,
                                   char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
        struct firmware_buf *buf;
        ssize_t retval;

        /* writing firmware data is a privileged operation */
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        mutex_lock(&fw_lock);
        buf = fw_priv->buf;
        if (!buf || fw_state_is_done(&buf->fw_st)) {
                retval = -ENODEV;
                goto out;
        }

        if (buf->data) {
                /* preallocated buffer: cannot grow beyond its capacity */
                if (offset + count > buf->allocated_size) {
                        retval = -ENOMEM;
                        goto out;
                }
                firmware_rw_buf(buf, buffer, offset, count, false);
                retval = count;
        } else {
                /* page-array backed buf: extend it as needed */
                retval = fw_realloc_buffer(fw_priv, offset + count);
                if (retval)
                        goto out;

                retval = count;
                firmware_rw(buf, buffer, offset, count, false);
        }

        /* track the high-water mark of written data */
        buf->size = max_t(size_t, offset + count, buf->size);
out:
        mutex_unlock(&fw_lock);
        return retval;
}
995
/* sysfs "data" file through which userspace feeds the firmware image */
static struct bin_attribute firmware_attr_data = {
        .attr = { .name = "data", .mode = 0644 },
        .size = 0,
        .read = firmware_data_read,
        .write = firmware_data_write,
};

/* plain attributes of the transient firmware device ("loading") */
static struct attribute *fw_dev_attrs[] = {
        &dev_attr_loading.attr,
        NULL
};

/* binary attributes of the transient firmware device ("data") */
static struct bin_attribute *fw_dev_bin_attrs[] = {
        &firmware_attr_data,
        NULL
};

static const struct attribute_group fw_dev_attr_group = {
        .attrs = fw_dev_attrs,
        .bin_attrs = fw_dev_bin_attrs,
};

/* attribute groups installed on the device created by fw_create_instance() */
static const struct attribute_group *fw_dev_attr_groups[] = {
        &fw_dev_attr_group,
        NULL
};
1022
1023 static struct firmware_priv *
1024 fw_create_instance(struct firmware *firmware, const char *fw_name,
1025                    struct device *device, unsigned int opt_flags)
1026 {
1027         struct firmware_priv *fw_priv;
1028         struct device *f_dev;
1029
1030         fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
1031         if (!fw_priv) {
1032                 fw_priv = ERR_PTR(-ENOMEM);
1033                 goto exit;
1034         }
1035
1036         fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
1037         fw_priv->fw = firmware;
1038         f_dev = &fw_priv->dev;
1039
1040         device_initialize(f_dev);
1041         dev_set_name(f_dev, "%s", fw_name);
1042         f_dev->parent = device;
1043         f_dev->class = &firmware_class;
1044         f_dev->groups = fw_dev_attr_groups;
1045 exit:
1046         return fw_priv;
1047 }
1048
/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv,
                                  unsigned int opt_flags, long timeout)
{
        int retval = 0;
        struct device *f_dev = &fw_priv->dev;
        struct firmware_buf *buf = fw_priv->buf;

        /* fall back on userspace loading */
        if (!buf->data)
                buf->is_paged_buf = true;

        /* suppress the ADD uevent until we decide whether to announce it */
        dev_set_uevent_suppress(f_dev, true);

        retval = device_add(f_dev);
        if (retval) {
                dev_err(f_dev, "%s: device_register failed\n", __func__);
                goto err_put_dev;
        }

        /* expose this request so the sysfs loading interface can find it */
        mutex_lock(&fw_lock);
        list_add(&buf->pending_list, &pending_fw_head);
        mutex_unlock(&fw_lock);

        if (opt_flags & FW_OPT_UEVENT) {
                buf->need_uevent = true;
                dev_set_uevent_suppress(f_dev, false);
                dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
                kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
        } else {
                /* no uevent: userspace must find us itself, wait forever */
                timeout = MAX_JIFFY_OFFSET;
        }

        retval = fw_state_wait_timeout(&buf->fw_st, timeout);
        if (retval < 0) {
                /* timed out or interrupted: abort under fw_lock */
                mutex_lock(&fw_lock);
                fw_load_abort(fw_priv);
                mutex_unlock(&fw_lock);
        }

        if (fw_state_is_aborted(&buf->fw_st))
                retval = -EAGAIN;
        else if (buf->is_paged_buf && !buf->data)
                /* loader claimed success but never produced any data */
                retval = -ENOMEM;

        device_del(f_dev);
err_put_dev:
        put_device(f_dev);
        return retval;
}
1099
/*
 * Run the user-mode helper fallback: create the transient sysfs loader
 * device and wait for userspace to feed the image.  The usermodehelper
 * read lock is held across the load so it cannot race with the
 * suspend-time disabling of usermode helpers (see the suspend notes
 * above fw_pm_notify()).
 */
static int fw_load_from_user_helper(struct firmware *firmware,
                                    const char *name, struct device *device,
                                    unsigned int opt_flags)
{
        struct firmware_priv *fw_priv;
        long timeout;
        int ret;

        timeout = firmware_loading_timeout();
        if (opt_flags & FW_OPT_NOWAIT) {
                /* async caller: may sleep until helpers become usable */
                timeout = usermodehelper_read_lock_wait(timeout);
                if (!timeout) {
                        dev_dbg(device, "firmware: %s loading timed out\n",
                                name);
                        return -EBUSY;
                }
        } else {
                /* sync caller: helpers are expected to be available now */
                ret = usermodehelper_read_trylock();
                if (WARN_ON(ret)) {
                        dev_err(device, "firmware: %s will not be loaded\n",
                                name);
                        return ret;
                }
        }

        fw_priv = fw_create_instance(firmware, name, device, opt_flags);
        if (IS_ERR(fw_priv)) {
                ret = PTR_ERR(fw_priv);
                goto out_unlock;
        }

        fw_priv->buf = firmware->priv;
        ret = _request_firmware_load(fw_priv, opt_flags, timeout);

        if (!ret)
                ret = assign_firmware_buf(firmware, device, opt_flags);

out_unlock:
        usermodehelper_read_unlock();

        return ret;
}
1142
#else /* CONFIG_FW_LOADER_USER_HELPER */
/* Stub: without the user-mode helper there is no fallback loader. */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
                         struct device *device, unsigned int opt_flags)
{
        return -ENOENT;
}

/* Stub: no fallback requests can exist, so there is nothing to kill. */
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }

#endif /* CONFIG_FW_LOADER_USER_HELPER */
1154
1155 /* prepare firmware and firmware_buf structs;
1156  * return 0 if a firmware is already assigned, 1 if need to load one,
1157  * or a negative error code
1158  */
1159 static int
1160 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
1161                           struct device *device, void *dbuf, size_t size)
1162 {
1163         struct firmware *firmware;
1164         struct firmware_buf *buf;
1165         int ret;
1166
1167         *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
1168         if (!firmware) {
1169                 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
1170                         __func__);
1171                 return -ENOMEM;
1172         }
1173
1174         if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
1175                 dev_dbg(device, "using built-in %s\n", name);
1176                 return 0; /* assigned */
1177         }
1178
1179         ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
1180
1181         /*
1182          * bind with 'buf' now to avoid warning in failure path
1183          * of requesting firmware.
1184          */
1185         firmware->priv = buf;
1186
1187         if (ret > 0) {
1188                 ret = fw_state_wait(&buf->fw_st);
1189                 if (!ret) {
1190                         fw_set_page_data(buf, firmware);
1191                         return 0; /* assigned */
1192                 }
1193         }
1194
1195         if (ret < 0)
1196                 return ret;
1197         return 1; /* need to load */
1198 }
1199
1200 /*
1201  * Batched requests need only one wake, we need to do this step last due to the
1202  * fallback mechanism. The buf is protected with kref_get(), and it won't be
1203  * released until the last user calls release_firmware().
1204  *
1205  * Failed batched requests are possible as well, in such cases we just share
1206  * the struct firmware_buf and won't release it until all requests are woken
1207  * and have gone through this same path.
1208  */
1209 static void fw_abort_batch_reqs(struct firmware *fw)
1210 {
1211         struct firmware_buf *buf;
1212
1213         /* Loaded directly? */
1214         if (!fw || !fw->priv)
1215                 return;
1216
1217         buf = fw->priv;
1218         if (!fw_state_is_aborted(&buf->fw_st))
1219                 fw_state_aborted(&buf->fw_st);
1220 }
1221
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
                  struct device *device, void *buf, size_t size,
                  unsigned int opt_flags)
{
        struct firmware *fw = NULL;
        int ret;

        if (!firmware_p)
                return -EINVAL;

        /* empty firmware names are invalid */
        if (!name || name[0] == '\0') {
                ret = -EINVAL;
                goto out;
        }

        ret = _request_firmware_prepare(&fw, name, device, buf, size);
        if (ret <= 0) /* error or already assigned */
                goto out;

        /* the firmware API is disabled around suspend; see fw_pm_notify() */
        if (!firmware_enabled()) {
                WARN(1, "firmware request while host is not available\n");
                ret = -EHOSTDOWN;
                goto out;
        }

        /* try the direct filesystem loader first... */
        ret = fw_get_filesystem_firmware(device, fw->priv);
        if (ret) {
                if (!(opt_flags & FW_OPT_NO_WARN))
                        dev_warn(device,
                                 "Direct firmware load for %s failed with error %d\n",
                                 name, ret);
                /* ...then the user-mode helper, if the caller allows it */
                if (opt_flags & FW_OPT_USERHELPER) {
                        dev_warn(device, "Falling back to user helper\n");
                        ret = fw_load_from_user_helper(fw, name, device,
                                                       opt_flags);
                }
        } else
                ret = assign_firmware_buf(fw, device, opt_flags);

 out:
        if (ret < 0) {
                /* wake any batched waiters, then drop our references */
                fw_abort_batch_reqs(fw);
                release_firmware(fw);
                fw = NULL;
        }

        *firmware_p = fw;
        return ret;
}
1273
1274 /**
1275  * request_firmware: - send firmware request and wait for it
1276  * @firmware_p: pointer to firmware image
1277  * @name: name of firmware file
1278  * @device: device for which firmware is being loaded
1279  *
1280  *      @firmware_p will be used to return a firmware image by the name
1281  *      of @name for device @device.
1282  *
1283  *      Should be called from user context where sleeping is allowed.
1284  *
1285  *      @name will be used as $FIRMWARE in the uevent environment and
1286  *      should be distinctive enough not to be confused with any other
1287  *      firmware image for this or any other device.
1288  *
1289  *      Caller must hold the reference count of @device.
1290  *
1291  *      The function can be called safely inside device's suspend and
1292  *      resume callback.
1293  **/
1294 int
1295 request_firmware(const struct firmware **firmware_p, const char *name,
1296                  struct device *device)
1297 {
1298         int ret;
1299
1300         /* Need to pin this module until return */
1301         __module_get(THIS_MODULE);
1302         ret = _request_firmware(firmware_p, name, device, NULL, 0,
1303                                 FW_OPT_UEVENT | FW_OPT_FALLBACK);
1304         module_put(THIS_MODULE);
1305         return ret;
1306 }
1307 EXPORT_SYMBOL(request_firmware);
1308
1309 /**
1310  * request_firmware_direct: - load firmware directly without usermode helper
1311  * @firmware_p: pointer to firmware image
1312  * @name: name of firmware file
1313  * @device: device for which firmware is being loaded
1314  *
1315  * This function works pretty much like request_firmware(), but this doesn't
1316  * fall back to usermode helper even if the firmware couldn't be loaded
1317  * directly from fs.  Hence it's useful for loading optional firmwares, which
1318  * aren't always present, without extra long timeouts of udev.
1319  **/
1320 int request_firmware_direct(const struct firmware **firmware_p,
1321                             const char *name, struct device *device)
1322 {
1323         int ret;
1324
1325         __module_get(THIS_MODULE);
1326         ret = _request_firmware(firmware_p, name, device, NULL, 0,
1327                                 FW_OPT_UEVENT | FW_OPT_NO_WARN);
1328         module_put(THIS_MODULE);
1329         return ret;
1330 }
1331 EXPORT_SYMBOL_GPL(request_firmware_direct);
1332
1333 /**
1334  * request_firmware_into_buf - load firmware into a previously allocated buffer
1335  * @firmware_p: pointer to firmware image
1336  * @name: name of firmware file
1337  * @device: device for which firmware is being loaded and DMA region allocated
1338  * @buf: address of buffer to load firmware into
1339  * @size: size of buffer
1340  *
1341  * This function works pretty much like request_firmware(), but it doesn't
1342  * allocate a buffer to hold the firmware data. Instead, the firmware
1343  * is loaded directly into the buffer pointed to by @buf and the @firmware_p
1344  * data member is pointed at @buf.
1345  *
1346  * This function doesn't cache firmware either.
1347  */
1348 int
1349 request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1350                           struct device *device, void *buf, size_t size)
1351 {
1352         int ret;
1353
1354         __module_get(THIS_MODULE);
1355         ret = _request_firmware(firmware_p, name, device, buf, size,
1356                                 FW_OPT_UEVENT | FW_OPT_FALLBACK |
1357                                 FW_OPT_NOCACHE);
1358         module_put(THIS_MODULE);
1359         return ret;
1360 }
1361 EXPORT_SYMBOL(request_firmware_into_buf);
1362
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
        if (!fw)
                return;

        /* built-in images share kernel data; only free loaded ones */
        if (!fw_is_builtin_firmware(fw))
                firmware_free_data(fw);
        kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
1376
/* Async support */
/* Bookkeeping for one request_firmware_nowait() invocation. */
struct firmware_work {
        struct work_struct work;        /* queued via schedule_work() */
        struct module *module;          /* requester, pinned until work runs */
        const char *name;               /* owned copy of the firmware name */
        struct device *device;          /* requesting device, reference held */
        void *context;                  /* opaque cookie handed to @cont */
        void (*cont)(const struct firmware *fw, void *context); /* completion */
        unsigned int opt_flags;         /* FW_OPT_* controlling the request */
};
1387
/* Worker: performs the actual firmware request for the nowait API. */
static void request_firmware_work_func(struct work_struct *work)
{
        struct firmware_work *fw_work;
        const struct firmware *fw;

        fw_work = container_of(work, struct firmware_work, work);

        _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
                          fw_work->opt_flags);
        /* fw is NULL on failure; the callback must handle that */
        fw_work->cont(fw, fw_work->context);
        put_device(fw_work->device); /* taken in request_firmware_nowait() */

        /* release the requester's module pin and our own bookkeeping */
        module_put(fw_work->module);
        kfree_const(fw_work->name);
        kfree(fw_work);
}
1404
1405 /**
1406  * request_firmware_nowait - asynchronous version of request_firmware
1407  * @module: module requesting the firmware
1408  * @uevent: sends uevent to copy the firmware image if this flag
1409  *      is non-zero else the firmware copy must be done manually.
1410  * @name: name of firmware file
1411  * @device: device for which firmware is being loaded
1412  * @gfp: allocation flags
1413  * @context: will be passed over to @cont, and
1414  *      @fw may be %NULL if firmware request fails.
1415  * @cont: function will be called asynchronously when the firmware
1416  *      request is over.
1417  *
1418  *      Caller must hold the reference count of @device.
1419  *
1420  *      Asynchronous variant of request_firmware() for user contexts:
1421  *              - sleep for as small periods as possible since it may
1422  *                increase kernel boot time of built-in device drivers
1423  *                requesting firmware in their ->probe() methods, if
1424  *                @gfp is GFP_KERNEL.
1425  *
1426  *              - can't sleep at all if @gfp is GFP_ATOMIC.
1427  **/
1428 int
1429 request_firmware_nowait(
1430         struct module *module, bool uevent,
1431         const char *name, struct device *device, gfp_t gfp, void *context,
1432         void (*cont)(const struct firmware *fw, void *context))
1433 {
1434         struct firmware_work *fw_work;
1435
1436         fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1437         if (!fw_work)
1438                 return -ENOMEM;
1439
1440         fw_work->module = module;
1441         fw_work->name = kstrdup_const(name, gfp);
1442         if (!fw_work->name) {
1443                 kfree(fw_work);
1444                 return -ENOMEM;
1445         }
1446         fw_work->device = device;
1447         fw_work->context = context;
1448         fw_work->cont = cont;
1449         fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
1450                 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1451
1452         if (!try_module_get(module)) {
1453                 kfree_const(fw_work->name);
1454                 kfree(fw_work);
1455                 return -EFAULT;
1456         }
1457
1458         get_device(fw_work->device);
1459         INIT_WORK(&fw_work->work, request_firmware_work_func);
1460         schedule_work(&fw_work->work);
1461         return 0;
1462 }
1463 EXPORT_SYMBOL(request_firmware_nowait);
1464
#ifdef CONFIG_PM_SLEEP
/* async domain used to schedule and flush all firmware-caching jobs */
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);

/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
        int ret;
        const struct firmware *fw;

        pr_debug("%s: %s\n", __func__, fw_name);

        ret = request_firmware(&fw, fw_name, NULL);
        if (!ret)
                /*
                 * Deliberately kfree() instead of release_firmware():
                 * only the struct firmware wrapper is freed, while the
                 * reference on the underlying firmware_buf is kept --
                 * that retained buf IS the cached image.
                 */
                kfree(fw);

        pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

        return ret;
}
1497
1498 static struct firmware_buf *fw_lookup_buf(const char *fw_name)
1499 {
1500         struct firmware_buf *tmp;
1501         struct firmware_cache *fwc = &fw_cache;
1502
1503         spin_lock(&fwc->lock);
1504         tmp = __fw_lookup_buf(fw_name);
1505         spin_unlock(&fwc->lock);
1506
1507         return tmp;
1508 }
1509
1510 /**
1511  * uncache_firmware - remove one cached firmware image
1512  * @fw_name: the firmware image name
1513  *
1514  * Uncache one firmware image which has been cached successfully
1515  * before.
1516  *
1517  * Return 0 if the firmware cache has been removed successfully
1518  * Return !0 otherwise
1519  *
1520  */
1521 static int uncache_firmware(const char *fw_name)
1522 {
1523         struct firmware_buf *buf;
1524         struct firmware fw;
1525
1526         pr_debug("%s: %s\n", __func__, fw_name);
1527
1528         if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
1529                 return 0;
1530
1531         buf = fw_lookup_buf(fw_name);
1532         if (buf) {
1533                 fw_free_buf(buf);
1534                 return 0;
1535         }
1536
1537         return -EINVAL;
1538 }
1539
1540 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1541 {
1542         struct fw_cache_entry *fce;
1543
1544         fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1545         if (!fce)
1546                 goto exit;
1547
1548         fce->name = kstrdup_const(name, GFP_ATOMIC);
1549         if (!fce->name) {
1550                 kfree(fce);
1551                 fce = NULL;
1552                 goto exit;
1553         }
1554 exit:
1555         return fce;
1556 }
1557
1558 static int __fw_entry_found(const char *name)
1559 {
1560         struct firmware_cache *fwc = &fw_cache;
1561         struct fw_cache_entry *fce;
1562
1563         list_for_each_entry(fce, &fwc->fw_names, list) {
1564                 if (!strcmp(fce->name, name))
1565                         return 1;
1566         }
1567         return 0;
1568 }
1569
1570 static int fw_cache_piggyback_on_request(const char *name)
1571 {
1572         struct firmware_cache *fwc = &fw_cache;
1573         struct fw_cache_entry *fce;
1574         int ret = 0;
1575
1576         spin_lock(&fwc->name_lock);
1577         if (__fw_entry_found(name))
1578                 goto found;
1579
1580         fce = alloc_fw_cache_entry(name);
1581         if (fce) {
1582                 ret = 1;
1583                 list_add(&fce->list, &fwc->fw_names);
1584                 pr_debug("%s: fw: %s\n", __func__, name);
1585         }
1586 found:
1587         spin_unlock(&fwc->name_lock);
1588         return ret;
1589 }
1590
/* Free a cache entry and its owned name copy. */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
        kfree_const(fce->name);
        kfree(fce);
}
1596
1597 static void __async_dev_cache_fw_image(void *fw_entry,
1598                                        async_cookie_t cookie)
1599 {
1600         struct fw_cache_entry *fce = fw_entry;
1601         struct firmware_cache *fwc = &fw_cache;
1602         int ret;
1603
1604         ret = cache_firmware(fce->name);
1605         if (ret) {
1606                 spin_lock(&fwc->name_lock);
1607                 list_del(&fce->list);
1608                 spin_unlock(&fwc->name_lock);
1609
1610                 free_fw_cache_entry(fce);
1611         }
1612 }
1613
1614 /* called with dev->devres_lock held */
1615 static void dev_create_fw_entry(struct device *dev, void *res,
1616                                 void *data)
1617 {
1618         struct fw_name_devm *fwn = res;
1619         const char *fw_name = fwn->name;
1620         struct list_head *head = data;
1621         struct fw_cache_entry *fce;
1622
1623         fce = alloc_fw_cache_entry(fw_name);
1624         if (fce)
1625                 list_add(&fce->list, head);
1626 }
1627
1628 static int devm_name_match(struct device *dev, void *res,
1629                            void *match_data)
1630 {
1631         struct fw_name_devm *fwn = res;
1632         return (fwn->magic == (unsigned long)match_data);
1633 }
1634
/*
 * Collect the firmware names recorded in @dev's devres list and queue
 * one async caching job for each name not already on the cache list.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
        LIST_HEAD(todo);
        struct fw_cache_entry *fce;
        struct fw_cache_entry *fce_next;
        struct firmware_cache *fwc = &fw_cache;

        /* gather candidate names (runs dev_create_fw_entry per resource) */
        devres_for_each_res(dev, fw_name_devm_release,
                            devm_name_match, &fw_cache,
                            dev_create_fw_entry, &todo);

        list_for_each_entry_safe(fce, fce_next, &todo, list) {
                list_del(&fce->list);

                spin_lock(&fwc->name_lock);
                /* only one cache entry for one firmware */
                if (!__fw_entry_found(fce->name)) {
                        list_add(&fce->list, &fwc->fw_names);
                } else {
                        /* duplicate: drop it and skip the async job below */
                        free_fw_cache_entry(fce);
                        fce = NULL;
                }
                spin_unlock(&fwc->name_lock);

                if (fce)
                        async_schedule_domain(__async_dev_cache_fw_image,
                                              (void *)fce,
                                              &fw_cache_domain);
        }
}
1665
/*
 * Drain fw_cache.fw_names, uncaching every listed image.  The name_lock
 * is dropped before calling uncache_firmware() so the lock is never held
 * across it; emptiness is re-checked after each re-acquisition.
 */
static void __device_uncache_fw_images(void)
{
        struct firmware_cache *fwc = &fw_cache;
        struct fw_cache_entry *fce;

        spin_lock(&fwc->name_lock);
        while (!list_empty(&fwc->fw_names)) {
                fce = list_entry(fwc->fw_names.next,
                                struct fw_cache_entry, list);
                list_del(&fce->list);
                spin_unlock(&fwc->name_lock);

                uncache_firmware(fce->name);
                free_fw_cache_entry(fce);

                spin_lock(&fwc->name_lock);
        }
        spin_unlock(&fwc->name_lock);
}
1685
1686 /**
1687  * device_cache_fw_images - cache devices' firmware
1688  *
1689  * If one device called request_firmware or its nowait version
1690  * successfully before, the firmware names are recored into the
1691  * device's devres link list, so device_cache_fw_images can call
1692  * cache_firmware() to cache these firmwares for the device,
1693  * then the device driver can load its firmwares easily at
1694  * time when system is not ready to complete loading firmware.
1695  */
1696 static void device_cache_fw_images(void)
1697 {
1698         struct firmware_cache *fwc = &fw_cache;
1699         int old_timeout;
1700         DEFINE_WAIT(wait);
1701
1702         pr_debug("%s\n", __func__);
1703
1704         /* cancel uncache work */
1705         cancel_delayed_work_sync(&fwc->work);
1706
1707         /*
1708          * use small loading timeout for caching devices' firmware
1709          * because all these firmware images have been loaded
1710          * successfully at lease once, also system is ready for
1711          * completing firmware loading now. The maximum size of
1712          * firmware in current distributions is about 2M bytes,
1713          * so 10 secs should be enough.
1714          */
1715         old_timeout = loading_timeout;
1716         loading_timeout = 10;
1717
1718         mutex_lock(&fw_lock);
1719         fwc->state = FW_LOADER_START_CACHE;
1720         dpm_for_each_dev(NULL, dev_cache_fw_image);
1721         mutex_unlock(&fw_lock);
1722
1723         /* wait for completion of caching firmware for all devices */
1724         async_synchronize_full_domain(&fw_cache_domain);
1725
1726         loading_timeout = old_timeout;
1727 }
1728
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
        pr_debug("%s\n", __func__);
        __device_uncache_fw_images();
}

/* delayed-work wrapper; scheduled by device_uncache_fw_images_delay() */
static void device_uncache_fw_images_work(struct work_struct *work)
{
        device_uncache_fw_images();
}
1745
/**
 * device_uncache_fw_images_delay - uncache devices firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices' firmwares which have been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
        queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
                           msecs_to_jiffies(delay));
}
1758
1759 /**
1760  * fw_pm_notify - notifier for suspend/resume
1761  * @notify_block: unused
1762  * @mode: mode we are switching to
1763  * @unused: unused
1764  *
1765  * Used to modify the firmware_class state as we move in between states.
1766  * The firmware_class implements a firmware cache to enable device driver
1767  * to fetch firmware upon resume before the root filesystem is ready. We
1768  * disable API calls which do not use the built-in firmware or the firmware
1769  * cache when we know these calls will not work.
1770  *
1771  * The inner logic behind all this is a bit complex so it is worth summarizing
1772  * the kernel's own suspend/resume process with context and focus on how this
1773  * can impact the firmware API.
1774  *
1775  * First a review on how we go to suspend::
1776  *
1777  *      pm_suspend() --> enter_state() -->
1778  *      sys_sync()
1779  *      suspend_prepare() -->
1780  *              __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
1781  *              suspend_freeze_processes() -->
1782  *                      freeze_processes() -->
1783  *                              __usermodehelper_set_disable_depth(UMH_DISABLED);
1784  *                              freeze all tasks ...
1785  *                      freeze_kernel_threads()
1786  *      suspend_devices_and_enter() -->
1787  *              dpm_suspend_start() -->
1788  *                              dpm_prepare()
1789  *                              dpm_suspend()
1790  *              suspend_enter()  -->
1791  *                      platform_suspend_prepare()
1792  *                      dpm_suspend_late()
1793  *                      freeze_enter()
1794  *                      syscore_suspend()
1795  *
1796  * When we resume we bail out of a loop from suspend_devices_and_enter() and
1797  * unwind back out to the caller enter_state() where we were before as follows::
1798  *
1799  *      enter_state() -->
1800  *      suspend_devices_and_enter() --> (bail from loop)
1801  *              dpm_resume_end() -->
1802  *                      dpm_resume()
1803  *                      dpm_complete()
1804  *      suspend_finish() -->
1805  *              suspend_thaw_processes() -->
1806  *                      thaw_processes() -->
1807  *                              __usermodehelper_set_disable_depth(UMH_FREEZING);
1808  *                              thaw_workqueues();
1809  *                              thaw all processes ...
1810  *                              usermodehelper_enable();
1811  *              pm_notifier_call_chain(PM_POST_SUSPEND);
1812  *
1813  * fw_pm_notify() works through pm_notifier_call_chain().
1814  */
static int fw_pm_notify(struct notifier_block *notify_block,
                        unsigned long mode, void *unused)
{
        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
        case PM_RESTORE_PREPARE:
                /*
                 * kill pending fallback requests with a custom fallback
                 * to avoid stalling suspend.
                 */
                kill_pending_fw_fallback_reqs(true);
                device_cache_fw_images();
                /* new requests now fail with -EHOSTDOWN (firmware_enabled) */
                disable_firmware();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
                /*
                 * In case that system sleep failed and syscore_suspend is
                 * not called.
                 */
                mutex_lock(&fw_lock);
                fw_cache.state = FW_LOADER_NO_CACHE;
                mutex_unlock(&fw_lock);
                enable_firmware();

                /* keep the cache around briefly for early-resume users */
                device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
                break;
        }

        return 0;
}
1849
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	/*
	 * syscore_suspend() runs after tasks are frozen (see the suspend
	 * call-graph comment above); from this point on no further firmware
	 * names are recorded for caching.
	 */
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}
1856
/*
 * Only .suspend is populated; the PM_POST_* cases in fw_pm_notify()
 * take care of resetting the cache state on resume.
 */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
1860 #else
static int fw_cache_piggyback_on_request(const char *name)
{
	/*
	 * Stub for configurations where the caching support above is
	 * compiled out (the matching #ifdef is before this chunk).
	 * Returns 0 — presumably "request not served from the cache";
	 * NOTE(review): caller is not visible here, confirm semantics.
	 */
	return 0;
}
1865 #endif
1866
/* One-time initialization of the global firmware cache state. */
static void __init fw_cache_init(void)
{
	/* State used in all configurations: cache list, lock, idle state. */
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	/* Tracking of firmware names to re-cache across system sleep. */
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	/* notifier_call must be set before the notifier is registered. */
	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	/* Stop caching once syscore_suspend() runs (see fw_suspend()). */
	register_syscore_ops(&fw_syscore_ops);
#endif
}
1886
/* Reboot notifier: stop firmware activity before the system goes down. */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/* Refuse any new firmware requests for the rest of shutdown. */
	disable_firmware();
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}
1899
/* Registered on the reboot notifier chain in firmware_class_init(). */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1903
1904 static int __init firmware_class_init(void)
1905 {
1906         enable_firmware();
1907         fw_cache_init();
1908         register_reboot_notifier(&fw_shutdown_nb);
1909 #ifdef CONFIG_FW_LOADER_USER_HELPER
1910         return class_register(&firmware_class);
1911 #else
1912         return 0;
1913 #endif
1914 }
1915
/* Module exit: tear down registrations in reverse order of setup. */
static void __exit firmware_class_exit(void)
{
	disable_firmware();
#ifdef CONFIG_PM_SLEEP
	/* Undo the PM hooks installed by fw_cache_init(). */
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
	unregister_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	class_unregister(&firmware_class);
#endif
}
1928
/*
 * fs_initcall level runs before device_initcall, so the firmware
 * machinery is presumably meant to be ready before built-in drivers
 * probe — NOTE(review): confirm against initcall ordering in init.h.
 */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);