]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[karo-tx-linux.git] / drivers / staging / media / atomisp / pci / atomisp2 / hmm / hmm_reserved_pool.c
1 /*
2  * Support for Medifield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  */
23 /*
24  * This file contains functions for reserved memory pool management
25  */
26 #include <linux/kernel.h>
27 #include <linux/types.h>
28 #include <linux/mm.h>
29
30 #include <asm/set_memory.h>
31
32 #include "atomisp_internal.h"
33 #include "hmm/hmm_pool.h"
34
35 /*
36  * reserved memory pool ops.
37  */
38 static unsigned int get_pages_from_reserved_pool(void *pool,
39                                         struct hmm_page_object *page_obj,
40                                         unsigned int size, bool cached)
41 {
42         unsigned long flags;
43         unsigned int i = 0;
44         unsigned int repool_pgnr;
45         int j;
46         struct hmm_reserved_pool_info *repool_info = pool;
47
48         if (!repool_info)
49                 return 0;
50
51         spin_lock_irqsave(&repool_info->list_lock, flags);
52         if (repool_info->initialized) {
53                 repool_pgnr = repool_info->index;
54
55                 for (j = repool_pgnr-1; j >= 0; j--) {
56                         page_obj[i].page = repool_info->pages[j];
57                         page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
58                         i++;
59                         repool_info->index--;
60                         if (i == size)
61                                 break;
62                 }
63         }
64         spin_unlock_irqrestore(&repool_info->list_lock, flags);
65         return i;
66 }
67
68 static void free_pages_to_reserved_pool(void *pool,
69                                         struct hmm_page_object *page_obj)
70 {
71         unsigned long flags;
72         struct hmm_reserved_pool_info *repool_info = pool;
73
74         if (!repool_info)
75                 return;
76
77         spin_lock_irqsave(&repool_info->list_lock, flags);
78
79         if (repool_info->initialized &&
80             repool_info->index < repool_info->pgnr &&
81             page_obj->type == HMM_PAGE_TYPE_RESERVED) {
82                 repool_info->pages[repool_info->index++] = page_obj->page;
83         }
84
85         spin_unlock_irqrestore(&repool_info->list_lock, flags);
86 }
87
88 static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
89                                         unsigned int pool_size)
90 {
91         struct hmm_reserved_pool_info *pool_info;
92
93         pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
94                                 GFP_KERNEL);
95         if (unlikely(!pool_info)) {
96                 dev_err(atomisp_dev, "out of memory for repool_info.\n");
97                 return -ENOMEM;
98         }
99
100         pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
101                         GFP_KERNEL);
102         if (unlikely(!pool_info->pages)) {
103                 dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
104                 kfree(pool_info);
105                 return -ENOMEM;
106         }
107
108         pool_info->index = 0;
109         pool_info->pgnr = 0;
110         spin_lock_init(&pool_info->list_lock);
111         pool_info->initialized = true;
112
113         *repool_info = pool_info;
114
115         return 0;
116 }
117
118 static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
119 {
120         int ret;
121         unsigned int blk_pgnr;
122         unsigned int pgnr = pool_size;
123         unsigned int order = 0;
124         unsigned int i = 0;
125         int fail_number = 0;
126         struct page *pages;
127         int j;
128         struct hmm_reserved_pool_info *repool_info;
129         if (pool_size == 0)
130                 return 0;
131
132         ret = hmm_reserved_pool_setup(&repool_info, pool_size);
133         if (ret) {
134                 dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
135                 return ret;
136         }
137
138         pgnr = pool_size;
139
140         i = 0;
141         order = MAX_ORDER;
142
143         while (pgnr) {
144                 blk_pgnr = 1U << order;
145                 while (blk_pgnr > pgnr) {
146                         order--;
147                         blk_pgnr >>= 1U;
148                 }
149                 BUG_ON(order > MAX_ORDER);
150
151                 pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
152                 if (unlikely(!pages)) {
153                         if (order == 0) {
154                                 fail_number++;
155                                 dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
156                                                 __func__, fail_number);
157                                 /* if fail five times, will goto end */
158
159                                 /* FIXME: whether is the mechanism is ok? */
160                                 if (fail_number == ALLOC_PAGE_FAIL_NUM)
161                                         goto end;
162                         } else {
163                                 order--;
164                         }
165                 } else {
166                         blk_pgnr = 1U << order;
167
168                         ret = set_pages_uc(pages, blk_pgnr);
169                         if (ret) {
170                                 dev_err(atomisp_dev,
171                                                 "set pages uncached failed\n");
172                                 __free_pages(pages, order);
173                                 goto end;
174                         }
175
176                         for (j = 0; j < blk_pgnr; j++)
177                                 repool_info->pages[i++] = pages + j;
178
179                         repool_info->index += blk_pgnr;
180                         repool_info->pgnr += blk_pgnr;
181
182                         pgnr -= blk_pgnr;
183
184                         fail_number = 0;
185                 }
186         }
187
188 end:
189         repool_info->initialized = true;
190
191         *pool = repool_info;
192
193         dev_info(atomisp_dev,
194                         "hmm_reserved_pool init successfully,"
195                         "hmm_reserved_pool is with %d pages.\n",
196                         repool_info->pgnr);
197         return 0;
198 }
199
200 static void hmm_reserved_pool_exit(void **pool)
201 {
202         unsigned long flags;
203         int i, ret;
204         unsigned int pgnr;
205         struct hmm_reserved_pool_info *repool_info = *pool;
206
207         if (!repool_info)
208                 return;
209
210         spin_lock_irqsave(&repool_info->list_lock, flags);
211         if (!repool_info->initialized) {
212                 spin_unlock_irqrestore(&repool_info->list_lock, flags);
213                 return;
214         }
215         pgnr = repool_info->pgnr;
216         repool_info->index = 0;
217         repool_info->pgnr = 0;
218         repool_info->initialized = false;
219         spin_unlock_irqrestore(&repool_info->list_lock, flags);
220
221         for (i = 0; i < pgnr; i++) {
222                 ret = set_pages_wb(repool_info->pages[i], 1);
223                 if (ret)
224                         dev_err(atomisp_dev,
225                                 "set page to WB err...ret=%d\n", ret);
226                 /*
227                 W/A: set_pages_wb seldom return value = -EFAULT
228                 indicate that address of page is not in valid
229                 range(0xffff880000000000~0xffffc7ffffffffff)
230                 then, _free_pages would panic; Do not know why
231                 page address be valid, it maybe memory corruption by lowmemory
232                 */
233                 if (!ret)
234                         __free_pages(repool_info->pages[i], 0);
235         }
236
237         kfree(repool_info->pages);
238         kfree(repool_info);
239
240         *pool = NULL;
241 }
242
243 static int hmm_reserved_pool_inited(void *pool)
244 {
245         struct hmm_reserved_pool_info *repool_info = pool;
246
247         if (!repool_info)
248                 return 0;
249
250         return repool_info->initialized;
251 }
252
/* Pool-ops vtable exporting the reserved pool to the generic hmm pool layer. */
struct hmm_pool_ops reserved_pops = {
	.pool_init		= hmm_reserved_pool_init,
	.pool_exit		= hmm_reserved_pool_exit,
	.pool_alloc_pages	= get_pages_from_reserved_pool,
	.pool_free_pages	= free_pages_to_reserved_pool,
	.pool_inited		= hmm_reserved_pool_inited,
};