/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lustre/llite/rw26.c
 *
 * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <asm/uaccess.h>

#include <linux/migrate.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

/**
 * Implements the Linux VM address_space::invalidatepage() method. This method
 * is called when a page is truncated from a file, either as a result of an
 * explicit truncate, or when the inode is removed from memory (as a result of
 * final iput(), umount, or memory pressure induced icache shrinking).
 *
 * The range [0, offset] of the page remains valid (this covers the case of a
 * truncate that is not page aligned). Lustre leaves the partially truncated
 * page in the cache, relying on struct inode::i_size to limit further
 * accesses.
 */
static void ll_invalidatepage(struct page *vmpage, unsigned long offset)
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe not to check anything in invalidatepage/releasepage
         * below, because they are run with the page locked and all our I/O
         * happens with the page locked too.
         */
        if (offset == 0) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        lu_ref_add(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_delete(env, page);
                                        lu_ref_del(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }
        }
}

#ifdef HAVE_RELEASEPAGE_WITH_INT
#define RELEASEPAGE_ARG_TYPE int
#else
#define RELEASEPAGE_ARG_TYPE gfp_t
#endif
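/*
 * Implements the address_space_operations::releasepage() method. Returns 1
 * when the page carries no dirty data and its cl_page (if any) is not in use,
 * so the kernel may free it; returns 0 to keep the page otherwise.
 */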
static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
{
        struct cl_env_nest nest;
        struct lu_env     *env;
        struct cl_object  *obj;
        struct cl_page    *page;
        struct address_space *mapping;
        int result;

        LASSERT(PageLocked(vmpage));
        if (PageWriteback(vmpage) || PageDirty(vmpage))
                return 0;

        mapping = vmpage->mapping;
        if (mapping == NULL)
                return 1;

        obj = ll_i2info(mapping->host)->lli_clob;
        if (obj == NULL)
                return 1;

        /* 1 for page allocator, 1 for cl_page and 1 for page cache */
        if (page_count(vmpage) > 3)
                return 0;

        /* TODO: determine which gfp flags should be used, based on @gfp_mask. */
        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                /* If we can't allocate an env we won't call cl_page_put()
                 * later on, which further means it's impossible for cl_page
                 * to drop the page refcount, so ask the kernel not to free
                 * this page. */
                return 0;

        page = cl_vmpage_page(vmpage, obj);
        result = page == NULL;
        if (page != NULL) {
                if (!cl_page_in_use(page)) {
                        result = 1;
                        cl_page_delete(env, page);
                }
                cl_page_put(env, page);
        }
        cl_env_nested_put(&nest, env);
        return result;
}

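/*
 * Implements the address_space_operations::set_page_dirty() method. The
 * clio-aware handling below is disabled (#if 0); the page is simply marked
 * dirty with __set_page_dirty_nobuffers().
 */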
static int ll_set_page_dirty(struct page *vmpage)
{
#if 0
        struct cl_page    *page = vvp_vmpage_page_transient(vmpage);
        struct vvp_object *obj  = cl_inode2vvp(vmpage->mapping->host);
        struct vvp_page   *cpg;

        /*
         * XXX should page method be called here?
         */
        LASSERT(&obj->co_cl == page->cp_obj);
        cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
        /*
         * XXX cannot do much here, because page is possibly not locked:
         * sys_munmap()->...
         *     ->unmap_page_range()->zap_pte_range()->set_page_dirty().
         */
        vvp_write_pending(obj, cpg);
#endif
        RETURN(__set_page_dirty_nobuffers(vmpage));
}

#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)

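/*
 * Pin the pages backing a user buffer for direct I/O. On success, returns
 * the number of pages actually pinned by get_user_pages() and sets *pages to
 * a newly allocated page-pointer array of *max_pages entries; on failure a
 * negative errno is returned and the array is freed.
 */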
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                    size_t size, struct page ***pages,
                                    int *max_pages)
{
        int result = -ENOMEM;

        /* set an arbitrary limit to prevent arithmetic overflow */
        if (size > MAX_DIRECTIO_SIZE) {
                *pages = NULL;
                return -EFBIG;
        }

        *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        *max_pages -= user_addr >> PAGE_CACHE_SHIFT;

        OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
        if (*pages) {
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        *max_pages, (rw == READ), 0, *pages,
                                        NULL);
                up_read(&current->mm->mmap_sem);
                if (unlikely(result <= 0))
                        OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
        }

        return result;
}

/*  ll_free_user_pages - tear down page struct array
 *  @pages: array of page struct pointers underlying target buffer */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
        int i;

        for (i = 0; i < npages; i++) {
                if (pages[i] == NULL)
                        break;
                if (do_dirty)
                        set_page_dirty_lock(pages[i]);
                page_cache_release(pages[i]);
        }

        OBD_FREE_LARGE(pages, npages * sizeof(*pages));
}

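/*
 * Submit the pages described by @pv as one synchronous direct I/O transfer.
 * A transient cl_page is created for each user page; if the page turns out
 * to already be cached (CPT_CACHEABLE), the data is copied through the page
 * cache page instead of being issued directly. Returns pv->ldp_size on
 * success or a negative errno.
 */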
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
        struct cl_page    *clp;
        struct cl_2queue  *queue;
        struct cl_object  *obj = io->ci_obj;
        int i;
        ssize_t rc = 0;
        loff_t file_offset  = pv->ldp_start_offset;
        long size           = pv->ldp_size;
        int page_count      = pv->ldp_nr;
        struct page **pages = pv->ldp_pages;
        long page_size      = cl_page_size(obj);
        bool do_io;
        int  io_pages       = 0;
        ENTRY;

        queue = &io->ci_queue;
        cl_2queue_init(queue);
        for (i = 0; i < page_count; i++) {
                if (pv->ldp_offsets)
                        file_offset = pv->ldp_offsets[i];

                LASSERT(!(file_offset & (page_size - 1)));
                clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                                   pv->ldp_pages[i], CPT_TRANSIENT);
                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                do_io = true;

                /* check the page type: if the page is already cached in the
                 * host page cache, then copy the data directly */
                if (clp->cp_type == CPT_CACHEABLE) {
                        struct page *vmpage = cl_page_vmpage(env, clp);
                        struct page *src_page;
                        struct page *dst_page;
                        void       *src;
                        void       *dst;

                        src_page = (rw == WRITE) ? pages[i] : vmpage;
                        dst_page = (rw == WRITE) ? vmpage : pages[i];

                        src = ll_kmap_atomic(src_page, KM_USER0);
                        dst = ll_kmap_atomic(dst_page, KM_USER1);
                        memcpy(dst, src, min(page_size, size));
                        ll_kunmap_atomic(dst, KM_USER1);
                        ll_kunmap_atomic(src, KM_USER0);

                        /* make sure the page will be added to the transfer by
                         * cl_io_submit()->...->vvp_page_prep_write(). */
                        if (rw == WRITE)
                                set_page_dirty(vmpage);

                        if (rw == READ) {
                                /* do not issue the page for read, since it
                                 * may re-read a readahead page that does not
                                 * have the uptodate bit set. */
                                cl_page_disown(env, io, clp);
                                do_io = false;
                        }
                }

                if (likely(do_io)) {
                        cl_2queue_add(queue, clp);

                        /*
                         * Set page clip to tell the transfer formation engine
                         * that the page has to be sent even if it is beyond
                         * KMS.
                         */
                        cl_page_clip(env, clp, 0, min(size, page_size));

                        ++io_pages;
                }

                /* drop the reference count taken by cl_page_find */
                cl_page_put(env, clp);
                size -= page_size;
                file_offset += page_size;
        }

        if (rc == 0 && io_pages) {
                rc = cl_io_submit_sync(env, io,
                                       rw == READ ? CRT_READ : CRT_WRITE,
                                       queue, 0);
        }
        if (rc == 0)
                rc = pv->ldp_size;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
        RETURN(rc);
}
EXPORT_SYMBOL(ll_direct_rw_pages);

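/*
 * Helper for ll_direct_IO_26(): wrap one contiguous run of pinned user pages
 * in a struct ll_dio_pages and hand it to ll_direct_rw_pages().
 */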
static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
                                   int rw, struct inode *inode,
                                   struct address_space *mapping,
                                   size_t size, loff_t file_offset,
                                   struct page **pages, int page_count)
{
        struct ll_dio_pages pvec = { .ldp_pages        = pages,
                                     .ldp_nr           = page_count,
                                     .ldp_size         = size,
                                     .ldp_offsets      = NULL,
                                     .ldp_start_offset = file_offset
                                   };

        return ll_direct_rw_pages(env, io, rw, inode, &pvec);
}

#ifdef KMALLOC_MAX_SIZE
#define MAX_MALLOC KMALLOC_MAX_SIZE
#else
#define MAX_MALLOC (128 * 1024)
#endif

/* This is the maximum size of a single O_DIRECT request, based on the
 * kmalloc limit.  We need to fit all of the brw_page structs, each one
 * representing PAGE_SIZE worth of user data, into a single buffer, and
 * then truncate this down to a multiple of the full RPC size.  For 4kB
 * PAGE_SIZE this is up to 22MB for 128kB kmalloc and up to 682MB for 4MB
 * kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
                      ~(DT_MAX_BRW_SIZE - 1))
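
/*
 * Implements the address_space_operations::direct_IO() method. Each iovec
 * segment is pinned with ll_get_user_pages() and submitted in chunks of at
 * most MAX_DIO_SIZE bytes via ll_direct_IO_26_seg().
 */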
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                               const struct iovec *iov, loff_t file_offset,
                               unsigned long nr_segs)
{
        struct lu_env *env;
        struct cl_io *io;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct ccc_object *obj = cl_inode2ccc(inode);
        long count = iov_length(iov, nr_segs);
        long tot_bytes = 0, result = 0;
        struct ll_inode_info *lli = ll_i2info(inode);
        unsigned long seg = 0;
        long size = MAX_DIO_SIZE;
        int refcheck;
        ENTRY;

        if (!lli->lli_has_smd)
                RETURN(-EBADF);

        /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
        if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
                RETURN(-EINVAL);

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
               "offset=%lld=%llx, pages %lu (max %lu)\n",
               inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
               file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
               MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);

        /* Check that all user buffers are aligned as well */
        for (seg = 0; seg < nr_segs; seg++) {
                if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) ||
                    (iov[seg].iov_len & ~CFS_PAGE_MASK))
                        RETURN(-EINVAL);
        }

        env = cl_env_get(&refcheck);
        LASSERT(!IS_ERR(env));
        io = ccc_env_io(env)->cui_cl.cis_io;
        LASSERT(io != NULL);

        /* 0. Need locking between buffered and direct access, and protection
         *    against races with size changes by concurrent truncates and
         *    writes.
         * 1. Need the inode mutex to operate on transient pages.
         */
        if (rw == READ)
                mutex_lock(&inode->i_mutex);

        LASSERT(obj->cob_transient_pages == 0);
        for (seg = 0; seg < nr_segs; seg++) {
                long iov_left = iov[seg].iov_len;
                unsigned long user_addr = (unsigned long)iov[seg].iov_base;

                if (rw == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;
                        if (file_offset + iov_left > i_size_read(inode))
                                iov_left = i_size_read(inode) - file_offset;
                }

                while (iov_left > 0) {
                        struct page **pages;
                        int page_count, max_pages = 0;
                        long bytes;

                        bytes = min(size, iov_left);
                        page_count = ll_get_user_pages(rw, user_addr, bytes,
                                                       &pages, &max_pages);
                        if (likely(page_count > 0)) {
                                if (unlikely(page_count < max_pages))
                                        bytes = page_count << PAGE_CACHE_SHIFT;
                                result = ll_direct_IO_26_seg(env, io, rw, inode,
                                                             file->f_mapping,
                                                             bytes, file_offset,
                                                             pages, page_count);
                                ll_free_user_pages(pages, max_pages, rw == READ);
                        } else if (page_count == 0) {
                                GOTO(out, result = -EFAULT);
                        } else {
                                result = page_count;
                        }
                        if (unlikely(result <= 0)) {
                                /* If we can't allocate a large enough buffer
                                 * for the request, shrink it to a smaller
                                 * PAGE_SIZE multiple and try again.
                                 * We should always be able to kmalloc a
                                 * page worth of page pointers = 4MB on i386. */
                                if (result == -ENOMEM &&
                                    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
                                           PAGE_CACHE_SIZE) {
                                        size = ((((size / 2) - 1) |
                                                 ~CFS_PAGE_MASK) + 1) &
                                                CFS_PAGE_MASK;
                                        CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
                                               size);
                                        continue;
                                }

                                GOTO(out, result);
                        }

                        tot_bytes += result;
                        file_offset += result;
                        iov_left -= result;
                        user_addr += result;
                }
        }
out:
        LASSERT(obj->cob_transient_pages == 0);
        if (rw == READ)
                mutex_unlock(&inode->i_mutex);

        if (tot_bytes > 0) {
                if (rw == WRITE) {
                        struct lov_stripe_md *lsm;

                        lsm = ccc_inode_lsm_get(inode);
                        LASSERT(lsm != NULL);
                        lov_stripe_lock(lsm);
                        obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
                        lov_stripe_unlock(lsm);
                        ccc_inode_lsm_put(inode, lsm);
                }
        }

        cl_env_put(env, &refcheck);
        RETURN(tot_bytes ? : result);
}

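/*
 * Implements the address_space_operations::write_begin() method for buffered
 * writes, built on top of ll_prepare_write().
 */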
static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;
        int rc;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        ENTRY;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                RETURN(-ENOMEM);

        *pagep = page;

        rc = ll_prepare_write(file, page, from, from + len);
        if (rc) {
                unlock_page(page);
                page_cache_release(page);
        }
        RETURN(rc);
}

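/*
 * Implements the address_space_operations::write_end() method; commits the
 * copied data with ll_commit_write() and drops the page lock and reference
 * taken in ll_write_begin().
 */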
static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        int rc;

        rc = ll_commit_write(file, page, from, from + copied);
        unlock_page(page);
        page_cache_release(page);

        return rc ?: copied;
}

#ifdef CONFIG_MIGRATION
int ll_migratepage(struct address_space *mapping,
                   struct page *newpage, struct page *page,
                   enum migrate_mode mode)
{
        /* Always fail page migration until we have a proper implementation */
        return -EIO;
}
#endif

#ifndef MS_HAS_NEW_AOPS
struct address_space_operations ll_aops = {
        .readpage       = ll_readpage,
//      .readpages      = ll_readpages,
        .direct_IO      = ll_direct_IO_26,
        .writepage      = ll_writepage,
        .writepages     = ll_writepages,
        .set_page_dirty = ll_set_page_dirty,
        .write_begin    = ll_write_begin,
        .write_end      = ll_write_end,
        .invalidatepage = ll_invalidatepage,
        .releasepage    = (void *)ll_releasepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = ll_migratepage,
#endif
        .bmap           = NULL
};
#else
struct address_space_operations_ext ll_aops = {
        .orig_aops.readpage       = ll_readpage,
//      .orig_aops.readpages      = ll_readpages,
        .orig_aops.direct_IO      = ll_direct_IO_26,
        .orig_aops.writepage      = ll_writepage,
        .orig_aops.writepages     = ll_writepages,
        .orig_aops.set_page_dirty = ll_set_page_dirty,
        .orig_aops.prepare_write  = ll_prepare_write,
        .orig_aops.commit_write   = ll_commit_write,
        .orig_aops.invalidatepage = ll_invalidatepage,
        .orig_aops.releasepage    = ll_releasepage,
#ifdef CONFIG_MIGRATION
        .orig_aops.migratepage    = ll_migratepage,
#endif
        .orig_aops.bmap           = NULL,
        .write_begin    = ll_write_begin,
        .write_end      = ll_write_end
};
#endif