X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=mm%2Finternal.h;h=f652660ab475e4e86a52d6108308c361b9830cd3;hb=29b09a083348807bcd1242d98e3f105589be200a;hp=b8c91b342e244153ec9b24b4673e6dd2575af267;hpb=47ab7826cae651cb22fff457292056a4bb5ee0c4;p=karo-tx-linux.git

diff --git a/mm/internal.h b/mm/internal.h
index b8c91b342e24..f652660ab475 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -12,6 +12,7 @@
 #define __MM_INTERNAL_H
 
 #include <linux/mm.h>
+#include
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
@@ -118,19 +119,15 @@ struct compact_control {
 	unsigned long nr_freepages;	/* Number of isolated free pages */
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long start_free_pfn;	/* where we started the search */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
 	bool sync;			/* Synchronous migration */
-	bool wrapped;			/* Order > 0 compactions are
-					   incremental, once free_pfn
-					   and migrate_pfn meet, we restart
-					   from the top of the zone;
-					   remember we wrapped around. */
+	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-	bool *contended;		/* True if a lock was contended */
+	bool contended;			/* True if a lock was contended */
+	struct page **page;		/* Page captured of requested size */
 };
 
 unsigned long
@@ -340,7 +337,6 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 #define ZONE_RECLAIM_FULL	-1
 #define ZONE_RECLAIM_SOME	0
 #define ZONE_RECLAIM_SUCCESS	1
-#endif
 
 extern int hwpoison_filter(struct page *p);
 
@@ -356,3 +352,67 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 	unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN		WMARK_MIN
+#define ALLOC_WMARK_LOW		WMARK_LOW
+#define ALLOC_WMARK_HIGH	WMARK_HIGH
+#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
+
+#define ALLOC_HARDER		0x10 /* try to alloc harder */
+#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
+
+/*
+ * Unnecessary readahead harms performance, especially for SSD devices, where
+ * large reads are significantly more expensive than small ones.
+ * This implements simple detection of random swap access: on a swap page
+ * fault, a counter in the vma's anon_vma is decreased if the page is found
+ * in the swap cache, and increased if a synchronous swapin is needed.
+ * Swapin only performs readahead while the counter is below a threshold.
+ */
+#ifdef CONFIG_SWAP
+#define SWAPRA_MISS_THRESHOLD	(100)
+#define SWAPRA_MAX_MISS ((SWAPRA_MISS_THRESHOLD) * 10)
+static inline void swap_cache_hit(struct vm_area_struct *vma)
+{
+	if (vma && vma->anon_vma)
+		atomic_dec_if_positive(&vma->anon_vma->swapra_miss);
+}
+
+static inline void swap_cache_miss(struct vm_area_struct *vma)
+{
+	if (!vma || !vma->anon_vma)
+		return;
+	if (atomic_read(&vma->anon_vma->swapra_miss) < SWAPRA_MAX_MISS)
+		atomic_inc(&vma->anon_vma->swapra_miss);
+}
+
+static inline int swap_cache_skip_readahead(struct vm_area_struct *vma)
+{
+	if (!vma || !vma->anon_vma)
+		return 0;
+	return atomic_read(&vma->anon_vma->swapra_miss) >
+		SWAPRA_MISS_THRESHOLD;
+}
+#else
+static inline void swap_cache_hit(struct vm_area_struct *vma)
+{
+}
+
+static inline void swap_cache_miss(struct vm_area_struct *vma)
+{
+}
+
+static inline int swap_cache_skip_readahead(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif /* CONFIG_SWAP */
+
+#endif /* __MM_INTERNAL_H */
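
For context on the ALLOC_* block moved into internal.h above: the three ALLOC_WMARK_* values reuse the WMARK_MIN/LOW/HIGH enum indices (0, 1, 2), so the low bits of alloc_flags can be used directly as an index into zone->watermark[], and ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS - 1 == 0x03) extracts them. The sketch below is illustrative only and not part of this patch; the helper name pick_watermark() is invented, while zone->watermark[] and the WMARK_* enum are the real fields the flags are meant to index.

#include <linux/mmzone.h>

/* Hypothetical helper, not from the patch: shows how the ALLOC_WMARK bits
 * are intended to be consumed by an allocation path. */
static unsigned long pick_watermark(struct zone *zone, int alloc_flags)
{
	/* ALLOC_NO_WATERMARKS means the caller may dip below every mark. */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		return 0;

	/* Low two bits select WMARK_MIN (0), WMARK_LOW (1) or WMARK_HIGH (2). */
	return zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
}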
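
The comment above swap_cache_hit()/swap_cache_miss() describes the intended feedback loop; the sketch below shows how a swapin fault path might drive the three helpers. It is an assumption for illustration, not code from this series: swapin_fault_sketch() is an invented name, while lookup_swap_cache(), read_swap_cache_async() and swapin_readahead() are the existing swap-cache APIs of this kernel generation.

#include <linux/swap.h>
#include <linux/mm.h>
#include "internal.h"

/* Hypothetical fault-path fragment: hit/miss feedback plus the readahead cutoff. */
static struct page *swapin_fault_sketch(struct vm_area_struct *vma,
					unsigned long addr, swp_entry_t entry)
{
	struct page *page = lookup_swap_cache(entry);

	if (page) {
		/* Readahead (or an earlier fault) already pulled the page in. */
		swap_cache_hit(vma);
		return page;
	}

	/* Synchronous swapin is unavoidable: record the miss... */
	swap_cache_miss(vma);

	/* ...and once the access pattern looks random, read just this page. */
	if (swap_cache_skip_readahead(vma))
		return read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, addr);

	return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, addr);
}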