diff options
Diffstat (limited to 'kernel/mm')
-rw-r--r-- kernel/mm/page_alloc.c | 7 +++++--
-rw-r--r-- kernel/mm/slab.c       | 4 ++--
-rw-r--r-- kernel/mm/slub.c       | 2 +-
-rw-r--r-- kernel/mm/vmscan.c     | 2 +-
4 files changed, 9 insertions, 6 deletions
diff --git a/kernel/mm/page_alloc.c b/kernel/mm/page_alloc.c index 5b70c9977..41bd90d60 100644 --- a/kernel/mm/page_alloc.c +++ b/kernel/mm/page_alloc.c @@ -1021,12 +1021,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, set_page_owner(page, order, gfp_flags); /* - * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to + * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to * allocate the page. The expectation is that the caller is taking * steps that will free more memory. The caller should avoid the page * being used for !PFMEMALLOC purposes. */ - page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); + if (alloc_flags & ALLOC_NO_WATERMARKS) + set_page_pfmemalloc(page); + else + clear_page_pfmemalloc(page); return 0; } diff --git a/kernel/mm/slab.c b/kernel/mm/slab.c index 7eb38dd1c..3dd2d1ff9 100644 --- a/kernel/mm/slab.c +++ b/kernel/mm/slab.c @@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, } /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ - if (unlikely(page->pfmemalloc)) + if (page_is_pfmemalloc(page)) pfmemalloc_active = true; nr_pages = (1 << cachep->gfporder); @@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, add_zone_page_state(page_zone(page), NR_SLAB_UNRECLAIMABLE, nr_pages); __SetPageSlab(page); - if (page->pfmemalloc) + if (page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { diff --git a/kernel/mm/slub.c b/kernel/mm/slub.c index f657453ad..905e283d7 100644 --- a/kernel/mm/slub.c +++ b/kernel/mm/slub.c @@ -1409,7 +1409,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) order = compound_order(page); page->slab_cache = s; __SetPageSlab(page); - if (page->pfmemalloc) + if (page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); start = page_address(page); diff --git 
a/kernel/mm/vmscan.c b/kernel/mm/vmscan.c index 0d024fc8a..1a17bd7c0 100644 --- a/kernel/mm/vmscan.c +++ b/kernel/mm/vmscan.c @@ -1153,7 +1153,7 @@ cull_mlocked: if (PageSwapCache(page)) try_to_free_swap(page); unlock_page(page); - putback_lru_page(page); + list_add(&page->lru, &ret_pages); continue; activate_locked: