author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/mm/memory-failure.c
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebase, the following patch collided: Force tick interrupt
and get rid of softirq magic (I70131fb85). The colliding hunks were
dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/mm/memory-failure.c')
-rw-r--r--  kernel/mm/memory-failure.c  |  500
1 file changed, 246 insertions(+), 254 deletions(-)
diff --git a/kernel/mm/memory-failure.c b/kernel/mm/memory-failure.c
index 9f48145c8..750b7893e 100644
--- a/kernel/mm/memory-failure.c
+++ b/kernel/mm/memory-failure.c
@@ -20,6 +20,14 @@
* this code has to be extremely careful. Generally it tries to use
* normal locking rules, as in get the standard locks, even if that means
* the error handling takes potentially a long time.
+ *
+ * It can be very tempting to add handling for obscure cases here.
+ * In general any code for handling new cases should only be added iff:
+ * - You know how to test it.
+ * - You have a test that can be added to mce-test
+ * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
+ * - The case actually shows up as a frequent (top 10) page state in
+ * tools/vm/page-types when running a real workload.
*
* There are several operations here with exponential complexity because
* of unsuitable VM data structures. For example the operation to map back
@@ -28,13 +36,6 @@
* are rare we hope to get away with this. This avoids impacting the core
* VM.
*/
-
-/*
- * Notebook:
- * - hugetlb needs more code
- * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
- * - pass bad pages to kdump next kernel
- */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
@@ -55,7 +56,9 @@
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/kfifo.h>
+#include <linux/ratelimit.h>
#include "internal.h"
+#include "ras/ras_event.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -128,27 +131,15 @@ static int hwpoison_filter_flags(struct page *p)
* can only guarantee that the page either belongs to the memcg tasks, or is
* a freed page.
*/
-#ifdef CONFIG_MEMCG_SWAP
+#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
- struct mem_cgroup *mem;
- struct cgroup_subsys_state *css;
- unsigned long ino;
-
if (!hwpoison_filter_memcg)
return 0;
- mem = try_get_mem_cgroup_from_page(p);
- if (!mem)
- return -EINVAL;
-
- css = mem_cgroup_css(mem);
- ino = cgroup_ino(css->cgroup);
- css_put(css);
-
- if (ino != hwpoison_filter_memcg)
+ if (page_cgroup_ino(p) != hwpoison_filter_memcg)
return -EINVAL;
return 0;
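
For reference, the memcg filter compared here is configured from user space; a minimal sketch of the debugfs wiring, assuming the knob exported by mm/hwpoison-inject.c (the value written is the cgroup inode number that page_cgroup_ino() returns):

/* Sketch, assuming the knob exported by mm/hwpoison-inject.c: user
 * space writes the target memcg's cgroup inode number here to enable
 * the filter compared against page_cgroup_ino() above. */
debugfs_create_u64("corrupt-filter-memcg", 0600, hwpoison_dir,
                   &hwpoison_filter_memcg);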
@@ -503,68 +494,34 @@ static void collect_procs(struct page *page, struct list_head *tokill,
kfree(tk);
}
-/*
- * Error handlers for various types of pages.
- */
-
-enum outcome {
- IGNORED, /* Error: cannot be handled */
- FAILED, /* Error: handling failed */
- DELAYED, /* Will be handled later */
- RECOVERED, /* Successfully recovered */
-};
-
static const char *action_name[] = {
- [IGNORED] = "Ignored",
- [FAILED] = "Failed",
- [DELAYED] = "Delayed",
- [RECOVERED] = "Recovered",
-};
-
-enum action_page_type {
- MSG_KERNEL,
- MSG_KERNEL_HIGH_ORDER,
- MSG_SLAB,
- MSG_DIFFERENT_COMPOUND,
- MSG_POISONED_HUGE,
- MSG_HUGE,
- MSG_FREE_HUGE,
- MSG_UNMAP_FAILED,
- MSG_DIRTY_SWAPCACHE,
- MSG_CLEAN_SWAPCACHE,
- MSG_DIRTY_MLOCKED_LRU,
- MSG_CLEAN_MLOCKED_LRU,
- MSG_DIRTY_UNEVICTABLE_LRU,
- MSG_CLEAN_UNEVICTABLE_LRU,
- MSG_DIRTY_LRU,
- MSG_CLEAN_LRU,
- MSG_TRUNCATED_LRU,
- MSG_BUDDY,
- MSG_BUDDY_2ND,
- MSG_UNKNOWN,
+ [MF_IGNORED] = "Ignored",
+ [MF_FAILED] = "Failed",
+ [MF_DELAYED] = "Delayed",
+ [MF_RECOVERED] = "Recovered",
};
static const char * const action_page_types[] = {
- [MSG_KERNEL] = "reserved kernel page",
- [MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
- [MSG_SLAB] = "kernel slab page",
- [MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
- [MSG_POISONED_HUGE] = "huge page already hardware poisoned",
- [MSG_HUGE] = "huge page",
- [MSG_FREE_HUGE] = "free huge page",
- [MSG_UNMAP_FAILED] = "unmapping failed page",
- [MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
- [MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
- [MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
- [MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
- [MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
- [MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
- [MSG_DIRTY_LRU] = "dirty LRU page",
- [MSG_CLEAN_LRU] = "clean LRU page",
- [MSG_TRUNCATED_LRU] = "already truncated LRU page",
- [MSG_BUDDY] = "free buddy page",
- [MSG_BUDDY_2ND] = "free buddy page (2nd try)",
- [MSG_UNKNOWN] = "unknown page",
+ [MF_MSG_KERNEL] = "reserved kernel page",
+ [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
+ [MF_MSG_SLAB] = "kernel slab page",
+ [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
+ [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
+ [MF_MSG_HUGE] = "huge page",
+ [MF_MSG_FREE_HUGE] = "free huge page",
+ [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
+ [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
+ [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
+ [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
+ [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
+ [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
+ [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
+ [MF_MSG_DIRTY_LRU] = "dirty LRU page",
+ [MF_MSG_CLEAN_LRU] = "clean LRU page",
+ [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
+ [MF_MSG_BUDDY] = "free buddy page",
+ [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
+ [MF_MSG_UNKNOWN] = "unknown page",
};
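
The MF_* names used above replace the old file-local enums; they now come from shared declarations so that the tracepoint introduced below can reuse them. An abridged sketch of those declarations, assuming they live in include/linux/mm.h in this kernel series (not a verbatim copy):

/* Abridged sketch of the shared definitions assumed to live in
 * include/linux/mm.h for this kernel series. */
enum mf_result {
        MF_IGNORED,     /* Error: cannot be handled */
        MF_FAILED,      /* Error: handling failed */
        MF_DELAYED,     /* Will be handled later */
        MF_RECOVERED,   /* Successfully recovered */
};

enum mf_action_page_type {
        MF_MSG_KERNEL,
        MF_MSG_SLAB,
        MF_MSG_HUGE,
        /* ... one value per entry in action_page_types[] above ... */
        MF_MSG_UNKNOWN,
};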
/*
@@ -598,7 +555,7 @@ static int delete_from_lru_cache(struct page *p)
*/
static int me_kernel(struct page *p, unsigned long pfn)
{
- return IGNORED;
+ return MF_IGNORED;
}
/*
@@ -607,7 +564,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
static int me_unknown(struct page *p, unsigned long pfn)
{
printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
- return FAILED;
+ return MF_FAILED;
}
/*
@@ -616,7 +573,7 @@ static int me_unknown(struct page *p, unsigned long pfn)
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
int err;
- int ret = FAILED;
+ int ret = MF_FAILED;
struct address_space *mapping;
delete_from_lru_cache(p);
@@ -626,7 +583,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
* should be the one m_f() holds.
*/
if (PageAnon(p))
- return RECOVERED;
+ return MF_RECOVERED;
/*
* Now truncate the page in the page cache. This is really
@@ -640,7 +597,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
/*
* Page has been teared down in the meanwhile
*/
- return FAILED;
+ return MF_FAILED;
}
/*
@@ -657,7 +614,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
!try_to_release_page(p, GFP_NOIO)) {
pr_info("MCE %#lx: failed to release buffers\n", pfn);
} else {
- ret = RECOVERED;
+ ret = MF_RECOVERED;
}
} else {
/*
@@ -665,7 +622,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
* This fails on dirty or anything with private pages
*/
if (invalidate_inode_page(p))
- ret = RECOVERED;
+ ret = MF_RECOVERED;
else
printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
pfn);
@@ -751,9 +708,9 @@ static int me_swapcache_dirty(struct page *p, unsigned long pfn)
ClearPageUptodate(p);
if (!delete_from_lru_cache(p))
- return DELAYED;
+ return MF_DELAYED;
else
- return FAILED;
+ return MF_FAILED;
}
static int me_swapcache_clean(struct page *p, unsigned long pfn)
@@ -761,9 +718,9 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn)
delete_from_swap_cache(p);
if (!delete_from_lru_cache(p))
- return RECOVERED;
+ return MF_RECOVERED;
else
- return FAILED;
+ return MF_FAILED;
}
/*
@@ -776,6 +733,10 @@ static int me_huge_page(struct page *p, unsigned long pfn)
{
int res = 0;
struct page *hpage = compound_head(p);
+
+ if (!PageHuge(hpage))
+ return MF_DELAYED;
+
/*
* We can safely recover from error on free or reserved (i.e.
* not in-use) hugepage by dequeuing it from freelist.
@@ -789,9 +750,9 @@ static int me_huge_page(struct page *p, unsigned long pfn)
if (!(page_mapping(hpage) || PageAnon(hpage))) {
res = dequeue_hwpoisoned_huge_page(hpage);
if (!res)
- return RECOVERED;
+ return MF_RECOVERED;
}
- return DELAYED;
+ return MF_DELAYED;
}
/*
@@ -815,18 +776,16 @@ static int me_huge_page(struct page *p, unsigned long pfn)
#define lru (1UL << PG_lru)
#define swapbacked (1UL << PG_swapbacked)
#define head (1UL << PG_head)
-#define tail (1UL << PG_tail)
-#define compound (1UL << PG_compound)
#define slab (1UL << PG_slab)
#define reserved (1UL << PG_reserved)
static struct page_state {
unsigned long mask;
unsigned long res;
- enum action_page_type type;
+ enum mf_action_page_type type;
int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
- { reserved, reserved, MSG_KERNEL, me_kernel },
+ { reserved, reserved, MF_MSG_KERNEL, me_kernel },
/*
* free pages are specially detected outside this table:
* PG_buddy pages only make a small fraction of all free pages.
@@ -837,31 +796,26 @@ static struct page_state {
* currently unused objects without touching them. But just
* treat it as standard kernel for now.
*/
- { slab, slab, MSG_SLAB, me_kernel },
+ { slab, slab, MF_MSG_SLAB, me_kernel },
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
- { head, head, MSG_HUGE, me_huge_page },
- { tail, tail, MSG_HUGE, me_huge_page },
-#else
- { compound, compound, MSG_HUGE, me_huge_page },
-#endif
+ { head, head, MF_MSG_HUGE, me_huge_page },
- { sc|dirty, sc|dirty, MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
- { sc|dirty, sc, MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
+ { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
+ { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
- { mlock|dirty, mlock|dirty, MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
- { mlock|dirty, mlock, MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
+ { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
+ { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
- { unevict|dirty, unevict|dirty, MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
- { unevict|dirty, unevict, MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
+ { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
+ { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
- { lru|dirty, lru|dirty, MSG_DIRTY_LRU, me_pagecache_dirty },
- { lru|dirty, lru, MSG_CLEAN_LRU, me_pagecache_clean },
+ { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
+ { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
/*
* Catchall entry: must be at end.
*/
- { 0, 0, MSG_UNKNOWN, me_unknown },
+ { 0, 0, MF_MSG_UNKNOWN, me_unknown },
};
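
For context, error_states[] is consulted by a first-match scan: a page matches an entry when the page-flag bits selected by mask equal res, and the catchall entry matches everything, which is why it must stay last. A minimal sketch of that lookup (find_page_state() is a hypothetical name for illustration):

/* Sketch (find_page_state() is hypothetical): first-match scan over
 * error_states[]. The catchall { 0, 0, MF_MSG_UNKNOWN, me_unknown }
 * entry matches any page, which is why it must remain at the end. */
static struct page_state *find_page_state(struct page *p)
{
        struct page_state *ps;

        for (ps = error_states; (p->flags & ps->mask) != ps->res; ps++)
                ;
        return ps;
}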
#undef dirty
@@ -881,8 +835,11 @@ static struct page_state {
* "Dirty/Clean" indication is not 100% accurate due to the possibility of
* setting PG_dirty outside page lock. See also comment above set_page_dirty().
*/
-static void action_result(unsigned long pfn, enum action_page_type type, int result)
+static void action_result(unsigned long pfn, enum mf_action_page_type type,
+ enum mf_result result)
{
+ trace_memory_failure_event(pfn, type, result);
+
pr_err("MCE %#lx: recovery action for %s: %s\n",
pfn, action_page_types[type], action_name[result]);
}
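
trace_memory_failure_event() is emitted by a TRACE_EVENT() declaration, which is what the new "ras/ras_event.h" include at the top of the file pulls in. A rough sketch of the shape of such a declaration; the field and format details are illustrative, not a verbatim copy of ras_event.h:

/* Illustrative TRACE_EVENT shape; see include/ras/ras_event.h for the
 * real declaration. */
TRACE_EVENT(memory_failure_event,
        TP_PROTO(unsigned long pfn, int type, int result),
        TP_ARGS(pfn, type, result),

        TP_STRUCT__entry(
                __field(unsigned long, pfn)
                __field(int, type)
                __field(int, result)
        ),

        TP_fast_assign(
                __entry->pfn    = pfn;
                __entry->type   = type;
                __entry->result = result;
        ),

        TP_printk("pfn %#lx type %d result %d",
                  __entry->pfn, __entry->type, __entry->result)
);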
@@ -896,13 +853,13 @@ static int page_action(struct page_state *ps, struct page *p,
result = ps->action(p, pfn);
count = page_count(p) - 1;
- if (ps->action == me_swapcache_dirty && result == DELAYED)
+ if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
count--;
if (count != 0) {
printk(KERN_ERR
"MCE %#lx: %s still referenced by %d users\n",
pfn, action_page_types[ps->type], count);
- result = FAILED;
+ result = MF_FAILED;
}
action_result(pfn, ps->type, result);
@@ -911,8 +868,74 @@ static int page_action(struct page_state *ps, struct page *p,
* Could adjust zone counters here to correct for the missing page.
*/
- return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
+ return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
+}
+
+/**
+ * get_hwpoison_page() - Get refcount for memory error handling:
+ * @page: raw error page (hit by memory error)
+ *
+ * Return: return 0 if failed to grab the refcount, otherwise true (some
+ * non-zero value.)
+ */
+int get_hwpoison_page(struct page *page)
+{
+ struct page *head = compound_head(page);
+
+ if (PageHuge(head))
+ return get_page_unless_zero(head);
+
+ /*
+ * Thp tail page has special refcounting rule (refcount of tail pages
+ * is stored in ->_mapcount,) so we can't call get_page_unless_zero()
+ * directly for tail pages.
+ */
+ if (PageTransHuge(head)) {
+ /*
+ * Non anonymous thp exists only in allocation/free time. We
+ * can't handle such a case correctly, so let's give it up.
+ * This should be better than triggering BUG_ON when kernel
+ * tries to touch the "partially handled" page.
+ */
+ if (!PageAnon(head)) {
+ pr_err("MCE: %#lx: non anonymous thp\n",
+ page_to_pfn(page));
+ return 0;
+ }
+
+ if (get_page_unless_zero(head)) {
+ if (PageTail(page))
+ get_page(page);
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ return get_page_unless_zero(page);
+}
+EXPORT_SYMBOL_GPL(get_hwpoison_page);
+
+/**
+ * put_hwpoison_page() - Put refcount for memory error handling:
+ * @page: raw error page (hit by memory error)
+ */
+void put_hwpoison_page(struct page *page)
+{
+ struct page *head = compound_head(page);
+
+ if (PageHuge(head)) {
+ put_page(head);
+ return;
+ }
+
+ if (PageTransHuge(head))
+ if (page != head)
+ put_page(head);
+
+ put_page(page);
}
+EXPORT_SYMBOL_GPL(put_hwpoison_page);
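
The two helpers are intended to be used as a pair, so callers no longer need to reason about hugetlb head pages or THP tail-page refcounting themselves. A minimal usage sketch with a hypothetical caller:

/* Minimal usage sketch (handle_error_pfn() is hypothetical): pin the
 * page that took the error for the duration of the handling, then
 * drop the same reference. */
static int handle_error_pfn(unsigned long pfn)
{
        struct page *p = pfn_to_page(pfn);

        if (!get_hwpoison_page(p))
                return -EBUSY;  /* free page, or we lost a refcount race */

        /* ... inspect, unmap or isolate the page here ... */

        put_hwpoison_page(p);
        return 0;
}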
/*
* Do all that is necessary to remove user space mappings. Unmap
@@ -927,7 +950,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
int ret;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
- struct page *ppage;
/*
* Here we are interested only in user-mapped pages, so skip any
@@ -977,59 +999,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
}
/*
- * ppage: poisoned page
- * if p is regular page(4k page)
- * ppage == real poisoned page;
- * else p is hugetlb or THP, ppage == head page.
- */
- ppage = hpage;
-
- if (PageTransHuge(hpage)) {
- /*
- * Verify that this isn't a hugetlbfs head page, the check for
- * PageAnon is just for avoid tripping a split_huge_page
- * internal debug check, as split_huge_page refuses to deal with
- * anything that isn't an anon page. PageAnon can't go away fro
- * under us because we hold a refcount on the hpage, without a
- * refcount on the hpage. split_huge_page can't be safely called
- * in the first place, having a refcount on the tail isn't
- * enough * to be safe.
- */
- if (!PageHuge(hpage) && PageAnon(hpage)) {
- if (unlikely(split_huge_page(hpage))) {
- /*
- * FIXME: if splitting THP is failed, it is
- * better to stop the following operation rather
- * than causing panic by unmapping. System might
- * survive if the page is freed later.
- */
- printk(KERN_INFO
- "MCE %#lx: failed to split THP\n", pfn);
-
- BUG_ON(!PageHWPoison(p));
- return SWAP_FAIL;
- }
- /*
- * We pinned the head page for hwpoison handling,
- * now we split the thp and we are interested in
- * the hwpoisoned raw page, so move the refcount
- * to it. Similarly, page lock is shifted.
- */
- if (hpage != p) {
- if (!(flags & MF_COUNT_INCREASED)) {
- put_page(hpage);
- get_page(p);
- }
- lock_page(p);
- unlock_page(hpage);
- *hpagep = p;
- }
- /* THP is split, so ppage should be the real poisoned page. */
- ppage = p;
- }
- }
-
- /*
* First collect all the processes that have the page
* mapped in dirty form. This has to be done before try_to_unmap,
* because ttu takes the rmap data structures down.
@@ -1038,12 +1007,12 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* there's nothing that can be done.
*/
if (kill)
- collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
+ collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- ret = try_to_unmap(ppage, ttu);
+ ret = try_to_unmap(hpage, ttu);
if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
- pfn, page_mapcount(ppage));
+ pfn, page_mapcount(hpage));
/*
* Now that the dirty bit has been propagated to the
@@ -1055,7 +1024,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* use a more force-full uncatchable kill to prevent
* any accesses to the poisoned memory.
*/
- forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
+ forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
kill_procs(&tokill, forcekill, trapno,
ret != SWAP_SUCCESS, p, pfn, flags);
@@ -1101,6 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
struct page_state *ps;
struct page *p;
struct page *hpage;
+ struct page *orig_head;
int res;
unsigned int nr_pages;
unsigned long page_flags;
@@ -1116,7 +1086,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
p = pfn_to_page(pfn);
- hpage = compound_head(p);
+ orig_head = hpage = compound_head(p);
if (TestSetPageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
return 0;
@@ -1133,7 +1103,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
nr_pages = 1 << compound_order(hpage);
else /* normal page or thp */
nr_pages = 1;
- atomic_long_add(nr_pages, &num_poisoned_pages);
+ num_poisoned_pages_add(nr_pages);
/*
* We need/can do nothing about count=0 pages.
@@ -1149,10 +1119,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* In fact it's dangerous to directly bump up page count from 0,
* that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
*/
- if (!(flags & MF_COUNT_INCREASED) &&
- !get_page_unless_zero(hpage)) {
+ if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
if (is_free_buddy_page(p)) {
- action_result(pfn, MSG_BUDDY, DELAYED);
+ action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
return 0;
} else if (PageHuge(hpage)) {
/*
@@ -1162,23 +1131,38 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
if (PageHWPoison(hpage)) {
if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
|| (p != hpage && TestSetPageHWPoison(hpage))) {
- atomic_long_sub(nr_pages, &num_poisoned_pages);
+ num_poisoned_pages_sub(nr_pages);
unlock_page(hpage);
return 0;
}
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
- action_result(pfn, MSG_FREE_HUGE,
- res ? IGNORED : DELAYED);
+ action_result(pfn, MF_MSG_FREE_HUGE,
+ res ? MF_IGNORED : MF_DELAYED);
unlock_page(hpage);
return res;
} else {
- action_result(pfn, MSG_KERNEL_HIGH_ORDER, IGNORED);
+ action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
return -EBUSY;
}
}
+ if (!PageHuge(p) && PageTransHuge(hpage)) {
+ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+ if (!PageAnon(hpage))
+ pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+ else
+ pr_err("MCE: %#lx: thp split failed\n", pfn);
+ if (TestClearPageHWPoison(p))
+ num_poisoned_pages_sub(nr_pages);
+ put_hwpoison_page(p);
+ return -EBUSY;
+ }
+ VM_BUG_ON_PAGE(!page_count(p), p);
+ hpage = compound_head(p);
+ }
+
/*
* We ignore non-LRU pages for good reasons.
* - PG_locked is only well defined for LRU pages and a few others
@@ -1188,18 +1172,18 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* walked by the page reclaim code, however that's not a big loss.
*/
if (!PageHuge(p)) {
- if (!PageLRU(hpage))
- shake_page(hpage, 0);
- if (!PageLRU(hpage)) {
+ if (!PageLRU(p))
+ shake_page(p, 0);
+ if (!PageLRU(p)) {
/*
* shake_page could have turned it free.
*/
if (is_free_buddy_page(p)) {
if (flags & MF_COUNT_INCREASED)
- action_result(pfn, MSG_BUDDY, DELAYED);
+ action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
else
- action_result(pfn, MSG_BUDDY_2ND,
- DELAYED);
+ action_result(pfn, MF_MSG_BUDDY_2ND,
+ MF_DELAYED);
return 0;
}
}
@@ -1211,8 +1195,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* The page could have changed compound pages during the locking.
* If this happens just bail out.
*/
- if (compound_head(p) != hpage) {
- action_result(pfn, MSG_DIFFERENT_COMPOUND, IGNORED);
+ if (PageCompound(p) && compound_head(p) != orig_head) {
+ action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
res = -EBUSY;
goto out;
}
@@ -1231,16 +1215,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
- atomic_long_sub(nr_pages, &num_poisoned_pages);
- put_page(hpage);
- res = 0;
- goto out;
+ num_poisoned_pages_sub(nr_pages);
+ unlock_page(hpage);
+ put_hwpoison_page(hpage);
+ return 0;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
- atomic_long_sub(nr_pages, &num_poisoned_pages);
+ num_poisoned_pages_sub(nr_pages);
unlock_page(hpage);
- put_page(hpage);
+ put_hwpoison_page(hpage);
return 0;
}
@@ -1252,9 +1236,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* on the head page to show that the hugepage is hwpoisoned
*/
if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
- action_result(pfn, MSG_POISONED_HUGE, IGNORED);
+ action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
unlock_page(hpage);
- put_page(hpage);
+ put_hwpoison_page(hpage);
return 0;
}
/*
@@ -1281,7 +1265,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
!= SWAP_SUCCESS) {
- action_result(pfn, MSG_UNMAP_FAILED, IGNORED);
+ action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
goto out;
}
@@ -1290,7 +1274,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* Torn down by someone else?
*/
if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
- action_result(pfn, MSG_TRUNCATED_LRU, IGNORED);
+ action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
res = -EBUSY;
goto out;
}
@@ -1413,6 +1397,12 @@ static int __init memory_failure_init(void)
}
core_initcall(memory_failure_init);
+#define unpoison_pr_info(fmt, pfn, rs) \
+({ \
+ if (__ratelimit(rs)) \
+ pr_info(fmt, pfn); \
+})
+
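
unpoison_pr_info() is a thin wrapper over the standard printk ratelimiting from <linux/ratelimit.h>: __ratelimit() returns nonzero while the state is within its burst budget and suppresses output afterwards until the interval expires. A minimal sketch of the same API used directly (report_event() is hypothetical):

/* Sketch of the underlying API (report_event() is hypothetical):
 * allow DEFAULT_RATELIMIT_BURST messages per
 * DEFAULT_RATELIMIT_INTERVAL, then suppress until it expires. */
static void report_event(unsigned long pfn)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        if (__ratelimit(&rs))
                pr_info("event on page %#lx\n", pfn);
}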
/**
* unpoison_memory - Unpoison a previously poisoned page
* @pfn: Page number of the to be unpoisoned page
@@ -1431,6 +1421,8 @@ int unpoison_memory(unsigned long pfn)
struct page *p;
int freeit = 0;
unsigned int nr_pages;
+ static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
if (!pfn_valid(pfn))
return -ENXIO;
@@ -1439,7 +1431,26 @@ int unpoison_memory(unsigned long pfn)
page = compound_head(p);
if (!PageHWPoison(p)) {
- pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
+ unpoison_pr_info("MCE: Page was already unpoisoned %#lx\n",
+ pfn, &unpoison_rs);
+ return 0;
+ }
+
+ if (page_count(page) > 1) {
+ unpoison_pr_info("MCE: Someone grabs the hwpoison page %#lx\n",
+ pfn, &unpoison_rs);
+ return 0;
+ }
+
+ if (page_mapped(page)) {
+ unpoison_pr_info("MCE: Someone maps the hwpoison page %#lx\n",
+ pfn, &unpoison_rs);
+ return 0;
+ }
+
+ if (page_mapping(page)) {
+ unpoison_pr_info("MCE: the hwpoison page has non-NULL mapping %#lx\n",
+ pfn, &unpoison_rs);
return 0;
}
@@ -1449,13 +1460,14 @@ int unpoison_memory(unsigned long pfn)
* In such case, we yield to memory_failure() and make unpoison fail.
*/
if (!PageHuge(page) && PageTransHuge(page)) {
- pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
- return 0;
+ unpoison_pr_info("MCE: Memory failure is now running on %#lx\n",
+ pfn, &unpoison_rs);
+ return 0;
}
nr_pages = 1 << compound_order(page);
- if (!get_page_unless_zero(page)) {
+ if (!get_hwpoison_page(p)) {
/*
* Since HWPoisoned hugepage should have non-zero refcount,
* race between memory failure and unpoison seems to happen.
@@ -1463,12 +1475,14 @@ int unpoison_memory(unsigned long pfn)
* to the end.
*/
if (PageHuge(page)) {
- pr_info("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
+ unpoison_pr_info("MCE: Memory failure is now running on free hugepage %#lx\n",
+ pfn, &unpoison_rs);
return 0;
}
if (TestClearPageHWPoison(p))
- atomic_long_dec(&num_poisoned_pages);
- pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ num_poisoned_pages_dec();
+ unpoison_pr_info("MCE: Software-unpoisoned free page %#lx\n",
+ pfn, &unpoison_rs);
return 0;
}
@@ -1480,17 +1494,18 @@ int unpoison_memory(unsigned long pfn)
* the free buddy page pool.
*/
if (TestClearPageHWPoison(page)) {
- pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
- atomic_long_sub(nr_pages, &num_poisoned_pages);
+ unpoison_pr_info("MCE: Software-unpoisoned page %#lx\n",
+ pfn, &unpoison_rs);
+ num_poisoned_pages_sub(nr_pages);
freeit = 1;
if (PageHuge(page))
clear_page_hwpoison_huge_page(page);
}
unlock_page(page);
- put_page(page);
+ put_hwpoison_page(page);
if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
- put_page(page);
+ put_hwpoison_page(page);
return 0;
}
@@ -1503,7 +1518,7 @@ static struct page *new_page(struct page *p, unsigned long private, int **x)
return alloc_huge_page_node(page_hstate(compound_head(p)),
nid);
else
- return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+ return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
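
new_page() follows the new_page_t callback convention from <linux/migrate.h> and is handed to migrate_pages() later in __soft_offline_page(). A sketch of that wiring, assuming the 4.4-era migrate_pages() prototype (migrate_one() is a hypothetical wrapper):

/* Sketch (migrate_one() is hypothetical): how a new_page_t callback
 * such as new_page() above is consumed by the migration core. */
static int migrate_one(struct page *page)
{
        LIST_HEAD(pagelist);
        int ret;

        list_add(&page->lru, &pagelist);
        ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                            MIGRATE_SYNC, MR_MEMORY_FAILURE);
        if (ret && !list_empty(&pagelist))
                putback_movable_pages(&pagelist);       /* undo isolation */
        return ret;
}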
/*
@@ -1523,7 +1538,7 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
* When the target page is a free hugepage, just remove it
* from free hugepage list.
*/
- if (!get_page_unless_zero(compound_head(p))) {
+ if (!get_hwpoison_page(p)) {
if (PageHuge(p)) {
pr_info("%s: %#lx free huge page\n", __func__, pfn);
ret = 0;
@@ -1550,16 +1565,16 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
/*
* Try to free it.
*/
- put_page(page);
+ put_hwpoison_page(page);
shake_page(page, 1);
/*
* Did it turn free?
*/
ret = __get_any_page(page, pfn, 0);
- if (!PageLRU(page)) {
+ if (ret == 1 && !PageLRU(page)) {
/* Drop page reference which is from __get_any_page() */
- put_page(page);
+ put_hwpoison_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
@@ -1582,7 +1597,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
lock_page(hpage);
if (PageHWPoison(hpage)) {
unlock_page(hpage);
- put_page(hpage);
+ put_hwpoison_page(hpage);
pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
return -EBUSY;
}
@@ -1593,7 +1608,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
* get_any_page() and isolate_huge_page() takes a refcount each,
* so need to drop one here.
*/
- put_page(hpage);
+ put_hwpoison_page(hpage);
if (!ret) {
pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
return -EBUSY;
@@ -1617,11 +1632,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_order(hpage),
- &num_poisoned_pages);
+ num_poisoned_pages_add(1 << compound_order(hpage));
} else {
SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ num_poisoned_pages_inc();
}
}
return ret;
@@ -1642,7 +1656,7 @@ static int __soft_offline_page(struct page *page, int flags)
wait_on_page_writeback(page);
if (PageHWPoison(page)) {
unlock_page(page);
- put_page(page);
+ put_hwpoison_page(page);
pr_info("soft offline: %#lx page already poisoned\n", pfn);
return -EBUSY;
}
@@ -1657,10 +1671,10 @@ static int __soft_offline_page(struct page *page, int flags)
* would need to fix isolation locking first.
*/
if (ret == 1) {
- put_page(page);
+ put_hwpoison_page(page);
pr_info("soft_offline: %#lx: invalidated\n", pfn);
SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ num_poisoned_pages_inc();
return 0;
}
@@ -1674,7 +1688,7 @@ static int __soft_offline_page(struct page *page, int flags)
* Drop page reference which is came from get_any_page()
* successful isolate_lru_page() already took another one.
*/
- put_page(page);
+ put_hwpoison_page(page);
if (!ret) {
LIST_HEAD(pagelist);
inc_zone_page_state(page, NR_ISOLATED_ANON +
@@ -1694,22 +1708,6 @@ static int __soft_offline_page(struct page *page, int flags)
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
- } else {
- /*
- * After page migration succeeds, the source page can
- * be trapped in pagevec and actual freeing is delayed.
- * Freeing code works differently based on PG_hwpoison,
- * so there's a race. We need to make sure that the
- * source page should be freed back to buddy before
- * setting PG_hwpoison.
- */
- if (!is_free_buddy_page(page))
- drain_all_pages(page_zone(page));
- SetPageHWPoison(page);
- if (!is_free_buddy_page(page))
- pr_info("soft offline: %#lx: page leaked\n",
- pfn);
- atomic_long_inc(&num_poisoned_pages);
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
@@ -1748,26 +1746,22 @@ int soft_offline_page(struct page *page, int flags)
if (PageHWPoison(page)) {
pr_info("soft offline: %#lx page already poisoned\n", pfn);
+ if (flags & MF_COUNT_INCREASED)
+ put_hwpoison_page(page);
return -EBUSY;
}
if (!PageHuge(page) && PageTransHuge(hpage)) {
if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
pr_info("soft offline: %#lx: failed to split THP\n",
pfn);
+ if (flags & MF_COUNT_INCREASED)
+ put_hwpoison_page(page);
return -EBUSY;
}
}
get_online_mems();
- /*
- * Isolate the page, so that it doesn't get reallocated if it
- * was free. This flag should be kept set until the source page
- * is freed and PG_hwpoison on it is set.
- */
- if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
- set_migratetype_isolate(page, true);
-
ret = get_any_page(page, pfn, flags);
put_online_mems();
if (ret > 0) { /* for in-use pages */
@@ -1779,13 +1773,11 @@ int soft_offline_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
if (!dequeue_hwpoisoned_huge_page(hpage))
- atomic_long_add(1 << compound_order(hpage),
- &num_poisoned_pages);
+ num_poisoned_pages_add(1 << compound_order(hpage));
} else {
if (!TestSetPageHWPoison(page))
- atomic_long_inc(&num_poisoned_pages);
+ num_poisoned_pages_inc();
}
}
- unset_migratetype_isolate(page, MIGRATE_MOVABLE);
return ret;
}
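
One common way to reach soft_offline_page() from user space is madvise(MADV_SOFT_OFFLINE); a paraphrased sketch of that path, assuming the wiring in mm/madvise.c (soft_offline_at() is a hypothetical wrapper; the reference taken here is why the call passes MF_COUNT_INCREASED):

/* Paraphrased sketch of the MADV_SOFT_OFFLINE path in mm/madvise.c. */
static int soft_offline_at(unsigned long start)
{
        struct page *page;
        int ret = get_user_pages_fast(start, 1, 0, &page);

        if (ret != 1)
                return ret;
        return soft_offline_page(page, MF_COUNT_INCREASED);
}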