Diffstat (limited to 'kernel/mm/filemap.c')
-rw-r--r--   kernel/mm/filemap.c   106
1 file changed, 59 insertions, 47 deletions
diff --git a/kernel/mm/filemap.c b/kernel/mm/filemap.c
index 44301361c..91dc614cb 100644
--- a/kernel/mm/filemap.c
+++ b/kernel/mm/filemap.c
@@ -109,6 +109,51 @@
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
+static int page_cache_tree_insert(struct address_space *mapping,
+				  struct page *page, void **shadowp)
+{
+	struct radix_tree_node *node;
+	void **slot;
+	int error;
+
+	error = __radix_tree_create(&mapping->page_tree, page->index,
+				    &node, &slot);
+	if (error)
+		return error;
+	if (*slot) {
+		void *p;
+
+		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		if (!radix_tree_exceptional_entry(p))
+			return -EEXIST;
+		if (shadowp)
+			*shadowp = p;
+		mapping->nrshadows--;
+		if (node)
+			workingset_node_shadows_dec(node);
+	}
+	radix_tree_replace_slot(slot, page);
+	mapping->nrpages++;
+	if (node) {
+		workingset_node_pages_inc(node);
+		/*
+		 * Don't track node that contains actual pages.
+		 *
+		 * Avoid acquiring the list_lru lock if already
+		 * untracked. The list_empty() test is safe as
+		 * node->private_list is protected by
+		 * mapping->tree_lock.
+		 */
+		if (!list_empty(&node->private_list)) {
+			local_lock(workingset_shadow_lock);
+			list_lru_del(&__workingset_shadow_nodes,
+				     &node->private_list);
+			local_unlock(workingset_shadow_lock);
+		}
+	}
+	return 0;
+}
+
static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
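The added page_cache_tree_insert() has to cope with a slot that is occupied not by a page but by a shadow entry left behind when a page was previously evicted; radix_tree_exceptional_entry() is what tells the two apart. As a rough illustration of the idea only (a sketch assuming exceptional entries are pointers carrying a low tag bit, as in kernels of this vintage; the names below are made up for the example and are not the kernel's definitions):

	/* Illustrative only: how "shadow" data can share a slot with real
	 * page pointers. EXAMPLE_EXCEPTIONAL_BIT and both helpers are
	 * hypothetical names, not the kernel's API. */
	#define EXAMPLE_EXCEPTIONAL_BIT	2UL

	static inline int example_entry_is_shadow(void *entry)
	{
		/* Page pointers are word-aligned, so the tag bit is free. */
		return (unsigned long)entry & EXAMPLE_EXCEPTIONAL_BIT;
	}

	static inline void *example_pack_shadow(unsigned long eviction_info)
	{
		/* Pack non-pointer data into a slot, marked by the tag bit. */
		return (void *)((eviction_info << 2) | EXAMPLE_EXCEPTIONAL_BIT);
	}
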
@@ -122,6 +167,14 @@ static void page_cache_tree_delete(struct address_space *mapping,
	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
+	if (!node) {
+		/*
+		 * We need a node to properly account shadow
+		 * entries. Don't plant any without. XXX
+		 */
+		shadow = NULL;
+	}
+
	if (shadow) {
		mapping->nrshadows++;
		/*
@@ -540,9 +593,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
	memcg = mem_cgroup_begin_page_stat(old);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(old, NULL, memcg);
-	error = radix_tree_insert(&mapping->page_tree, offset, new);
+	error = page_cache_tree_insert(mapping, new, NULL);
	BUG_ON(error);
-	mapping->nrpages++;
	/*
	 * hugetlb pages do not participate in page cache accounting.
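With this hunk, replace_page_cache_page() stops open-coding radix_tree_insert() plus a manual mapping->nrpages++ and instead goes through page_cache_tree_insert(), which also performs the workingset accounting visible above: bumping the node's page count and pulling the node off the shadow-node LRU once it holds real pages. A minimal sketch of the invariant this preserves (simplified; whether these exact accessor helpers exist in a given tree is an assumption of the example):

	/* Simplified sketch: the shadow-node shrinker may only reclaim radix
	 * tree nodes that carry shadow entries and no real pages. Inserting
	 * pages without the helper (as the old radix_tree_insert() call did)
	 * can leave nodes on the shadow LRU even though they hold pages. */
	static bool example_node_reclaimable(struct radix_tree_node *node)
	{
		return workingset_node_shadows(node) > 0 &&
		       workingset_node_pages(node) == 0;
	}
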
@@ -564,51 +616,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
-static int page_cache_tree_insert(struct address_space *mapping,
-				  struct page *page, void **shadowp)
-{
-	struct radix_tree_node *node;
-	void **slot;
-	int error;
-
-	error = __radix_tree_create(&mapping->page_tree, page->index,
-				    &node, &slot);
-	if (error)
-		return error;
-	if (*slot) {
-		void *p;
-
-		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-		if (!radix_tree_exceptional_entry(p))
-			return -EEXIST;
-		if (shadowp)
-			*shadowp = p;
-		mapping->nrshadows--;
-		if (node)
-			workingset_node_shadows_dec(node);
-	}
-	radix_tree_replace_slot(slot, page);
-	mapping->nrpages++;
-	if (node) {
-		workingset_node_pages_inc(node);
-		/*
-		 * Don't track node that contains actual pages.
-		 *
-		 * Avoid acquiring the list_lru lock if already
-		 * untracked. The list_empty() test is safe as
-		 * node->private_list is protected by
-		 * mapping->tree_lock.
-		 */
-		if (!list_empty(&node->private_list)) {
-			local_lock(workingset_shadow_lock);
-			list_lru_del(&__workingset_shadow_nodes,
-				     &node->private_list);
-			local_unlock(workingset_shadow_lock);
-		}
-	}
-	return 0;
-}
-
static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
@@ -1557,6 +1564,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		cond_resched();
find_page:
+		if (fatal_signal_pending(current)) {
+			error = -EINTR;
+			goto out;
+		}
+
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
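The last hunk adds an early bail-out to the buffered read loop: before looking up the next page, the task checks for a pending fatal signal and returns -EINTR, so a process that has been killed (for example by the OOM killer) does not keep faulting in and copying pages. A minimal sketch of the same pattern, with hypothetical helpers standing in for the real read loop:

	#include <linux/sched.h>	/* fatal_signal_pending(), current, cond_resched() */

	/* Sketch only: struct my_ctx, more_work() and copy_one_page() are
	 * hypothetical stand-ins for the page-by-page read machinery. */
	static ssize_t example_copy_loop(struct my_ctx *ctx)
	{
		ssize_t done = 0;

		while (more_work(ctx)) {
			/* A killed task should stop as soon as possible. */
			if (fatal_signal_pending(current))
				return done ? done : -EINTR;

			done += copy_one_page(ctx);
			cond_resched();
		}
		return done;
	}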