author    José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit    e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/fs/ext4/readpage.c
parent    f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, one patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). That patch was dropped, since its logic was already present in the source. Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769 Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/fs/ext4/readpage.c')
-rw-r--r--  kernel/fs/ext4/readpage.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/fs/ext4/readpage.c b/kernel/fs/ext4/readpage.c
index 171b9ac4b..5dc5e9506 100644
--- a/kernel/fs/ext4/readpage.c
+++ b/kernel/fs/ext4/readpage.c
@@ -54,15 +54,15 @@ static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_crypto_ctx *ctx =
- container_of(work, struct ext4_crypto_ctx, work);
- struct bio *bio = ctx->bio;
+ container_of(work, struct ext4_crypto_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
struct bio_vec *bv;
int i;
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- int ret = ext4_decrypt(ctx, page);
+ int ret = ext4_decrypt(page);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
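The ctx->work and ctx->bio renames above follow from the read- and write-side fields of struct ext4_crypto_ctx being folded into an anonymous union in this kernel series. A sketch of the 4.4-era layout, with field names assumed from fs/ext4/ext4_crypto.h of that period (not a verbatim quote):

    /* sketch of struct ext4_crypto_ctx around v4.4 */
    struct ext4_crypto_ctx {
    	union {
    		struct {
    			struct page *bounce_page;	/* ciphertext page for writes */
    			struct page *control_page;	/* original page for writes */
    		} w;
    		struct {
    			struct bio *bio;		/* bio whose pages need decrypting */
    			struct work_struct work;	/* deferred post-read decryption */
    		} r;
    		struct list_head free_list;		/* entry on the free-context list */
    	};
    	char flags;		/* state flags */
    	char mode;		/* encryption mode for the tfm */
    };

ext4_decrypt() likewise lost its context argument and now takes only the page, deriving the inode (and hence the key) from page->mapping->host.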
@@ -98,7 +98,7 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
-static void mpage_end_io(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio)
{
struct bio_vec *bv;
int i;
@@ -106,19 +106,19 @@ static void mpage_end_io(struct bio *bio, int err)
if (ext4_bio_encrypted(bio)) {
struct ext4_crypto_ctx *ctx = bio->bi_private;
- if (err) {
+ if (bio->bi_error) {
ext4_release_crypto_ctx(ctx);
} else {
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- queue_work(ext4_read_workqueue, &ctx->work);
+ INIT_WORK(&ctx->r.work, completion_pages);
+ ctx->r.bio = bio;
+ queue_work(ext4_read_workqueue, &ctx->r.work);
return;
}
}
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- if (!err) {
+ if (!bio->bi_error) {
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
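This hunk tracks the v4.3 block-layer change that removed the error argument from bi_end_io callbacks; completion status now travels in bio->bi_error. A minimal sketch of the resulting callback shape (my_end_io is a hypothetical name, not from this file):

    /* sketch: post-4.3 bi_end_io takes only the bio; read the error from it */
    static void my_end_io(struct bio *bio)
    {
    	if (bio->bi_error)
    		pr_debug("read failed: %d\n", bio->bi_error);
    	bio_put(bio);
    }

    /* wired up at submission time: */
    bio->bi_end_io = my_end_io;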
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (pages) {
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping,
- page->index, GFP_KERNEL))
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ mapping_gfp_constraint(mapping, GFP_KERNEL)))
goto next_page;
}
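mapping_gfp_constraint(), new in v4.4, intersects the caller's gfp flags with the mapping's own mask, so page-cache allocations here cannot use flags the filesystem has forbidden for this mapping. The helper is small enough to sketch in full; roughly, per include/linux/pagemap.h of this era:

    /* restrict the given gfp mask to what the mapping allows */
    static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
    					       gfp_t gfp_mask)
    {
    	return mapping_gfp_mask(mapping) & gfp_mask;
    }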
@@ -284,7 +284,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
goto set_error_page;
}
bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+ min_t(int, nr_pages, BIO_MAX_PAGES));
if (!bio) {
if (ctx)
ext4_release_crypto_ctx(ctx);
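The bio_get_nr_vecs() call dropped above was removed from the block layer in the v4.3/v4.4 timeframe, once oversized bios could be split after submission; callers now simply cap the allocation at BIO_MAX_PAGES. A sketch of the resulting pattern, mirroring the surrounding function:

    /* sketch: size the bio for up to nr_pages segments, capped at
     * BIO_MAX_PAGES; the block layer splits anything larger as needed */
    bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
    if (!bio)
    	goto set_error_page;	/* error path from the surrounding function */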