From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001
From: José Pekkarinen
Date: Mon, 11 Apr 2016 10:41:07 +0300
Subject: These changes are the raw update to linux-4.4.6-rt14.
 Kernel sources are taken from kernel.org, and rt patch from the rt wiki
 download page.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).

The collisions have been removed because the patch's logic was already
present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen
---
 kernel/fs/btrfs/free-space-cache.c | 164 +++++++++++++++++++++++++------------
 1 file changed, 110 insertions(+), 54 deletions(-)

(limited to 'kernel/fs/btrfs/free-space-cache.c')

diff --git a/kernel/fs/btrfs/free-space-cache.c b/kernel/fs/btrfs/free-space-cache.c
index 9dbe5b548..cfe99bec4 100644
--- a/kernel/fs/btrfs/free-space-cache.c
+++ b/kernel/fs/btrfs/free-space-cache.c
@@ -85,8 +85,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 	}
 
 	mapping_set_gfp_mask(inode->i_mapping,
-			mapping_gfp_mask(inode->i_mapping) &
-			~(__GFP_FS | __GFP_HIGHMEM));
+			mapping_gfp_constraint(inode->i_mapping,
+			~(__GFP_FS | __GFP_HIGHMEM)));
 
 	return inode;
 }
@@ -231,6 +231,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 {
 	int ret = 0;
 	struct btrfs_path *path = btrfs_alloc_path();
+	bool locked = false;
 
 	if (!path) {
 		ret = -ENOMEM;
@@ -238,6 +239,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 	}
 
 	if (block_group) {
+		locked = true;
 		mutex_lock(&trans->transaction->cache_write_mutex);
 		if (!list_empty(&block_group->io_list)) {
 			list_del_init(&block_group->io_list);
@@ -269,18 +271,14 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 	 */
 	ret = btrfs_truncate_inode_items(trans, root, inode,
 					 0, BTRFS_EXTENT_DATA_KEY);
-	if (ret) {
-		mutex_unlock(&trans->transaction->cache_write_mutex);
-		btrfs_abort_transaction(trans, root, ret);
-		return ret;
-	}
+	if (ret)
+		goto fail;
 
 	ret = btrfs_update_inode(trans, root, inode);
 
-	if (block_group)
-		mutex_unlock(&trans->transaction->cache_write_mutex);
-
 fail:
+	if (locked)
+		mutex_unlock(&trans->transaction->cache_write_mutex);
 	if (ret)
 		btrfs_abort_transaction(trans, root, ret);
@@ -452,9 +450,9 @@ static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 
 	gen = io_ctl->cur;
 	if (le64_to_cpu(*gen) != generation) {
-		printk_ratelimited(KERN_ERR "BTRFS: space cache generation "
-				   "(%Lu) does not match inode (%Lu)\n", *gen,
-				   generation);
+		btrfs_err_rl(io_ctl->root->fs_info,
+			"space cache generation (%llu) does not match inode (%llu)",
+			*gen, generation);
 		io_ctl_unmap_page(io_ctl);
 		return -EIO;
 	}
@@ -508,8 +506,8 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
 			  PAGE_CACHE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	if (val != crc) {
-		printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free "
-				   "space cache\n");
+		btrfs_err_rl(io_ctl->root->fs_info,
+			"csum mismatch on free space cache");
 		io_ctl_unmap_page(io_ctl);
 		return -EIO;
 	}
@@ -893,7 +891,7 @@ out:
 		spin_unlock(&block_group->lock);
 		ret = 0;
 
-		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
 			   block_group->key.objectid);
 	}
@@ -1217,7 +1215,7 @@ out:
  * @offset - the offset for the key we'll insert
  *
  * This function writes out a free space cache struct to disk for quick recovery
- * on mount. This will return 0 if it was successfull in writing the cache out,
+ * on mount. This will return 0 if it was successful in writing the cache out,
  * or an errno if it was not.
  */
 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
@@ -1732,7 +1730,7 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
  */
 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 			 struct btrfs_free_space *bitmap_info, u64 *offset,
-			 u64 *bytes)
+			 u64 *bytes, bool for_alloc)
 {
 	unsigned long found_bits = 0;
 	unsigned long max_bits = 0;
@@ -1740,11 +1738,26 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 	unsigned long next_zero;
 	unsigned long extent_bits;
 
+	/*
+	 * Skip searching the bitmap if we don't have a contiguous section that
+	 * is large enough for this allocation.
+	 */
+	if (for_alloc &&
+	    bitmap_info->max_extent_size &&
+	    bitmap_info->max_extent_size < *bytes) {
+		*bytes = bitmap_info->max_extent_size;
+		return -1;
+	}
+
 	i = offset_to_bit(bitmap_info->offset, ctl->unit,
 			  max_t(u64, *offset, bitmap_info->offset));
 	bits = bytes_to_bits(*bytes, ctl->unit);
 
 	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
+		if (for_alloc && bits == 1) {
+			found_bits = 1;
+			break;
+		}
 		next_zero = find_next_zero_bit(bitmap_info->bitmap,
 					       BITS_PER_BITMAP, i);
 		extent_bits = next_zero - i;
@@ -1764,6 +1777,7 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 	}
 
 	*bytes = (u64)(max_bits) * ctl->unit;
+	bitmap_info->max_extent_size = *bytes;
 	return -1;
 }
@@ -1815,7 +1829,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 		if (entry->bitmap) {
 			u64 size = *bytes;
 
-			ret = search_bitmap(ctl, entry, &tmp, &size);
+			ret = search_bitmap(ctl, entry, &tmp, &size, true);
 			if (!ret) {
 				*offset = tmp;
 				*bytes = size;
@@ -1876,7 +1890,8 @@ again:
 	search_start = *offset;
 	search_bytes = ctl->unit;
 	search_bytes = min(search_bytes, end - search_start + 1);
-	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
+	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
+			false);
 	if (ret < 0 || search_start != *offset)
 		return -EINVAL;
@@ -1921,7 +1936,7 @@ again:
 		search_start = *offset;
 		search_bytes = ctl->unit;
 		ret = search_bitmap(ctl, bitmap_info, &search_start,
-				    &search_bytes);
+				    &search_bytes, false);
 		if (ret < 0 || search_start != *offset)
 			return -EAGAIN;
@@ -1945,6 +1960,12 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
 
 	bitmap_set_bits(ctl, info, offset, bytes_to_set);
 
+	/*
+	 * We set some bytes, we have no idea what the max extent size is
+	 * anymore.
+	 */
+	info->max_extent_size = 0;
+
 	return bytes_to_set;
 }
@@ -1953,12 +1974,19 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		      struct btrfs_free_space *info)
 {
 	struct btrfs_block_group_cache *block_group = ctl->private;
+	bool forced = false;
+
+#ifdef CONFIG_BTRFS_DEBUG
+	if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
+					     block_group))
+		forced = true;
+#endif
 
 	/*
 	 * If we are below the extents threshold then we can add this as an
 	 * extent, and don't have to deal with the bitmap
 	 */
-	if (ctl->free_extents < ctl->extents_thresh) {
+	if (!forced && ctl->free_extents < ctl->extents_thresh) {
 		/*
 		 * If this block group has some small extents we don't want to
 		 * use up all of our free slots in the cache with them, we want
@@ -2663,7 +2691,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 	search_start = min_start;
 	search_bytes = bytes;
 
-	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
+	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
 	if (err) {
 		if (search_bytes > *max_extent_size)
 			*max_extent_size = search_bytes;
@@ -2777,6 +2805,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
 	unsigned long want_bits;
 	unsigned long min_bits;
 	unsigned long found_bits;
+	unsigned long max_bits = 0;
 	unsigned long start = 0;
 	unsigned long total_found = 0;
 	int ret;
@@ -2786,6 +2815,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
 	want_bits = bytes_to_bits(bytes, ctl->unit);
 	min_bits = bytes_to_bits(min_bytes, ctl->unit);
 
+	/*
+	 * Don't bother looking for a cluster in this bitmap if it's heavily
+	 * fragmented.
+	 */
+	if (entry->max_extent_size &&
+	    entry->max_extent_size < cont1_bytes)
+		return -ENOSPC;
 again:
 	found_bits = 0;
 	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
@@ -2793,13 +2829,19 @@ again:
 						BITS_PER_BITMAP, i);
 		if (next_zero - i >= min_bits) {
 			found_bits = next_zero - i;
+			if (found_bits > max_bits)
+				max_bits = found_bits;
 			break;
 		}
+		if (next_zero - i > max_bits)
+			max_bits = next_zero - i;
 		i = next_zero;
 	}
 
-	if (!found_bits)
+	if (!found_bits) {
+		entry->max_extent_size = (u64)max_bits * ctl->unit;
 		return -ENOSPC;
+	}
 
 	if (!total_found) {
 		start = i;
@@ -2930,7 +2972,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 		     u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_free_space *entry;
+	struct btrfs_free_space *entry = NULL;
 	int ret = -ENOSPC;
 	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
@@ -2941,8 +2983,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	 * The bitmap that covers offset won't be in the list unless offset
 	 * is just its start offset.
 	 */
-	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
-	if (entry->offset != bitmap_offset) {
+	if (!list_empty(bitmaps))
+		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+
+	if (!entry || entry->offset != bitmap_offset) {
 		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
 		if (entry && list_empty(&entry->list))
 			list_add(&entry->list, bitmaps);
@@ -3058,6 +3102,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 	spin_lock_init(&cluster->refill_lock);
 	cluster->root = RB_ROOT;
 	cluster->max_size = 0;
+	cluster->fragmented = false;
 	INIT_LIST_HEAD(&cluster->block_group_list);
 	cluster->block_group = NULL;
 }
@@ -3225,7 +3270,7 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
 		}
 
 		bytes = minlen;
-		ret2 = search_bitmap(ctl, entry, &start, &bytes);
+		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
 		if (ret2 || start >= end) {
 			spin_unlock(&ctl->tree_lock);
 			mutex_unlock(&ctl->cache_writeout_mutex);
@@ -3274,35 +3319,23 @@ next:
 	return ret;
 }
 
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
 {
-	int ret;
+	atomic_inc(&cache->trimming);
+}
 
-	*trimmed = 0;
+void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
+{
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	bool cleanup;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	atomic_inc(&block_group->trimming);
+	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
+		   block_group->removed);
 	spin_unlock(&block_group->lock);
 
-	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
-	if (ret)
-		goto out;
-
-	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
-out:
-	spin_lock(&block_group->lock);
-	if (atomic_dec_and_test(&block_group->trimming) &&
-	    block_group->removed) {
-		struct extent_map_tree *em_tree;
-		struct extent_map *em;
-
-		spin_unlock(&block_group->lock);
-
+	if (cleanup) {
 		lock_chunks(block_group->fs_info->chunk_root);
 		em_tree = &block_group->fs_info->mapping_tree.map_tree;
 		write_lock(&em_tree->lock);
@@ -3326,10 +3359,31 @@ out:
 		 * this block group have left 1 entry each one. Free them.
 		 */
 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
-	} else {
+	}
+}
+
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+	int ret;
+
+	*trimmed = 0;
+
+	spin_lock(&block_group->lock);
+	if (block_group->removed) {
 		spin_unlock(&block_group->lock);
+		return 0;
 	}
+	btrfs_get_block_group_trimming(block_group);
+	spin_unlock(&block_group->lock);
 
+	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+	if (ret)
+		goto out;
+
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+out:
+	btrfs_put_block_group_trimming(block_group);
 	return ret;
 }
@@ -3369,7 +3423,7 @@ u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
 		u64 count = 1;
 		int ret;
 
-		ret = search_bitmap(ctl, entry, &offset, &count);
+		ret = search_bitmap(ctl, entry, &offset, &count, true);
 		/* Logic error; Should be empty if it can't find anything */
 		ASSERT(!ret);
@@ -3525,6 +3579,7 @@ again:
 	spin_lock(&ctl->tree_lock);
 	info->offset = offset;
 	info->bytes = bytes;
+	info->max_extent_size = 0;
 	ret = link_free_space(ctl, info);
 	spin_unlock(&ctl->tree_lock);
 	if (ret)
@@ -3552,6 +3607,7 @@ again:
 	}
 
 	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+
 	bytes -= bytes_added;
 	offset += bytes_added;
 	spin_unlock(&ctl->tree_lock);
@@ -3595,7 +3651,7 @@ have_info:
 		bit_off = offset;
 		bit_bytes = ctl->unit;
-		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
+		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
 		if (!ret) {
 			if (bit_off == offset) {
 				ret = 1;
--
cgit 1.2.3-korg
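
The recurring theme in the search_bitmap()/btrfs_bitmap_cluster() hunks above is the
new max_extent_size bookkeeping: when a bitmap search fails, the largest free run
seen is cached on the bitmap entry, and later allocation-time searches (for_alloc)
use that cached value to skip bitmaps that cannot possibly satisfy the request.
The following is a simplified user-space sketch of that idea only; the names
(space_bitmap, find_free_run) and the 64-bit toy bitmap are hypothetical and are
not the kernel's btrfs_free_space or search_bitmap API.

/*
 * Illustrative sketch: cache the largest free run found by a failed search
 * so that subsequent allocation-time searches can bail out early.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 64			/* one "unit" per bit, tiny on purpose */

struct space_bitmap {
	uint64_t bits;			/* 1 = free unit, 0 = used unit */
	unsigned max_extent;		/* largest free run seen by a failed search, 0 = unknown */
};

/*
 * Look for 'want' contiguous free units.  On success return the start bit.
 * On failure return -1 and, when searching for an allocation, remember the
 * largest run we did find so the next caller can skip this bitmap cheaply.
 */
static int find_free_run(struct space_bitmap *b, unsigned want, bool for_alloc)
{
	unsigned run = 0, best = 0;
	int start = -1;

	/* Fast path modeled on the patch: a cached ceiling lets us bail out. */
	if (for_alloc && b->max_extent && b->max_extent < want)
		return -1;

	for (unsigned i = 0; i < NBITS; i++) {
		if (b->bits & (1ULL << i)) {
			if (run == 0)
				start = i;
			run++;
			if (run >= want)
				return start;
		} else {
			if (run > best)
				best = run;
			run = 0;
		}
	}
	if (run > best)
		best = run;

	/* Failed: cache the best run so future for_alloc searches can skip us. */
	b->max_extent = best;
	return -1;
}

int main(void)
{
	/* Free units at bits 0-2 and 10-13: the longest free run is 4. */
	struct space_bitmap b = { .bits = 0x3C07, .max_extent = 0 };

	printf("ask for 8 -> %d (fails, caches max run)\n",
	       find_free_run(&b, 8, true));
	printf("cached max run: %u\n", b.max_extent);
	printf("ask for 8 again -> %d (skipped via cache)\n",
	       find_free_run(&b, 8, true));
	printf("ask for 3 -> %d\n", find_free_run(&b, 3, true));
	return 0;
}

As in the patch, the cache only serves as an upper bound for allocation-style
lookups; whenever bits are set again (add_bytes_to_bitmap in the kernel code),
the cached value has to be reset because it may have become stale.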
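
The trimming hunks split the reference counting that used to live inline in
btrfs_trim_block_group() into btrfs_get_block_group_trimming() and
btrfs_put_block_group_trimming(), with the last "put" on an already-removed
block group performing the deferred cleanup.  The sketch below is a minimal
user-space model of that get/put pattern, assuming pthreads and C11 atomics;
bg_model, trim_get and trim_put are hypothetical names, not the kernel
functions.

/*
 * Illustrative sketch: last-put-does-cleanup pattern for in-flight trims.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct bg_model {
	pthread_mutex_t lock;
	atomic_int trimming;		/* how many trims are in flight */
	bool removed;			/* block group logically deleted */
};

/* "get": only start trimming if the group still exists. */
static bool trim_get(struct bg_model *bg)
{
	bool ok;

	pthread_mutex_lock(&bg->lock);
	ok = !bg->removed;
	if (ok)
		atomic_fetch_add(&bg->trimming, 1);
	pthread_mutex_unlock(&bg->lock);
	return ok;
}

/*
 * "put": drop the reference; the last trimmer to leave a group that was
 * removed in the meantime is responsible for the deferred cleanup, which is
 * the decision the patch computes as 'cleanup' under block_group->lock.
 */
static void trim_put(struct bg_model *bg)
{
	bool cleanup;

	pthread_mutex_lock(&bg->lock);
	cleanup = (atomic_fetch_sub(&bg->trimming, 1) == 1) && bg->removed;
	pthread_mutex_unlock(&bg->lock);

	if (cleanup)
		printf("last trimmer: doing deferred cleanup\n");
}

int main(void)
{
	struct bg_model bg = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.trimming = 0,
		.removed = false,
	};

	if (trim_get(&bg)) {
		/* ... trim work would happen here ... */
		pthread_mutex_lock(&bg.lock);
		bg.removed = true;	/* group removed while the trim is in flight */
		pthread_mutex_unlock(&bg.lock);
		trim_put(&bg);		/* prints the deferred-cleanup message */
	}
	return 0;
}

Factoring the pair out lets other call sites (block group removal in the real
code) take and drop the same reference without duplicating the cleanup logic.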