Diffstat (limited to 'kernel/lib')
81 files changed, 7715 insertions, 681 deletions
diff --git a/kernel/lib/842/842.h b/kernel/lib/842/842.h new file mode 100644 index 000000000..e0a122bc1 --- /dev/null +++ b/kernel/lib/842/842.h @@ -0,0 +1,129 @@ + +#ifndef __842_H__ +#define __842_H__ + +/* The 842 compressed format is made up of multiple blocks, each of + * which have the format: + * + * <template>[arg1][arg2][arg3][arg4] + * + * where there are between 0 and 4 template args, depending on the specific + * template operation. For normal operations, each arg is either a specific + * number of data bytes to add to the output buffer, or an index pointing + * to a previously-written number of data bytes to copy to the output buffer. + * + * The template code is a 5-bit value. This code indicates what to do with + * the following data. Template codes from 0 to 0x19 should use the template + * table, the static "decomp_ops" table used in decompress. For each template + * (table row), there are between 1 and 4 actions; each action corresponds to + * an arg following the template code bits. Each action is either a "data" + * type action, or a "index" type action, and each action results in 2, 4, or 8 + * bytes being written to the output buffer. Each template (i.e. all actions + * in the table row) will add up to 8 bytes being written to the output buffer. + * Any row with less than 4 actions is padded with noop actions, indicated by + * N0 (for which there is no corresponding arg in the compressed data buffer). + * + * "Data" actions, indicated in the table by D2, D4, and D8, mean that the + * corresponding arg is 2, 4, or 8 bytes, respectively, in the compressed data + * buffer should be copied directly to the output buffer. + * + * "Index" actions, indicated in the table by I2, I4, and I8, mean the + * corresponding arg is an index parameter that points to, respectively, a 2, + * 4, or 8 byte value already in the output buffer, that should be copied to + * the end of the output buffer. Essentially, the index points to a position + * in a ring buffer that contains the last N bytes of output buffer data. + * The number of bits for each index's arg are: 8 bits for I2, 9 bits for I4, + * and 8 bits for I8. Since each index points to a 2, 4, or 8 byte section, + * this means that I2 can reference 512 bytes ((2^8 bits = 256) * 2 bytes), I4 + * can reference 2048 bytes ((2^9 = 512) * 4 bytes), and I8 can reference 2048 + * bytes ((2^8 = 256) * 8 bytes). Think of it as a kind-of ring buffer for + * each of I2, I4, and I8 that are updated for each byte written to the output + * buffer. In this implementation, the output buffer is directly used for each + * index; there is no additional memory required. Note that the index is into + * a ring buffer, not a sliding window; for example, if there have been 260 + * bytes written to the output buffer, an I2 index of 0 would index to byte 256 + * in the output buffer, while an I2 index of 16 would index to byte 16 in the + * output buffer. + * + * There are also 3 special template codes; 0x1b for "repeat", 0x1c for + * "zeros", and 0x1e for "end". The "repeat" operation is followed by a 6 bit + * arg N indicating how many times to repeat. The last 8 bytes written to the + * output buffer are written again to the output buffer, N + 1 times. The + * "zeros" operation, which has no arg bits, writes 8 zeros to the output + * buffer. The "end" operation, which also has no arg bits, signals the end + * of the compressed data. 
There may be some number of padding (don't care, + * but usually 0) bits after the "end" operation bits, to fill the buffer + * length to a specific byte multiple (usually a multiple of 8, 16, or 32 + * bytes). + * + * This software implementation also uses one of the undefined template values, + * 0x1d as a special "short data" template code, to represent less than 8 bytes + * of uncompressed data. It is followed by a 3 bit arg N indicating how many + * data bytes will follow, and then N bytes of data, which should be copied to + * the output buffer. This allows the software 842 compressor to accept input + * buffers that are not an exact multiple of 8 bytes long. However, those + * compressed buffers containing this sw-only template will be rejected by + * the 842 hardware decompressor, and must be decompressed with this software + * library. The 842 software compression module includes a parameter to + * disable using this sw-only "short data" template, and instead simply + * reject any input buffer that is not a multiple of 8 bytes long. + * + * After all actions for each operation code are processed, another template + * code is in the next 5 bits. The decompression ends once the "end" template + * code is detected. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/crc32.h> +#include <asm/unaligned.h> + +#include <linux/sw842.h> + +/* special templates */ +#define OP_REPEAT (0x1B) +#define OP_ZEROS (0x1C) +#define OP_END (0x1E) + +/* sw only template - this is not in the hw design; it's used only by this + * software compressor and decompressor, to allow input buffers that aren't + * a multiple of 8. + */ +#define OP_SHORT_DATA (0x1D) + +/* additional bits of each op param */ +#define OP_BITS (5) +#define REPEAT_BITS (6) +#define SHORT_DATA_BITS (3) +#define I2_BITS (8) +#define I4_BITS (9) +#define I8_BITS (8) +#define CRC_BITS (32) + +#define REPEAT_BITS_MAX (0x3f) +#define SHORT_DATA_BITS_MAX (0x7) + +/* Arbitrary values used to indicate action */ +#define OP_ACTION (0x70) +#define OP_ACTION_INDEX (0x10) +#define OP_ACTION_DATA (0x20) +#define OP_ACTION_NOOP (0x40) +#define OP_AMOUNT (0x0f) +#define OP_AMOUNT_0 (0x00) +#define OP_AMOUNT_2 (0x02) +#define OP_AMOUNT_4 (0x04) +#define OP_AMOUNT_8 (0x08) + +#define D2 (OP_ACTION_DATA | OP_AMOUNT_2) +#define D4 (OP_ACTION_DATA | OP_AMOUNT_4) +#define D8 (OP_ACTION_DATA | OP_AMOUNT_8) +#define I2 (OP_ACTION_INDEX | OP_AMOUNT_2) +#define I4 (OP_ACTION_INDEX | OP_AMOUNT_4) +#define I8 (OP_ACTION_INDEX | OP_AMOUNT_8) +#define N0 (OP_ACTION_NOOP | OP_AMOUNT_0) + +/* the max of the regular templates - not including the special templates */ +#define OPS_MAX (0x1a) + +#endif diff --git a/kernel/lib/842/842_compress.c b/kernel/lib/842/842_compress.c new file mode 100644 index 000000000..4051339bd --- /dev/null +++ b/kernel/lib/842/842_compress.c @@ -0,0 +1,639 @@ +/* + * 842 Software Compression + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * See 842.h for details of the 842 compressed format. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define MODULE_NAME "842_compress" + +#include <linux/hashtable.h> + +#include "842.h" +#include "842_debugfs.h" + +#define SW842_HASHTABLE8_BITS (10) +#define SW842_HASHTABLE4_BITS (11) +#define SW842_HASHTABLE2_BITS (10) + +/* By default, we allow compressing input buffers of any length, but we must + * use the non-standard "short data" template so the decompressor can correctly + * reproduce the uncompressed data buffer at the right length. However the + * hardware 842 compressor will not recognize the "short data" template, and + * will fail to decompress any compressed buffer containing it (I have no idea + * why anyone would want to use software to compress and hardware to decompress + * but that's beside the point). This parameter forces the compression + * function to simply reject any input buffer that isn't a multiple of 8 bytes + * long, instead of using the "short data" template, so that all compressed + * buffers produced by this function will be decompressable by the 842 hardware + * decompressor. Unless you have a specific need for that, leave this disabled + * so that any length buffer can be compressed. + */ +static bool sw842_strict; +module_param_named(strict, sw842_strict, bool, 0644); + +static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */ + { I8, N0, N0, N0, 0x19 }, /* 8 */ + { I4, I4, N0, N0, 0x18 }, /* 18 */ + { I4, I2, I2, N0, 0x17 }, /* 25 */ + { I2, I2, I4, N0, 0x13 }, /* 25 */ + { I2, I2, I2, I2, 0x12 }, /* 32 */ + { I4, I2, D2, N0, 0x16 }, /* 33 */ + { I4, D2, I2, N0, 0x15 }, /* 33 */ + { I2, D2, I4, N0, 0x0e }, /* 33 */ + { D2, I2, I4, N0, 0x09 }, /* 33 */ + { I2, I2, I2, D2, 0x11 }, /* 40 */ + { I2, I2, D2, I2, 0x10 }, /* 40 */ + { I2, D2, I2, I2, 0x0d }, /* 40 */ + { D2, I2, I2, I2, 0x08 }, /* 40 */ + { I4, D4, N0, N0, 0x14 }, /* 41 */ + { D4, I4, N0, N0, 0x04 }, /* 41 */ + { I2, I2, D4, N0, 0x0f }, /* 48 */ + { I2, D2, I2, D2, 0x0c }, /* 48 */ + { I2, D4, I2, N0, 0x0b }, /* 48 */ + { D2, I2, I2, D2, 0x07 }, /* 48 */ + { D2, I2, D2, I2, 0x06 }, /* 48 */ + { D4, I2, I2, N0, 0x03 }, /* 48 */ + { I2, D2, D4, N0, 0x0a }, /* 56 */ + { D2, I2, D4, N0, 0x05 }, /* 56 */ + { D4, I2, D2, N0, 0x02 }, /* 56 */ + { D4, D2, I2, N0, 0x01 }, /* 56 */ + { D8, N0, N0, N0, 0x00 }, /* 64 */ +}; + +struct sw842_hlist_node8 { + struct hlist_node node; + u64 data; + u8 index; +}; + +struct sw842_hlist_node4 { + struct hlist_node node; + u32 data; + u16 index; +}; + +struct sw842_hlist_node2 { + struct hlist_node node; + u16 data; + u8 index; +}; + +#define INDEX_NOT_FOUND (-1) +#define INDEX_NOT_CHECKED (-2) + +struct sw842_param { + u8 *in; + u8 *instart; + u64 ilen; + u8 *out; + u64 olen; + u8 bit; + u64 data8[1]; + u32 data4[2]; + u16 data2[4]; + int index8[1]; + int index4[2]; + int index2[4]; + DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS); + DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS); + DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS); + struct sw842_hlist_node8 node8[1 << I8_BITS]; + struct sw842_hlist_node4 node4[1 << I4_BITS]; + struct sw842_hlist_node2 node2[1 << I2_BITS]; +}; + +#define get_input_data(p, o, b) \ + be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o)))) + +#define init_hashtable_nodes(p, b) do { \ + int _i; \ + hash_init((p)->htable##b); \ + for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \ + (p)->node##b[_i].index = _i; \ + (p)->node##b[_i].data = 0; \ + INIT_HLIST_NODE(&(p)->node##b[_i].node); \ + } \ +} while (0) + 
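[Illustration only; the helper below is not part of the patch.] The bit counts in the comp_ops comments above are simply the sum of each arg's width: 16/32/64 bits for D2/D4/D8, and I2_BITS/I4_BITS/I8_BITS (8/9/8) for the index ops, with the 5-bit template code (OP_BITS) added on top. A minimal sketch of that arithmetic, using only the constants defined in 842.h:

static int sketch_template_param_bits(const u8 *t)
{
	int i, bits = 0;

	for (i = 0; i < 4; i++) {
		switch (t[i]) {
		case D2: bits += 16; break;
		case D4: bits += 32; break;
		case D8: bits += 64; break;
		case I2: bits += I2_BITS; break;
		case I4: bits += I4_BITS; break;
		case I8: bits += I8_BITS; break;
		default: break;	/* N0 adds no arg bits */
		}
	}
	/* e.g. the {I2, I2, I2, I2} row (opcode 0x12): 4 * 8 = 32 param bits,
	 * or 32 + OP_BITS = 37 bits total for 8 output bytes; the {D8} row
	 * (opcode 0x00): 64 + 5 = 69 bits, i.e. a slight expansion.
	 */
	return bits;
}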
+#define find_index(p, b, n) ({ \ + struct sw842_hlist_node##b *_n; \ + p->index##b[n] = INDEX_NOT_FOUND; \ + hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ + if (p->data##b[n] == _n->data) { \ + p->index##b[n] = _n->index; \ + break; \ + } \ + } \ + p->index##b[n] >= 0; \ +}) + +#define check_index(p, b, n) \ + ((p)->index##b[n] == INDEX_NOT_CHECKED \ + ? find_index(p, b, n) \ + : (p)->index##b[n] >= 0) + +#define replace_hash(p, b, i, d) do { \ + struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \ + hash_del(&_n->node); \ + _n->data = (p)->data##b[d]; \ + pr_debug("add hash index%x %x pos %x data %lx\n", b, \ + (unsigned int)_n->index, \ + (unsigned int)((p)->in - (p)->instart), \ + (unsigned long)_n->data); \ + hash_add((p)->htable##b, &_n->node, _n->data); \ +} while (0) + +static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; + +static int add_bits(struct sw842_param *p, u64 d, u8 n); + +static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) +{ + int ret; + + if (n <= s) + return -EINVAL; + + ret = add_bits(p, d >> s, n - s); + if (ret) + return ret; + return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); +} + +static int add_bits(struct sw842_param *p, u64 d, u8 n) +{ + int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; + u64 o; + u8 *out = p->out; + + pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); + + if (n > 64) + return -EINVAL; + + /* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0), + * or if we're at the end of the output buffer and would write past end + */ + if (bits > 64) + return __split_add_bits(p, d, n, 32); + else if (p->olen < 8 && bits > 32 && bits <= 56) + return __split_add_bits(p, d, n, 16); + else if (p->olen < 4 && bits > 16 && bits <= 24) + return __split_add_bits(p, d, n, 8); + + if (DIV_ROUND_UP(bits, 8) > p->olen) + return -ENOSPC; + + o = *out & bmask[b]; + d <<= s; + + if (bits <= 8) + *out = o | d; + else if (bits <= 16) + put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out); + else if (bits <= 24) + put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out); + else if (bits <= 32) + put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out); + else if (bits <= 40) + put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out); + else if (bits <= 48) + put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out); + else if (bits <= 56) + put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out); + else + put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out); + + p->bit += n; + + if (p->bit > 7) { + p->out += p->bit / 8; + p->olen -= p->bit / 8; + p->bit %= 8; + } + + return 0; +} + +static int add_template(struct sw842_param *p, u8 c) +{ + int ret, i, b = 0; + u8 *t = comp_ops[c]; + bool inv = false; + + if (c >= OPS_MAX) + return -EINVAL; + + pr_debug("template %x\n", t[4]); + + ret = add_bits(p, t[4], OP_BITS); + if (ret) + return ret; + + for (i = 0; i < 4; i++) { + pr_debug("op %x\n", t[i]); + + switch (t[i] & OP_AMOUNT) { + case OP_AMOUNT_8: + if (b) + inv = true; + else if (t[i] & OP_ACTION_INDEX) + ret = add_bits(p, p->index8[0], I8_BITS); + else if (t[i] & OP_ACTION_DATA) + ret = add_bits(p, p->data8[0], 64); + else + inv = true; + break; + case OP_AMOUNT_4: + if (b == 2 && t[i] & OP_ACTION_DATA) + ret = add_bits(p, get_input_data(p, 2, 32), 32); + else if (b != 0 && b != 4) + inv = true; + else if (t[i] & OP_ACTION_INDEX) + ret = add_bits(p, p->index4[b >> 2], I4_BITS); + else if (t[i] & OP_ACTION_DATA) + ret = add_bits(p, p->data4[b >> 2], 32); 
+ else + inv = true; + break; + case OP_AMOUNT_2: + if (b != 0 && b != 2 && b != 4 && b != 6) + inv = true; + if (t[i] & OP_ACTION_INDEX) + ret = add_bits(p, p->index2[b >> 1], I2_BITS); + else if (t[i] & OP_ACTION_DATA) + ret = add_bits(p, p->data2[b >> 1], 16); + else + inv = true; + break; + case OP_AMOUNT_0: + inv = (b != 8) || !(t[i] & OP_ACTION_NOOP); + break; + default: + inv = true; + break; + } + + if (ret) + return ret; + + if (inv) { + pr_err("Invalid templ %x op %d : %x %x %x %x\n", + c, i, t[0], t[1], t[2], t[3]); + return -EINVAL; + } + + b += t[i] & OP_AMOUNT; + } + + if (b != 8) { + pr_err("Invalid template %x len %x : %x %x %x %x\n", + c, b, t[0], t[1], t[2], t[3]); + return -EINVAL; + } + + if (sw842_template_counts) + atomic_inc(&template_count[t[4]]); + + return 0; +} + +static int add_repeat_template(struct sw842_param *p, u8 r) +{ + int ret; + + /* repeat param is 0-based */ + if (!r || --r > REPEAT_BITS_MAX) + return -EINVAL; + + ret = add_bits(p, OP_REPEAT, OP_BITS); + if (ret) + return ret; + + ret = add_bits(p, r, REPEAT_BITS); + if (ret) + return ret; + + if (sw842_template_counts) + atomic_inc(&template_repeat_count); + + return 0; +} + +static int add_short_data_template(struct sw842_param *p, u8 b) +{ + int ret, i; + + if (!b || b > SHORT_DATA_BITS_MAX) + return -EINVAL; + + ret = add_bits(p, OP_SHORT_DATA, OP_BITS); + if (ret) + return ret; + + ret = add_bits(p, b, SHORT_DATA_BITS); + if (ret) + return ret; + + for (i = 0; i < b; i++) { + ret = add_bits(p, p->in[i], 8); + if (ret) + return ret; + } + + if (sw842_template_counts) + atomic_inc(&template_short_data_count); + + return 0; +} + +static int add_zeros_template(struct sw842_param *p) +{ + int ret = add_bits(p, OP_ZEROS, OP_BITS); + + if (ret) + return ret; + + if (sw842_template_counts) + atomic_inc(&template_zeros_count); + + return 0; +} + +static int add_end_template(struct sw842_param *p) +{ + int ret = add_bits(p, OP_END, OP_BITS); + + if (ret) + return ret; + + if (sw842_template_counts) + atomic_inc(&template_end_count); + + return 0; +} + +static bool check_template(struct sw842_param *p, u8 c) +{ + u8 *t = comp_ops[c]; + int i, match, b = 0; + + if (c >= OPS_MAX) + return false; + + for (i = 0; i < 4; i++) { + if (t[i] & OP_ACTION_INDEX) { + if (t[i] & OP_AMOUNT_2) + match = check_index(p, 2, b >> 1); + else if (t[i] & OP_AMOUNT_4) + match = check_index(p, 4, b >> 2); + else if (t[i] & OP_AMOUNT_8) + match = check_index(p, 8, 0); + else + return false; + if (!match) + return false; + } + + b += t[i] & OP_AMOUNT; + } + + return true; +} + +static void get_next_data(struct sw842_param *p) +{ + p->data8[0] = get_input_data(p, 0, 64); + p->data4[0] = get_input_data(p, 0, 32); + p->data4[1] = get_input_data(p, 4, 32); + p->data2[0] = get_input_data(p, 0, 16); + p->data2[1] = get_input_data(p, 2, 16); + p->data2[2] = get_input_data(p, 4, 16); + p->data2[3] = get_input_data(p, 6, 16); +} + +/* update the hashtable entries. 
+ * only call this after finding/adding the current template + * the dataN fields for the current 8 byte block must be already updated + */ +static void update_hashtables(struct sw842_param *p) +{ + u64 pos = p->in - p->instart; + u64 n8 = (pos >> 3) % (1 << I8_BITS); + u64 n4 = (pos >> 2) % (1 << I4_BITS); + u64 n2 = (pos >> 1) % (1 << I2_BITS); + + replace_hash(p, 8, n8, 0); + replace_hash(p, 4, n4, 0); + replace_hash(p, 4, n4, 1); + replace_hash(p, 2, n2, 0); + replace_hash(p, 2, n2, 1); + replace_hash(p, 2, n2, 2); + replace_hash(p, 2, n2, 3); +} + +/* find the next template to use, and add it + * the p->dataN fields must already be set for the current 8 byte block + */ +static int process_next(struct sw842_param *p) +{ + int ret, i; + + p->index8[0] = INDEX_NOT_CHECKED; + p->index4[0] = INDEX_NOT_CHECKED; + p->index4[1] = INDEX_NOT_CHECKED; + p->index2[0] = INDEX_NOT_CHECKED; + p->index2[1] = INDEX_NOT_CHECKED; + p->index2[2] = INDEX_NOT_CHECKED; + p->index2[3] = INDEX_NOT_CHECKED; + + /* check up to OPS_MAX - 1; last op is our fallback */ + for (i = 0; i < OPS_MAX - 1; i++) { + if (check_template(p, i)) + break; + } + + ret = add_template(p, i); + if (ret) + return ret; + + return 0; +} + +/** + * sw842_compress + * + * Compress the uncompressed buffer of length @ilen at @in to the output buffer + * @out, using no more than @olen bytes, using the 842 compression format. + * + * Returns: 0 on success, error on failure. The @olen parameter + * will contain the number of output bytes written on success, or + * 0 on error. + */ +int sw842_compress(const u8 *in, unsigned int ilen, + u8 *out, unsigned int *olen, void *wmem) +{ + struct sw842_param *p = (struct sw842_param *)wmem; + int ret; + u64 last, next, pad, total; + u8 repeat_count = 0; + u32 crc; + + BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS); + + init_hashtable_nodes(p, 8); + init_hashtable_nodes(p, 4); + init_hashtable_nodes(p, 2); + + p->in = (u8 *)in; + p->instart = p->in; + p->ilen = ilen; + p->out = out; + p->olen = *olen; + p->bit = 0; + + total = p->olen; + + *olen = 0; + + /* if using strict mode, we can only compress a multiple of 8 */ + if (sw842_strict && (ilen % 8)) { + pr_err("Using strict mode, can't compress len %d\n", ilen); + return -EINVAL; + } + + /* let's compress at least 8 bytes, mkay? 
*/ + if (unlikely(ilen < 8)) + goto skip_comp; + + /* make initial 'last' different so we don't match the first time */ + last = ~get_unaligned((u64 *)p->in); + + while (p->ilen > 7) { + next = get_unaligned((u64 *)p->in); + + /* must get the next data, as we need to update the hashtable + * entries with the new data every time + */ + get_next_data(p); + + /* we don't care about endianness in last or next; + * we're just comparing 8 bytes to another 8 bytes, + * they're both the same endianness + */ + if (next == last) { + /* repeat count bits are 0-based, so we stop at +1 */ + if (++repeat_count <= REPEAT_BITS_MAX) + goto repeat; + } + if (repeat_count) { + ret = add_repeat_template(p, repeat_count); + repeat_count = 0; + if (next == last) /* reached max repeat bits */ + goto repeat; + } + + if (next == 0) + ret = add_zeros_template(p); + else + ret = process_next(p); + + if (ret) + return ret; + +repeat: + last = next; + update_hashtables(p); + p->in += 8; + p->ilen -= 8; + } + + if (repeat_count) { + ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; + } + +skip_comp: + if (p->ilen > 0) { + ret = add_short_data_template(p, p->ilen); + if (ret) + return ret; + + p->in += p->ilen; + p->ilen = 0; + } + + ret = add_end_template(p); + if (ret) + return ret; + + /* + * crc(0:31) is appended to target data starting with the next + * bit after End of stream template. + * nx842 calculates CRC for data in big-endian format. So doing + * same here so that sw842 decompression can be used for both + * compressed data. + */ + crc = crc32_be(0, in, ilen); + ret = add_bits(p, crc, CRC_BITS); + if (ret) + return ret; + + if (p->bit) { + p->out++; + p->olen--; + p->bit = 0; + } + + /* pad compressed length to multiple of 8 */ + pad = (8 - ((total - p->olen) % 8)) % 8; + if (pad) { + if (pad > p->olen) /* we were so close! 
*/ + return -ENOSPC; + memset(p->out, 0, pad); + p->out += pad; + p->olen -= pad; + } + + if (unlikely((total - p->olen) > UINT_MAX)) + return -ENOSPC; + + *olen = total - p->olen; + + return 0; +} +EXPORT_SYMBOL_GPL(sw842_compress); + +static int __init sw842_init(void) +{ + if (sw842_template_counts) + sw842_debugfs_create(); + + return 0; +} +module_init(sw842_init); + +static void __exit sw842_exit(void) +{ + if (sw842_template_counts) + sw842_debugfs_remove(); +} +module_exit(sw842_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software 842 Compressor"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); diff --git a/kernel/lib/842/842_debugfs.h b/kernel/lib/842/842_debugfs.h new file mode 100644 index 000000000..e7f3bffaf --- /dev/null +++ b/kernel/lib/842/842_debugfs.h @@ -0,0 +1,52 @@ + +#ifndef __842_DEBUGFS_H__ +#define __842_DEBUGFS_H__ + +#include <linux/debugfs.h> + +static bool sw842_template_counts; +module_param_named(template_counts, sw842_template_counts, bool, 0444); + +static atomic_t template_count[OPS_MAX], template_repeat_count, + template_zeros_count, template_short_data_count, template_end_count; + +static struct dentry *sw842_debugfs_root; + +static int __init sw842_debugfs_create(void) +{ + umode_t m = S_IRUGO | S_IWUSR; + int i; + + if (!debugfs_initialized()) + return -ENODEV; + + sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL); + if (IS_ERR(sw842_debugfs_root)) + return PTR_ERR(sw842_debugfs_root); + + for (i = 0; i < ARRAY_SIZE(template_count); i++) { + char name[32]; + + snprintf(name, 32, "template_%02x", i); + debugfs_create_atomic_t(name, m, sw842_debugfs_root, + &template_count[i]); + } + debugfs_create_atomic_t("template_repeat", m, sw842_debugfs_root, + &template_repeat_count); + debugfs_create_atomic_t("template_zeros", m, sw842_debugfs_root, + &template_zeros_count); + debugfs_create_atomic_t("template_short_data", m, sw842_debugfs_root, + &template_short_data_count); + debugfs_create_atomic_t("template_end", m, sw842_debugfs_root, + &template_end_count); + + return 0; +} + +static void __exit sw842_debugfs_remove(void) +{ + if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root)) + debugfs_remove_recursive(sw842_debugfs_root); +} + +#endif diff --git a/kernel/lib/842/842_decompress.c b/kernel/lib/842/842_decompress.c new file mode 100644 index 000000000..8881dad2a --- /dev/null +++ b/kernel/lib/842/842_decompress.c @@ -0,0 +1,422 @@ +/* + * 842 Software Decompression + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * See 842.h for details of the 842 compressed format. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define MODULE_NAME "842_decompress" + +#include "842.h" +#include "842_debugfs.h" + +/* rolling fifo sizes */ +#define I2_FIFO_SIZE (2 * (1 << I2_BITS)) +#define I4_FIFO_SIZE (4 * (1 << I4_BITS)) +#define I8_FIFO_SIZE (8 * (1 << I8_BITS)) + +static u8 decomp_ops[OPS_MAX][4] = { + { D8, N0, N0, N0 }, + { D4, D2, I2, N0 }, + { D4, I2, D2, N0 }, + { D4, I2, I2, N0 }, + { D4, I4, N0, N0 }, + { D2, I2, D4, N0 }, + { D2, I2, D2, I2 }, + { D2, I2, I2, D2 }, + { D2, I2, I2, I2 }, + { D2, I2, I4, N0 }, + { I2, D2, D4, N0 }, + { I2, D4, I2, N0 }, + { I2, D2, I2, D2 }, + { I2, D2, I2, I2 }, + { I2, D2, I4, N0 }, + { I2, I2, D4, N0 }, + { I2, I2, D2, I2 }, + { I2, I2, I2, D2 }, + { I2, I2, I2, I2 }, + { I2, I2, I4, N0 }, + { I4, D4, N0, N0 }, + { I4, D2, I2, N0 }, + { I4, I2, D2, N0 }, + { I4, I2, I2, N0 }, + { I4, I4, N0, N0 }, + { I8, N0, N0, N0 } +}; + +struct sw842_param { + u8 *in; + u8 bit; + u64 ilen; + u8 *out; + u8 *ostart; + u64 olen; +}; + +#define beN_to_cpu(d, s) \ + ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ + (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ + (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ + WARN(1, "pr_debug param err invalid size %x\n", s)) + +static int next_bits(struct sw842_param *p, u64 *d, u8 n); + +static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s) +{ + u64 tmp = 0; + int ret; + + if (n <= s) { + pr_debug("split_next_bits invalid n %u s %u\n", n, s); + return -EINVAL; + } + + ret = next_bits(p, &tmp, n - s); + if (ret) + return ret; + ret = next_bits(p, d, s); + if (ret) + return ret; + *d |= tmp << s; + return 0; +} + +static int next_bits(struct sw842_param *p, u64 *d, u8 n) +{ + u8 *in = p->in, b = p->bit, bits = b + n; + + if (n > 64) { + pr_debug("next_bits invalid n %u\n", n); + return -EINVAL; + } + + /* split this up if reading > 8 bytes, or if we're at the end of + * the input buffer and would read past the end + */ + if (bits > 64) + return __split_next_bits(p, d, n, 32); + else if (p->ilen < 8 && bits > 32 && bits <= 56) + return __split_next_bits(p, d, n, 16); + else if (p->ilen < 4 && bits > 16 && bits <= 24) + return __split_next_bits(p, d, n, 8); + + if (DIV_ROUND_UP(bits, 8) > p->ilen) + return -EOVERFLOW; + + if (bits <= 8) + *d = *in >> (8 - bits); + else if (bits <= 16) + *d = be16_to_cpu(get_unaligned((__be16 *)in)) >> (16 - bits); + else if (bits <= 32) + *d = be32_to_cpu(get_unaligned((__be32 *)in)) >> (32 - bits); + else + *d = be64_to_cpu(get_unaligned((__be64 *)in)) >> (64 - bits); + + *d &= GENMASK_ULL(n - 1, 0); + + p->bit += n; + + if (p->bit > 7) { + p->in += p->bit / 8; + p->ilen -= p->bit / 8; + p->bit %= 8; + } + + return 0; +} + +static int do_data(struct sw842_param *p, u8 n) +{ + u64 v; + int ret; + + if (n > p->olen) + return -ENOSPC; + + ret = next_bits(p, &v, n * 8); + if (ret) + return ret; + + switch (n) { + case 2: + put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out); + break; + case 4: + put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out); + break; + case 8: + put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out); + break; + default: + return -EINVAL; + } + + p->out += n; + p->olen -= n; + + return 0; +} + +static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize) +{ + u64 index, offset, total = round_down(p->out - p->ostart, 8); + int ret; + + ret = next_bits(p, &index, bits); + if (ret) + return ret; + + offset = index * size; + + /* a ring buffer of fsize is used; correct the offset */ + if (total > 
fsize) { + /* this is where the current fifo is */ + u64 section = round_down(total, fsize); + /* the current pos in the fifo */ + u64 pos = total - section; + + /* if the offset is past/at the pos, we need to + * go back to the last fifo section + */ + if (offset >= pos) + section -= fsize; + + offset += section; + } + + if (offset + size > total) { + pr_debug("index%x %lx points past end %lx\n", size, + (unsigned long)offset, (unsigned long)total); + return -EINVAL; + } + + pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", + size, (unsigned long)index, (unsigned long)(index * size), + (unsigned long)offset, (unsigned long)total, + (unsigned long)beN_to_cpu(&p->ostart[offset], size)); + + memcpy(p->out, &p->ostart[offset], size); + p->out += size; + p->olen -= size; + + return 0; +} + +static int do_index(struct sw842_param *p, u8 n) +{ + switch (n) { + case 2: + return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE); + case 4: + return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE); + case 8: + return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE); + default: + return -EINVAL; + } +} + +static int do_op(struct sw842_param *p, u8 o) +{ + int i, ret = 0; + + if (o >= OPS_MAX) + return -EINVAL; + + for (i = 0; i < 4; i++) { + u8 op = decomp_ops[o][i]; + + pr_debug("op is %x\n", op); + + switch (op & OP_ACTION) { + case OP_ACTION_DATA: + ret = do_data(p, op & OP_AMOUNT); + break; + case OP_ACTION_INDEX: + ret = do_index(p, op & OP_AMOUNT); + break; + case OP_ACTION_NOOP: + break; + default: + pr_err("Interal error, invalid op %x\n", op); + return -EINVAL; + } + + if (ret) + return ret; + } + + if (sw842_template_counts) + atomic_inc(&template_count[o]); + + return 0; +} + +/** + * sw842_decompress + * + * Decompress the 842-compressed buffer of length @ilen at @in + * to the output buffer @out, using no more than @olen bytes. + * + * The compressed buffer must be only a single 842-compressed buffer, + * with the standard format described in the comments in 842.h + * Processing will stop when the 842 "END" template is detected, + * not the end of the buffer. + * + * Returns: 0 on success, error on failure. The @olen parameter + * will contain the number of output bytes written on success, or + * 0 on error. 
+ */ +int sw842_decompress(const u8 *in, unsigned int ilen, + u8 *out, unsigned int *olen) +{ + struct sw842_param p; + int ret; + u64 op, rep, tmp, bytes, total; + u64 crc; + + p.in = (u8 *)in; + p.bit = 0; + p.ilen = ilen; + p.out = out; + p.ostart = out; + p.olen = *olen; + + total = p.olen; + + *olen = 0; + + do { + ret = next_bits(&p, &op, OP_BITS); + if (ret) + return ret; + + pr_debug("template is %lx\n", (unsigned long)op); + + switch (op) { + case OP_REPEAT: + ret = next_bits(&p, &rep, REPEAT_BITS); + if (ret) + return ret; + + if (p.out == out) /* no previous bytes */ + return -EINVAL; + + /* copy rep + 1 */ + rep++; + + if (rep * 8 > p.olen) + return -ENOSPC; + + while (rep-- > 0) { + memcpy(p.out, p.out - 8, 8); + p.out += 8; + p.olen -= 8; + } + + if (sw842_template_counts) + atomic_inc(&template_repeat_count); + + break; + case OP_ZEROS: + if (8 > p.olen) + return -ENOSPC; + + memset(p.out, 0, 8); + p.out += 8; + p.olen -= 8; + + if (sw842_template_counts) + atomic_inc(&template_zeros_count); + + break; + case OP_SHORT_DATA: + ret = next_bits(&p, &bytes, SHORT_DATA_BITS); + if (ret) + return ret; + + if (!bytes || bytes > SHORT_DATA_BITS_MAX) + return -EINVAL; + + while (bytes-- > 0) { + ret = next_bits(&p, &tmp, 8); + if (ret) + return ret; + *p.out = (u8)tmp; + p.out++; + p.olen--; + } + + if (sw842_template_counts) + atomic_inc(&template_short_data_count); + + break; + case OP_END: + if (sw842_template_counts) + atomic_inc(&template_end_count); + + break; + default: /* use template */ + ret = do_op(&p, op); + if (ret) + return ret; + break; + } + } while (op != OP_END); + + /* + * crc(0:31) is saved in compressed data starting with the + * next bit after End of stream template. + */ + ret = next_bits(&p, &crc, CRC_BITS); + if (ret) + return ret; + + /* + * Validate CRC saved in compressed data. 
+ */ + if (crc != (u64)crc32_be(0, out, total - p.olen)) { + pr_debug("CRC mismatch for decompression\n"); + return -EINVAL; + } + + if (unlikely((total - p.olen) > UINT_MAX)) + return -ENOSPC; + + *olen = total - p.olen; + + return 0; +} +EXPORT_SYMBOL_GPL(sw842_decompress); + +static int __init sw842_init(void) +{ + if (sw842_template_counts) + sw842_debugfs_create(); + + return 0; +} +module_init(sw842_init); + +static void __exit sw842_exit(void) +{ + if (sw842_template_counts) + sw842_debugfs_remove(); +} +module_exit(sw842_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software 842 Decompressor"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); diff --git a/kernel/lib/842/Makefile b/kernel/lib/842/Makefile new file mode 100644 index 000000000..5d24c0baf --- /dev/null +++ b/kernel/lib/842/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_842_COMPRESS) += 842_compress.o +obj-$(CONFIG_842_DECOMPRESS) += 842_decompress.o diff --git a/kernel/lib/Kconfig b/kernel/lib/Kconfig index 8689649d5..f75de578c 100644 --- a/kernel/lib/Kconfig +++ b/kernel/lib/Kconfig @@ -53,9 +53,6 @@ config GENERIC_IO config STMP_DEVICE bool -config PERCPU_RWSEM - bool - config ARCH_USE_CMPXCHG_LOCKREF bool @@ -212,11 +209,20 @@ config RANDOM32_SELFTEST # # compression support is select'ed if needed # +config 842_COMPRESS + select CRC32 + tristate + +config 842_DECOMPRESS + select CRC32 + tristate + config ZLIB_INFLATE tristate config ZLIB_DEFLATE tristate + select BITREVERSE config LZO_COMPRESS tristate @@ -455,16 +461,6 @@ config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE config LRU_CACHE tristate -config AVERAGE - bool "Averaging functions" - help - This option is provided for the case where no in-kernel-tree - modules require averaging functions, but a module built outside - the kernel tree does. Such modules that use library averaging - functions require Y here. - - If unsure, say N. - config CLZ_TAB bool @@ -516,6 +512,13 @@ config UCS2_STRING source "lib/fonts/Kconfig" +config SG_SPLIT + def_bool n + help + Provides a heler to split scatterlists into chunks, each chunk being a + scatterlist. This should be selected by a driver or an API which + whishes to split a scatterlist amongst multiple DMA channel. + # # sg chaining option # @@ -523,4 +526,10 @@ source "lib/fonts/Kconfig" config ARCH_HAS_SG_CHAIN def_bool n +config ARCH_HAS_PMEM_API + bool + +config ARCH_HAS_MMIO_FLUSH + bool + endmenu diff --git a/kernel/lib/Kconfig.debug b/kernel/lib/Kconfig.debug index ba2b0c87e..8c15b29d5 100644 --- a/kernel/lib/Kconfig.debug +++ b/kernel/lib/Kconfig.debug @@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK config FRAME_WARN int "Warn for stack frames larger than (needs gcc 4.4)" range 0 8192 + default 0 if KASAN default 1024 if !64BIT default 2048 if 64BIT help @@ -311,6 +312,15 @@ config DEBUG_SECTION_MISMATCH - Enable verbose reporting from modpost in order to help resolve the section mismatches that are reported. +config SECTION_MISMATCH_WARN_ONLY + bool "Make section mismatch errors non-fatal" + default y + help + If you say N here, the build process will fail if there are any + section mismatch, instead of just throwing warnings. + + If unsure, say Y. + # # Select this config option from the architecture Kconfig, if it # is preferred to always offer frame pointers as a config @@ -841,9 +851,14 @@ config SCHED_DEBUG that can help debug the scheduler. The runtime overhead of this option is minimal. 
+config SCHED_INFO + bool + default n + config SCHEDSTATS bool "Collect scheduler statistics" depends on DEBUG_KERNEL && PROC_FS + select SCHED_INFO help If you say Y here, additional code will be inserted into the scheduler and related routines to collect statistics about @@ -911,12 +926,6 @@ config DEBUG_RT_MUTEXES This allows rt mutex semantics violations and rt mutex related deadlocks (lockups) to be detected and reported automatically. -config RT_MUTEX_TESTER - bool "Built-in scriptable tester for rt-mutexes" - depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN - help - This option enables a rt-mutex tester. - config DEBUG_SPINLOCK bool "Spinlock and rw-lock debugging: basic checks" depends on DEBUG_KERNEL @@ -1233,6 +1242,7 @@ config RCU_TORTURE_TEST depends on DEBUG_KERNEL select TORTURE_TEST select SRCU + select TASKS_RCU default n help This option provides a kernel module that runs torture tests @@ -1261,12 +1271,38 @@ config RCU_TORTURE_TEST_RUNNABLE Say N here if you want the RCU torture tests to start only after being manually enabled via /proc. +config RCU_TORTURE_TEST_SLOW_PREINIT + bool "Slow down RCU grace-period pre-initialization to expose races" + depends on RCU_TORTURE_TEST + help + This option delays grace-period pre-initialization (the + propagation of CPU-hotplug changes up the rcu_node combining + tree) for a few jiffies between initializing each pair of + consecutive rcu_node structures. This helps to expose races + involving grace-period pre-initialization, in other words, it + makes your kernel less stable. It can also greatly increase + grace-period latency, especially on systems with large numbers + of CPUs. This is useful when torture-testing RCU, but in + almost no other circumstance. + + Say Y here if you want your system to crash and hang more often. + Say N if you want a sane system. + +config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY + int "How much to slow down RCU grace-period pre-initialization" + range 0 5 + default 3 + depends on RCU_TORTURE_TEST_SLOW_PREINIT + help + This option specifies the number of jiffies to wait between + each rcu_node structure pre-initialization step. + config RCU_TORTURE_TEST_SLOW_INIT bool "Slow down RCU grace-period initialization to expose races" depends on RCU_TORTURE_TEST help - This option makes grace-period initialization block for a - few jiffies between initializing each pair of consecutive + This option delays grace-period initialization for a few + jiffies between initializing each pair of consecutive rcu_node structures. This helps to expose races involving grace-period initialization, in other words, it makes your kernel less stable. It can also greatly increase grace-period @@ -1286,6 +1322,30 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY This option specifies the number of jiffies to wait between each rcu_node structure initialization. +config RCU_TORTURE_TEST_SLOW_CLEANUP + bool "Slow down RCU grace-period cleanup to expose races" + depends on RCU_TORTURE_TEST + help + This option delays grace-period cleanup for a few jiffies + between cleaning up each pair of consecutive rcu_node + structures. This helps to expose races involving grace-period + cleanup, in other words, it makes your kernel less stable. + It can also greatly increase grace-period latency, especially + on systems with large numbers of CPUs. This is useful when + torture-testing RCU, but in almost no other circumstance. + + Say Y here if you want your system to crash and hang more often. + Say N if you want a sane system. 
+ +config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY + int "How much to slow down RCU grace-period cleanup" + range 0 5 + default 3 + depends on RCU_TORTURE_TEST_SLOW_CLEANUP + help + This option specifies the number of jiffies to wait between + each rcu_node structure cleanup operation. + config RCU_CPU_STALL_TIMEOUT int "RCU CPU stall timeout in seconds" depends on RCU_STALL_COMMON @@ -1297,20 +1357,6 @@ config RCU_CPU_STALL_TIMEOUT RCU grace period persists, additional CPU stall warnings are printed at more widely spaced intervals. -config RCU_CPU_STALL_INFO - bool "Print additional diagnostics on RCU CPU stall" - depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL - default y - help - For each stalled CPU that is aware of the current RCU grace - period, print out additional per-CPU diagnostic information - regarding scheduling-clock ticks, idle state, and, - for RCU_FAST_NO_HZ kernels, idle-entry state. - - Say N if you are unsure. - - Say Y if you want to enable such diagnostics. - config RCU_TRACE bool "Enable tracing for RCU" depends on DEBUG_KERNEL @@ -1322,6 +1368,17 @@ config RCU_TRACE Say Y here if you want to enable RCU tracing Say N if you are unsure. +config RCU_EQS_DEBUG + bool "Provide debugging asserts for adding NO_HZ support to an arch" + depends on DEBUG_KERNEL + help + This option provides consistency checks in RCU's handling of + NO_HZ. These checks have proven quite helpful in detecting + bugs in arch-specific NO_HZ code. + + Say N here if you need ultimate kernel/user switch latencies + Say Y if you are unsure + endmenu # "RCU Debugging" config DEBUG_BLOCK_EXT_DEVT @@ -1475,6 +1532,13 @@ config FAIL_MMC_REQUEST and to test how the mmc host driver handles retries from the block device. +config FAIL_FUTEX + bool "Fault-injection capability for futexes" + select DEBUG_FS + depends on FAULT_INJECTION && FUTEX + help + Provide fault-injection capability for futexes. + config FAULT_INJECTION_DEBUG_FS bool "Debugfs entries for fault-injection capabilities" depends on FAULT_INJECTION && SYSFS && DEBUG_FS @@ -1631,6 +1695,9 @@ config TEST_STRING_HELPERS config TEST_KSTRTOX tristate "Test kstrto*() family of functions at runtime" +config TEST_PRINTF + tristate "Test printf() family of functions at runtime" + config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" default n @@ -1773,6 +1840,15 @@ config MEMTEST memtest=17, mean do 17 test patterns. If you are unsure how to answer this question, answer N. +config TEST_STATIC_KEYS + tristate "Test static keys" + default n + depends on m + help + Test the static key interfaces. + + If unsure, say N. + source "samples/Kconfig" source "lib/Kconfig.kgdb" diff --git a/kernel/lib/Kconfig.kasan b/kernel/lib/Kconfig.kasan index 777eda7d1..0fee5acd5 100644 --- a/kernel/lib/Kconfig.kasan +++ b/kernel/lib/Kconfig.kasan @@ -15,12 +15,7 @@ config KASAN global variables requires gcc 5.0 or later. This feature consumes about 1/8 of available memory and brings about ~x3 performance slowdown. - For better error detection enable CONFIG_STACKTRACE, - and add slub_debug=U to boot cmdline. - -config KASAN_SHADOW_OFFSET - hex - default 0xdffffc0000000000 if X86_64 + For better error detection enable CONFIG_STACKTRACE. 
choice prompt "Instrumentation type" diff --git a/kernel/lib/Makefile b/kernel/lib/Makefile index 6c3793333..7f1de2661 100644 --- a/kernel/lib/Makefile +++ b/kernel/lib/Makefile @@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o md5.o irq_regs.o argv_split.o \ proportions.o flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o seq_buf.o + earlycpio.o seq_buf.o nmi_backtrace.o obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o lib-$(CONFIG_MMU) += ioremap.o @@ -26,7 +26,8 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o + percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ + once.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += hexdump.o @@ -39,12 +40,18 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LKM) += test_module.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o +obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o +obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o +obj-$(CONFIG_TEST_PRINTF) += test_printf.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG CFLAGS_kobject_uevent.o += -DDEBUG endif +obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o +CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) + obj-$(CONFIG_GENERIC_IOMAP) += iomap.o obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o @@ -78,6 +85,8 @@ obj-$(CONFIG_LIBCRC32C) += libcrc32c.o obj-$(CONFIG_CRC8) += crc8.o obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o +obj-$(CONFIG_842_COMPRESS) += 842/ +obj-$(CONFIG_842_DECOMPRESS) += 842/ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ @@ -133,8 +142,6 @@ obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o -obj-$(CONFIG_AVERAGE) += average.o - obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o obj-$(CONFIG_CORDIC) += cordic.o @@ -155,6 +162,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o +obj-$(CONFIG_SG_SPLIT) += sg_split.o obj-$(CONFIG_STMP_DEVICE) += stmp_device.o libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ diff --git a/kernel/lib/asn1_decoder.c b/kernel/lib/asn1_decoder.c index 1a000bb05..2b3f46c04 100644 --- a/kernel/lib/asn1_decoder.c +++ b/kernel/lib/asn1_decoder.c @@ -24,15 +24,20 @@ static const unsigned char asn1_op_lengths[ASN1_OP__NR] = { [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_ANY] = 1, + [ASN1_OP_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_MATCH_ANY_ACT] = 1 + 1, + [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_ANY] = 1, + [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1, + [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_FAIL] = 1, [ASN1_OP_COMPLETE] = 1, [ASN1_OP_ACT] = 1 + 1, + [ASN1_OP_MAYBE_ACT] = 1 + 1, [ASN1_OP_RETURN] = 1, [ASN1_OP_END_SEQ] = 1, [ASN1_OP_END_SEQ_OF] = 1 + 
1, @@ -177,6 +182,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder, unsigned char flags = 0; #define FLAG_INDEFINITE_LENGTH 0x01 #define FLAG_MATCHED 0x02 +#define FLAG_LAST_MATCHED 0x04 /* Last tag matched */ #define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag * - ie. whether or not we are going to parse * a compound type. @@ -208,9 +214,9 @@ next_op: unsigned char tmp; /* Skip conditional matches if possible */ - if ((op & ASN1_OP_MATCH__COND && - flags & FLAG_MATCHED) || - dp == datalen) { + if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) || + (op & ASN1_OP_MATCH__SKIP && dp == datalen)) { + flags &= ~FLAG_LAST_MATCHED; pc += asn1_op_lengths[op]; goto next_op; } @@ -302,7 +308,9 @@ next_op: /* Decide how to handle the operation */ switch (op) { case ASN1_OP_MATCH_ANY_ACT: + case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_ANY_ACT: + case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len); if (ret < 0) return ret; @@ -319,8 +327,10 @@ next_op: case ASN1_OP_MATCH: case ASN1_OP_MATCH_OR_SKIP: case ASN1_OP_MATCH_ANY: + case ASN1_OP_MATCH_ANY_OR_SKIP: case ASN1_OP_COND_MATCH_OR_SKIP: case ASN1_OP_COND_MATCH_ANY: + case ASN1_OP_COND_MATCH_ANY_OR_SKIP: skip_data: if (!(flags & FLAG_CONS)) { if (flags & FLAG_INDEFINITE_LENGTH) { @@ -422,8 +432,15 @@ next_op: pc += asn1_op_lengths[op]; goto next_op; + case ASN1_OP_MAYBE_ACT: + if (!(flags & FLAG_LAST_MATCHED)) { + pc += asn1_op_lengths[op]; + goto next_op; + } case ASN1_OP_ACT: ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); + if (ret < 0) + return ret; pc += asn1_op_lengths[op]; goto next_op; @@ -431,6 +448,7 @@ next_op: if (unlikely(jsp <= 0)) goto jump_stack_underflow; pc = jump_stack[--jsp]; + flags |= FLAG_MATCHED | FLAG_LAST_MATCHED; goto next_op; default: @@ -438,7 +456,8 @@ next_op: } /* Shouldn't reach here */ - pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op); + pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n", + op, pc); return -EBADMSG; data_overrun_error: diff --git a/kernel/lib/atomic64.c b/kernel/lib/atomic64.c index 1298c05ef..2886ebac6 100644 --- a/kernel/lib/atomic64.c +++ b/kernel/lib/atomic64.c @@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return); ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) +ATOMIC64_OP(and, &=) +ATOMIC64_OP(or, |=) +ATOMIC64_OP(xor, ^=) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN diff --git a/kernel/lib/atomic64_test.c b/kernel/lib/atomic64_test.c index 0211d30d8..83c33a5bc 100644 --- a/kernel/lib/atomic64_test.c +++ b/kernel/lib/atomic64_test.c @@ -16,8 +16,39 @@ #include <linux/kernel.h> #include <linux/atomic.h> +#define TEST(bit, op, c_op, val) \ +do { \ + atomic##bit##_set(&v, v0); \ + r = v0; \ + atomic##bit##_##op(val, &v); \ + r c_op val; \ + WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \ + (unsigned long long)atomic##bit##_read(&v), \ + (unsigned long long)r); \ +} while (0) + +static __init void test_atomic(void) +{ + int v0 = 0xaaa31337; + int v1 = 0xdeadbeef; + int onestwos = 0x11112222; + int one = 1; + + atomic_t v; + int r; + + TEST(, add, +=, onestwos); + TEST(, add, +=, -one); + TEST(, sub, -=, onestwos); + TEST(, sub, -=, -one); + TEST(, or, |=, v1); + TEST(, and, &=, v1); + TEST(, xor, ^=, v1); + TEST(, andnot, &= ~, v1); +} + #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) -static __init int test_atomic64(void) +static __init void test_atomic64(void) { long long v0 = 0xaaa31337c001d00dLL; long long v1 = 
0xdeadbeefdeafcafeLL; @@ -34,15 +65,14 @@ static __init int test_atomic64(void) BUG_ON(v.counter != r); BUG_ON(atomic64_read(&v) != r); - INIT(v0); - atomic64_add(onestwos, &v); - r += onestwos; - BUG_ON(v.counter != r); - - INIT(v0); - atomic64_add(-one, &v); - r += -one; - BUG_ON(v.counter != r); + TEST(64, add, +=, onestwos); + TEST(64, add, +=, -one); + TEST(64, sub, -=, onestwos); + TEST(64, sub, -=, -one); + TEST(64, or, |=, v1); + TEST(64, and, &=, v1); + TEST(64, xor, ^=, v1); + TEST(64, andnot, &= ~, v1); INIT(v0); r += onestwos; @@ -55,16 +85,6 @@ static __init int test_atomic64(void) BUG_ON(v.counter != r); INIT(v0); - atomic64_sub(onestwos, &v); - r -= onestwos; - BUG_ON(v.counter != r); - - INIT(v0); - atomic64_sub(-one, &v); - r -= -one; - BUG_ON(v.counter != r); - - INIT(v0); r -= onestwos; BUG_ON(atomic64_sub_return(onestwos, &v) != r); BUG_ON(v.counter != r); @@ -147,6 +167,12 @@ static __init int test_atomic64(void) BUG_ON(!atomic64_inc_not_zero(&v)); r += one; BUG_ON(v.counter != r); +} + +static __init int test_atomics(void) +{ + test_atomic(); + test_atomic64(); #ifdef CONFIG_X86 pr_info("passed for %s platform %s CX8 and %s SSE\n", @@ -166,4 +192,4 @@ static __init int test_atomic64(void) return 0; } -core_initcall(test_atomic64); +core_initcall(test_atomics); diff --git a/kernel/lib/average.c b/kernel/lib/average.c deleted file mode 100644 index 114d1beae..000000000 --- a/kernel/lib/average.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * lib/average.c - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. - */ - -#include <linux/export.h> -#include <linux/average.h> -#include <linux/kernel.h> -#include <linux/bug.h> -#include <linux/log2.h> - -/** - * DOC: Exponentially Weighted Moving Average (EWMA) - * - * These are generic functions for calculating Exponentially Weighted Moving - * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled - * up internal representation of the average value to prevent rounding errors. - * The factor for scaling up and the exponential weight (or decay rate) have to - * be specified thru the init fuction. The structure should not be accessed - * directly but only thru the helper functions. - */ - -/** - * ewma_init() - Initialize EWMA parameters - * @avg: Average structure - * @factor: Factor to use for the scaled up internal value. The maximum value - * of averages can be ULONG_MAX/(factor*weight). For performance reasons - * factor has to be a power of 2. - * @weight: Exponential weight, or decay rate. This defines how fast the - * influence of older values decreases. For performance reasons weight has - * to be a power of 2. - * - * Initialize the EWMA parameters for a given struct ewma @avg. - */ -void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight) -{ - WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor)); - - avg->weight = ilog2(weight); - avg->factor = ilog2(factor); - avg->internal = 0; -} -EXPORT_SYMBOL(ewma_init); - -/** - * ewma_add() - Exponentially weighted moving average (EWMA) - * @avg: Average structure - * @val: Current value - * - * Add a sample to the average. - */ -struct ewma *ewma_add(struct ewma *avg, unsigned long val) -{ - unsigned long internal = ACCESS_ONCE(avg->internal); - - ACCESS_ONCE(avg->internal) = internal ? 
- (((internal << avg->weight) - internal) + - (val << avg->factor)) >> avg->weight : - (val << avg->factor); - return avg; -} -EXPORT_SYMBOL(ewma_add); diff --git a/kernel/lib/bitmap.c b/kernel/lib/bitmap.c index 40162f87e..814814397 100644 --- a/kernel/lib/bitmap.c +++ b/kernel/lib/bitmap.c @@ -367,7 +367,8 @@ int __bitmap_parse(const char *buf, unsigned int buflen, nchunks = nbits = totaldigits = c = 0; do { - chunk = ndigits = 0; + chunk = 0; + ndigits = totaldigits; /* Get the next chunk of the bitmap */ while (buflen) { @@ -406,9 +407,9 @@ int __bitmap_parse(const char *buf, unsigned int buflen, return -EOVERFLOW; chunk = (chunk << 4) | hex_to_bin(c); - ndigits++; totaldigits++; + totaldigits++; } - if (ndigits == 0) + if (ndigits == totaldigits) return -EINVAL; if (nchunks == 0 && chunk == 0) continue; @@ -462,19 +463,20 @@ EXPORT_SYMBOL(bitmap_parse_user); * Output format is a comma-separated list of decimal numbers and * ranges if list is specified or hex digits grouped into comma-separated * sets of 8 digits/set. Returns the number of characters written to buf. + * + * It is assumed that @buf is a pointer into a PAGE_SIZE area and that + * sufficient storage remains at @buf to accommodate the + * bitmap_print_to_pagebuf() output. */ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits) { - ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; int n = 0; - if (len > 1) { - n = list ? scnprintf(buf, len, "%*pbl", nmaskbits, maskp) : - scnprintf(buf, len, "%*pb", nmaskbits, maskp); - buf[n++] = '\n'; - buf[n] = '\0'; - } + if (len > 1) + n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) : + scnprintf(buf, len, "%*pb\n", nmaskbits, maskp); return n; } EXPORT_SYMBOL(bitmap_print_to_pagebuf); @@ -504,7 +506,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, int nmaskbits) { unsigned a, b; - int c, old_c, totaldigits; + int c, old_c, totaldigits, ndigits; const char __user __force *ubuf = (const char __user __force *)buf; int at_start, in_range; @@ -514,6 +516,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, at_start = 1; in_range = 0; a = b = 0; + ndigits = totaldigits; /* Get the next cpu# or a range of cpu#'s */ while (buflen) { @@ -527,23 +530,27 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, if (isspace(c)) continue; - /* - * If the last character was a space and the current - * character isn't '\0', we've got embedded whitespace. - * This is a no-no, so throw an error. - */ - if (totaldigits && c && isspace(old_c)) - return -EINVAL; - /* A '\0' or a ',' signal the end of a cpu# or range */ if (c == '\0' || c == ',') break; + /* + * whitespaces between digits are not allowed, + * but it's ok if whitespaces are on head or tail. + * when old_c is whilespace, + * if totaldigits == ndigits, whitespace is on head. + * if whitespace is on tail, it should not run here. + * as c was ',' or '\0', + * the last code line has broken the current loop. 
+ */ + if ((totaldigits != ndigits) && isspace(old_c)) + return -EINVAL; if (c == '-') { if (at_start || in_range) return -EINVAL; b = 0; in_range = 1; + at_start = 1; continue; } @@ -556,15 +563,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, at_start = 0; totaldigits++; } + if (ndigits == totaldigits) + continue; + /* if no digit is after '-', it's wrong*/ + if (at_start && in_range) + return -EINVAL; if (!(a <= b)) return -EINVAL; if (b >= nmaskbits) return -ERANGE; - if (!at_start) { - while (a <= b) { - set_bit(a, maskp); - a++; - } + while (a <= b) { + set_bit(a, maskp); + a++; } } while (buflen && c == ','); return 0; diff --git a/kernel/lib/btree.c b/kernel/lib/btree.c index 4264871ea..f93a94527 100644 --- a/kernel/lib/btree.c +++ b/kernel/lib/btree.c @@ -5,7 +5,7 @@ * * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> * Bits and pieces stolen from Peter Zijlstra's code, which is - * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> + * Copyright 2007, Red Hat Inc. Peter Zijlstra * GPLv2 * * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch diff --git a/kernel/lib/bug.c b/kernel/lib/bug.c index 0c3bd9552..cff145f03 100644 --- a/kernel/lib/bug.c +++ b/kernel/lib/bug.c @@ -66,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) struct module *mod; const struct bug_entry *bug = NULL; - rcu_read_lock(); + rcu_read_lock_sched(); list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { unsigned i; @@ -77,7 +77,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) } bug = NULL; out: - rcu_read_unlock(); + rcu_read_unlock_sched(); return bug; } @@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, char *secstrings; unsigned int i; + lockdep_assert_held(&module_mutex); + mod->bug_table = NULL; mod->num_bugs = 0; @@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, void module_bug_cleanup(struct module *mod) { + lockdep_assert_held(&module_mutex); list_del_rcu(&mod->bug_list); } diff --git a/kernel/lib/cpu_rmap.c b/kernel/lib/cpu_rmap.c index 4f134d890..f610b2a10 100644 --- a/kernel/lib/cpu_rmap.c +++ b/kernel/lib/cpu_rmap.c @@ -191,7 +191,7 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, /* Update distances based on topology */ for_each_cpu(cpu, update_mask) { if (cpu_rmap_copy_neigh(rmap, cpu, - topology_thread_cpumask(cpu), 1)) + topology_sibling_cpumask(cpu), 1)) continue; if (cpu_rmap_copy_neigh(rmap, cpu, topology_core_cpumask(cpu), 2)) diff --git a/kernel/lib/crc-itu-t.c b/kernel/lib/crc-itu-t.c index a63472b82..b3219d0ab 100644 --- a/kernel/lib/crc-itu-t.c +++ b/kernel/lib/crc-itu-t.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <linux/crc-itu-t.h> -/** CRC table for the CRC ITU-T V.41 0x0x1021 (x^16 + x^12 + x^15 + 1) */ +/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, diff --git a/kernel/lib/crc-t10dif.c b/kernel/lib/crc-t10dif.c index dfe6ec17c..1ad33e555 100644 --- a/kernel/lib/crc-t10dif.c +++ b/kernel/lib/crc-t10dif.c @@ -19,7 +19,7 @@ static struct crypto_shash *crct10dif_tfm; static struct static_key crct10dif_fallback __read_mostly; -__u16 crc_t10dif(const unsigned char *buffer, size_t len) +__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) { struct { struct 
shash_desc shash; @@ -28,17 +28,23 @@ __u16 crc_t10dif(const unsigned char *buffer, size_t len) int err; if (static_key_false(&crct10dif_fallback)) - return crc_t10dif_generic(0, buffer, len); + return crc_t10dif_generic(crc, buffer, len); desc.shash.tfm = crct10dif_tfm; desc.shash.flags = 0; - *(__u16 *)desc.ctx = 0; + *(__u16 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, buffer, len); BUG_ON(err); return *(__u16 *)desc.ctx; } +EXPORT_SYMBOL(crc_t10dif_update); + +__u16 crc_t10dif(const unsigned char *buffer, size_t len) +{ + return crc_t10dif_update(0, buffer, len); +} EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) diff --git a/kernel/lib/debug_info.c b/kernel/lib/debug_info.c new file mode 100644 index 000000000..2edbe2751 --- /dev/null +++ b/kernel/lib/debug_info.c @@ -0,0 +1,27 @@ +/* + * This file exists solely to ensure debug information for some core + * data structures is included in the final image even for + * CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However, + * adding appropriate #includes is fine. + */ +#include <stdarg.h> + +#include <linux/cred.h> +#include <linux/crypto.h> +#include <linux/dcache.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/fscache-cache.h> +#include <linux/io.h> +#include <linux/kallsyms.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/net.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <net/addrconf.h> +#include <net/sock.h> +#include <net/tcp.h> diff --git a/kernel/lib/decompress.c b/kernel/lib/decompress.c index 528ff932d..62696dff5 100644 --- a/kernel/lib/decompress.c +++ b/kernel/lib/decompress.c @@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len, { const struct compress_format *cf; - if (len < 2) + if (len < 2) { + if (name) + *name = NULL; return NULL; /* Need at least this much... */ + } pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]); diff --git a/kernel/lib/decompress_unlzma.c b/kernel/lib/decompress_unlzma.c index decb64629..ed7a1fd81 100644 --- a/kernel/lib/decompress_unlzma.c +++ b/kernel/lib/decompress_unlzma.c @@ -620,7 +620,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, long in_len, num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); - if (p == 0) + if (p == NULL) goto exit_2; num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); for (i = 0; i < num_probs; i++) diff --git a/kernel/lib/devres.c b/kernel/lib/devres.c index fbe2aac52..8c8567263 100644 --- a/kernel/lib/devres.c +++ b/kernel/lib/devres.c @@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap); * @dev: generic device to handle the resource for * @res: resource to be handled * - * Checks that a resource is a valid memory region, requests the memory region - * and ioremaps it either as cacheable or as non-cacheable memory depending on - * the resource's flags. All operations are managed and will be undone on - * driver detach. + * Checks that a resource is a valid memory region, requests the memory + * region and ioremaps it. All operations are managed and will be undone + * on driver detach. * * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. 
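/*
 * Illustrative sketch, not part of the patch above: the new
 * crc_t10dif_update() lets a caller feed data in pieces and obtain the
 * same result as one crc_t10dif() call over the concatenated buffer.
 * The helper and buffer names below are invented for the example.
 */
#include <linux/types.h>
#include <linux/crc-t10dif.h>

static __u16 example_crc_in_two_steps(const u8 *hdr, size_t hdr_len,
				      const u8 *payload, size_t payload_len)
{
	__u16 crc;

	crc = crc_t10dif(hdr, hdr_len);			/* implicit seed of 0 */
	crc = crc_t10dif_update(crc, payload, payload_len); /* continue from hdr's CRC */

	return crc;
}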
Usage example: @@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) return IOMEM_ERR_PTR(-EBUSY); } - if (res->flags & IORESOURCE_CACHEABLE) - dest_ptr = devm_ioremap(dev, res->start, size); - else - dest_ptr = devm_ioremap_nocache(dev, res->start, size); - + dest_ptr = devm_ioremap(dev, res->start, size); if (!dest_ptr) { dev_err(dev, "ioremap failed for resource %pR\n", res); devm_release_mem_region(dev, res->start, size); @@ -423,7 +418,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) if (!iomap) return; - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + for (i = 0; i < PCIM_IOMAP_MAX; i++) { if (!(mask & (1 << i))) continue; diff --git a/kernel/lib/digsig.c b/kernel/lib/digsig.c index ae05ea393..07be6c1ef 100644 --- a/kernel/lib/digsig.c +++ b/kernel/lib/digsig.c @@ -79,12 +79,13 @@ static int digsig_verify_rsa(struct key *key, unsigned char *out1 = NULL; const char *m; MPI in = NULL, res = NULL, pkey[2]; - uint8_t *p, *datap, *endp; - struct user_key_payload *ukp; + uint8_t *p, *datap; + const uint8_t *endp; + const struct user_key_payload *ukp; struct pubkey_hdr *pkh; down_read(&key->sem); - ukp = key->payload.data; + ukp = user_key_payload(key); if (ukp->datalen < sizeof(*pkh)) goto err1; diff --git a/kernel/lib/div64.c b/kernel/lib/div64.c index 19ea7ed4b..62a698a43 100644 --- a/kernel/lib/div64.c +++ b/kernel/lib/div64.c @@ -162,7 +162,7 @@ s64 div64_s64(s64 dividend, s64 divisor) { s64 quot, t; - quot = div64_u64(abs64(dividend), abs64(divisor)); + quot = div64_u64(abs(dividend), abs(divisor)); t = (dividend ^ divisor) >> 63; return (quot ^ t) - t; diff --git a/kernel/lib/dma-debug.c b/kernel/lib/dma-debug.c index dace71fe4..4a1515f4b 100644 --- a/kernel/lib/dma-debug.c +++ b/kernel/lib/dma-debug.c @@ -100,7 +100,7 @@ static LIST_HEAD(free_entries); static DEFINE_SPINLOCK(free_entries_lock); /* Global disable flag - will be set in case of an error */ -static u32 global_disable __read_mostly; +static bool global_disable __read_mostly; /* Early initialization disable flag, set at the end of dma_debug_init */ static bool dma_debug_initialized __read_mostly; @@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) { - if (overlap(addr, len, _text, _etext) || + if (overlap(addr, len, _stext, _etext) || overlap(addr, len, __start_rodata, __end_rodata)) err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); } @@ -1249,6 +1249,14 @@ static void check_sync(struct device *dev, dir2name[entry->direction], dir2name[ref->direction]); + if (ref->sg_call_ents && ref->type == dma_debug_sg && + ref->sg_call_ents != entry->sg_call_ents) { + err_printk(ref->dev, entry, "DMA-API: device driver syncs " + "DMA sg list with different entry count " + "[map count=%d] [sync count=%d]\n", + entry->sg_call_ents, ref->sg_call_ents); + } + out: put_hash_bucket(bucket, &flags); } @@ -1456,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, entry->type = dma_debug_coherent; entry->dev = dev; entry->pfn = page_to_pfn(virt_to_page(virt)); - entry->offset = (size_t) virt & PAGE_MASK; + entry->offset = (size_t) virt & ~PAGE_MASK; entry->size = size; entry->dev_addr = dma_addr; entry->direction = DMA_BIDIRECTIONAL; @@ -1472,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, .type = dma_debug_coherent, .dev = dev, 
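/*
 * Worked example, added for clarity and not part of the patch: the
 * dma-debug change above records the offset of the buffer *within* its
 * page. With 4 KiB pages, PAGE_MASK == ~0xfffUL, so for a hypothetical
 * virt whose low bits are 0x5a30:
 *
 *   virt &  PAGE_MASK  ->  ...5000   (page-aligned address, not wanted here)
 *   virt & ~PAGE_MASK  ->     0xa30  (intra-page offset, what dma-debug needs)
 */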
.pfn = page_to_pfn(virt_to_page(virt)), - .offset = (size_t) virt & PAGE_MASK, + .offset = (size_t) virt & ~PAGE_MASK, .dev_addr = addr, .size = size, .direction = DMA_BIDIRECTIONAL, diff --git a/kernel/lib/dump_stack.c b/kernel/lib/dump_stack.c index 7ccbc6ff8..c30d07e99 100644 --- a/kernel/lib/dump_stack.c +++ b/kernel/lib/dump_stack.c @@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1); asmlinkage __visible void dump_stack(void) { + unsigned long flags; int was_locked; int old; int cpu; @@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void) * Permit this cpu to perform nested stack dumps while serialising * against other CPUs */ - migrate_disable(); - retry: + local_irq_save(flags); cpu = smp_processor_id(); old = atomic_cmpxchg(&dump_lock, -1, cpu); if (old == -1) { @@ -43,6 +43,7 @@ retry: } else if (old == cpu) { was_locked = 1; } else { + local_irq_restore(flags); cpu_relax(); goto retry; } @@ -52,7 +53,7 @@ retry: if (!was_locked) atomic_set(&dump_lock, -1); - migrate_enable(); + local_irq_restore(flags); } #else asmlinkage __visible void dump_stack(void) diff --git a/kernel/lib/dynamic_debug.c b/kernel/lib/dynamic_debug.c index d8f3d3150..e3952e9c8 100644 --- a/kernel/lib/dynamic_debug.c +++ b/kernel/lib/dynamic_debug.c @@ -42,7 +42,7 @@ extern struct _ddebug __stop___verbose[]; struct ddebug_table { struct list_head link; - char *mod_name; + const char *mod_name; unsigned int num_ddebugs; struct _ddebug *ddebugs; }; @@ -841,12 +841,12 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *name) { struct ddebug_table *dt; - char *new_name; + const char *new_name; dt = kzalloc(sizeof(*dt), GFP_KERNEL); if (dt == NULL) return -ENOMEM; - new_name = kstrdup(name, GFP_KERNEL); + new_name = kstrdup_const(name, GFP_KERNEL); if (new_name == NULL) { kfree(dt); return -ENOMEM; @@ -887,7 +887,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val, /* handle both dyndbg and $module.dyndbg params at boot */ static int ddebug_dyndbg_boot_param_cb(char *param, char *val, - const char *unused) + const char *unused, void *arg) { vpr_info("%s=\"%s\"\n", param, val); return ddebug_dyndbg_param_cb(param, val, NULL, 0); @@ -907,7 +907,7 @@ int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module) static void ddebug_table_free(struct ddebug_table *dt) { list_del_init(&dt->link); - kfree(dt->mod_name); + kfree_const(dt->mod_name); kfree(dt); } @@ -1028,7 +1028,7 @@ static int __init dynamic_debug_init(void) */ cmdline = kstrdup(saved_command_line, GFP_KERNEL); parse_args("dyndbg params", cmdline, NULL, - 0, 0, 0, &ddebug_dyndbg_boot_param_cb); + 0, 0, 0, NULL, &ddebug_dyndbg_boot_param_cb); kfree(cmdline); return 0; diff --git a/kernel/lib/fault-inject.c b/kernel/lib/fault-inject.c index f1cdeb024..6a823a53e 100644 --- a/kernel/lib/fault-inject.c +++ b/kernel/lib/fault-inject.c @@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr) printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " "space %d, times %d\n", attr->dname, - attr->probability, attr->interval, + attr->interval, attr->probability, atomic_read(&attr->space), atomic_read(&attr->times)); if (attr->verbose > 1) diff --git a/kernel/lib/genalloc.c b/kernel/lib/genalloc.c index d214866ee..116a166b0 100644 --- a/kernel/lib/genalloc.c +++ b/kernel/lib/genalloc.c @@ -160,6 +160,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) pool->min_alloc_order = min_alloc_order; pool->algo = gen_pool_first_fit; pool->data = 
NULL; + pool->name = NULL; } return pool; } @@ -252,8 +253,8 @@ void gen_pool_destroy(struct gen_pool *pool) kfree(chunk); } + kfree_const(pool->name); kfree(pool); - return; } EXPORT_SYMBOL(gen_pool_destroy); @@ -570,57 +571,92 @@ static void devm_gen_pool_release(struct device *dev, void *res) gen_pool_destroy(*(struct gen_pool **)res); } +static int devm_gen_pool_match(struct device *dev, void *res, void *data) +{ + struct gen_pool **p = res; + + /* NULL data matches only a pool without an assigned name */ + if (!data && !(*p)->name) + return 1; + + if (!data || !(*p)->name) + return 0; + + return !strcmp((*p)->name, data); +} + +/** + * gen_pool_get - Obtain the gen_pool (if any) for a device + * @dev: device to retrieve the gen_pool from + * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device + * + * Returns the gen_pool for the device if one is present, or NULL. + */ +struct gen_pool *gen_pool_get(struct device *dev, const char *name) +{ + struct gen_pool **p; + + p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match, + (void *)name); + if (!p) + return NULL; + return *p; +} +EXPORT_SYMBOL_GPL(gen_pool_get); + /** * devm_gen_pool_create - managed gen_pool_create * @dev: device that provides the gen_pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents - * @nid: node id of the node the pool structure should be allocated on, or -1 + * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes + * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. The pool will be * automatically destroyed by the device management code. */ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, - int nid) + int nid, const char *name) { struct gen_pool **ptr, *pool; + const char *pool_name = NULL; + + /* Check that genpool to be created is uniquely addressed on device */ + if (gen_pool_get(dev, name)) + return ERR_PTR(-EINVAL); + + if (name) { + pool_name = kstrdup_const(name, GFP_KERNEL); + if (!pool_name) + return ERR_PTR(-ENOMEM); + } ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) - return NULL; + goto free_pool_name; pool = gen_pool_create(min_alloc_order, nid); - if (pool) { - *ptr = pool; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } + if (!pool) + goto free_devres; + + *ptr = pool; + pool->name = pool_name; + devres_add(dev, ptr); return pool; -} -EXPORT_SYMBOL(devm_gen_pool_create); -/** - * dev_get_gen_pool - Obtain the gen_pool (if any) for a device - * @dev: device to retrieve the gen_pool from - * - * Returns the gen_pool for the device if one is present, or NULL. 
- */ -struct gen_pool *dev_get_gen_pool(struct device *dev) -{ - struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, - NULL); +free_devres: + devres_free(ptr); +free_pool_name: + kfree_const(pool_name); - if (!p) - return NULL; - return *p; + return ERR_PTR(-ENOMEM); } -EXPORT_SYMBOL_GPL(dev_get_gen_pool); +EXPORT_SYMBOL(devm_gen_pool_create); #ifdef CONFIG_OF /** - * of_get_named_gen_pool - find a pool by phandle property + * of_gen_pool_get - find a pool by phandle property * @np: device node * @propname: property name containing phandle(s) * @index: index into the phandle array @@ -629,20 +665,34 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool); * address of the device tree node pointed at by the phandle property, * or NULL if not found. */ -struct gen_pool *of_get_named_gen_pool(struct device_node *np, +struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { struct platform_device *pdev; - struct device_node *np_pool; + struct device_node *np_pool, *parent; + const char *name = NULL; + struct gen_pool *pool = NULL; np_pool = of_parse_phandle(np, propname, index); if (!np_pool) return NULL; + pdev = of_find_device_by_node(np_pool); + if (!pdev) { + /* Check if named gen_pool is created by parent node device */ + parent = of_get_parent(np_pool); + pdev = of_find_device_by_node(parent); + of_node_put(parent); + + of_property_read_string(np_pool, "label", &name); + if (!name) + name = np_pool->name; + } + if (pdev) + pool = gen_pool_get(&pdev->dev, name); of_node_put(np_pool); - if (!pdev) - return NULL; - return dev_get_gen_pool(&pdev->dev); + + return pool; } -EXPORT_SYMBOL_GPL(of_get_named_gen_pool); +EXPORT_SYMBOL_GPL(of_gen_pool_get); #endif /* CONFIG_OF */ diff --git a/kernel/lib/halfmd4.c b/kernel/lib/halfmd4.c index a8fe6274a..137e861d9 100644 --- a/kernel/lib/halfmd4.c +++ b/kernel/lib/halfmd4.c @@ -1,6 +1,7 @@ #include <linux/compiler.h> #include <linux/export.h> #include <linux/cryptohash.h> +#include <linux/bitops.h> /* F, G and H are basic MD4 functions: selection, majority, parity */ #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) @@ -14,7 +15,7 @@ * Rotation is separate from addition to prevent recomputation */ #define ROUND(f, a, b, c, d, x, s) \ - (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) + (a += f(b, c, d) + x, a = rol32(a, s)) #define K1 0 #define K2 013240474631UL #define K3 015666365641UL diff --git a/kernel/lib/hexdump.c b/kernel/lib/hexdump.c index 7ea096998..992457b12 100644 --- a/kernel/lib/hexdump.c +++ b/kernel/lib/hexdump.c @@ -11,6 +11,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/export.h> +#include <asm/unaligned.h> const char hex_asc[] = "0123456789abcdef"; EXPORT_SYMBOL(hex_asc); @@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%16.16llx", j ? " " : "", - (unsigned long long)*(ptr8 + j)); + get_unaligned(ptr8 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; @@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%8.8x", j ? 
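/*
 * Usage sketch for the named-pool API introduced above; illustration
 * only, the device, allocation order and pool name are made up. A
 * driver creates a managed pool under a name, and other code holding
 * the same struct device can look it up later with gen_pool_get().
 */
#include <linux/device.h>
#include <linux/genalloc.h>

static int example_setup_sram_pool(struct device *dev,
				   unsigned long sram_base, size_t sram_size)
{
	struct gen_pool *pool;

	/* 6: 64-byte allocation granularity */
	pool = devm_gen_pool_create(dev, 6, NUMA_NO_NODE, "sram");
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	return gen_pool_add(pool, sram_base, sram_size, NUMA_NO_NODE);
}

static unsigned long example_alloc_from_sram(struct device *dev, size_t len)
{
	struct gen_pool *pool = gen_pool_get(dev, "sram");

	return pool ? gen_pool_alloc(pool, len) : 0;
}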
" " : "", - *(ptr4 + j)); + get_unaligned(ptr4 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; @@ -161,18 +162,22 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%4.4x", j ? " " : "", - *(ptr2 + j)); + get_unaligned(ptr2 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; } } else { for (j = 0; j < len; j++) { - if (linebuflen < lx + 3) + if (linebuflen < lx + 2) goto overflow2; ch = ptr[j]; linebuf[lx++] = hex_asc_hi(ch); + if (linebuflen < lx + 2) + goto overflow2; linebuf[lx++] = hex_asc_lo(ch); + if (linebuflen < lx + 2) + goto overflow2; linebuf[lx++] = ' '; } if (j) diff --git a/kernel/lib/idr.c b/kernel/lib/idr.c index d0681a357..9decbe914 100644 --- a/kernel/lib/idr.c +++ b/kernel/lib/idr.c @@ -46,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); static DEFINE_PER_CPU(int, idr_preload_cnt); static DEFINE_SPINLOCK(simple_ida_lock); +#ifdef CONFIG_PREEMPT_RT_FULL +static DEFINE_LOCAL_IRQ_LOCK(idr_lock); + +static inline void idr_preload_lock(void) +{ + local_lock(idr_lock); +} + +static inline void idr_preload_unlock(void) +{ + local_unlock(idr_lock); +} + +void idr_preload_end(void) +{ + idr_preload_unlock(); +} +EXPORT_SYMBOL(idr_preload_end); +#else +static inline void idr_preload_lock(void) +{ + preempt_disable(); +} + +static inline void idr_preload_unlock(void) +{ + preempt_enable(); +} +#endif + + /* the maximum ID which can be allocated given idr->layers */ static int idr_max(int layers) { @@ -116,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) * context. See idr_preload() for details. */ if (!in_interrupt()) { - preempt_disable(); + idr_preload_lock(); new = __this_cpu_read(idr_preload_head); if (new) { __this_cpu_write(idr_preload_head, new->ary[0]); __this_cpu_dec(idr_preload_cnt); new->ary[0] = NULL; } - preempt_enable(); + idr_preload_unlock(); if (new) return new; } @@ -367,36 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id, idr_mark_full(pa, id); } -#ifdef CONFIG_PREEMPT_RT_FULL -static DEFINE_LOCAL_IRQ_LOCK(idr_lock); - -static inline void idr_preload_lock(void) -{ - local_lock(idr_lock); -} - -static inline void idr_preload_unlock(void) -{ - local_unlock(idr_lock); -} - -void idr_preload_end(void) -{ - idr_preload_unlock(); -} -EXPORT_SYMBOL(idr_preload_end); -#else -static inline void idr_preload_lock(void) -{ - preempt_disable(); -} - -static inline void idr_preload_unlock(void) -{ - preempt_enable(); -} -#endif - /** * idr_preload - preload for idr_alloc() * @gfp_mask: allocation mask to use for preloading @@ -429,7 +430,7 @@ void idr_preload(gfp_t gfp_mask) * allocation guarantee. Disallow usage from those contexts. 
*/ WARN_ON_ONCE(in_interrupt()); - might_sleep_if(gfp_mask & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); idr_preload_lock(); @@ -483,7 +484,7 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) struct idr_layer *pa[MAX_IDR_LEVEL + 1]; int id; - might_sleep_if(gfp_mask & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); /* sanity checks */ if (WARN_ON_ONCE(start < 0)) diff --git a/kernel/lib/iommu-common.c b/kernel/lib/iommu-common.c index df30632f0..858dc1aae 100644 --- a/kernel/lib/iommu-common.c +++ b/kernel/lib/iommu-common.c @@ -11,18 +11,13 @@ #include <linux/dma-mapping.h> #include <linux/hash.h> -#ifndef DMA_ERROR_CODE -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) -#endif - static unsigned long iommu_large_alloc = 15; static DEFINE_PER_CPU(unsigned int, iommu_hash_common); static inline bool need_flush(struct iommu_map_table *iommu) { - return (iommu->lazy_flush != NULL && - (iommu->flags & IOMMU_NEED_FLUSH) != 0); + return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); } static inline void set_flush(struct iommu_map_table *iommu) @@ -119,12 +114,12 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, unsigned long align_mask = 0; if (align_order > 0) - align_mask = 0xffffffffffffffffl >> (64 - align_order); + align_mask = ~0ul >> (BITS_PER_LONG - align_order); /* Sanity check */ if (unlikely(npages == 0)) { WARN_ON_ONCE(1); - return DMA_ERROR_CODE; + return IOMMU_ERROR_CODE; } if (largealloc) { @@ -207,11 +202,12 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, goto again; } else { /* give up */ - n = DMA_ERROR_CODE; + n = IOMMU_ERROR_CODE; goto bail; } } - if (n < pool->hint || need_flush(iommu)) { + if (iommu->lazy_flush && + (n < pool->hint || need_flush(iommu))) { clear_flush(iommu); iommu->lazy_flush(iommu); } @@ -259,7 +255,7 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, unsigned long flags; unsigned long shift = iommu->table_shift; - if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */ + if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */ entry = (dma_addr - iommu->table_map_base) >> shift; pool = get_pool(iommu, entry); diff --git a/kernel/lib/is_single_threaded.c b/kernel/lib/is_single_threaded.c index bd2bea963..391fd2397 100644 --- a/kernel/lib/is_single_threaded.c +++ b/kernel/lib/is_single_threaded.c @@ -36,8 +36,7 @@ bool current_is_single_threaded(void) if (unlikely(p == task->group_leader)) continue; - t = p; - do { + for_each_thread(p, t) { if (unlikely(t->mm == mm)) goto found; if (likely(t->mm)) @@ -48,7 +47,7 @@ bool current_is_single_threaded(void) * forked before exiting. */ smp_rmb(); - } while_each_thread(p, t); + } } ret = true; found: diff --git a/kernel/lib/kasprintf.c b/kernel/lib/kasprintf.c index 32f12150f..f194e6e59 100644 --- a/kernel/lib/kasprintf.c +++ b/kernel/lib/kasprintf.c @@ -31,6 +31,22 @@ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) } EXPORT_SYMBOL(kvasprintf); +/* + * If fmt contains no % (or is exactly %s), use kstrdup_const. If fmt + * (or the sole vararg) points to rodata, we will then save a memory + * allocation and string copy. In any case, the return value should be + * freed using kfree_const(). 
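/*
 * Context for the idr changes above, shown as a hypothetical caller
 * (identifiers are illustrative): idr_preload() pins preallocated
 * per-cpu nodes so the following idr_alloc() can run under a spinlock
 * with a non-blocking gfp mask.
 */
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_assign_id(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep, fills the per-cpu node pool */
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, object, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();			/* drops the preload protection */

	return id;				/* negative errno on failure */
}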
+ */ +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap) +{ + if (!strchr(fmt, '%')) + return kstrdup_const(fmt, gfp); + if (!strcmp(fmt, "%s")) + return kstrdup_const(va_arg(ap, const char*), gfp); + return kvasprintf(gfp, fmt, ap); +} +EXPORT_SYMBOL(kvasprintf_const); + char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; diff --git a/kernel/lib/klist.c b/kernel/lib/klist.c index 89b485a2a..0507fa5d8 100644 --- a/kernel/lib/klist.c +++ b/kernel/lib/klist.c @@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i, struct klist_node *n) { i->i_klist = k; - i->i_cur = n; - if (n) - kref_get(&n->n_ref); + i->i_cur = NULL; + if (n && kref_get_unless_zero(&n->n_ref)) + i->i_cur = n; } EXPORT_SYMBOL_GPL(klist_iter_init_node); @@ -324,6 +324,47 @@ static struct klist_node *to_klist_node(struct list_head *n) } /** + * klist_prev - Ante up prev node in list. + * @i: Iterator structure. + * + * First grab list lock. Decrement the reference count of the previous + * node, if there was one. Grab the prev node, increment its reference + * count, drop the lock, and return that prev node. + */ +struct klist_node *klist_prev(struct klist_iter *i) +{ + void (*put)(struct klist_node *) = i->i_klist->put; + struct klist_node *last = i->i_cur; + struct klist_node *prev; + + spin_lock(&i->i_klist->k_lock); + + if (last) { + prev = to_klist_node(last->n_node.prev); + if (!klist_dec_and_del(last)) + put = NULL; + } else + prev = to_klist_node(i->i_klist->k_list.prev); + + i->i_cur = NULL; + while (prev != to_klist_node(&i->i_klist->k_list)) { + if (likely(!knode_dead(prev))) { + kref_get(&prev->n_ref); + i->i_cur = prev; + break; + } + prev = to_klist_node(prev->n_node.prev); + } + + spin_unlock(&i->i_klist->k_lock); + + if (put && last) + put(last); + return i->i_cur; +} +EXPORT_SYMBOL_GPL(klist_prev); + +/** * klist_next - Ante up next node in list. * @i: Iterator structure. * diff --git a/kernel/lib/kobject.c b/kernel/lib/kobject.c index 3b841b97f..7cbccd2b4 100644 --- a/kernel/lib/kobject.c +++ b/kernel/lib/kobject.c @@ -257,23 +257,34 @@ static int kobject_add_internal(struct kobject *kobj) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { - const char *old_name = kobj->name; - char *s; + const char *s; if (kobj->name && !fmt) return 0; - kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); - if (!kobj->name) { - kobj->name = old_name; + s = kvasprintf_const(GFP_KERNEL, fmt, vargs); + if (!s) return -ENOMEM; - } - /* ewww... some of these buggers have '/' in the name ... */ - while ((s = strchr(kobj->name, '/'))) - s[0] = '!'; + /* + * ewww... some of these buggers have '/' in the name ... If + * that's the case, we need to make sure we have an actual + * allocated copy to modify, since kvasprintf_const may have + * returned something from .rodata. 
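/*
 * Hypothetical wrapper, for illustration only, showing how the new
 * kvasprintf_const()/kfree_const() pair is meant to be used: when the
 * format has no conversions (or is exactly "%s" with a .rodata
 * argument) no allocation happens, so the returned pointer may refer
 * to .rodata and must be released with kfree_const(), never kfree().
 */
#include <linux/kernel.h>
#include <linux/string.h>

static const char *example_name_const(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	const char *s;

	va_start(ap, fmt);
	s = kvasprintf_const(gfp, fmt, ap);
	va_end(ap);

	return s;		/* caller frees with kfree_const(s) */
}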
+ */ + if (strchr(s, '/')) { + char *t; + + t = kstrdup(s, GFP_KERNEL); + kfree_const(s); + if (!t) + return -ENOMEM; + strreplace(t, '/', '!'); + s = t; + } + kfree_const(kobj->name); + kobj->name = s; - kfree(old_name); return 0; } @@ -340,8 +351,9 @@ error: } EXPORT_SYMBOL(kobject_init); -static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, - const char *fmt, va_list vargs) +static __printf(3, 0) int kobject_add_varg(struct kobject *kobj, + struct kobject *parent, + const char *fmt, va_list vargs) { int retval; @@ -468,7 +480,7 @@ int kobject_rename(struct kobject *kobj, const char *new_name) envp[0] = devpath_string; envp[1] = NULL; - name = dup_name = kstrdup(new_name, GFP_KERNEL); + name = dup_name = kstrdup_const(new_name, GFP_KERNEL); if (!name) { error = -ENOMEM; goto out; @@ -488,7 +500,7 @@ int kobject_rename(struct kobject *kobj, const char *new_name) kobject_uevent_env(kobj, KOBJ_MOVE, envp); out: - kfree(dup_name); + kfree_const(dup_name); kfree(devpath_string); kfree(devpath); kobject_put(kobj); @@ -548,6 +560,7 @@ out: kfree(devpath); return error; } +EXPORT_SYMBOL_GPL(kobject_move); /** * kobject_del - unlink kobject from hierarchy. @@ -569,6 +582,7 @@ void kobject_del(struct kobject *kobj) kobject_put(kobj->parent); kobj->parent = NULL; } +EXPORT_SYMBOL(kobject_del); /** * kobject_get - increment refcount for object. @@ -585,6 +599,7 @@ struct kobject *kobject_get(struct kobject *kobj) } return kobj; } +EXPORT_SYMBOL(kobject_get); static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj) { @@ -633,7 +648,7 @@ static void kobject_cleanup(struct kobject *kobj) /* free name if we allocated it */ if (name) { pr_debug("kobject: '%s': free name\n", name); - kfree(name); + kfree_const(name); } } @@ -676,6 +691,7 @@ void kobject_put(struct kobject *kobj) kref_put(&kobj->kref, kobject_release); } } +EXPORT_SYMBOL(kobject_put); static void dynamic_kobj_release(struct kobject *kobj) { @@ -804,6 +820,7 @@ int kset_register(struct kset *k) kobject_uevent(&k->kobj, KOBJ_ADD); return 0; } +EXPORT_SYMBOL(kset_register); /** * kset_unregister - remove a kset. @@ -816,6 +833,7 @@ void kset_unregister(struct kset *k) kobject_del(&k->kobj); kobject_put(&k->kobj); } +EXPORT_SYMBOL(kset_unregister); /** * kset_find_obj - search for object in kset. 
@@ -1052,10 +1070,3 @@ void kobj_ns_drop(enum kobj_ns_type type, void *ns) kobj_ns_ops_tbl[type]->drop_ns(ns); spin_unlock(&kobj_ns_type_lock); } - -EXPORT_SYMBOL(kobject_get); -EXPORT_SYMBOL(kobject_put); -EXPORT_SYMBOL(kobject_del); - -EXPORT_SYMBOL(kset_register); -EXPORT_SYMBOL(kset_unregister); diff --git a/kernel/lib/kstrtox.c b/kernel/lib/kstrtox.c index ec8da78df..94be244e8 100644 --- a/kernel/lib/kstrtox.c +++ b/kernel/lib/kstrtox.c @@ -152,7 +152,7 @@ int kstrtoll(const char *s, unsigned int base, long long *res) rv = _kstrtoull(s + 1, base, &tmp); if (rv < 0) return rv; - if ((long long)(-tmp) >= 0) + if ((long long)-tmp > 0) return -ERANGE; *res = -tmp; } else { diff --git a/kernel/lib/libcrc32c.c b/kernel/lib/libcrc32c.c index 6a08ce7d6..acf9da449 100644 --- a/kernel/lib/libcrc32c.c +++ b/kernel/lib/libcrc32c.c @@ -74,3 +74,4 @@ module_exit(libcrc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: crc32c"); diff --git a/kernel/lib/list_sort.c b/kernel/lib/list_sort.c index b29015102..3fe401067 100644 --- a/kernel/lib/list_sort.c +++ b/kernel/lib/list_sort.c @@ -289,5 +289,5 @@ exit: kfree(elts); return err; } -module_init(list_sort_test); +late_initcall(list_sort_test); #endif /* CONFIG_TEST_LIST_SORT */ diff --git a/kernel/lib/llist.c b/kernel/lib/llist.c index 0b0e9779d..ae5872b1d 100644 --- a/kernel/lib/llist.c +++ b/kernel/lib/llist.c @@ -66,12 +66,12 @@ struct llist_node *llist_del_first(struct llist_head *head) { struct llist_node *entry, *old_entry, *next; - entry = head->first; + entry = smp_load_acquire(&head->first); for (;;) { if (entry == NULL) return NULL; old_entry = entry; - next = entry->next; + next = READ_ONCE(entry->next); entry = cmpxchg(&head->first, old_entry, next); if (entry == old_entry) break; diff --git a/kernel/lib/lockref.c b/kernel/lib/lockref.c index 494994bf1..5a92189ad 100644 --- a/kernel/lib/lockref.c +++ b/kernel/lib/lockref.c @@ -4,14 +4,6 @@ #if USE_CMPXCHG_LOCKREF /* - * Allow weakly-ordered memory architectures to provide barrier-less - * cmpxchg semantics for lockref updates. - */ -#ifndef cmpxchg64_relaxed -# define cmpxchg64_relaxed cmpxchg64 -#endif - -/* * Note that the "cmpxchg()" reloads the "old" value for the * failure case. 
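/*
 * Worked example for the kstrtoll() change above (illustrative): after
 * the leading '-' is consumed, tmp holds the magnitude as a u64. The
 * only value for which ">= 0" and "> 0" differ on (long long)-tmp is
 * tmp == 0, i.e. the string "-0": the old test returned -ERANGE for it,
 * the new one accepts it and stores 0. LLONG_MIN
 * ("-9223372036854775808") still parses, because (long long)-tmp is
 * negative there, and genuine underflow (magnitude > 2^63) is still
 * rejected, because (long long)-tmp is then strictly positive.
 */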
*/ diff --git a/kernel/lib/lz4/lz4_decompress.c b/kernel/lib/lz4/lz4_decompress.c index 26cc6029b..6d940c72b 100644 --- a/kernel/lib/lz4/lz4_decompress.c +++ b/kernel/lib/lz4/lz4_decompress.c @@ -140,8 +140,12 @@ static int lz4_uncompress(const char *source, char *dest, int osize) /* Error: request to write beyond destination buffer */ if (cpy > oend) goto _output_error; +#if LZ4_ARCH64 + if ((ref + COPYLENGTH) > oend) +#else if ((ref + COPYLENGTH) > oend || (op + COPYLENGTH) > oend) +#endif goto _output_error; LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); while (op < cpy) @@ -266,7 +270,13 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, if (cpy > oend - COPYLENGTH) { if (cpy > oend) goto _output_error; /* write outside of buf */ - +#if LZ4_ARCH64 + if ((ref + COPYLENGTH) > oend) +#else + if ((ref + COPYLENGTH) > oend || + (op + COPYLENGTH) > oend) +#endif + goto _output_error; LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); while (op < cpy) *op++ = *ref++; diff --git a/kernel/lib/mpi/longlong.h b/kernel/lib/mpi/longlong.h index a89d04159..b90e255c2 100644 --- a/kernel/lib/mpi/longlong.h +++ b/kernel/lib/mpi/longlong.h @@ -19,7 +19,7 @@ * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, * MA 02111-1307, USA. */ -#include <asm-generic/bitops/count_zeros.h> +#include <linux/count_zeros.h> /* You have to define the following before including this file: * diff --git a/kernel/lib/mpi/mpicoder.c b/kernel/lib/mpi/mpicoder.c index 4cc644273..3db76b8c1 100644 --- a/kernel/lib/mpi/mpicoder.c +++ b/kernel/lib/mpi/mpicoder.c @@ -19,7 +19,7 @@ */ #include <linux/bitops.h> -#include <asm-generic/bitops/count_zeros.h> +#include <linux/count_zeros.h> #include "mpi-internal.h" #define MAX_EXTERN_MPI_BITS 16384 @@ -128,28 +128,43 @@ leave: } EXPORT_SYMBOL_GPL(mpi_read_from_buffer); -/**************** - * Return an allocated buffer with the MPI (msb first). - * NBYTES receives the length of this buffer. Caller must free the - * return string (This function does return a 0 byte buffer with NBYTES - * set to zero if the value of A is zero. If sign is not NULL, it will - * be set to the sign of the A. +/** + * mpi_read_buffer() - read MPI to a bufer provided by user (msb first) + * + * @a: a multi precision integer + * @buf: bufer to which the output will be written to. Needs to be at + * leaset mpi_get_size(a) long. + * @buf_len: size of the buf. + * @nbytes: receives the actual length of the data written. + * @sign: if not NULL, it will be set to the sign of a. + * + * Return: 0 on success or error code in case of error */ -void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) +int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, + int *sign) { - uint8_t *p, *buffer; + uint8_t *p; mpi_limb_t alimb; - int i; - unsigned int n; + unsigned int n = mpi_get_size(a); + int i, lzeros = 0; + + if (buf_len < n || !buf || !nbytes) + return -EINVAL; if (sign) *sign = a->sign; - *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB; - if (!n) - n++; /* avoid zero length allocation */ - p = buffer = kmalloc(n, GFP_KERNEL); - if (!p) - return NULL; + + p = (void *)&a->d[a->nlimbs] - 1; + + for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) { + if (!*p) + lzeros++; + else + break; + } + + p = buf; + *nbytes = n - lzeros; for (i = a->nlimbs - 1; i >= 0; i--) { alimb = a->d[i]; @@ -170,16 +185,62 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) #else #error please implement for this limb size. 
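/*
 * Illustrative caller of the mpi_read_buffer() interface documented
 * above; the buffer handling mirrors what mpi_get_buffer() itself does
 * and the function names are invented for the example.
 */
#include <linux/mpi.h>
#include <linux/slab.h>

static int example_export_mpi(MPI a, u8 **out, unsigned int *out_len)
{
	unsigned int n = mpi_get_size(a);	/* worst-case size in bytes */
	u8 *buf;
	int ret, sign;

	buf = kmalloc(n ? n : 1, GFP_KERNEL);	/* avoid a zero-length allocation */
	if (!buf)
		return -ENOMEM;

	ret = mpi_read_buffer(a, buf, n, out_len, &sign);
	if (ret) {
		kfree(buf);
		return ret;
	}

	*out = buf;				/* *out_len bytes, leading zeros stripped */
	return 0;
}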
#endif + + if (lzeros > 0) { + if (lzeros >= sizeof(alimb)) { + p -= sizeof(alimb); + } else { + mpi_limb_t *limb1 = (void *)p - sizeof(alimb); + mpi_limb_t *limb2 = (void *)p - sizeof(alimb) + + lzeros; + *limb1 = *limb2; + p -= lzeros; + } + lzeros -= sizeof(alimb); + } } + return 0; +} +EXPORT_SYMBOL_GPL(mpi_read_buffer); + +/* + * mpi_get_buffer() - Returns an allocated buffer with the MPI (msb first). + * Caller must free the return string. + * This function does return a 0 byte buffer with nbytes set to zero if the + * value of A is zero. + * + * @a: a multi precision integer. + * @nbytes: receives the length of this buffer. + * @sign: if not NULL, it will be set to the sign of the a. + * + * Return: Pointer to MPI buffer or NULL on error + */ +void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) +{ + uint8_t *buf; + unsigned int n; + int ret; + + if (!nbytes) + return NULL; + + n = mpi_get_size(a); - /* this is sub-optimal but we need to do the shift operation - * because the caller has to free the returned buffer */ - for (p = buffer; !*p && *nbytes; p++, --*nbytes) - ; - if (p != buffer) - memmove(buffer, p, *nbytes); + if (!n) + n++; + + buf = kmalloc(n, GFP_KERNEL); + + if (!buf) + return NULL; - return buffer; + ret = mpi_read_buffer(a, buf, n, nbytes, sign); + + if (ret) { + kfree(buf); + return NULL; + } + return buf; } EXPORT_SYMBOL_GPL(mpi_get_buffer); @@ -258,3 +319,202 @@ int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign) return 0; } EXPORT_SYMBOL_GPL(mpi_set_buffer); + +/** + * mpi_write_to_sgl() - Funnction exports MPI to an sgl (msb first) + * + * This function works in the same way as the mpi_read_buffer, but it + * takes an sgl instead of u8 * buf. + * + * @a: a multi precision integer + * @sgl: scatterlist to write to. Needs to be at least + * mpi_get_size(a) long. + * @nbytes: in/out param - it has the be set to the maximum number of + * bytes that can be written to sgl. This has to be at least + * the size of the integer a. On return it receives the actual + * length of the data written. + * @sign: if not NULL, it will be set to the sign of a. + * + * Return: 0 on success or error code in case of error + */ +int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, + int *sign) +{ + u8 *p, *p2; + mpi_limb_t alimb, alimb2; + unsigned int n = mpi_get_size(a); + int i, x, y = 0, lzeros = 0, buf_len; + + if (!nbytes || *nbytes < n) + return -EINVAL; + + if (sign) + *sign = a->sign; + + p = (void *)&a->d[a->nlimbs] - 1; + + for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) { + if (!*p) + lzeros++; + else + break; + } + + *nbytes = n - lzeros; + buf_len = sgl->length; + p2 = sg_virt(sgl); + + for (i = a->nlimbs - 1; i >= 0; i--) { + alimb = a->d[i]; + p = (u8 *)&alimb2; +#if BYTES_PER_MPI_LIMB == 4 + *p++ = alimb >> 24; + *p++ = alimb >> 16; + *p++ = alimb >> 8; + *p++ = alimb; +#elif BYTES_PER_MPI_LIMB == 8 + *p++ = alimb >> 56; + *p++ = alimb >> 48; + *p++ = alimb >> 40; + *p++ = alimb >> 32; + *p++ = alimb >> 24; + *p++ = alimb >> 16; + *p++ = alimb >> 8; + *p++ = alimb; +#else +#error please implement for this limb size. 
+#endif + if (lzeros > 0) { + if (lzeros >= sizeof(alimb)) { + p -= sizeof(alimb); + continue; + } else { + mpi_limb_t *limb1 = (void *)p - sizeof(alimb); + mpi_limb_t *limb2 = (void *)p - sizeof(alimb) + + lzeros; + *limb1 = *limb2; + p -= lzeros; + y = lzeros; + } + lzeros -= sizeof(alimb); + } + + p = p - (sizeof(alimb) - y); + + for (x = 0; x < sizeof(alimb) - y; x++) { + if (!buf_len) { + sgl = sg_next(sgl); + if (!sgl) + return -EINVAL; + buf_len = sgl->length; + p2 = sg_virt(sgl); + } + *p2++ = *p++; + buf_len--; + } + y = 0; + } + return 0; +} +EXPORT_SYMBOL_GPL(mpi_write_to_sgl); + +/* + * mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with + * data from the sgl + * + * This function works in the same way as the mpi_read_raw_data, but it + * takes an sgl instead of void * buffer. i.e. it allocates + * a new MPI and reads the content of the sgl to the MPI. + * + * @sgl: scatterlist to read from + * @len: number of bytes to read + * + * Return: Pointer to a new MPI or NULL on error + */ +MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len) +{ + struct scatterlist *sg; + int x, i, j, z, lzeros, ents; + unsigned int nbits, nlimbs, nbytes; + mpi_limb_t a; + MPI val = NULL; + + lzeros = 0; + ents = sg_nents(sgl); + + for_each_sg(sgl, sg, ents, i) { + const u8 *buff = sg_virt(sg); + int len = sg->length; + + while (len && !*buff) { + lzeros++; + len--; + buff++; + } + + if (len && *buff) + break; + + ents--; + lzeros = 0; + } + + sgl = sg; + + if (!ents) + nbytes = 0; + else + nbytes = len - lzeros; + + nbits = nbytes * 8; + if (nbits > MAX_EXTERN_MPI_BITS) { + pr_info("MPI: mpi too large (%u bits)\n", nbits); + return NULL; + } + + if (nbytes > 0) + nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)); + else + nbits = 0; + + nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); + val = mpi_alloc(nlimbs); + if (!val) + return NULL; + + val->nbits = nbits; + val->sign = 0; + val->nlimbs = nlimbs; + + if (nbytes == 0) + return val; + + j = nlimbs - 1; + a = 0; + z = 0; + x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; + x %= BYTES_PER_MPI_LIMB; + + for_each_sg(sgl, sg, ents, i) { + const u8 *buffer = sg_virt(sg) + lzeros; + int len = sg->length - lzeros; + int buf_shift = x; + + if (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB)) + len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB); + + for (; x < len + buf_shift; x++) { + a <<= 8; + a |= *buffer++; + if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) { + val->d[j--] = a; + a = 0; + } + } + z += x; + x = 0; + lzeros = 0; + } + return val; +} +EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl); diff --git a/kernel/lib/mpi/mpiutil.c b/kernel/lib/mpi/mpiutil.c index bf076d281..314f4dfa6 100644 --- a/kernel/lib/mpi/mpiutil.c +++ b/kernel/lib/mpi/mpiutil.c @@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a) if (!a) return; - kfree(a); + kzfree(a); } void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) @@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs) if (!p) return -ENOMEM; memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); - kfree(a->d); + kzfree(a->d); a->d = p; } else { a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); @@ -112,7 +112,7 @@ void mpi_free(MPI a) return; if (a->flags & 4) - kfree(a->d); + kzfree(a->d); else mpi_free_limb_space(a->d); diff --git a/kernel/lib/nmi_backtrace.c b/kernel/lib/nmi_backtrace.c new file mode 100644 index 000000000..6019c53c6 --- /dev/null +++ b/kernel/lib/nmi_backtrace.c @@ -0,0 +1,171 @@ +/* + * NMI backtrace support + * + * Gratuitously 
copied from arch/x86/kernel/apic/hw_nmi.c by Russell King, + * with the following header: + * + * HW NMI watchdog support + * + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. + * + * Arch specific calls to support NMI watchdog + * + * Bits copied from original nmi.c file + */ +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/kprobes.h> +#include <linux/nmi.h> +#include <linux/seq_buf.h> + +#ifdef arch_trigger_all_cpu_backtrace +/* For reliability, we're prepared to waste bits here. */ +static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; +static cpumask_t printtrace_mask; + +#define NMI_BUF_SIZE 4096 + +struct nmi_seq_buf { + unsigned char buffer[NMI_BUF_SIZE]; + struct seq_buf seq; +}; + +/* Safe printing in NMI context */ +static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq); + +/* "in progress" flag of arch_trigger_all_cpu_backtrace */ +static unsigned long backtrace_flag; + +static void print_seq_line(struct nmi_seq_buf *s, int start, int end) +{ + const char *buf = s->buffer + start; + + printk("%.*s", (end - start) + 1, buf); +} + +/* + * When raise() is called it will be is passed a pointer to the + * backtrace_mask. Architectures that call nmi_cpu_backtrace() + * directly from their raise() functions may rely on the mask + * they are passed being updated as a side effect of this call. + */ +void nmi_trigger_all_cpu_backtrace(bool include_self, + void (*raise)(cpumask_t *mask)) +{ + struct nmi_seq_buf *s; + int i, cpu, this_cpu = get_cpu(); + + if (test_and_set_bit(0, &backtrace_flag)) { + /* + * If there is already a trigger_all_cpu_backtrace() in progress + * (backtrace_flag == 1), don't output double cpu dump infos. + */ + put_cpu(); + return; + } + + cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); + if (!include_self) + cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask)); + + cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask)); + + /* + * Set up per_cpu seq_buf buffers that the NMIs running on the other + * CPUs will write to. + */ + for_each_cpu(cpu, to_cpumask(backtrace_mask)) { + s = &per_cpu(nmi_print_seq, cpu); + seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE); + } + + if (!cpumask_empty(to_cpumask(backtrace_mask))) { + pr_info("Sending NMI to %s CPUs:\n", + (include_self ? "all" : "other")); + raise(to_cpumask(backtrace_mask)); + } + + /* Wait for up to 10 seconds for all CPUs to do the backtrace */ + for (i = 0; i < 10 * 1000; i++) { + if (cpumask_empty(to_cpumask(backtrace_mask))) + break; + mdelay(1); + touch_softlockup_watchdog(); + } + + /* + * Now that all the NMIs have triggered, we can dump out their + * back traces safely to the console. + */ + for_each_cpu(cpu, &printtrace_mask) { + int len, last_i = 0; + + s = &per_cpu(nmi_print_seq, cpu); + len = seq_buf_used(&s->seq); + if (!len) + continue; + + /* Print line by line. */ + for (i = 0; i < len; i++) { + if (s->buffer[i] == '\n') { + print_seq_line(s, last_i, i); + last_i = i + 1; + } + } + /* Check if there was a partial line. */ + if (last_i < len) { + print_seq_line(s, last_i, len - 1); + pr_cont("\n"); + } + } + + clear_bit(0, &backtrace_flag); + smp_mb__after_atomic(); + put_cpu(); +} + +/* + * It is not safe to call printk() directly from NMI handlers. + * It may be fine if the NMI detected a lock up and we have no choice + * but to do so, but doing a NMI on all other CPUs to get a back trace + * can be done with a sysrq-l. We don't want that to lock up, which + * can happen if the NMI interrupts a printk in progress. 
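/*
 * Sketch of the arch-side glue the backtrace helper above expects;
 * the IPI routine is a placeholder (each architecture has its own way
 * of raising an NMI on a set of CPUs), so treat this as an
 * illustration only.
 */
#include <linux/cpumask.h>
#include <linux/nmi.h>

static void example_raise_nmi(cpumask_t *mask)
{
	/* arch-specific: send an NMI IPI to every CPU set in @mask */
}

static void example_arch_trigger_all_cpu_backtrace(bool include_self)
{
	/*
	 * nmi_trigger_all_cpu_backtrace() marks the target CPUs in
	 * backtrace_mask, calls raise(), then waits up to 10 seconds
	 * for each CPU's NMI handler to run nmi_cpu_backtrace() and
	 * clear its bit before printing the collected buffers.
	 */
	nmi_trigger_all_cpu_backtrace(include_self, example_raise_nmi);
}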
+ * + * Instead, we redirect the vprintk() to this nmi_vprintk() that writes + * the content into a per cpu seq_buf buffer. Then when the NMIs are + * all done, we can safely dump the contents of the seq_buf to a printk() + * from a non NMI context. + */ +static int nmi_vprintk(const char *fmt, va_list args) +{ + struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); + unsigned int len = seq_buf_used(&s->seq); + + seq_buf_vprintf(&s->seq, fmt, args); + return seq_buf_used(&s->seq) - len; +} + +bool nmi_cpu_backtrace(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + + if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { + printk_func_t printk_func_save = this_cpu_read(printk_func); + + /* Replace printk to write into the NMI seq */ + this_cpu_write(printk_func, nmi_vprintk); + pr_warn("NMI backtrace for cpu %d\n", cpu); + if (regs) + show_regs(regs); + else + dump_stack(); + this_cpu_write(printk_func, printk_func_save); + + cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); + return true; + } + + return false; +} +NOKPROBE_SYMBOL(nmi_cpu_backtrace); +#endif diff --git a/kernel/lib/once.c b/kernel/lib/once.c new file mode 100644 index 000000000..05c860462 --- /dev/null +++ b/kernel/lib/once.c @@ -0,0 +1,62 @@ +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/once.h> +#include <linux/random.h> + +struct once_work { + struct work_struct work; + struct static_key *key; +}; + +static void once_deferred(struct work_struct *w) +{ + struct once_work *work; + + work = container_of(w, struct once_work, work); + BUG_ON(!static_key_enabled(work->key)); + static_key_slow_dec(work->key); + kfree(work); +} + +static void once_disable_jump(struct static_key *key) +{ + struct once_work *w; + + w = kmalloc(sizeof(*w), GFP_ATOMIC); + if (!w) + return; + + INIT_WORK(&w->work, once_deferred); + w->key = key; + schedule_work(&w->work); +} + +static DEFINE_SPINLOCK(once_lock); + +bool __do_once_start(bool *done, unsigned long *flags) + __acquires(once_lock) +{ + spin_lock_irqsave(&once_lock, *flags); + if (*done) { + spin_unlock_irqrestore(&once_lock, *flags); + /* Keep sparse happy by restoring an even lock count on + * this lock. In case we return here, we don't call into + * __do_once_done but return early in the DO_ONCE() macro. + */ + __acquire(once_lock); + return false; + } + + return true; +} +EXPORT_SYMBOL(__do_once_start); + +void __do_once_done(bool *done, struct static_key *once_key, + unsigned long *flags) + __releases(once_lock) +{ + *done = true; + spin_unlock_irqrestore(&once_lock, *flags); + once_disable_jump(once_key); +} +EXPORT_SYMBOL(__do_once_done); diff --git a/kernel/lib/pci_iomap.c b/kernel/lib/pci_iomap.c index bcce5f149..c10fba461 100644 --- a/kernel/lib/pci_iomap.c +++ b/kernel/lib/pci_iomap.c @@ -41,17 +41,59 @@ void __iomem *pci_iomap_range(struct pci_dev *dev, len = maxlen; if (flags & IORESOURCE_IO) return __pci_ioport_map(dev, start, len); - if (flags & IORESOURCE_MEM) { - if (flags & IORESOURCE_CACHEABLE) - return ioremap(start, len); - return ioremap_nocache(start, len); - } + if (flags & IORESOURCE_MEM) + return ioremap(start, len); /* What? */ return NULL; } EXPORT_SYMBOL(pci_iomap_range); /** + * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @offset: map memory at the given offset in BAR + * @maxlen: max length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. 
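/*
 * Example consumer of the DO_ONCE() machinery backed by lib/once.c
 * above (illustrative; the secret variable is hypothetical, and this
 * assumes get_random_once() wraps DO_ONCE(get_random_bytes, ...) as in
 * linux/once.h): the slow path runs exactly once, after which the
 * static key patches the call site away via the deferred work item.
 */
#include <linux/once.h>
#include <linux/random.h>

static u32 example_hash_secret;

static u32 example_get_hash_secret(void)
{
	get_random_once(&example_hash_secret, sizeof(example_hash_secret));

	return example_hash_secret;
}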
+ * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. When possible write combining + * is used. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR from offset to the end, pass %0 here. + * */ +void __iomem *pci_iomap_wc_range(struct pci_dev *dev, + int bar, + unsigned long offset, + unsigned long maxlen) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + + if (flags & IORESOURCE_IO) + return NULL; + + if (len <= offset || !start) + return NULL; + + len -= offset; + start += offset; + if (maxlen && len > maxlen) + len = maxlen; + + if (flags & IORESOURCE_MEM) + return ioremap_wc(start, len); + + /* What? */ + return NULL; +} +EXPORT_SYMBOL_GPL(pci_iomap_wc_range); + +/** * pci_iomap - create a virtual mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR * @bar: BAR number @@ -70,4 +112,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) return pci_iomap_range(dev, bar, 0, maxlen); } EXPORT_SYMBOL(pci_iomap); + +/** + * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @maxlen: length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. When possible write combining + * is used. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR without checking for its length first, pass %0 here. + * */ +void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_wc_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL_GPL(pci_iomap_wc); #endif /* CONFIG_PCI */ diff --git a/kernel/lib/percpu_ida.c b/kernel/lib/percpu_ida.c index b1529f408..822a2c027 100644 --- a/kernel/lib/percpu_ida.c +++ b/kernel/lib/percpu_ida.c @@ -138,7 +138,7 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). * * @gfp indicates whether or not to wait until a free id is available (it's not - * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep + * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep * however long it takes until another thread frees an id (same semantics as a * mempool). 
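/*
 * Illustrative use of the new write-combining mapping helpers above;
 * the device, BAR number and fallback policy are made up. A driver
 * streaming to a prefetchable, framebuffer-style BAR would map it WC
 * and fall back to the regular mapping if that fails.
 */
#include <linux/pci.h>

static void __iomem *example_map_fb_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *regs;

	regs = pci_iomap_wc(pdev, bar, 0);	/* 0: map the whole BAR */
	if (!regs)
		regs = pci_iomap(pdev, bar, 0);	/* non-WC fallback */

	return regs;
}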
* diff --git a/kernel/lib/proportions.c b/kernel/lib/proportions.c index 6f724298f..efa54f259 100644 --- a/kernel/lib/proportions.c +++ b/kernel/lib/proportions.c @@ -1,7 +1,7 @@ /* * Floating proportions * - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * Description: * diff --git a/kernel/lib/radix-tree.c b/kernel/lib/radix-tree.c index 77015b1cc..f27e0bcb7 100644 --- a/kernel/lib/radix-tree.c +++ b/kernel/lib/radix-tree.c @@ -33,7 +33,7 @@ #include <linux/string.h> #include <linux/bitops.h> #include <linux/rcupdate.h> -#include <linux/preempt_mask.h> /* in_interrupt() */ +#include <linux/preempt.h> /* in_interrupt() */ /* @@ -65,7 +65,8 @@ static struct kmem_cache *radix_tree_node_cachep; */ struct radix_tree_preload { int nr; - struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE]; + /* nodes->private_data points to next preallocated node */ + struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; @@ -187,7 +188,7 @@ radix_tree_node_alloc(struct radix_tree_root *root) * preloading in the interrupt anyway as all the allocations have to * be atomic. So just do normal allocation when in interrupt. */ - if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) { + if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { struct radix_tree_preload *rtp; /* @@ -197,8 +198,9 @@ radix_tree_node_alloc(struct radix_tree_root *root) */ rtp = &get_cpu_var(radix_tree_preloads); if (rtp->nr) { - ret = rtp->nodes[rtp->nr - 1]; - rtp->nodes[rtp->nr - 1] = NULL; + ret = rtp->nodes; + rtp->nodes = ret->private_data; + ret->private_data = NULL; rtp->nr--; } put_cpu_var(radix_tree_preloads); @@ -249,7 +251,7 @@ radix_tree_node_free(struct radix_tree_node *node) * with preemption not disabled. * * To make use of this facility, the radix tree must be initialised without - * __GFP_WAIT being passed to INIT_RADIX_TREE(). + * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ static int __radix_tree_preload(gfp_t gfp_mask) { @@ -259,17 +261,20 @@ static int __radix_tree_preload(gfp_t gfp_mask) preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); - while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { + while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { preempt_enable(); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); - if (rtp->nr < ARRAY_SIZE(rtp->nodes)) - rtp->nodes[rtp->nr++] = node; - else + if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { + node->private_data = rtp->nodes; + rtp->nodes = node; + rtp->nr++; + } else { kmem_cache_free(radix_tree_node_cachep, node); + } } ret = 0; out: @@ -283,12 +288,12 @@ out: * with preemption not disabled. * * To make use of this facility, the radix tree must be initialised without - * __GFP_WAIT being passed to INIT_RADIX_TREE(). + * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ int radix_tree_preload(gfp_t gfp_mask) { /* Warn on non-sensical use... 
*/ - WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT)); + WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); return __radix_tree_preload(gfp_mask); } EXPORT_SYMBOL(radix_tree_preload); @@ -300,7 +305,7 @@ EXPORT_SYMBOL(radix_tree_preload); */ int radix_tree_maybe_preload(gfp_t gfp_mask) { - if (gfp_mask & __GFP_WAIT) + if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask); /* Preloading doesn't help anything with this gfp mask, skip it */ preempt_disable(); @@ -1017,9 +1022,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, return 0; radix_tree_for_each_slot(slot, root, &iter, first_index) { - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); + results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; + if (radix_tree_is_indirect_ptr(results[ret])) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (++ret == max_items) break; } @@ -1096,9 +1105,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); + results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; + if (radix_tree_is_indirect_ptr(results[ret])) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (++ret == max_items) break; } @@ -1466,15 +1479,16 @@ static int radix_tree_callback(struct notifier_block *nfb, { int cpu = (long)hcpu; struct radix_tree_preload *rtp; + struct radix_tree_node *node; /* Free per-cpu pool of perloaded nodes */ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { - kmem_cache_free(radix_tree_node_cachep, - rtp->nodes[rtp->nr-1]); - rtp->nodes[rtp->nr-1] = NULL; - rtp->nr--; + node = rtp->nodes; + rtp->nodes = node->private_data; + kmem_cache_free(radix_tree_node_cachep, node); + rtp->nr--; } } return NOTIFY_OK; diff --git a/kernel/lib/raid6/Makefile b/kernel/lib/raid6/Makefile index c7dab0645..3b10a48fa 100644 --- a/kernel/lib/raid6/Makefile +++ b/kernel/lib/raid6/Makefile @@ -15,7 +15,7 @@ quiet_cmd_unroll = UNROLL $@ < $< > $@ || ( rm -f $@ && exit 1 ) ifeq ($(CONFIG_ALTIVEC),y) -altivec_flags := -maltivec -mabi=altivec +altivec_flags := -maltivec $(call cc-option,-mabi=altivec) endif # The GCC option -ffreestanding is required in order to compile code containing diff --git a/kernel/lib/raid6/neon.c b/kernel/lib/raid6/neon.c index d9ad6ee28..7076ef1ba 100644 --- a/kernel/lib/raid6/neon.c +++ b/kernel/lib/raid6/neon.c @@ -40,9 +40,20 @@ (unsigned long)bytes, ptrs); \ kernel_neon_end(); \ } \ + static void raid6_neon ## _n ## _xor_syndrome(int disks, \ + int start, int stop, \ + size_t bytes, void **ptrs) \ + { \ + void raid6_neon ## _n ## _xor_syndrome_real(int, \ + int, int, unsigned long, void**); \ + kernel_neon_begin(); \ + raid6_neon ## _n ## _xor_syndrome_real(disks, \ + start, stop, (unsigned long)bytes, ptrs); \ + kernel_neon_end(); \ + } \ struct raid6_calls const raid6_neonx ## _n = { \ raid6_neon ## _n ## _gen_syndrome, \ - NULL, /* XOR not yet implemented */ \ + raid6_neon ## _n ## _xor_syndrome, \ raid6_have_neon, \ "neonx" #_n, \ 0 \ diff --git a/kernel/lib/raid6/neon.uc b/kernel/lib/raid6/neon.uc index 1b9ed7933..4fa51b761 100644 --- a/kernel/lib/raid6/neon.uc +++ b/kernel/lib/raid6/neon.uc @@ -3,6 +3,7 @@ * neon.uc - RAID-6 syndrome calculation using ARM NEON instructions * * Copyright (C) 2012 Rob Herring + * Copyright (C) 2015 Linaro Ltd. 
<ard.biesheuvel@linaro.org> * * Based on altivec.uc: * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved @@ -78,3 +79,48 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) vst1q_u8(&q[d+NSIZE*$$], wq$$); } } + +void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs) +{ + uint8_t **dptr = (uint8_t **)ptrs; + uint8_t *p, *q; + int d, z, z0; + + register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; + const unative_t x1d = NBYTES(0x1d); + + z0 = stop; /* P/Q right side optimization */ + p = dptr[disks-2]; /* XOR parity */ + q = dptr[disks-1]; /* RS syndrome */ + + for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { + wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]); + wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$); + + /* P/Q data pages */ + for ( z = z0-1 ; z >= start ; z-- ) { + wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]); + wp$$ = veorq_u8(wp$$, wd$$); + w2$$ = MASK(wq$$); + w1$$ = SHLBYTE(wq$$); + + w2$$ = vandq_u8(w2$$, x1d); + w1$$ = veorq_u8(w1$$, w2$$); + wq$$ = veorq_u8(w1$$, wd$$); + } + /* P/Q left side optimization */ + for ( z = start-1 ; z >= 0 ; z-- ) { + w2$$ = MASK(wq$$); + w1$$ = SHLBYTE(wq$$); + + w2$$ = vandq_u8(w2$$, x1d); + wq$$ = veorq_u8(w1$$, w2$$); + } + w1$$ = vld1q_u8(&q[d+NSIZE*$$]); + wq$$ = veorq_u8(wq$$, w1$$); + + vst1q_u8(&p[d+NSIZE*$$], wp$$); + vst1q_u8(&q[d+NSIZE*$$], wq$$); + } +} diff --git a/kernel/lib/raid6/x86.h b/kernel/lib/raid6/x86.h index b7595484a..8fe9d9662 100644 --- a/kernel/lib/raid6/x86.h +++ b/kernel/lib/raid6/x86.h @@ -23,7 +23,7 @@ #ifdef __KERNEL__ /* Real code */ -#include <asm/i387.h> +#include <asm/fpu/api.h> #else /* Dummy code for user space testing */ diff --git a/kernel/lib/random32.c b/kernel/lib/random32.c index 0bee183fa..12111910c 100644 --- a/kernel/lib/random32.c +++ b/kernel/lib/random32.c @@ -181,7 +181,7 @@ void prandom_seed(u32 entropy) * No locking on the CPUs, but then somewhat random results are, well, * expected. */ - for_each_possible_cpu (i) { + for_each_possible_cpu(i) { struct rnd_state *state = &per_cpu(net_rand_state, i); state->s1 = __seed(state->s1 ^ entropy, 2U); @@ -201,7 +201,7 @@ static int __init prandom_init(void) prandom_state_selftest(); for_each_possible_cpu(i) { - struct rnd_state *state = &per_cpu(net_rand_state,i); + struct rnd_state *state = &per_cpu(net_rand_state, i); u32 weak_seed = (i + jiffies) ^ random_get_entropy(); prandom_seed_early(state, weak_seed, true); @@ -238,13 +238,30 @@ static void __init __prandom_start_seed_timer(void) add_timer(&seed_timer); } +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) +{ + int i; + + for_each_possible_cpu(i) { + struct rnd_state *state = per_cpu_ptr(pcpu_state, i); + u32 seeds[4]; + + get_random_bytes(&seeds, sizeof(seeds)); + state->s1 = __seed(seeds[0], 2U); + state->s2 = __seed(seeds[1], 8U); + state->s3 = __seed(seeds[2], 16U); + state->s4 = __seed(seeds[3], 128U); + + prandom_warmup(state); + } +} + /* * Generate better values after random number generator * is fully initialized. 
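/*
 * Illustrative consumer of the prandom_seed_full_state() helper
 * factored out above: a subsystem keeping its own per-cpu PRNG state
 * (the variable name here is hypothetical, and this assumes the helper
 * is declared for users outside random32.c) can reseed every CPU's
 * state from get_random_bytes() with one call.
 */
#include <linux/percpu.h>
#include <linux/random.h>

static DEFINE_PER_CPU(struct rnd_state, example_rnd_state);

static void example_reseed(void)
{
	prandom_seed_full_state(&example_rnd_state);
}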
*/ static void __prandom_reseed(bool late) { - int i; unsigned long flags; static bool latch = false; static DEFINE_SPINLOCK(lock); @@ -266,19 +283,7 @@ static void __prandom_reseed(bool late) goto out; latch = true; - - for_each_possible_cpu(i) { - struct rnd_state *state = &per_cpu(net_rand_state,i); - u32 seeds[4]; - - get_random_bytes(&seeds, sizeof(seeds)); - state->s1 = __seed(seeds[0], 2U); - state->s2 = __seed(seeds[1], 8U); - state->s3 = __seed(seeds[2], 16U); - state->s4 = __seed(seeds[3], 128U); - - prandom_warmup(state); - } + prandom_seed_full_state(&net_rand_state); out: spin_unlock_irqrestore(&lock, flags); } diff --git a/kernel/lib/rbtree.c b/kernel/lib/rbtree.c index c16c81a3d..d15d6c432 100644 --- a/kernel/lib/rbtree.c +++ b/kernel/lib/rbtree.c @@ -23,6 +23,7 @@ #include <linux/rbtree_augmented.h> #include <linux/export.h> +#include <linux/rcupdate.h> /* * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree @@ -44,6 +45,30 @@ * parentheses and have some accompanying text comment. */ +/* + * Notes on lockless lookups: + * + * All stores to the tree structure (rb_left and rb_right) must be done using + * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the + * tree structure as seen in program order. + * + * These two requirements will allow lockless iteration of the tree -- not + * correct iteration mind you, tree rotations are not atomic so a lookup might + * miss entire subtrees. + * + * But they do guarantee that any such traversal will only see valid elements + * and that it will indeed complete -- does not get stuck in a loop. + * + * It also guarantees that if the lookup returns an element it is the 'correct' + * one. But not returning an element does _NOT_ mean it's not present. + * + * NOTE: + * + * Stores to __rb_parent_color are not important for simple lookups so those + * are left undone as of now. Nor did I check for loops involving parent + * pointers. + */ + static inline void rb_set_black(struct rb_node *rb) { rb->__rb_parent_color |= RB_BLACK; @@ -129,8 +154,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root, * This still leaves us in violation of 4), the * continuation into Case 3 will fix that. 
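/*
 * Editor's sketch (not part of the patch): the kind of RCU-side lookup that
 * the WRITE_ONCE() conversions in this file are meant to make safe, per the
 * "Notes on lockless lookups" above.  The node type and key comparison are
 * hypothetical.  The guarantees are only those stated in the notes: the walk
 * sees valid nodes and terminates, but a concurrent rotation may hide an
 * existing node, so a miss must be confirmed under the writer's lock.
 * Must be called inside an RCU read-side critical section.
 */
#include <linux/rbtree.h>
#include <linux/rcupdate.h>

struct example_node {
	struct rb_node rb;
	unsigned long key;
};

static struct example_node *example_lookup_rcu(struct rb_root *root,
					       unsigned long key)
{
	struct rb_node *node = rcu_dereference_raw(root->rb_node);

	while (node) {
		struct example_node *e = rb_entry(node, struct example_node, rb);

		if (key < e->key)
			node = rcu_dereference_raw(node->rb_left);
		else if (key > e->key)
			node = rcu_dereference_raw(node->rb_right);
		else
			return e;
	}
	return NULL;
}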
*/ - parent->rb_right = tmp = node->rb_left; - node->rb_left = parent; + tmp = node->rb_left; + WRITE_ONCE(parent->rb_right, tmp); + WRITE_ONCE(node->rb_left, parent); if (tmp) rb_set_parent_color(tmp, parent, RB_BLACK); @@ -149,8 +175,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root, * / \ * n U */ - gparent->rb_left = tmp; /* == parent->rb_right */ - parent->rb_right = gparent; + WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */ + WRITE_ONCE(parent->rb_right, gparent); if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); __rb_rotate_set_parents(gparent, parent, root, RB_RED); @@ -171,8 +197,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root, tmp = parent->rb_left; if (node == tmp) { /* Case 2 - right rotate at parent */ - parent->rb_left = tmp = node->rb_right; - node->rb_right = parent; + tmp = node->rb_right; + WRITE_ONCE(parent->rb_left, tmp); + WRITE_ONCE(node->rb_right, parent); if (tmp) rb_set_parent_color(tmp, parent, RB_BLACK); @@ -183,8 +210,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root, } /* Case 3 - left rotate at gparent */ - gparent->rb_right = tmp; /* == parent->rb_left */ - parent->rb_left = gparent; + WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */ + WRITE_ONCE(parent->rb_left, gparent); if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); __rb_rotate_set_parents(gparent, parent, root, RB_RED); @@ -224,8 +251,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, * / \ / \ * Sl Sr N Sl */ - parent->rb_right = tmp1 = sibling->rb_left; - sibling->rb_left = parent; + tmp1 = sibling->rb_left; + WRITE_ONCE(parent->rb_right, tmp1); + WRITE_ONCE(sibling->rb_left, parent); rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); @@ -275,9 +303,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, * \ * Sr */ - sibling->rb_left = tmp1 = tmp2->rb_right; - tmp2->rb_right = sibling; - parent->rb_right = tmp2; + tmp1 = tmp2->rb_right; + WRITE_ONCE(sibling->rb_left, tmp1); + WRITE_ONCE(tmp2->rb_right, sibling); + WRITE_ONCE(parent->rb_right, tmp2); if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); @@ -297,8 +326,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, * / \ / \ * (sl) sr N (sl) */ - parent->rb_right = tmp2 = sibling->rb_left; - sibling->rb_left = parent; + tmp2 = sibling->rb_left; + WRITE_ONCE(parent->rb_right, tmp2); + WRITE_ONCE(sibling->rb_left, parent); rb_set_parent_color(tmp1, sibling, RB_BLACK); if (tmp2) rb_set_parent(tmp2, parent); @@ -310,8 +340,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, sibling = parent->rb_left; if (rb_is_red(sibling)) { /* Case 1 - right rotate at parent */ - parent->rb_left = tmp1 = sibling->rb_right; - sibling->rb_right = parent; + tmp1 = sibling->rb_right; + WRITE_ONCE(parent->rb_left, tmp1); + WRITE_ONCE(sibling->rb_right, parent); rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); @@ -336,9 +367,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, break; } /* Case 3 - right rotate at sibling */ - sibling->rb_right = tmp1 = tmp2->rb_left; - tmp2->rb_left = sibling; - parent->rb_left = tmp2; + tmp1 = tmp2->rb_left; + WRITE_ONCE(sibling->rb_right, tmp1); + WRITE_ONCE(tmp2->rb_left, sibling); + WRITE_ONCE(parent->rb_left, tmp2); if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); @@ -347,8 +379,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, sibling = tmp2; } /* Case 
4 - left rotate at parent + color flips */ - parent->rb_left = tmp2 = sibling->rb_right; - sibling->rb_right = parent; + tmp2 = sibling->rb_right; + WRITE_ONCE(parent->rb_left, tmp2); + WRITE_ONCE(sibling->rb_right, parent); rb_set_parent_color(tmp1, sibling, RB_BLACK); if (tmp2) rb_set_parent(tmp2, parent); @@ -558,3 +591,13 @@ struct rb_node *rb_first_postorder(const struct rb_root *root) return rb_left_deepest_node(root->rb_node); } EXPORT_SYMBOL(rb_first_postorder); + +void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + rcu_assign_pointer(*rb_link, node); +} +EXPORT_SYMBOL(rb_link_node_rcu); diff --git a/kernel/lib/rhashtable.c b/kernel/lib/rhashtable.c index cf910e48f..51282f579 100644 --- a/kernel/lib/rhashtable.c +++ b/kernel/lib/rhashtable.c @@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) head = rht_dereference_bucket(new_tbl->buckets[new_hash], new_tbl, new_hash); - if (rht_is_a_nulls(head)) - INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash); - else - RCU_INIT_POINTER(entry->next, head); + RCU_INIT_POINTER(entry->next, head); rcu_assign_pointer(new_tbl->buckets[new_hash], entry); spin_unlock(new_bucket_lock); @@ -392,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht, return false; } -int rhashtable_insert_rehash(struct rhashtable *ht) +int rhashtable_insert_rehash(struct rhashtable *ht, + struct bucket_table *tbl) { struct bucket_table *old_tbl; struct bucket_table *new_tbl; - struct bucket_table *tbl; unsigned int size; int err; old_tbl = rht_dereference_rcu(ht->tbl, ht); - tbl = rhashtable_last_table(ht, old_tbl); size = tbl->size; + err = -EBUSY; + if (rht_grow_above_75(ht, tbl)) size *= 2; /* Do not schedule more than one rehash */ else if (old_tbl != tbl) - return -EBUSY; + goto fail; + + err = -ENOMEM; new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); - if (new_tbl == NULL) { - /* Schedule async resize/rehash to try allocation - * non-atomic context. - */ - schedule_work(&ht->run_work); - return -ENOMEM; - } + if (new_tbl == NULL) + goto fail; err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { @@ -429,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht) schedule_work(&ht->run_work); return err; + +fail: + /* Do not fail the insert if someone else did a rehash. */ + if (likely(rcu_dereference_raw(tbl->future_tbl))) + return 0; + + /* Schedule async rehash to retry allocation in process context. 
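/*
 * Editor's sketch (not part of the patch): writer-side insertion using the
 * rb_link_node_rcu() helper added in the rbtree.c hunk above.  Rebalancing
 * still happens under the writer's lock via rb_insert_color(); only the
 * final link is published with rcu_assign_pointer(), so lockless readers
 * never observe a half-initialised node.  Assumes a prototype for
 * rb_link_node_rcu() is visible; the node type and lock are hypothetical.
 */
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct sample_node {
	struct rb_node rb;
	unsigned long key;
};

static void sample_insert(struct rb_root *root, struct sample_node *new,
			  spinlock_t *lock)
{
	struct rb_node **link, *parent = NULL;

	spin_lock(lock);
	link = &root->rb_node;
	while (*link) {
		parent = *link;
		if (new->key < rb_entry(parent, struct sample_node, rb)->key)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node_rcu(new, parent, link);
	rb_insert_color(new, root);
	spin_unlock(lock);
}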
*/ + if (err == -ENOMEM) + schedule_work(&ht->run_work); + + return err; } EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); -int rhashtable_insert_slow(struct rhashtable *ht, const void *key, - struct rhash_head *obj, - struct bucket_table *tbl) +struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, + const void *key, + struct rhash_head *obj, + struct bucket_table *tbl) { struct rhash_head *head; unsigned int hash; @@ -470,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key, exit: spin_unlock(rht_bucket_lock(tbl, hash)); - return err; + if (err == 0) + return NULL; + else if (err == -EAGAIN) + return tbl; + else + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(rhashtable_insert_slow); @@ -506,10 +518,11 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) if (!iter->walker) return -ENOMEM; - mutex_lock(&ht->mutex); - iter->walker->tbl = rht_dereference(ht->tbl, ht); + spin_lock(&ht->lock); + iter->walker->tbl = + rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); list_add(&iter->walker->list, &iter->walker->tbl->walkers); - mutex_unlock(&ht->mutex); + spin_unlock(&ht->lock); return 0; } @@ -523,10 +536,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); */ void rhashtable_walk_exit(struct rhashtable_iter *iter) { - mutex_lock(&iter->ht->mutex); + spin_lock(&iter->ht->lock); if (iter->walker->tbl) list_del(&iter->walker->list); - mutex_unlock(&iter->ht->mutex); + spin_unlock(&iter->ht->lock); kfree(iter->walker); } EXPORT_SYMBOL_GPL(rhashtable_walk_exit); @@ -550,14 +563,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) { struct rhashtable *ht = iter->ht; - mutex_lock(&ht->mutex); + rcu_read_lock(); + spin_lock(&ht->lock); if (iter->walker->tbl) list_del(&iter->walker->list); - - rcu_read_lock(); - - mutex_unlock(&ht->mutex); + spin_unlock(&ht->lock); if (!iter->walker->tbl) { iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); @@ -585,7 +596,6 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter) struct bucket_table *tbl = iter->walker->tbl; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; - void *obj = NULL; if (p) { p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); @@ -605,8 +615,7 @@ next: if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; - obj = rht_obj(ht, p); - goto out; + return rht_obj(ht, p); } iter->skip = 0; @@ -624,9 +633,7 @@ next: return ERR_PTR(-EAGAIN); } -out: - - return obj; + return NULL; } EXPORT_SYMBOL_GPL(rhashtable_walk_next); @@ -730,9 +737,6 @@ int rhashtable_init(struct rhashtable *ht, if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) return -EINVAL; - if (params->nelem_hint) - size = rounded_hashtable_size(params); - memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); @@ -752,6 +756,9 @@ int rhashtable_init(struct rhashtable *ht, ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); + if (params->nelem_hint) + size = rounded_hashtable_size(&ht->p); + /* The maximum (not average) chain length grows with the * size of the hash table, at a rate of (log N)/(log log N). 
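/*
 * Editor's sketch (not part of the patch): driving the rhashtable walker
 * whose synchronisation is reworked above (ht->lock spinlock plus RCU in
 * place of ht->mutex).  Object handling is hypothetical.  An -EAGAIN from
 * rhashtable_walk_start()/_next() only signals that a resize was observed,
 * so the walk may see duplicates but can simply continue.
 */
#include <linux/rhashtable.h>
#include <linux/err.h>

static void example_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	if (rhashtable_walk_init(ht, &iter))
		return;

	if (rhashtable_walk_start(&iter) == -EAGAIN) {
		/* A resize is in flight; the walk is still valid. */
	}

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize hit mid-walk */
			break;
		}
		/* ... inspect obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}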
* The value of 16 is selected so that even if the hash diff --git a/kernel/lib/scatterlist.c b/kernel/lib/scatterlist.c index f6d1f8899..ebe3b7edd 100644 --- a/kernel/lib/scatterlist.c +++ b/kernel/lib/scatterlist.c @@ -56,6 +56,38 @@ int sg_nents(struct scatterlist *sg) } EXPORT_SYMBOL(sg_nents); +/** + * sg_nents_for_len - return total count of entries in scatterlist + * needed to satisfy the supplied length + * @sg: The scatterlist + * @len: The total required length + * + * Description: + * Determines the number of entries in sg that are required to meet + * the supplied length, taking into acount chaining as well + * + * Returns: + * the number of sg entries needed, negative error on failure + * + **/ +int sg_nents_for_len(struct scatterlist *sg, u64 len) +{ + int nents; + u64 total; + + if (!len) + return 0; + + for (nents = 0, total = 0; sg; sg = sg_next(sg)) { + nents++; + total += sg->length; + if (total >= len) + return nents; + } + + return -EINVAL; +} +EXPORT_SYMBOL(sg_nents_for_len); /** * sg_last - return the last scatterlist entry in a list @@ -73,16 +105,12 @@ EXPORT_SYMBOL(sg_nents); **/ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) { -#ifndef CONFIG_ARCH_HAS_SG_CHAIN - struct scatterlist *ret = &sgl[nents - 1]; -#else struct scatterlist *sg, *ret = NULL; unsigned int i; for_each_sg(sgl, sg, nents, i) ret = sg; -#endif #ifdef CONFIG_DEBUG_SG BUG_ON(sgl[0].sg_magic != SG_MAGIC); BUG_ON(!sg_is_last(ret)); @@ -618,9 +646,8 @@ EXPORT_SYMBOL(sg_miter_stop); * Returns the number of copied bytes. * **/ -static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip, - bool to_buffer) +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer) { unsigned int offset = 0; struct sg_mapping_iter miter; @@ -657,6 +684,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, local_irq_restore_nort(flags); return offset; } +EXPORT_SYMBOL(sg_copy_buffer); /** * sg_copy_from_buffer - Copy from a linear buffer to an SG list @@ -669,9 +697,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, * **/ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen) + const void *buf, size_t buflen) { - return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false); } EXPORT_SYMBOL(sg_copy_from_buffer); @@ -697,16 +725,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer); * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from - * @skip: Number of bytes to skip before copying * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. * **/ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip) + const void *buf, size_t buflen, off_t skip) { - return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false); } EXPORT_SYMBOL(sg_pcopy_from_buffer); @@ -715,8 +743,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer); * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy to - * @skip: Number of bytes to skip before copying * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. 
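/*
 * Editor's sketch (not part of the patch): a typical caller of the new
 * sg_nents_for_len(), e.g. a driver that must map only the entries covering
 * the first 'len' bytes of a scatterlist.  The surrounding driver code is
 * hypothetical.
 */
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_for_len(struct device *dev, struct scatterlist *sgl,
			       u64 len, enum dma_data_direction dir)
{
	int nents = sg_nents_for_len(sgl, len);

	if (nents < 0)		/* list is shorter than len: -EINVAL */
		return nents;

	return dma_map_sg(dev, sgl, nents, dir) ? 0 : -EIO;
}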
* diff --git a/kernel/lib/sg_split.c b/kernel/lib/sg_split.c new file mode 100644 index 000000000..b063410c3 --- /dev/null +++ b/kernel/lib/sg_split.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr> + * + * Scatterlist splitting helpers. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include <linux/scatterlist.h> +#include <linux/slab.h> + +struct sg_splitter { + struct scatterlist *in_sg0; + int nents; + off_t skip_sg0; + unsigned int length_last_sg; + + struct scatterlist *out_sg; +}; + +static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits, + off_t skip, const size_t *sizes, + struct sg_splitter *splitters, bool mapped) +{ + int i; + unsigned int sglen; + size_t size = sizes[0], len; + struct sg_splitter *curr = splitters; + struct scatterlist *sg; + + for (i = 0; i < nb_splits; i++) { + splitters[i].in_sg0 = NULL; + splitters[i].nents = 0; + } + + for_each_sg(in, sg, nents, i) { + sglen = mapped ? sg_dma_len(sg) : sg->length; + if (skip > sglen) { + skip -= sglen; + continue; + } + + len = min_t(size_t, size, sglen - skip); + if (!curr->in_sg0) { + curr->in_sg0 = sg; + curr->skip_sg0 = skip; + } + size -= len; + curr->nents++; + curr->length_last_sg = len; + + while (!size && (skip + len < sglen) && (--nb_splits > 0)) { + curr++; + size = *(++sizes); + skip += len; + len = min_t(size_t, size, sglen - skip); + + curr->in_sg0 = sg; + curr->skip_sg0 = skip; + curr->nents = 1; + curr->length_last_sg = len; + size -= len; + } + skip = 0; + + if (!size && --nb_splits > 0) { + curr++; + size = *(++sizes); + } + + if (!nb_splits) + break; + } + + return (size || !splitters[0].in_sg0) ? -EINVAL : 0; +} + +static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits) +{ + int i, j; + struct scatterlist *in_sg, *out_sg; + struct sg_splitter *split; + + for (i = 0, split = splitters; i < nb_splits; i++, split++) { + in_sg = split->in_sg0; + out_sg = split->out_sg; + for (j = 0; j < split->nents; j++, out_sg++) { + *out_sg = *in_sg; + if (!j) { + out_sg->offset += split->skip_sg0; + out_sg->length -= split->skip_sg0; + } else { + out_sg->offset = 0; + } + sg_dma_address(out_sg) = 0; + sg_dma_len(out_sg) = 0; + in_sg = sg_next(in_sg); + } + out_sg[-1].length = split->length_last_sg; + sg_mark_end(out_sg - 1); + } +} + +static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits) +{ + int i, j; + struct scatterlist *in_sg, *out_sg; + struct sg_splitter *split; + + for (i = 0, split = splitters; i < nb_splits; i++, split++) { + in_sg = split->in_sg0; + out_sg = split->out_sg; + for (j = 0; j < split->nents; j++, out_sg++) { + sg_dma_address(out_sg) = sg_dma_address(in_sg); + sg_dma_len(out_sg) = sg_dma_len(in_sg); + if (!j) { + sg_dma_address(out_sg) += split->skip_sg0; + sg_dma_len(out_sg) -= split->skip_sg0; + } + in_sg = sg_next(in_sg); + } + sg_dma_len(--out_sg) = split->length_last_sg; + } +} + +/** + * sg_split - split a scatterlist into several scatterlists + * @in: the input sg list + * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped. + * @skip: the number of bytes to skip in the input sg list + * @nb_splits: the number of desired sg outputs + * @split_sizes: the respective size of each output sg list in bytes + * @out: an array where to store the allocated output sg lists + * @out_mapped_nents: the resulting sg lists mapped number of sg entries. 
Might + * be NULL if sglist not already mapped (in_mapped_nents = 0) + * @gfp_mask: the allocation flag + * + * This function splits the input sg list into nb_splits sg lists, which are + * allocated and stored into out. + * The @in is split into : + * - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in + * - @out[1], which covers bytes [@skip + split_sizes[0] .. + * @skip + @split_sizes[0] + @split_sizes[1] -1] + * etc ... + * It will be the caller's duty to kfree() out array members. + * + * Returns 0 upon success, or error code + */ +int sg_split(struct scatterlist *in, const int in_mapped_nents, + const off_t skip, const int nb_splits, + const size_t *split_sizes, + struct scatterlist **out, int *out_mapped_nents, + gfp_t gfp_mask) +{ + int i, ret; + struct sg_splitter *splitters; + + splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask); + if (!splitters) + return -ENOMEM; + + ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes, + splitters, false); + if (ret < 0) + goto err; + + ret = -ENOMEM; + for (i = 0; i < nb_splits; i++) { + splitters[i].out_sg = kmalloc_array(splitters[i].nents, + sizeof(struct scatterlist), + gfp_mask); + if (!splitters[i].out_sg) + goto err; + } + + /* + * The order of these 3 calls is important and should be kept. + */ + sg_split_phys(splitters, nb_splits); + ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip, + split_sizes, splitters, true); + if (ret < 0) + goto err; + sg_split_mapped(splitters, nb_splits); + + for (i = 0; i < nb_splits; i++) { + out[i] = splitters[i].out_sg; + if (out_mapped_nents) + out_mapped_nents[i] = splitters[i].nents; + } + + kfree(splitters); + return 0; + +err: + for (i = 0; i < nb_splits; i++) + kfree(splitters[i].out_sg); + kfree(splitters); + return ret; +} +EXPORT_SYMBOL(sg_split); diff --git a/kernel/lib/show_mem.c b/kernel/lib/show_mem.c index adc98e182..1feed6a2b 100644 --- a/kernel/lib/show_mem.c +++ b/kernel/lib/show_mem.c @@ -38,11 +38,9 @@ void show_mem(unsigned int filter) printk("%lu pages RAM\n", total); printk("%lu pages HighMem/MovableOnly\n", highmem); + printk("%lu pages reserved\n", reserved); #ifdef CONFIG_CMA - printk("%lu pages reserved\n", (reserved - totalcma_pages)); printk("%lu pages cma reserved\n", totalcma_pages); -#else - printk("%lu pages reserved\n", reserved); #endif #ifdef CONFIG_QUICKLIST printk("%lu pages in pagetable cache\n", diff --git a/kernel/lib/sort.c b/kernel/lib/sort.c index 43c9fe73a..fc20df42a 100644 --- a/kernel/lib/sort.c +++ b/kernel/lib/sort.c @@ -8,6 +8,12 @@ #include <linux/export.h> #include <linux/sort.h> +static int alignment_ok(const void *base, int align) +{ + return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + ((unsigned long)base & (align - 1)) == 0; +} + static void u32_swap(void *a, void *b, int size) { u32 t = *(u32 *)a; @@ -15,6 +21,13 @@ static void u32_swap(void *a, void *b, int size) *(u32 *)b = t; } +static void u64_swap(void *a, void *b, int size) +{ + u64 t = *(u64 *)a; + *(u64 *)a = *(u64 *)b; + *(u64 *)b = t; +} + static void generic_swap(void *a, void *b, int size) { char t; @@ -50,8 +63,14 @@ void sort(void *base, size_t num, size_t size, /* pre-scale counters for performance */ int i = (num/2 - 1) * size, n = num * size, c, r; - if (!swap_func) - swap_func = (size == 4 ? 
u32_swap : generic_swap); + if (!swap_func) { + if (size == 4 && alignment_ok(base, 4)) + swap_func = u32_swap; + else if (size == 8 && alignment_ok(base, 8)) + swap_func = u64_swap; + else + swap_func = generic_swap; + } /* heapify */ for ( ; i >= 0; i -= size) { diff --git a/kernel/lib/string.c b/kernel/lib/string.c index bb3d4b699..0323c0d56 100644 --- a/kernel/lib/string.c +++ b/kernel/lib/string.c @@ -27,6 +27,10 @@ #include <linux/bug.h> #include <linux/errno.h> +#include <asm/byteorder.h> +#include <asm/word-at-a-time.h> +#include <asm/page.h> + #ifndef __HAVE_ARCH_STRNCASECMP /** * strncasecmp - Case insensitive, length-limited string comparison @@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size) EXPORT_SYMBOL(strlcpy); #endif +#ifndef __HAVE_ARCH_STRSCPY +/** + * strscpy - Copy a C-string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: Size of destination buffer + * + * Copy the string, or as much of it as fits, into the dest buffer. + * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since + * the return value is easier to error-check than strlcpy()'s. + * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. + */ +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + size_t max = count; + long res = 0; + + if (count == 0) + return -E2BIG; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * If src is unaligned, don't cross a page boundary, + * since we don't know if the next page is mapped. + */ + if ((long)src & (sizeof(long) - 1)) { + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); + if (limit < max) + max = limit; + } +#else + /* If src or dest is unaligned, don't do word-at-a-time. */ + if (((long) dest | (long) src) & (sizeof(long) - 1)) + max = 0; +#endif + + while (max >= sizeof(unsigned long)) { + unsigned long c, data; + + c = *(unsigned long *)(src+res); + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + *(unsigned long *)(dest+res) = c & zero_bytemask(data); + return res + find_zero(data); + } + *(unsigned long *)(dest+res) = c; + res += sizeof(unsigned long); + count -= sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. 
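/*
 * Editor's sketch (not part of the patch): checking strscpy()'s return
 * value, which is the copied length or -E2BIG on truncation; the
 * destination stays NUL-terminated either way (unless it is zero-sized).
 * Function and buffer names are hypothetical.
 */
#include <linux/string.h>
#include <linux/errno.h>

static int example_set_label(char *dst, size_t dst_size, const char *src)
{
	ssize_t len = strscpy(dst, src, dst_size);

	/* dst now holds a (possibly truncated) NUL-terminated copy. */
	return (len == -E2BIG) ? -EINVAL : 0;
}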
*/ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL(strscpy); +#endif + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another @@ -815,7 +904,7 @@ void *memchr_inv(const void *start, int c, size_t bytes) value64 = value; #if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 - value64 *= 0x0101010101010101; + value64 *= 0x0101010101010101ULL; #elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) value64 *= 0x01010101; value64 |= value64 << 32; @@ -849,3 +938,20 @@ void *memchr_inv(const void *start, int c, size_t bytes) return check_bytes8(start, value, bytes % 8); } EXPORT_SYMBOL(memchr_inv); + +/** + * strreplace - Replace all occurrences of character in string. + * @s: The string to operate on. + * @old: The character being replaced. + * @new: The character @old is replaced with. + * + * Returns pointer to the nul byte at the end of @s. + */ +char *strreplace(char *s, char old, char new) +{ + for (; *s; ++s) + if (*s == old) + *s = new; + return s; +} +EXPORT_SYMBOL(strreplace); diff --git a/kernel/lib/string_helpers.c b/kernel/lib/string_helpers.c index c98ae818e..5c88204b6 100644 --- a/kernel/lib/string_helpers.c +++ b/kernel/lib/string_helpers.c @@ -43,46 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, [STRING_UNITS_10] = 1000, [STRING_UNITS_2] = 1024, }; - int i, j; - u32 remainder = 0, sf_cap, exp; + static const unsigned int rounding[] = { 500, 50, 5 }; + int i = 0, j; + u32 remainder = 0, sf_cap; char tmp[8]; const char *unit; tmp[0] = '\0'; - i = 0; - if (!size) + + if (blk_size == 0) + size = 0; + if (size == 0) goto out; - while (blk_size >= divisor[units]) { - remainder = do_div(blk_size, divisor[units]); + /* This is Napier's algorithm. Reduce the original block size to + * + * coefficient * divisor[units]^i + * + * we do the reduction so both coefficients are just under 32 bits so + * that multiplying them together won't overflow 64 bits and we keep + * as much precision as possible in the numbers. + * + * Note: it's safe to throw away the remainders here because all the + * precision is in the coefficients. + */ + while (blk_size >> 32) { + do_div(blk_size, divisor[units]); i++; } - exp = divisor[units] / (u32)blk_size; - if (size >= exp) { - remainder = do_div(size, divisor[units]); - remainder *= blk_size; + while (size >> 32) { + do_div(size, divisor[units]); i++; - } else { - remainder *= size; } + /* now perform the actual multiplication keeping i as the sum of the + * two logarithms */ size *= blk_size; - size += remainder / divisor[units]; - remainder %= divisor[units]; + /* and logarithmically reduce it until it's just under the divisor */ while (size >= divisor[units]) { remainder = do_div(size, divisor[units]); i++; } + /* work out in j how many digits of precision we need from the + * remainder */ sf_cap = size; for (j = 0; sf_cap*10 < 1000; j++) sf_cap *= 10; - if (j) { + if (units == STRING_UNITS_2) { + /* express the remainder as a decimal. 
It's currently the + * numerator of a fraction whose denominator is + * divisor[units], which is 1 << 10 for STRING_UNITS_2 */ remainder *= 1000; - remainder /= divisor[units]; + remainder >>= 10; + } + + /* add a 5 to the digit below what will be printed to ensure + * an arithmetical round up and carry it through to size */ + remainder += rounding[j]; + if (remainder >= 1000) { + remainder -= 1000; + size += 1; + } + + if (j) { snprintf(tmp, sizeof(tmp), ".%03u", remainder); tmp[j+1] = '\0'; } @@ -410,7 +437,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * @dst: destination buffer (escaped) * @osz: destination buffer size * @flags: combination of the flags (bitwise OR): - * %ESCAPE_SPACE: + * %ESCAPE_SPACE: (special white space, not space itself) * '\f' - form feed * '\n' - new line * '\r' - carriage return @@ -432,16 +459,18 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * all previous together * %ESCAPE_HEX: * '\xHH' - byte with hexadecimal value HH (2 digits) - * @esc: NULL-terminated string of characters any of which, if found in - * the source, has to be escaped + * @only: NULL-terminated string containing characters used to limit + * the selected escape class. If characters are included in @only + * that would not normally be escaped by the classes selected + * in @flags, they will be copied to @dst unescaped. * * Description: * The process of escaping byte buffer includes several parts. They are applied * in the following sequence. * 1. The character is matched to the printable class, if asked, and in * case of match it passes through to the output. - * 2. The character is not matched to the one from @esc string and thus - * must go as is to the output. + * 2. The character is not matched to the one from @only string and thus + * must go as-is to the output. * 3. The character is checked if it falls into the class given by @flags. * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any * character. Note that they actually can't go together, otherwise @@ -458,11 +487,11 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * dst for a '\0' terminator if and only if ret < osz. */ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, - unsigned int flags, const char *esc) + unsigned int flags, const char *only) { char *p = dst; char *end = p + osz; - bool is_dict = esc && *esc; + bool is_dict = only && *only; while (isz--) { unsigned char c = *src++; @@ -471,7 +500,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, * Apply rules in the following sequence: * - the character is printable, when @flags has * %ESCAPE_NP bit set - * - the @esc string is supplied and does not contain a + * - the @only string is supplied and does not contain a * character under question * - the character doesn't fall into a class of symbols * defined by given @flags @@ -479,7 +508,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, * output buffer. 
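/*
 * Editor's sketch (not part of the patch): the @only argument as
 * re-documented above.  With ESCAPE_SPACE selected but @only = "\n", only
 * the '\n' byte is escaped (to a backslash followed by 'n'); other white
 * space ('\t', '\r', ...) is copied through unescaped.  Function name and
 * buffer handling are hypothetical.
 */
#include <linux/string_helpers.h>

static int example_escape_newlines(const char *src, size_t len,
				   char *out, size_t osz)
{
	int ret = string_escape_mem(src, len, out, osz, ESCAPE_SPACE, "\n");

	if (ret >= 0 && (size_t)ret < osz)
		out[ret] = '\0';	/* room for a terminator iff ret < osz */
	return ret;
}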
*/ if ((flags & ESCAPE_NP && isprint(c)) || - (is_dict && !strchr(esc, c))) { + (is_dict && !strchr(only, c))) { /* do nothing */ } else { if (flags & ESCAPE_SPACE && escape_space(c, &p, end)) diff --git a/kernel/lib/swiotlb.c b/kernel/lib/swiotlb.c index 3c365ab6c..76f29ecba 100644 --- a/kernel/lib/swiotlb.c +++ b/kernel/lib/swiotlb.c @@ -29,10 +29,10 @@ #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/gfp.h> +#include <linux/scatterlist.h> #include <asm/io.h> #include <asm/dma.h> -#include <asm/scatterlist.h> #include <linux/init.h> #include <linux/bootmem.h> @@ -656,7 +656,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE); if (paddr == SWIOTLB_MAP_ERROR) - return NULL; + goto err_warn; ret = phys_to_virt(paddr); dev_addr = phys_to_dma(hwdev, paddr); @@ -670,7 +670,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE); - return NULL; + goto err_warn; } } @@ -678,6 +678,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, memset(ret, 0, size); return ret; + +err_warn: + pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", + dev_name(hwdev), size); + dump_stack(); + + return NULL; } EXPORT_SYMBOL(swiotlb_alloc_coherent); diff --git a/kernel/lib/test-hexdump.c b/kernel/lib/test-hexdump.c index c227cc43e..5241df36e 100644 --- a/kernel/lib/test-hexdump.c +++ b/kernel/lib/test-hexdump.c @@ -25,19 +25,19 @@ static const char * const test_data_1_le[] __initconst = { "4c", "d1", "19", "99", "43", "b1", "af", "0c", }; -static const char *test_data_2_le[] __initdata = { +static const char * const test_data_2_le[] __initconst = { "32be", "7bdb", "180a", "b293", "ba70", "24c4", "837d", "9b34", "9ca6", "ad31", "0f9c", "e9ac", "d14c", "9919", "b143", "0caf", }; -static const char *test_data_4_le[] __initdata = { +static const char * const test_data_4_le[] __initconst = { "7bdb32be", "b293180a", "24c4ba70", "9b34837d", "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143", }; -static const char *test_data_8_le[] __initdata = { +static const char * const test_data_8_le[] __initconst = { "b293180a7bdb32be", "9b34837d24c4ba70", "e9ac0f9cad319ca6", "0cafb1439919d14c", }; diff --git a/kernel/lib/test-kstrtox.c b/kernel/lib/test-kstrtox.c index 4137bca5f..f355f6716 100644 --- a/kernel/lib/test-kstrtox.c +++ b/kernel/lib/test-kstrtox.c @@ -260,6 +260,7 @@ static void __init test_kstrtoll_ok(void) {"4294967297", 10, 4294967297LL}, {"9223372036854775807", 10, 9223372036854775807LL}, + {"-0", 10, 0LL}, {"-1", 10, -1LL}, {"-2", 10, -2LL}, {"-9223372036854775808", 10, LLONG_MIN}, @@ -277,11 +278,6 @@ static void __init test_kstrtoll_fail(void) {"-9223372036854775809", 10}, {"-18446744073709551614", 10}, {"-18446744073709551615", 10}, - /* negative zero isn't an integer in Linux */ - {"-0", 0}, - {"-0", 8}, - {"-0", 10}, - {"-0", 16}, /* sign is first character if any */ {"-+1", 0}, {"-+1", 8}, diff --git a/kernel/lib/test-string_helpers.c b/kernel/lib/test-string_helpers.c index 8e376efd8..98866a770 100644 --- a/kernel/lib/test-string_helpers.c +++ b/kernel/lib/test-string_helpers.c @@ -326,6 +326,39 @@ out: kfree(out_test); } +#define string_get_size_maxbuf 16 +#define test_string_get_size_one(size, blk_size, units, exp_result) \ + do { \ + BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf); \ + __test_string_get_size((size), (blk_size), (units), \ + (exp_result)); \ 
+ } while (0) + + +static __init void __test_string_get_size(const u64 size, const u64 blk_size, + const enum string_size_units units, + const char *exp_result) +{ + char buf[string_get_size_maxbuf]; + + string_get_size(size, blk_size, units, buf, sizeof(buf)); + if (!memcmp(buf, exp_result, strlen(exp_result) + 1)) + return; + + buf[sizeof(buf) - 1] = '\0'; + pr_warn("Test 'test_string_get_size_one' failed!\n"); + pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n", + size, blk_size, units); + pr_warn("expected: '%s', got '%s'\n", exp_result, buf); +} + +static __init void test_string_get_size(void) +{ + test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB"); + test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB"); + test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B"); +} + static int __init test_string_helpers_init(void) { unsigned int i; @@ -344,6 +377,9 @@ static int __init test_string_helpers_init(void) for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++) test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1); + /* Test string_get_size() */ + test_string_get_size(); + return -EINVAL; } module_init(test_string_helpers_init); diff --git a/kernel/lib/test_bpf.c b/kernel/lib/test_bpf.c index 80d78c51f..10cd1860e 100644 --- a/kernel/lib/test_bpf.c +++ b/kernel/lib/test_bpf.c @@ -18,9 +18,12 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/filter.h> +#include <linux/bpf.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> +#include <linux/random.h> +#include <linux/highmem.h> /* General test specific settings */ #define MAX_SUBTESTS 3 @@ -54,6 +57,7 @@ /* Flags that can be passed to test cases */ #define FLAG_NO_DATA BIT(0) #define FLAG_EXPECTED_FAIL BIT(1) +#define FLAG_SKB_FRAG BIT(2) enum { CLASSIC = BIT(6), /* Old BPF instructions only. */ @@ -67,6 +71,10 @@ struct bpf_test { union { struct sock_filter insns[MAX_INSNS]; struct bpf_insn insns_int[MAX_INSNS]; + struct { + void *insns; + unsigned int len; + } ptr; } u; __u8 aux; __u8 data[MAX_DATA]; @@ -74,8 +82,358 @@ struct bpf_test { int data_size; __u32 result; } test[MAX_SUBTESTS]; + int (*fill_helper)(struct bpf_test *self); + __u8 frag_data[MAX_DATA]; }; +/* Large test cases need separate allocation and fill handler. 
*/ + +static int bpf_fill_maxinsns1(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + __u32 k = ~0; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len; i++, k--) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, k); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns2(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len; i++) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns3(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + struct rnd_state rnd; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + prandom_seed_state(&rnd, 3141592653589793238ULL); + + for (i = 0; i < len - 1; i++) { + __u32 k = prandom_u32_state(&rnd); + + insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k); + } + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns4(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS + 1; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len; i++) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns5(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0); + + for (i = 1; i < len - 1; i++) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns6(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len - 1; i++) + insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + + SKF_AD_VLAN_TAG_PRESENT); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns7(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len - 4; i++) + insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + + SKF_AD_CPU); + + insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0); + insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + + SKF_AD_CPU); + insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0); + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns8(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i, jmp_off = len - 3; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return 
-ENOMEM; + + insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff); + + for (i = 1; i < len - 1; i++) + insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns9(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct bpf_insn *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2); + insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab); + insn[2] = BPF_EXIT_INSN(); + + for (i = 3; i < len - 2; i++) + insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe); + + insn[len - 2] = BPF_EXIT_INSN(); + insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1)); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns10(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS, hlen = len - 2; + struct bpf_insn *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < hlen / 2; i++) + insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i); + for (i = hlen - 1; i > hlen / 2; i--) + insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i); + + insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1); + insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac); + insn[hlen + 1] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int __bpf_fill_ja(struct bpf_test *self, unsigned int len, + unsigned int plen) +{ + struct sock_filter *insn; + unsigned int rlen; + int i, j; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + rlen = (len % plen) - 1; + + for (i = 0; i + plen < len; i += plen) + for (j = 0; j < plen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, + plen - 1 - j, 0, 0); + for (j = 0; j < rlen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j, + 0, 0); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns11(struct bpf_test *self) +{ + /* Hits 70 passes on x86_64, so cannot get JITed there. */ + return __bpf_fill_ja(self, BPF_MAXINSNS, 68); +} + +static int bpf_fill_ja(struct bpf_test *self) +{ + /* Hits exactly 11 passes on x86_64 JIT. 
*/ + return __bpf_fill_ja(self, 12, 9); +} + +static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len - 1; i += 2) { + insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0); + insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_CPU); + } + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +#define PUSH_CNT 68 +/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */ +static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct bpf_insn *insn; + int i = 0, j, k = 0; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[i++] = BPF_MOV64_REG(R6, R1); +loop: + for (j = 0; j < PUSH_CNT; j++) { + insn[i++] = BPF_LD_ABS(BPF_B, 0); + insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2); + i++; + insn[i++] = BPF_MOV64_REG(R1, R6); + insn[i++] = BPF_MOV64_IMM(R2, 1); + insn[i++] = BPF_MOV64_IMM(R3, 2); + insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + bpf_skb_vlan_push_proto.func - __bpf_call_base); + insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2); + i++; + } + + for (j = 0; j < PUSH_CNT; j++) { + insn[i++] = BPF_LD_ABS(BPF_B, 0); + insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2); + i++; + insn[i++] = BPF_MOV64_REG(R1, R6); + insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + bpf_skb_vlan_pop_proto.func - __bpf_call_base); + insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2); + i++; + } + if (++k < 5) + goto loop; + + for (; i < len - 1; i++) + insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef); + + insn[len - 1] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + static struct bpf_test tests[] = { { "TAX", @@ -1755,7 +2113,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R0, 0x1ffffffffLL), + BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */ BPF_EXIT_INSN(), }, INTERNAL, @@ -1805,6 +2164,2961 @@ static struct bpf_test tests[] = { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, { { 38, 256 } } }, + /* BPF_ALU | BPF_MOV | BPF_X */ + { + "ALU_MOV_X: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_MOV_X: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU64_MOV_X: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOV_X: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + /* BPF_ALU | BPF_MOV | BPF_K */ + { + "ALU_MOV_K: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_MOV_K: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_MOV_K: 
0x0000ffffffff0000 = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x00000000ffffffffLL), + BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MOV_K: dst = 2", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOV_K: dst = 2147483647", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_OR_K: dst = 0x0", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0), + BPF_ALU64_IMM(BPF_MOV, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MOV_K: dst = -1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_ADD | BPF_X */ + { + "ALU_ADD_X: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_X: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU64_ADD_X: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_X: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + /* BPF_ALU | BPF_ADD | BPF_K */ + { + "ALU_ADD_K: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_K: 3 + 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_ADD, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_K: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x00000000ffffffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_K: 3 + 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_ADD, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_K: 1 + 
2147483646 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_ADD, R0, 2147483646), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_ADD_K: 2147483646 + -2147483647 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483646), + BPF_ALU64_IMM(BPF_ADD, R0, -2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + { + "ALU64_ADD_K: 1 + 0 = 1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0x1), + BPF_ALU64_IMM(BPF_ADD, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_SUB | BPF_X */ + { + "ALU_SUB_X: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_SUB_X: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_SUB_X: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_SUB_X: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_SUB | BPF_K */ + { + "ALU_SUB_K: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_SUB, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_SUB_K: 3 - 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_SUB, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_SUB_K: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_SUB_K: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_SUB, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_SUB_K: 3 - 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_SUB, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_SUB_K: 4294967294 - 4294967295 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + { + "ALU64_ADD_K: 2147483646 - 2147483647 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483646), + BPF_ALU64_IMM(BPF_SUB, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + /* BPF_ALU | BPF_MUL | BPF_X */ + { + "ALU_MUL_X: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 3), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { 
+ "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xFFFFFFF0 } }, + }, + { + "ALU_MUL_X: -1 * -1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, -1), + BPF_ALU32_IMM(BPF_MOV, R1, -1), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MUL_X: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 3), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU64_MUL_X: 1 * 2147483647 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + /* BPF_ALU | BPF_MUL | BPF_K */ + { + "ALU_MUL_K: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MUL, R0, 3), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU_MUL_K: 3 * 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MUL, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xFFFFFFF0 } }, + }, + { + "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0x00000000ffffffff), + BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MUL_K: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU64_IMM(BPF_MUL, R0, 3), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU64_MUL_K: 3 * 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MUL, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_MUL_K: 1 * 2147483647 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_MUL, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_MUL_K: 1 * -2147483647 = -2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_MUL, R0, -2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -2147483647 } }, + }, + { + "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_DIV | BPF_X */ + { + "ALU_DIV_X: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_X: 4294967295 / 4294967295 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_X: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), 
+ }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_X: 2147483647 / 2147483647 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), + BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R4, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x0000000000000001LL), + BPF_ALU64_REG(BPF_DIV, R2, R4), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_DIV | BPF_K */ + { + "ALU_DIV_K: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_DIV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_K: 3 / 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_DIV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_K: 4294967295 / 4294967295 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x1UL), + BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_DIV_K: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU64_IMM(BPF_DIV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_K: 3 / 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_DIV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_K: 2147483647 / 2147483647 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU64_IMM(BPF_DIV, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x0000000000000001LL), + BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_MOD | BPF_X */ + { + "ALU_MOD_X: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_MOD_X: 4294967295 % 4294967293 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U), + BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOD_X: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MOD_X: 2147483647 % 2147483645 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483645), + BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + /* BPF_ALU | BPF_MOD | BPF_K */ + { + "ALU_MOD_K: 3 % 2 = 1", + 
.u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_MOD_K: 3 % 1 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOD, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "ALU_MOD_K: 4294967295 % 4294967293 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOD_K: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MOD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MOD_K: 3 % 1 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MOD, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "ALU64_MOD_K: 2147483647 % 2147483645 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU64_IMM(BPF_MOD, R0, 2147483645), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + /* BPF_ALU | BPF_AND | BPF_X */ + { + "ALU_AND_X: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_X: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_ALU | BPF_AND | BPF_K */ + { + "ALU_AND_K: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_AND, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_K: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_AND, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000000000000000LL), + BPF_ALU64_IMM(BPF_AND, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, 
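+ /*
+ * Note: the test harness only compares the 32-bit return value in R0
+ * against the expected result, so tests like the one above that must
+ * verify a full 64-bit value load the expectation into a second
+ * register and compare with JEQ, returning 1 on a match and 2 on a
+ * mismatch.
+ */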
+ INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_OR | BPF_X */ + { + "ALU_OR_X: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_X: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_ALU | BPF_OR | BPF_K */ + { + "ALU_OR_K: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_OR, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_OR_K: 0 & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_K: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_OR, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_OR, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000000000000000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_XOR | BPF_X */ + { + "ALU_XOR_X: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_MOV, R1, 6), + BPF_ALU32_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + 
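+ /*
+ * 32-bit ALU ops operate only on the low 32 bits of the destination
+ * and clear the upper half, so the XOR below leaves R0 = 0xfffffffe.
+ */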
BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_X: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_MOV, R1, 6), + BPF_ALU64_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + /* BPF_ALU | BPF_XOR | BPF_K */ + { + "ALU_XOR_K: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_XOR, R0, 6), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_K: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU64_IMM(BPF_XOR, R0, 6), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_XOR, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffff00000000ffffLL), + BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000000000000000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_LSH | BPF_X */ + { + "ALU_LSH_X: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_LSH_X: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + { + "ALU64_LSH_X: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_LSH_X: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + /* BPF_ALU | BPF_LSH | BPF_K */ + { + "ALU_LSH_K: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + 
BPF_ALU32_IMM(BPF_LSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_LSH_K: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_LSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + { + "ALU64_LSH_K: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_LSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_LSH_K: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_LSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + /* BPF_ALU | BPF_RSH | BPF_X */ + { + "ALU_RSH_X: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_RSH_X: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_X: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_X: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_RSH | BPF_K */ + { + "ALU_RSH_K: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_RSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_RSH_K: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_RSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_K: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU64_IMM(BPF_RSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_K: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU64_IMM(BPF_RSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_ARSH | BPF_X */ + { + "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xff00ff0000000000LL), + BPF_ALU32_IMM(BPF_MOV, R1, 40), + BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff00ff } }, + }, + /* BPF_ALU | BPF_ARSH | BPF_K */ + { + "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xff00ff0000000000LL), + BPF_ALU64_IMM(BPF_ARSH, R0, 40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff00ff } }, + }, + /* BPF_ALU | BPF_NEG */ + { + "ALU_NEG: -(3) = -3", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 3), + BPF_ALU32_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -3 } }, + }, + { + "ALU_NEG: -(-3) = 3", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -3), + BPF_ALU32_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_NEG: -(3) = -3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -3 } }, + }, + { + "ALU64_NEG: -(-3) = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, -3), + BPF_ALU64_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), 
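+ /*
+ * BPF_NEG takes no source operand; the immediate passed to it above
+ * is ignored and conventionally written as 0.
+ */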
+ }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + /* BPF_ALU | BPF_END | BPF_FROM_BE */ + { + "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 16), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_be16(0xcdef) } }, + }, + { + "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 32), + BPF_ALU64_REG(BPF_MOV, R1, R0), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_be32(0x89abcdef) } }, + }, + { + "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 64), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } }, + }, + /* BPF_ALU | BPF_END | BPF_FROM_LE */ + { + "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 16), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_le16(0xcdef) } }, + }, + { + "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 32), + BPF_ALU64_REG(BPF_MOV, R1, R0), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_le32(0x89abcdef) } }, + }, + { + "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 64), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } }, + }, + /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */ + { + "ST_MEM_B: Store/Load byte: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_B, R10, -40, 0xff), + BPF_LDX_MEM(BPF_B, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xff } }, + }, + { + "ST_MEM_B: Store/Load byte: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0x7f), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7f } }, + }, + { + "STX_MEM_B: Store/Load byte: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffLL), + BPF_STX_MEM(BPF_B, R10, R1, -40), + BPF_LDX_MEM(BPF_B, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xff } }, + }, + { + "ST_MEM_H: Store/Load half word: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0xffff), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff } }, + }, + { + "ST_MEM_H: Store/Load half word: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0x7fff), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7fff } }, + }, + { + "STX_MEM_H: Store/Load half word: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffLL), + BPF_STX_MEM(BPF_H, R10, R1, -40), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff } }, + }, + { + "ST_MEM_W: Store/Load word: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_W, 
R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ST_MEM_W: Store/Load word: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7fffffff } }, + }, + { + "STX_MEM_W: Store/Load word: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffffffLL), + BPF_STX_MEM(BPF_W, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ST_MEM_DW: Store/Load double word: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ST_MEM_DW: Store/Load double word: max negative 2", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffff00000000ffffLL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_DW, R2, R10, -40), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ST_MEM_DW: Store/Load double word: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7fffffff } }, + }, + { + "STX_MEM_DW: Store/Load double word: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_STX_MEM(BPF_W, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_STX | BPF_XADD | BPF_W/DW */ + { + "STX_XADD_W: Test: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_W, R10, -40, 0x10), + BPF_STX_XADD(BPF_W, R10, R0, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x22 } }, + }, + { + "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_DW, R10, -40, 0x10), + BPF_STX_XADD(BPF_DW, R10, R0, -40), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x22 } }, + }, + /* BPF_JMP | BPF_EXIT */ + { + "JMP_EXIT", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x4711), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0x4712), + }, + INTERNAL, + { }, + { { 0, 0x4711 } }, + }, + /* BPF_JMP | BPF_JA */ + { + "JMP_JA: Unconditional jump: if (true) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGT | BPF_K */ + { + "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGT, R1, -2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } 
}, + }, + /* BPF_JMP | BPF_JSGE | BPF_K */ + { + "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGE, R1, -2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGT | BPF_K */ + { + "JMP_JGT_K: if (3 > 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGT, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGE | BPF_K */ + { + "JMP_JGE_K: if (3 >= 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGT | BPF_K jump backwards */ + { + "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)", + .u.insns_int = { + BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */ + BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */ + BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JGE_K: if (3 >= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JNE | BPF_K */ + { + "JMP_JNE_K: if (3 != 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JEQ | BPF_K */ + { + "JMP_JEQ_K: if (3 == 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JEQ, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSET | BPF_K */ + { + "JMP_JSET_K: if (0x3 & 0x2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSET_K: if (0x3 & 0xffffffff) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGT | BPF_X */ + { + "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0", 
+ .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGE | BPF_X */ + { + "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGT | BPF_X */ + { + "JMP_JGT_X: if (3 > 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGE | BPF_X */ + { + "JMP_JGE_X: if (3 >= 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JGE_X: if (3 >= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JNE | BPF_X */ + { + "JMP_JNE_X: if (3 != 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JEQ | BPF_X */ + { + "JMP_JEQ_X: if (3 == 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JEQ, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSET | BPF_X */ + { + "JMP_JSET_X: if (0x3 & 0x2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSET_X: if (0x3 & 0xffffffff) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 0xffffffff), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JA: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_ja, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Maximum possible literals", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xffffffff } }, + .fill_helper = bpf_fill_maxinsns1, + }, + { /* Mainly checking JIT here. 
*/ + "BPF_MAXINSNS: Single literal", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xfefefefe } }, + .fill_helper = bpf_fill_maxinsns2, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Run/add until end", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0x947bf368 } }, + .fill_helper = bpf_fill_maxinsns3, + }, + { + "BPF_MAXINSNS: Too many instructions", + { }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = bpf_fill_maxinsns4, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Very long jump", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xabababab } }, + .fill_helper = bpf_fill_maxinsns5, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Ctx heavy transformations", + { }, + CLASSIC, + { }, + { + { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, + { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } + }, + .fill_helper = bpf_fill_maxinsns6, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Call heavy transformations", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 1, 0 }, { 10, 0 } }, + .fill_helper = bpf_fill_maxinsns7, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Jump heavy test", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xffffffff } }, + .fill_helper = bpf_fill_maxinsns8, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Very long jump backwards", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0xcbababab } }, + .fill_helper = bpf_fill_maxinsns9, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Edge hopping nuthouse", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0xabababac } }, + .fill_helper = bpf_fill_maxinsns10, + }, + { + "BPF_MAXINSNS: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_maxinsns11, + }, + { + "BPF_MAXINSNS: ld_abs+get_processor_id", + { }, + CLASSIC, + { }, + { { 1, 0xbee } }, + .fill_helper = bpf_fill_ld_abs_get_processor_id, + }, + { + "BPF_MAXINSNS: ld_abs+vlan_push/pop", + { }, + INTERNAL, + { 0x34 }, + { { 1, 0xbef } }, + .fill_helper = bpf_fill_ld_abs_vlan_push_pop, + }, + /* + * LD_IND / LD_ABS on fragmented SKBs + */ + { + "LD_IND byte frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x42} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_IND halfword frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x4344} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_IND word frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x21071983} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_IND halfword mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x0519} }, + .frag_data = { 0x19, 0x82 }, + }, + { + "LD_IND word mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2), + 
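+ /*
+ * X = 0x40 is the length of the linear head, so this word load at
+ * X - 2 straddles the head bytes at 0x3e/0x3f (0x25, 0x05) and the
+ * first two fragment bytes (0x19, 0x82), yielding 0x25051982.
+ */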
BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x25051982} }, + .frag_data = { 0x19, 0x82 }, + }, + { + "LD_ABS byte frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x42} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_ABS halfword frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x4344} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_ABS word frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x21071983} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_ABS halfword mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x0519} }, + .frag_data = { 0x19, 0x82 }, + }, + { + "LD_ABS word mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x25051982} }, + .frag_data = { 0x19, 0x82 }, + }, + /* + * LD_IND / LD_ABS on non fragmented SKBs + */ + { + /* + * this tests that the JIT/interpreter correctly resets X + * before using it in an LD_IND instruction. + */ + "LD_IND byte default X", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x1] = 0x42 }, + { {0x40, 0x42 } }, + }, + { + "LD_IND byte positive offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 } }, + }, + { + "LD_IND byte negative offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x05 } }, + }, + { + "LD_IND halfword positive offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + }, + { {0x40, 0xdd88 } }, + }, + { + "LD_IND halfword negative offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + }, + { {0x40, 0xbb66 } }, + }, + { + "LD_IND halfword unaligned", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + }, + { {0x40, 0x66cc } }, + }, + { + "LD_IND word positive offset", + .u.insns = { + 
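+ /*
+ * Classic BPF LD_IND loads from packet offset X + k: X is set to
+ * 0x20 below and k is 0x4, so the word at offset 0x24 (0xee99ffaa)
+ * is returned.
+ */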
BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xee99ffaa } }, + }, + { + "LD_IND word negative offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xaa55bb66 } }, + }, + { + "LD_IND word unaligned (addr & 3 == 2)", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xbb66cc77 } }, + }, + { + "LD_IND word unaligned (addr & 3 == 1)", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x55bb66cc } }, + }, + { + "LD_IND word unaligned (addr & 3 == 3)", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x66cc77dd } }, + }, + { + "LD_ABS byte", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xcc } }, + }, + { + "LD_ABS halfword", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xdd88 } }, + }, + { + "LD_ABS halfword unaligned", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x99ff } }, + }, + { + "LD_ABS word", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xaa55bb66 } }, + }, + { + "LD_ABS word unaligned (addr & 3 == 
2)", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xdd88ee99 } }, + }, + { + "LD_ABS word unaligned (addr & 3 == 1)", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x77dd88ee } }, + }, + { + "LD_ABS word unaligned (addr & 3 == 3)", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x88ee99ff } }, + }, + /* + * verify that the interpreter or JIT correctly sets A and X + * to 0. + */ + { + "ADD default X", + .u.insns = { + /* + * A = 0x42 + * A = A + X + * ret A + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, + }, + { + "ADD default A", + .u.insns = { + /* + * A = A + 0x42 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, + }, + { + "SUB default X", + .u.insns = { + /* + * A = 0x66 + * A = A - X + * ret A + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x66), + BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x66 } }, + }, + { + "SUB default A", + .u.insns = { + /* + * A = A - -0x66 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x66 } }, + }, + { + "MUL default X", + .u.insns = { + /* + * A = 0x42 + * A = A * X + * ret A + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, + }, + { + "MUL default A", + .u.insns = { + /* + * A = A * 0x66 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, + }, + { + "DIV default X", + .u.insns = { + /* + * A = 0x42 + * A = A / X ; this halt the filter execution if X is 0 + * ret 0x42 + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_K, 0x42), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, + }, + { + "DIV default A", + .u.insns = { + /* + * A = A / 1 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, + }, + { + "MOD default X", + .u.insns = { + /* + * A = 0x42 + * A = A mod X ; this halt the filter execution if X is 0 + * ret 0x42 + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_K, 0x42), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, + }, + { + "MOD default A", + .u.insns = { + /* + * A = A mod 1 + * ret A + */ + BPF_STMT(BPF_ALU | 
BPF_MOD | BPF_K, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, + }, + { + "JMP EQ default A", + .u.insns = { + /* + * cmp A, 0x0, 0, 1 + * ret 0x42 + * ret 0x66 + */ + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 0x42), + BPF_STMT(BPF_RET | BPF_K, 0x66), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, + }, + { + "JMP EQ default X", + .u.insns = { + /* + * A = 0x0 + * cmp A, X, 0, 1 + * ret 0x42 + * ret 0x66 + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 0x42), + BPF_STMT(BPF_RET | BPF_K, 0x66), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, + }, }; static struct net_device dev; @@ -1840,6 +5154,9 @@ static struct sk_buff *populate_skb(char *buf, int size) static void *generate_test_data(struct bpf_test *test, int sub) { + struct sk_buff *skb; + struct page *page; + if (test->aux & FLAG_NO_DATA) return NULL; @@ -1847,7 +5164,38 @@ static void *generate_test_data(struct bpf_test *test, int sub) * subtests generate skbs of different sizes based on * the same data. */ - return populate_skb(test->data, test->test[sub].data_size); + skb = populate_skb(test->data, test->test[sub].data_size); + if (!skb) + return NULL; + + if (test->aux & FLAG_SKB_FRAG) { + /* + * when the test requires a fragmented skb, add a + * single fragment to the skb, filled with + * test->frag_data. + */ + void *ptr; + + page = alloc_page(GFP_KERNEL); + + if (!page) + goto err_kfree_skb; + + ptr = kmap(page); + if (!ptr) + goto err_free_page; + memcpy(ptr, test->frag_data, MAX_DATA); + kunmap(page); + skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA); + } + + return skb; + +err_free_page: + __free_page(page); +err_kfree_skb: + kfree_skb(skb); + return NULL; } static void release_test_data(const struct bpf_test *test, void *data) @@ -1858,10 +5206,15 @@ static void release_test_data(const struct bpf_test *test, void *data) kfree_skb(data); } -static int probe_filter_length(struct sock_filter *fp) +static int filter_length(int which) { - int len = 0; + struct sock_filter *fp; + int len; + if (tests[which].fill_helper) + return tests[which].u.ptr.len; + + fp = tests[which].u.insns; for (len = MAX_INSNS - 1; len > 0; --len) if (fp[len].code != 0 || fp[len].k != 0) break; @@ -1869,16 +5222,25 @@ static int probe_filter_length(struct sock_filter *fp) return len + 1; } +static void *filter_pointer(int which) +{ + if (tests[which].fill_helper) + return tests[which].u.ptr.insns; + else + return tests[which].u.insns; +} + static struct bpf_prog *generate_filter(int which, int *err) { - struct bpf_prog *fp; - struct sock_fprog_kern fprog; - unsigned int flen = probe_filter_length(tests[which].u.insns); __u8 test_type = tests[which].aux & TEST_TYPE_MASK; + unsigned int flen = filter_length(which); + void *fptr = filter_pointer(which); + struct sock_fprog_kern fprog; + struct bpf_prog *fp; switch (test_type) { case CLASSIC: - fprog.filter = tests[which].u.insns; + fprog.filter = fptr; fprog.len = flen; *err = bpf_prog_create(&fp, &fprog); @@ -1914,8 +5276,9 @@ static struct bpf_prog *generate_filter(int which, int *err) } fp->len = flen; - memcpy(fp->insnsi, tests[which].u.insns_int, - fp->len * sizeof(struct bpf_insn)); + /* Type doesn't really matter here as long as it's not unspec. 
*/ + fp->type = BPF_PROG_TYPE_SOCKET_FILTER; + memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); bpf_prog_select_runtime(fp); break; @@ -1945,14 +5308,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data, u64 start, finish; int ret = 0, i; - start = ktime_to_us(ktime_get()); + start = ktime_get_ns(); for (i = 0; i < runs; i++) ret = BPF_PROG_RUN(fp, data); - finish = ktime_to_us(ktime_get()); + finish = ktime_get_ns(); - *duration = (finish - start) * 1000ULL; + *duration = finish - start; do_div(*duration, runs); return ret; @@ -1972,6 +5335,11 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) break; data = generate_test_data(test, i); + if (!data && !(test->aux & FLAG_NO_DATA)) { + pr_cont("data generation failed "); + err_cnt++; + break; + } ret = __run_one(fp, data, runs, &duration); release_test_data(test, data); @@ -1987,14 +5355,109 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) return err_cnt; } +static char test_name[64]; +module_param_string(test_name, test_name, sizeof(test_name), 0); + +static int test_id = -1; +module_param(test_id, int, 0); + +static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 }; +module_param_array(test_range, int, NULL, 0); + +static __init int find_test_index(const char *test_name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!strcmp(tests[i].descr, test_name)) + return i; + } + return -1; +} + +static __init int prepare_bpf_tests(void) +{ + int i; + + if (test_id >= 0) { + /* + * if a test_id was specified, use test_range to + * cover only that test. + */ + if (test_id >= ARRAY_SIZE(tests)) { + pr_err("test_bpf: invalid test_id specified.\n"); + return -EINVAL; + } + + test_range[0] = test_id; + test_range[1] = test_id; + } else if (*test_name) { + /* + * if a test_name was specified, find it and setup + * test_range to cover only that test. + */ + int idx = find_test_index(test_name); + + if (idx < 0) { + pr_err("test_bpf: no test named '%s' found.\n", + test_name); + return -EINVAL; + } + test_range[0] = idx; + test_range[1] = idx; + } else { + /* + * check that the supplied test_range is valid. 
+ */ + if (test_range[0] >= ARRAY_SIZE(tests) || + test_range[1] >= ARRAY_SIZE(tests) || + test_range[0] < 0 || test_range[1] < 0) { + pr_err("test_bpf: test_range is out of bound.\n"); + return -EINVAL; + } + + if (test_range[1] < test_range[0]) { + pr_err("test_bpf: test_range is ending before it starts.\n"); + return -EINVAL; + } + } + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (tests[i].fill_helper && + tests[i].fill_helper(&tests[i]) < 0) + return -ENOMEM; + } + + return 0; +} + +static __init void destroy_bpf_tests(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (tests[i].fill_helper) + kfree(tests[i].u.ptr.insns); + } +} + +static bool exclude_test(int test_id) +{ + return test_id < test_range[0] || test_id > test_range[1]; +} + static __init int test_bpf(void) { int i, err_cnt = 0, pass_cnt = 0; + int jit_cnt = 0, run_cnt = 0; for (i = 0; i < ARRAY_SIZE(tests); i++) { struct bpf_prog *fp; int err; + if (exclude_test(i)) + continue; + pr_info("#%d %s ", i, tests[i].descr); fp = generate_filter(i, &err); @@ -2006,6 +5469,13 @@ static __init int test_bpf(void) return err; } + + pr_cont("jited:%u ", fp->jited); + + run_cnt++; + if (fp->jited) + jit_cnt++; + err = run_one(fp, &tests[i]); release_filter(fp, i); @@ -2018,13 +5488,24 @@ static __init int test_bpf(void) } } - pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt); + pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n", + pass_cnt, err_cnt, jit_cnt, run_cnt); + return err_cnt ? -EINVAL : 0; } static int __init test_bpf_init(void) { - return test_bpf(); + int ret; + + ret = prepare_bpf_tests(); + if (ret < 0) + return ret; + + ret = test_bpf(); + + destroy_bpf_tests(); + return ret; } static void __exit test_bpf_exit(void) diff --git a/kernel/lib/test_kasan.c b/kernel/lib/test_kasan.c index 098c08edd..c32f3b004 100644 --- a/kernel/lib/test_kasan.c +++ b/kernel/lib/test_kasan.c @@ -65,7 +65,7 @@ static noinline void __init kmalloc_node_oob_right(void) kfree(ptr); } -static noinline void __init kmalloc_large_oob_rigth(void) +static noinline void __init kmalloc_large_oob_right(void) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; @@ -114,7 +114,7 @@ static noinline void __init kmalloc_oob_krealloc_less(void) kfree(ptr1); return; } - ptr2[size1] = 'x'; + ptr2[size2] = 'x'; kfree(ptr2); } @@ -138,6 +138,71 @@ static noinline void __init kmalloc_oob_16(void) kfree(ptr2); } +static noinline void __init kmalloc_oob_memset_2(void) +{ + char *ptr; + size_t size = 8; + + pr_info("out-of-bounds in memset2\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + memset(ptr+7, 0, 2); + kfree(ptr); +} + +static noinline void __init kmalloc_oob_memset_4(void) +{ + char *ptr; + size_t size = 8; + + pr_info("out-of-bounds in memset4\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + memset(ptr+5, 0, 4); + kfree(ptr); +} + + +static noinline void __init kmalloc_oob_memset_8(void) +{ + char *ptr; + size_t size = 8; + + pr_info("out-of-bounds in memset8\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + memset(ptr+1, 0, 8); + kfree(ptr); +} + +static noinline void __init kmalloc_oob_memset_16(void) +{ + char *ptr; + size_t size = 16; + + pr_info("out-of-bounds in memset16\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + memset(ptr+1, 0, 16); + kfree(ptr); +} + static noinline void __init 
kmalloc_oob_in_memset(void) { char *ptr; @@ -259,11 +324,15 @@ static int __init kmalloc_tests_init(void) kmalloc_oob_right(); kmalloc_oob_left(); kmalloc_node_oob_right(); - kmalloc_large_oob_rigth(); + kmalloc_large_oob_right(); kmalloc_oob_krealloc_more(); kmalloc_oob_krealloc_less(); kmalloc_oob_16(); kmalloc_oob_in_memset(); + kmalloc_oob_memset_2(); + kmalloc_oob_memset_4(); + kmalloc_oob_memset_8(); + kmalloc_oob_memset_16(); kmalloc_uaf(); kmalloc_uaf_memset(); kmalloc_uaf2(); diff --git a/kernel/lib/test_printf.c b/kernel/lib/test_printf.c new file mode 100644 index 000000000..c5a666af9 --- /dev/null +++ b/kernel/lib/test_printf.c @@ -0,0 +1,362 @@ +/* + * Test cases for printf facility. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <linux/socket.h> +#include <linux/in.h> + +#define BUF_SIZE 256 +#define FILL_CHAR '$' + +#define PTR1 ((void*)0x01234567) +#define PTR2 ((void*)(long)(int)0xfedcba98) + +#if BITS_PER_LONG == 64 +#define PTR1_ZEROES "000000000" +#define PTR1_SPACES " " +#define PTR1_STR "1234567" +#define PTR2_STR "fffffffffedcba98" +#define PTR_WIDTH 16 +#else +#define PTR1_ZEROES "0" +#define PTR1_SPACES " " +#define PTR1_STR "1234567" +#define PTR2_STR "fedcba98" +#define PTR_WIDTH 8 +#endif +#define PTR_WIDTH_STR stringify(PTR_WIDTH) + +static unsigned total_tests __initdata; +static unsigned failed_tests __initdata; +static char *test_buffer __initdata; + +static int __printf(4, 0) __init +do_test(int bufsize, const char *expect, int elen, + const char *fmt, va_list ap) +{ + va_list aq; + int ret, written; + + total_tests++; + + memset(test_buffer, FILL_CHAR, BUF_SIZE); + va_copy(aq, ap); + ret = vsnprintf(test_buffer, bufsize, fmt, aq); + va_end(aq); + + if (ret != elen) { + pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n", + bufsize, fmt, ret, elen); + return 1; + } + + if (!bufsize) { + if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE)) { + pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n", + fmt); + return 1; + } + return 0; + } + + written = min(bufsize-1, elen); + if (test_buffer[written]) { + pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n", + bufsize, fmt); + return 1; + } + + if (memcmp(test_buffer, expect, written)) { + pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n", + bufsize, fmt, test_buffer, written, expect); + return 1; + } + return 0; +} + +static void __printf(3, 4) __init +__test(const char *expect, int elen, const char *fmt, ...) +{ + va_list ap; + int rand; + char *p; + + BUG_ON(elen >= BUF_SIZE); + + va_start(ap, fmt); + + /* + * Every fmt+args is subjected to four tests: Three where we + * tell vsnprintf varying buffer sizes (plenty, not quite + * enough and 0), and then we also test that kvasprintf would + * be able to print it as expected. + */ + failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap); + rand = 1 + prandom_u32_max(elen+1); + /* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */ + failed_tests += do_test(rand, expect, elen, fmt, ap); + failed_tests += do_test(0, expect, elen, fmt, ap); + + p = kvasprintf(GFP_KERNEL, fmt, ap); + if (p) { + if (memcmp(p, expect, elen+1)) { + pr_warn("kvasprintf(..., \"%s\", ...) 
returned '%s', expected '%s'\n", + fmt, p, expect); + failed_tests++; + } + kfree(p); + } + va_end(ap); +} + +#define test(expect, fmt, ...) \ + __test(expect, strlen(expect), fmt, ##__VA_ARGS__) + +static void __init +test_basic(void) +{ + /* Work around annoying "warning: zero-length gnu_printf format string". */ + char nul = '\0'; + + test("", &nul); + test("100%", "100%%"); + test("xxx%yyy", "xxx%cyyy", '%'); + __test("xxx\0yyy", 7, "xxx%cyyy", '\0'); +} + +static void __init +test_number(void) +{ + test("0x1234abcd ", "%#-12x", 0x1234abcd); + test(" 0x1234abcd", "%#12x", 0x1234abcd); + test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234); +} + +static void __init +test_string(void) +{ + test("", "%s%.0s", "", "123"); + test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456"); + test("1 | 2|3 | 4|5 ", "%-3s|%3s|%-*s|%*s|%*s", "1", "2", 3, "3", 3, "4", -3, "5"); + /* + * POSIX and C99 say that a missing precision should be + * treated as a precision of 0. However, the kernel's printf + * implementation treats this case as if the . wasn't + * present. Let's add a test case documenting the current + * behaviour; should anyone ever feel the need to follow the + * standards more closely, this can be revisited. + */ + test("a||", "%.s|%.0s|%.*s", "a", "b", 0, "c"); + test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c"); +} + +static void __init +plain(void) +{ + test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2); + /* + * The field width is overloaded for some %p extensions to + * pass another piece of information. For plain pointers, the + * behaviour is slightly odd: One cannot pass either the 0 + * flag nor a precision to %p without gcc complaining, and if + * one explicitly gives a field width, the number is no longer + * zero-padded. + */ + test("|" PTR1_STR PTR1_SPACES " | " PTR1_SPACES PTR1_STR "|", + "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1); + test("|" PTR2_STR " | " PTR2_STR "|", + "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2); + + /* + * Unrecognized %p extensions are treated as plain %p, but the + * alphanumeric suffix is ignored (that is, does not occur in + * the output.) 
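+ * For instance, "%p0y" below prints exactly what a plain "%p" would
+ * (the pointer value, zero-padded to the native pointer width), and
+ * the "0y" suffix produces no output at all.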
+ */ + test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1); + test("|"PTR2_STR"|", "|%p0y|", PTR2); +} + +static void __init +symbol_ptr(void) +{ +} + +static void __init +kernel_ptr(void) +{ +} + +static void __init +struct_resource(void) +{ +} + +static void __init +addr(void) +{ +} + +static void __init +escaped_str(void) +{ +} + +static void __init +hex_string(void) +{ + const char buf[3] = {0xc0, 0xff, 0xee}; + + test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee", + "%3ph|%3phC|%3phD|%3phN", buf, buf, buf, buf); + test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee", + "%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf); +} + +static void __init +mac(void) +{ + const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05}; + + test("2d:48:d6:fc:7a:05", "%pM", addr); + test("05:7a:fc:d6:48:2d", "%pMR", addr); + test("2d-48-d6-fc-7a-05", "%pMF", addr); + test("2d48d6fc7a05", "%pm", addr); + test("057afcd6482d", "%pmR", addr); +} + +static void __init +ip4(void) +{ + struct sockaddr_in sa; + + sa.sin_family = AF_INET; + sa.sin_port = cpu_to_be16(12345); + sa.sin_addr.s_addr = cpu_to_be32(0x7f000001); + + test("127.000.000.001|127.0.0.1", "%pi4|%pI4", &sa.sin_addr, &sa.sin_addr); + test("127.000.000.001|127.0.0.1", "%piS|%pIS", &sa, &sa); + sa.sin_addr.s_addr = cpu_to_be32(0x01020304); + test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa); +} + +static void __init +ip6(void) +{ +} + +static void __init +ip(void) +{ + ip4(); + ip6(); +} + +static void __init +uuid(void) +{ + const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, + 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; + + test("00010203-0405-0607-0809-0a0b0c0d0e0f", "%pUb", uuid); + test("00010203-0405-0607-0809-0A0B0C0D0E0F", "%pUB", uuid); + test("03020100-0504-0706-0809-0a0b0c0d0e0f", "%pUl", uuid); + test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid); +} + +static void __init +dentry(void) +{ +} + +static void __init +struct_va_format(void) +{ +} + +static void __init +struct_clk(void) +{ +} + +static void __init +bitmap(void) +{ + DECLARE_BITMAP(bits, 20); + const int primes[] = {2,3,5,7,11,13,17,19}; + int i; + + bitmap_zero(bits, 20); + test("00000|00000", "%20pb|%*pb", bits, 20, bits); + test("|", "%20pbl|%*pbl", bits, 20, bits); + + for (i = 0; i < ARRAY_SIZE(primes); ++i) + set_bit(primes[i], bits); + test("a28ac|a28ac", "%20pb|%*pb", bits, 20, bits); + test("2-3,5,7,11,13,17,19|2-3,5,7,11,13,17,19", "%20pbl|%*pbl", bits, 20, bits); + + bitmap_fill(bits, 20); + test("fffff|fffff", "%20pb|%*pb", bits, 20, bits); + test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits); +} + +static void __init +netdev_features(void) +{ +} + +static void __init +test_pointer(void) +{ + plain(); + symbol_ptr(); + kernel_ptr(); + struct_resource(); + addr(); + escaped_str(); + hex_string(); + mac(); + ip(); + uuid(); + dentry(); + struct_va_format(); + struct_clk(); + bitmap(); + netdev_features(); +} + +static int __init +test_printf_init(void) +{ + test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL); + if (!test_buffer) + return -ENOMEM; + + test_basic(); + test_number(); + test_string(); + test_pointer(); + + kfree(test_buffer); + + if (failed_tests == 0) + pr_info("all %u tests passed\n", total_tests); + else + pr_warn("failed %u out of %u tests\n", failed_tests, total_tests); + + return failed_tests ? 
-EINVAL : 0; +} + +module_init(test_printf_init); + +MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>"); +MODULE_LICENSE("GPL"); diff --git a/kernel/lib/test_rhashtable.c b/kernel/lib/test_rhashtable.c index b2957540d..8c1ad1ced 100644 --- a/kernel/lib/test_rhashtable.c +++ b/kernel/lib/test_rhashtable.c @@ -1,14 +1,9 @@ /* * Resizable, Scalable, Concurrent Hash Table * - * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * - * Based on the following paper: - * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf - * - * Code partially derived from nft_hash - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -21,25 +16,56 @@ #include <linux/init.h> #include <linux/jhash.h> #include <linux/kernel.h> +#include <linux/kthread.h> #include <linux/module.h> #include <linux/rcupdate.h> #include <linux/rhashtable.h> +#include <linux/semaphore.h> #include <linux/slab.h> +#include <linux/sched.h> +#include <linux/vmalloc.h> + +#define MAX_ENTRIES 1000000 +#define TEST_INSERT_FAIL INT_MAX + +static int entries = 50000; +module_param(entries, int, 0); +MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)"); + +static int runs = 4; +module_param(runs, int, 0); +MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); +static int max_size = 65536; +module_param(max_size, int, 0); +MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)"); -#define TEST_HT_SIZE 8 -#define TEST_ENTRIES 2048 -#define TEST_PTR ((void *) 0xdeadbeef) -#define TEST_NEXPANDS 4 +static bool shrinking = false; +module_param(shrinking, bool, 0); +MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)"); + +static int size = 8; +module_param(size, int, 0); +MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)"); + +static int tcount = 10; +module_param(tcount, int, 0); +MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)"); struct test_obj { - void *ptr; int value; struct rhash_head node; }; -static const struct rhashtable_params test_rht_params = { - .nelem_hint = TEST_HT_SIZE, +struct thread_data { + int id; + struct task_struct *task; + struct test_obj *objs; +}; + +static struct test_obj array[MAX_ENTRIES]; + +static struct rhashtable_params test_rht_params = { .head_offset = offsetof(struct test_obj, node), .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), @@ -47,15 +73,21 @@ static const struct rhashtable_params test_rht_params = { .nulls_base = (3U << RHT_BASE_SHIFT), }; +static struct semaphore prestart_sem; +static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); + static int __init test_rht_lookup(struct rhashtable *ht) { unsigned int i; - for (i = 0; i < TEST_ENTRIES * 2; i++) { + for (i = 0; i < entries * 2; i++) { struct test_obj *obj; bool expected = !(i % 2); u32 key = i; + if (array[i / 2].value == TEST_INSERT_FAIL) + expected = false; + obj = rhashtable_lookup_fast(ht, &key, test_rht_params); if (expected && !obj) { @@ -66,140 +98,302 @@ static int __init test_rht_lookup(struct rhashtable *ht) key); return -EEXIST; } else if (expected && obj) { - if (obj->ptr != TEST_PTR || obj->value != i) { - pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", - obj->ptr, TEST_PTR, obj->value, i); + if (obj->value != i) { 
+ pr_warn("Test failed: Lookup value mismatch %u!=%u\n", + obj->value, i); return -EINVAL; } } + + cond_resched_rcu(); } return 0; } -static void test_bucket_stats(struct rhashtable *ht, bool quiet) +static void test_bucket_stats(struct rhashtable *ht) { - unsigned int cnt, rcu_cnt, i, total = 0; + unsigned int err, total = 0, chain_len = 0; + struct rhashtable_iter hti; struct rhash_head *pos; - struct test_obj *obj; - struct bucket_table *tbl; - tbl = rht_dereference_rcu(ht->tbl, ht); - for (i = 0; i < tbl->size; i++) { - rcu_cnt = cnt = 0; + err = rhashtable_walk_init(ht, &hti); + if (err) { + pr_warn("Test failed: allocation error"); + return; + } - if (!quiet) - pr_info(" [%#4x/%u]", i, tbl->size); + err = rhashtable_walk_start(&hti); + if (err && err != -EAGAIN) { + pr_warn("Test failed: iterator failed: %d\n", err); + return; + } - rht_for_each_entry_rcu(obj, pos, tbl, i, node) { - cnt++; - total++; - if (!quiet) - pr_cont(" [%p],", obj); + while ((pos = rhashtable_walk_next(&hti))) { + if (PTR_ERR(pos) == -EAGAIN) { + pr_info("Info: encountered resize\n"); + chain_len++; + continue; + } else if (IS_ERR(pos)) { + pr_warn("Test failed: rhashtable_walk_next() error: %ld\n", + PTR_ERR(pos)); + break; } - rht_for_each_entry_rcu(obj, pos, tbl, i, node) - rcu_cnt++; - - if (rcu_cnt != cnt) - pr_warn("Test failed: Chain count mismach %d != %d", - cnt, rcu_cnt); - - if (!quiet) - pr_cont("\n [%#x] first element: %p, chain length: %u\n", - i, tbl->buckets[i], cnt); + total++; } - pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", - total, atomic_read(&ht->nelems), TEST_ENTRIES); + rhashtable_walk_stop(&hti); + rhashtable_walk_exit(&hti); - if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) + pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n", + total, atomic_read(&ht->nelems), entries, chain_len); + + if (total != atomic_read(&ht->nelems) || total != entries) pr_warn("Test failed: Total count mismatch ^^^"); } -static int __init test_rhashtable(struct rhashtable *ht) +static s64 __init test_rhashtable(struct rhashtable *ht) { - struct bucket_table *tbl; struct test_obj *obj; - struct rhash_head *pos, *next; int err; - unsigned int i; + unsigned int i, insert_fails = 0; + s64 start, end; /* * Insertion Test: - * Insert TEST_ENTRIES into table with all keys even numbers + * Insert entries into table with all keys even numbers */ - pr_info(" Adding %d keys\n", TEST_ENTRIES); - for (i = 0; i < TEST_ENTRIES; i++) { - struct test_obj *obj; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (!obj) { - err = -ENOMEM; - goto error; - } + pr_info(" Adding %d keys\n", entries); + start = ktime_get_ns(); + for (i = 0; i < entries; i++) { + struct test_obj *obj = &array[i]; - obj->ptr = TEST_PTR; obj->value = i * 2; err = rhashtable_insert_fast(ht, &obj->node, test_rht_params); - if (err) { - kfree(obj); - goto error; + if (err == -ENOMEM || err == -EBUSY) { + /* Mark failed inserts but continue */ + obj->value = TEST_INSERT_FAIL; + insert_fails++; + } else if (err) { + return err; } + + cond_resched(); } + if (insert_fails) + pr_info(" %u insertions failed due to memory pressure\n", + insert_fails); + + test_bucket_stats(ht); rcu_read_lock(); - test_bucket_stats(ht, true); test_rht_lookup(ht); rcu_read_unlock(); - rcu_read_lock(); - test_bucket_stats(ht, true); - rcu_read_unlock(); + test_bucket_stats(ht); - pr_info(" Deleting %d keys\n", TEST_ENTRIES); - for (i = 0; i < TEST_ENTRIES; i++) { + pr_info(" Deleting %d keys\n", entries); + for (i 
= 0; i < entries; i++) { u32 key = i * 2; - obj = rhashtable_lookup_fast(ht, &key, test_rht_params); - BUG_ON(!obj); + if (array[i].value != TEST_INSERT_FAIL) { + obj = rhashtable_lookup_fast(ht, &key, test_rht_params); + BUG_ON(!obj); + + rhashtable_remove_fast(ht, &obj->node, test_rht_params); + } - rhashtable_remove_fast(ht, &obj->node, test_rht_params); - kfree(obj); + cond_resched(); } - return 0; + end = ktime_get_ns(); + pr_info(" Duration of test: %lld ns\n", end - start); + + return end - start; +} -error: - tbl = rht_dereference_rcu(ht->tbl, ht); - for (i = 0; i < tbl->size; i++) - rht_for_each_entry_safe(obj, pos, next, tbl, i, node) - kfree(obj); +static struct rhashtable ht; +static int thread_lookup_test(struct thread_data *tdata) +{ + int i, err = 0; + + for (i = 0; i < entries; i++) { + struct test_obj *obj; + int key = (tdata->id << 16) | i; + + obj = rhashtable_lookup_fast(&ht, &key, test_rht_params); + if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) { + pr_err(" found unexpected object %d\n", key); + err++; + } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) { + pr_err(" object %d not found!\n", key); + err++; + } else if (obj && (obj->value != key)) { + pr_err(" wrong object returned (got %d, expected %d)\n", + obj->value, key); + err++; + } + } return err; } -static struct rhashtable ht; +static int threadfunc(void *data) +{ + int i, step, err = 0, insert_fails = 0; + struct thread_data *tdata = data; + + up(&prestart_sem); + if (down_interruptible(&startup_sem)) + pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); + + for (i = 0; i < entries; i++) { + tdata->objs[i].value = (tdata->id << 16) | i; + err = rhashtable_insert_fast(&ht, &tdata->objs[i].node, + test_rht_params); + if (err == -ENOMEM || err == -EBUSY) { + tdata->objs[i].value = TEST_INSERT_FAIL; + insert_fails++; + } else if (err) { + pr_err(" thread[%d]: rhashtable_insert_fast failed\n", + tdata->id); + goto out; + } + } + if (insert_fails) + pr_info(" thread[%d]: %d insert failures\n", + tdata->id, insert_fails); + + err = thread_lookup_test(tdata); + if (err) { + pr_err(" thread[%d]: rhashtable_lookup_test failed\n", + tdata->id); + goto out; + } + + for (step = 10; step > 0; step--) { + for (i = 0; i < entries; i += step) { + if (tdata->objs[i].value == TEST_INSERT_FAIL) + continue; + err = rhashtable_remove_fast(&ht, &tdata->objs[i].node, + test_rht_params); + if (err) { + pr_err(" thread[%d]: rhashtable_remove_fast failed\n", + tdata->id); + goto out; + } + tdata->objs[i].value = TEST_INSERT_FAIL; + } + err = thread_lookup_test(tdata); + if (err) { + pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n", + tdata->id); + goto out; + } + } +out: + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + return err; +} static int __init test_rht_init(void) { - int err; + int i, err, started_threads = 0, failed_threads = 0; + u64 total_time = 0; + struct thread_data *tdata; + struct test_obj *objs; + + entries = min(entries, MAX_ENTRIES); + + test_rht_params.automatic_shrinking = shrinking; + test_rht_params.max_size = max_size; + test_rht_params.nelem_hint = size; + + pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", + size, max_size, shrinking); + + for (i = 0; i < runs; i++) { + s64 time; + + pr_info("Test %02d:\n", i); + memset(&array, 0, sizeof(array)); + err = rhashtable_init(&ht, &test_rht_params); + if (err < 0) { + pr_warn("Test failed: Unable to initialize hashtable: %d\n", + err); + continue; + } - 
pr_info("Running resizable hashtable tests...\n"); + time = test_rhashtable(&ht); + rhashtable_destroy(&ht); + if (time < 0) { + pr_warn("Test failed: return code %lld\n", time); + return -EINVAL; + } + + total_time += time; + } + + do_div(total_time, runs); + pr_info("Average test time: %llu\n", total_time); + + if (!tcount) + return 0; + + pr_info("Testing concurrent rhashtable access from %d threads\n", + tcount); + sema_init(&prestart_sem, 1 - tcount); + tdata = vzalloc(tcount * sizeof(struct thread_data)); + if (!tdata) + return -ENOMEM; + objs = vzalloc(tcount * entries * sizeof(struct test_obj)); + if (!objs) { + vfree(tdata); + return -ENOMEM; + } err = rhashtable_init(&ht, &test_rht_params); if (err < 0) { pr_warn("Test failed: Unable to initialize hashtable: %d\n", err); - return err; + vfree(tdata); + vfree(objs); + return -EINVAL; } - - err = test_rhashtable(&ht); - + for (i = 0; i < tcount; i++) { + tdata[i].id = i; + tdata[i].objs = objs + i * entries; + tdata[i].task = kthread_run(threadfunc, &tdata[i], + "rhashtable_thrad[%d]", i); + if (IS_ERR(tdata[i].task)) + pr_err(" kthread_run failed for thread %d\n", i); + else + started_threads++; + } + if (down_interruptible(&prestart_sem)) + pr_err(" down interruptible failed\n"); + for (i = 0; i < tcount; i++) + up(&startup_sem); + for (i = 0; i < tcount; i++) { + if (IS_ERR(tdata[i].task)) + continue; + if ((err = kthread_stop(tdata[i].task))) { + pr_warn("Test failed: thread %d returned: %d\n", + i, err); + failed_threads++; + } + } + pr_info("Started %d threads, %d failed\n", + started_threads, failed_threads); rhashtable_destroy(&ht); - - return err; + vfree(tdata); + vfree(objs); + return 0; } static void __exit test_rht_exit(void) diff --git a/kernel/lib/test_static_key_base.c b/kernel/lib/test_static_key_base.c new file mode 100644 index 000000000..729447aea --- /dev/null +++ b/kernel/lib/test_static_key_base.c @@ -0,0 +1,68 @@ +/* + * Kernel module for testing static keys. + * + * Copyright 2015 Akamai Technologies Inc. All Rights Reserved + * + * Authors: + * Jason Baron <jbaron@akamai.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/jump_label.h> + +/* old keys */ +struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE; +EXPORT_SYMBOL_GPL(base_old_true_key); +struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE; +EXPORT_SYMBOL_GPL(base_inv_old_true_key); +struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE; +EXPORT_SYMBOL_GPL(base_old_false_key); +struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE; +EXPORT_SYMBOL_GPL(base_inv_old_false_key); + +/* new keys */ +DEFINE_STATIC_KEY_TRUE(base_true_key); +EXPORT_SYMBOL_GPL(base_true_key); +DEFINE_STATIC_KEY_TRUE(base_inv_true_key); +EXPORT_SYMBOL_GPL(base_inv_true_key); +DEFINE_STATIC_KEY_FALSE(base_false_key); +EXPORT_SYMBOL_GPL(base_false_key); +DEFINE_STATIC_KEY_FALSE(base_inv_false_key); +EXPORT_SYMBOL_GPL(base_inv_false_key); + +static void invert_key(struct static_key *key) +{ + if (static_key_enabled(key)) + static_key_disable(key); + else + static_key_enable(key); +} + +static int __init test_static_key_base_init(void) +{ + invert_key(&base_inv_old_true_key); + invert_key(&base_inv_old_false_key); + invert_key(&base_inv_true_key.key); + invert_key(&base_inv_false_key.key); + + return 0; +} + +static void __exit test_static_key_base_exit(void) +{ +} + +module_init(test_static_key_base_init); +module_exit(test_static_key_base_exit); + +MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>"); +MODULE_LICENSE("GPL"); diff --git a/kernel/lib/test_static_keys.c b/kernel/lib/test_static_keys.c new file mode 100644 index 000000000..c61b299e3 --- /dev/null +++ b/kernel/lib/test_static_keys.c @@ -0,0 +1,225 @@ +/* + * Kernel module for testing static keys. + * + * Copyright 2015 Akamai Technologies Inc. All Rights Reserved + * + * Authors: + * Jason Baron <jbaron@akamai.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/jump_label.h> + +/* old keys */ +struct static_key old_true_key = STATIC_KEY_INIT_TRUE; +struct static_key old_false_key = STATIC_KEY_INIT_FALSE; + +/* new api */ +DEFINE_STATIC_KEY_TRUE(true_key); +DEFINE_STATIC_KEY_FALSE(false_key); + +/* external */ +extern struct static_key base_old_true_key; +extern struct static_key base_inv_old_true_key; +extern struct static_key base_old_false_key; +extern struct static_key base_inv_old_false_key; + +/* new api */ +extern struct static_key_true base_true_key; +extern struct static_key_true base_inv_true_key; +extern struct static_key_false base_false_key; +extern struct static_key_false base_inv_false_key; + + +struct test_key { + bool init_state; + struct static_key *key; + bool (*test_key)(void); +}; + +#define test_key_func(key, branch) \ + ({bool func(void) { return branch(key); } func; }) + +static void invert_key(struct static_key *key) +{ + if (static_key_enabled(key)) + static_key_disable(key); + else + static_key_enable(key); +} + +static void invert_keys(struct test_key *keys, int size) +{ + struct static_key *previous = NULL; + int i; + + for (i = 0; i < size; i++) { + if (previous != keys[i].key) { + invert_key(keys[i].key); + previous = keys[i].key; + } + } +} + +static int verify_keys(struct test_key *keys, int size, bool invert) +{ + int i; + bool ret, init; + + for (i = 0; i < size; i++) { + ret = static_key_enabled(keys[i].key); + init = keys[i].init_state; + if (ret != (invert ? !init : init)) + return -EINVAL; + ret = keys[i].test_key(); + if (static_key_enabled(keys[i].key)) { + if (!ret) + return -EINVAL; + } else { + if (ret) + return -EINVAL; + } + } + return 0; +} + +static int __init test_static_key_init(void) +{ + int ret; + int size; + + struct test_key static_key_tests[] = { + /* internal keys - old keys */ + { + .init_state = true, + .key = &old_true_key, + .test_key = test_key_func(&old_true_key, static_key_true), + }, + { + .init_state = false, + .key = &old_false_key, + .test_key = test_key_func(&old_false_key, static_key_false), + }, + /* internal keys - new keys */ + { + .init_state = true, + .key = &true_key.key, + .test_key = test_key_func(&true_key, static_branch_likely), + }, + { + .init_state = true, + .key = &true_key.key, + .test_key = test_key_func(&true_key, static_branch_unlikely), + }, + { + .init_state = false, + .key = &false_key.key, + .test_key = test_key_func(&false_key, static_branch_likely), + }, + { + .init_state = false, + .key = &false_key.key, + .test_key = test_key_func(&false_key, static_branch_unlikely), + }, + /* external keys - old keys */ + { + .init_state = true, + .key = &base_old_true_key, + .test_key = test_key_func(&base_old_true_key, static_key_true), + }, + { + .init_state = false, + .key = &base_inv_old_true_key, + .test_key = test_key_func(&base_inv_old_true_key, static_key_true), + }, + { + .init_state = false, + .key = &base_old_false_key, + .test_key = test_key_func(&base_old_false_key, static_key_false), + }, + { + .init_state = true, + .key = &base_inv_old_false_key, + .test_key = test_key_func(&base_inv_old_false_key, static_key_false), + }, + /* external keys - new keys */ + { + .init_state = true, + .key = &base_true_key.key, + .test_key = test_key_func(&base_true_key, static_branch_likely), + }, + { + .init_state = true, + .key = &base_true_key.key, + .test_key = test_key_func(&base_true_key, static_branch_unlikely), + }, + { + .init_state = false, + .key = &base_inv_true_key.key, + .test_key = 
test_key_func(&base_inv_true_key, static_branch_likely), + }, + { + .init_state = false, + .key = &base_inv_true_key.key, + .test_key = test_key_func(&base_inv_true_key, static_branch_unlikely), + }, + { + .init_state = false, + .key = &base_false_key.key, + .test_key = test_key_func(&base_false_key, static_branch_likely), + }, + { + .init_state = false, + .key = &base_false_key.key, + .test_key = test_key_func(&base_false_key, static_branch_unlikely), + }, + { + .init_state = true, + .key = &base_inv_false_key.key, + .test_key = test_key_func(&base_inv_false_key, static_branch_likely), + }, + { + .init_state = true, + .key = &base_inv_false_key.key, + .test_key = test_key_func(&base_inv_false_key, static_branch_unlikely), + }, + }; + + size = ARRAY_SIZE(static_key_tests); + + ret = verify_keys(static_key_tests, size, false); + if (ret) + goto out; + + invert_keys(static_key_tests, size); + ret = verify_keys(static_key_tests, size, true); + if (ret) + goto out; + + invert_keys(static_key_tests, size); + ret = verify_keys(static_key_tests, size, false); + if (ret) + goto out; + return 0; +out: + return ret; +} + +static void __exit test_static_key_exit(void) +{ +} + +module_init(test_static_key_init); +module_exit(test_static_key_exit); + +MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>"); +MODULE_LICENSE("GPL"); diff --git a/kernel/lib/timerqueue.c b/kernel/lib/timerqueue.c index a382e4a32..782ae8ca2 100644 --- a/kernel/lib/timerqueue.c +++ b/kernel/lib/timerqueue.c @@ -36,7 +36,7 @@ * Adds the timer node to the timerqueue, sorted by the * node's expires value. */ -void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) +bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) { struct rb_node **p = &head->head.rb_node; struct rb_node *parent = NULL; @@ -56,8 +56,11 @@ void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &head->head); - if (!head->next || node->expires.tv64 < head->next->expires.tv64) + if (!head->next || node->expires.tv64 < head->next->expires.tv64) { head->next = node; + return true; + } + return false; } EXPORT_SYMBOL_GPL(timerqueue_add); @@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(timerqueue_add); * * Removes the timer node from the timerqueue. */ -void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) +bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) { WARN_ON_ONCE(RB_EMPTY_NODE(&node->node)); @@ -82,6 +85,7 @@ void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) } rb_erase(&node->node, &head->head); RB_CLEAR_NODE(&node->node); + return head->next != NULL; } EXPORT_SYMBOL_GPL(timerqueue_del); diff --git a/kernel/lib/ucs2_string.c b/kernel/lib/ucs2_string.c index 6f500ef23..f0b323abb 100644 --- a/kernel/lib/ucs2_string.c +++ b/kernel/lib/ucs2_string.c @@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) } } EXPORT_SYMBOL(ucs2_strncmp); + +unsigned long +ucs2_utf8size(const ucs2_char_t *src) +{ + unsigned long i; + unsigned long j = 0; + + for (i = 0; i < ucs2_strlen(src); i++) { + u16 c = src[i]; + + if (c >= 0x800) + j += 3; + else if (c >= 0x80) + j += 2; + else + j += 1; + } + + return j; +} +EXPORT_SYMBOL(ucs2_utf8size); + +/* + * copy at most maxlength bytes of whole utf8 characters to dest from the + * ucs2 string src. 
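+ * Each UCS-2 code point below 0x80 encodes as one UTF-8 byte, code
+ * points up to 0x7ff as two bytes, and everything else as three bytes.
+ * A character is emitted only if all of its bytes still fit within
+ * maxlength, so a multi-byte character is dropped rather than split
+ * when the remaining space runs out.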
+ * + * The return value is the number of characters copied, not including the + * final NUL character. + */ +unsigned long +ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) +{ + unsigned int i; + unsigned long j = 0; + unsigned long limit = ucs2_strnlen(src, maxlength); + + for (i = 0; maxlength && i < limit; i++) { + u16 c = src[i]; + + if (c >= 0x800) { + if (maxlength < 3) + break; + maxlength -= 3; + dest[j++] = 0xe0 | (c & 0xf000) >> 12; + dest[j++] = 0x80 | (c & 0x0fc0) >> 6; + dest[j++] = 0x80 | (c & 0x003f); + } else if (c >= 0x80) { + if (maxlength < 2) + break; + maxlength -= 2; + dest[j++] = 0xc0 | (c & 0x7c0) >> 6; + dest[j++] = 0x80 | (c & 0x03f); + } else { + maxlength -= 1; + dest[j++] = c & 0x7f; + } + } + if (maxlength) + dest[j] = '\0'; + return j; +} +EXPORT_SYMBOL(ucs2_as_utf8); diff --git a/kernel/lib/vsprintf.c b/kernel/lib/vsprintf.c index da39c608a..f9cee8e12 100644 --- a/kernel/lib/vsprintf.c +++ b/kernel/lib/vsprintf.c @@ -17,6 +17,7 @@ */ #include <stdarg.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/module.h> /* for KSYM_SYMBOL_LEN */ #include <linux/types.h> @@ -1448,6 +1449,8 @@ int kptr_restrict __read_mostly; * (legacy clock framework) of the clock * - 'Cr' For a clock, it prints the current rate of the clock * + * ** Please update also Documentation/printk-formats.txt when making changes ** + * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a * pointer to the real address. @@ -1456,7 +1459,7 @@ static noinline_for_stack char *pointer(const char *fmt, char *buf, char *end, void *ptr, struct printf_spec spec) { - int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0); + const int default_width = 2 * sizeof(void *); if (!ptr && *fmt != 'K') { /* @@ -1768,14 +1771,14 @@ qualifier: case 'n': /* - * Since %n poses a greater security risk than utility, treat - * it as an invalid format specifier. Warn about its use so - * that new instances don't get added. + * Since %n poses a greater security risk than + * utility, treat it as any other invalid or + * unsupported format specifier. 
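+ * (%n stores the number of characters written so far through a pointer
+ * argument, so an attacker who can influence the format string gains a
+ * memory-write primitive; hence it is rejected outright.)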
*/ - WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt); /* Fall-through */ default: + WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt); spec->type = FORMAT_TYPE_INVALID; return fmt - start; } @@ -1810,41 +1813,16 @@ qualifier: * @fmt: The format string to use * @args: Arguments for the format string * - * This function follows C99 vsnprintf, but has some extensions: - * %pS output the name of a text symbol with offset - * %ps output the name of a text symbol without offset - * %pF output the name of a function pointer with its offset - * %pf output the name of a function pointer without its offset - * %pB output the name of a backtrace symbol with its offset - * %pR output the address range in a struct resource with decoded flags - * %pr output the address range in a struct resource with raw flags - * %pb output the bitmap with field width as the number of bits - * %pbl output the bitmap as range list with field width as the number of bits - * %pM output a 6-byte MAC address with colons - * %pMR output a 6-byte MAC address with colons in reversed order - * %pMF output a 6-byte MAC address with dashes - * %pm output a 6-byte MAC address without colons - * %pmR output a 6-byte MAC address without colons in reversed order - * %pI4 print an IPv4 address without leading zeros - * %pi4 print an IPv4 address with leading zeros - * %pI6 print an IPv6 address with colons - * %pi6 print an IPv6 address without colons - * %pI6c print an IPv6 address as specified by RFC 5952 - * %pIS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address - * %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address - * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper - * case. - * %*pE[achnops] print an escaped buffer - * %*ph[CDN] a variable-length hex string with a separator (supports up to 64 - * bytes of the input) - * %pC output the name (Common Clock Framework) or address (legacy clock - * framework) of a clock - * %pCn output the name (Common Clock Framework) or address (legacy clock - * framework) of a clock - * %pCr output the current rate of a clock - * %n is ignored + * This function generally follows C99 vsnprintf, but has some + * extensions and a few limitations: + * + * %n is unsupported + * %p* is handled by pointer() * - * ** Please update Documentation/printk-formats.txt when making changes ** + * See pointer() or Documentation/printk-formats.txt for more + * extensive description. + * + * ** Please update the documentation in both places when making changes ** * * The return value is the number of characters which would * be generated for the given input, excluding the trailing @@ -1943,10 +1921,15 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) break; case FORMAT_TYPE_INVALID: - if (str < end) - *str = '%'; - ++str; - break; + /* + * Presumably the arguments passed gcc's type + * checking, but there is no safe or sane way + * for us to continue parsing the format and + * fetching from the va_list; the remaining + * specifiers and arguments would be out of + * sync. 
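+ * For example, given a bogus "%d %y %s" there is no way to tell how
+ * many (or how large) arguments "%y" was meant to consume, so a later
+ * %s could fetch the value intended for "%y" and dereference it as a
+ * string pointer. Truncating the output here is the only safe option.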
+ */ + goto out; default: switch (spec.type) { @@ -1991,6 +1974,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) } } +out: if (size > 0) { if (str < end) *str = '\0'; @@ -2188,9 +2172,10 @@ do { \ switch (spec.type) { case FORMAT_TYPE_NONE: - case FORMAT_TYPE_INVALID: case FORMAT_TYPE_PERCENT_CHAR: break; + case FORMAT_TYPE_INVALID: + goto out; case FORMAT_TYPE_WIDTH: case FORMAT_TYPE_PRECISION: @@ -2252,6 +2237,7 @@ do { \ } } +out: return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; #undef save_arg } @@ -2285,7 +2271,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) char *str, *end; const char *args = (const char *)bin_buf; - if (WARN_ON_ONCE((int) size < 0)) + if (WARN_ON_ONCE(size > INT_MAX)) return 0; str = buf; @@ -2374,12 +2360,14 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) break; case FORMAT_TYPE_PERCENT_CHAR: - case FORMAT_TYPE_INVALID: if (str < end) *str = '%'; ++str; break; + case FORMAT_TYPE_INVALID: + goto out; + default: { unsigned long long num; @@ -2422,6 +2410,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) } /* switch(spec.type) */ } /* while(*fmt) */ +out: if (size > 0) { if (str < end) *str = '\0'; diff --git a/kernel/lib/zlib_deflate/deftree.c b/kernel/lib/zlib_deflate/deftree.c index ddf348299..9b1756b12 100644 --- a/kernel/lib/zlib_deflate/deftree.c +++ b/kernel/lib/zlib_deflate/deftree.c @@ -35,6 +35,7 @@ /* #include "deflate.h" */ #include <linux/zutil.h> +#include <linux/bitrev.h> #include "defutil.h" #ifdef DEBUG_ZLIB @@ -146,7 +147,6 @@ static void send_all_trees (deflate_state *s, int lcodes, int dcodes, static void compress_block (deflate_state *s, ct_data *ltree, ct_data *dtree); static void set_data_type (deflate_state *s); -static unsigned bi_reverse (unsigned value, int length); static void bi_windup (deflate_state *s); static void bi_flush (deflate_state *s); static void copy_block (deflate_state *s, char *buf, unsigned len, @@ -284,7 +284,7 @@ static void tr_static_init(void) /* The static distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; - static_dtree[n].Code = bi_reverse((unsigned)n, 5); + static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5); } static_init_done = 1; } @@ -520,7 +520,7 @@ static void gen_codes( int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ - tree[n].Code = bi_reverse(next_code[len]++, len); + tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); diff --git a/kernel/lib/zlib_deflate/defutil.h b/kernel/lib/zlib_deflate/defutil.h index b640b6402..a8c370897 100644 --- a/kernel/lib/zlib_deflate/defutil.h +++ b/kernel/lib/zlib_deflate/defutil.h @@ -293,22 +293,6 @@ void zlib_tr_stored_type_only (deflate_state *); } /* =========================================================================== - * Reverse the first len bits of a code, using straightforward code (a faster - * method would use a table) - * IN assertion: 1 <= len <= 15 - */ -static inline unsigned bi_reverse(unsigned code, /* the value to invert */ - int len) /* its bit length */ -{ - register unsigned res = 0; - do { - res |= code & 1; - code >>= 1, res <<= 1; - } while (--len > 0); - return res >> 1; -} - -/* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. 
*/ static inline void bi_flush(deflate_state *s) |
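The deftree.c hunk above replaces the open-coded bi_reverse() helper with bitrev32() from <linux/bitrev.h>. The following stand-alone sketch (user space, not part of the patch; bitrev32() here is a naive stand-in for the kernel helper) illustrates the identity the substitution relies on: reversing all 32 bits and shifting right by (32 - len) yields the same result as reversing only the low len bits.

/*
 * Stand-alone check of the identity used by the deftree.c change:
 *     bi_reverse(code, len) == bitrev32(code) >> (32 - len)
 * for the code lengths zlib uses (1 <= len <= 15).
 */
#include <assert.h>
#include <stdint.h>

/* old helper, copied from the code removed from defutil.h */
static unsigned bi_reverse(unsigned code, int len)
{
	unsigned res = 0;

	do {
		res |= code & 1;
		code >>= 1, res <<= 1;
	} while (--len > 0);
	return res >> 1;
}

/* naive stand-in for the kernel's bitrev32(): reverse all 32 bits */
static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	int len;
	uint32_t code;

	for (len = 1; len <= 15; len++)
		for (code = 0; code < (1u << len); code++)
			assert(bi_reverse(code, len) ==
			       bitrev32(code) >> (32 - len));
	return 0;
}

With that identity holding for every code a Huffman table can produce, the private bi_reverse() helper removed from defutil.h above is redundant once <linux/bitrev.h> is included.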