33 files changed, 7647 insertions, 1 deletion
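The xdp_l2fwd program introduced below keeps its forwarding state in two BPF maps: an FDB hash map keyed by <smac,dmac> whose value is an index into a device map holding the egress port (see README.md in the diff). A minimal userspace sketch of populating those maps with the libbpf calls this commit vendors in include/bpf/bpf.h (the pinned map paths, key layout, and index values are assumptions for illustration, not taken from this commit)::

	#include <bpf/bpf.h>
	#include <linux/types.h>

	/* Hypothetical key layout matching the README's <smac,dmac> pairs. */
	struct fdb_key {
		__u8 smac[6];
		__u8 dmac[6];
	};

	int main(void)
	{
		/* Pinned map paths are assumptions; l2fwd.sh pins maps with bpftool. */
		int fdb_fd = bpf_obj_get("/sys/fs/bpf/fdb_map");
		int ports_fd = bpf_obj_get("/sys/fs/bpf/xdp_fwd_ports");
		struct fdb_key key = {
			.smac = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
			.dmac = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
		};
		__u32 idx = 1;     /* slot in the device map */
		__u32 ifindex = 4; /* egress device, e.g. taken from ifindex.py */

		if (fdb_fd < 0 || ports_fd < 0)
			return 1;

		/* device map slot -> egress ifindex, then FDB entry -> slot */
		if (bpf_map_update_elem(ports_fd, &idx, &ifindex, BPF_ANY) ||
		    bpf_map_update_elem(fdb_fd, &key, &idx, BPF_ANY))
			return 1;

		return 0;
	}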
diff --git a/src/bpfswitch/Makefile b/src/bpfswitch/Makefile
new file mode 100644
index 00000000..94a58c9a
--- /dev/null
+++ b/src/bpfswitch/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+SUBDIRS = ksrc usrc
+
+all:
+	@for s in $(SUBDIRS); do \
+		make -C $$s $(BUILDDIR) all; \
+	done
+
+clean:
+	@for s in $(SUBDIRS); do \
+		make -C $$s $(BUILDDIR) clean; \
+	done
diff --git a/src/bpfswitch/README.md b/src/bpfswitch/README.md
new file mode 100644
index 00000000..15c3d4fd
--- /dev/null
+++ b/src/bpfswitch/README.md
@@ -0,0 +1,34 @@
+<!---
+This work is licensed under a Creative Commons Attribution 4.0 International License.
+http://creativecommons.org/licenses/by/4.0
+-->
+
+# bpfswitch
+
+This repository is self-contained. Please install the linux-headers package
+for your kernel version.
+
+## XDP L2 forwarding
+xdp\_l2fwd handles Layer 2 forwarding between an ingress device (e.g., a host
+device) and an egress device (e.g., a tap device for VMs or another host
+device). Userspace populates an FDB (hash map) with \<smac,dmac> pairs that
+return an index into a device map, which contains the device to receive the
+packet. See scripts/l2fwd.sh for an example.
+
+This program is used for the netdev 0x14 tutorial "XDP and the cloud: Using
+XDP on hosts and VMs": https://netdevconf.info/0x14/session.html?tutorial-XDP-and-the-cloud
+
+## Using l2fwd
+
+1. Go to the vineperf/src/bpfswitch/ folder.
+2. Run the configure script, then run make.
+3. Go to the scripts/ folder and open the l2fwd.sh file.
+4. Update the values of TGEN<EAST/WEST>MAC, the <EAST/WEST> interfaces, and their indexes.
+5. Use ifindex.py to find the indexes.
+6. Ensure that the path to bpftool is correct in the file.
+7. Save the changes and run the script.
+8. If the script runs successfully, the setup is ready for loopback testing.
+
+## Dummy XDP program
+
+xdp\_dummy is a dummy XDP program that just returns XDP\_PASS.
diff --git a/src/bpfswitch/configure b/src/bpfswitch/configure
new file mode 100755
index 00000000..7dc7f359
--- /dev/null
+++ b/src/bpfswitch/configure
@@ -0,0 +1,276 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# This is not an autoconf generated configure
+#
+# Influential environment variables:
+#    LIBBPF_DIR    Location of libbpf install to use
+#    KVER          Specify a specific kernel version to build programs
+#    KSRC          Location of kernel headers (e.g., source tree)
+#    KBLD          Location of kernel build files (e.g., O= directory)
+
+# Output file which is input to Makefile
+CONFIG_FINAL=config.mk
+CONFIG=".${CONFIG_FINAL}.tmp"
+
+# Make a temp directory in build tree.
+TMPDIR=$(mktemp -d config.XXXXXX)
+trap 'status=$?; rm -rf $TMPDIR; rm -f $CONFIG; exit $status' EXIT HUP INT QUIT TERM
+
+check_toolchain()
+{
+	: ${PKG_CONFIG:=pkg-config}
+	: ${CC=gcc}
+	: ${CLANG=clang}
+	: ${LLC=llc}
+
+	for TOOL in $PKG_CONFIG $CC $CLANG $LLC; do
+		if [ !
$(command -v ${TOOL} 2>/dev/null) ]; then + echo "*** ERROR: Cannot find tool ${TOOL}" ; + exit 1; + fi; + done + + echo "PKG_CONFIG:=${PKG_CONFIG}" >>$CONFIG + echo "CC:=${CC}" >>$CONFIG + echo "CLANG:=${CLANG}" >>$CONFIG + echo "LLC:=${LLC}" >>$CONFIG +} + +check_elf() +{ + if ${PKG_CONFIG} libelf --exists; then + echo "HAVE_ELF:=y" >>$CONFIG + echo "yes" + + echo 'CFLAGS += -DHAVE_ELF' `${PKG_CONFIG} libelf --cflags` >> $CONFIG + LDLIBS="$LDLIBS $(${PKG_CONFIG} libelf --libs)" + else + echo "missing - this is required" + return 1 + fi +} + +check_zlib() +{ + if ${PKG_CONFIG} zlib --exists; then + echo "HAVE_ZLIB:=y" >>$CONFIG + echo "yes" + + echo 'CFLAGS += -DHAVE_ZLIB' `${PKG_CONFIG} zlib --cflags` >> $CONFIG + LDLIBS="$LDLIBS $(${PKG_CONFIG} zlib --libs)" + else + echo "missing - this is required" + return 1 + fi +} + +libbpf_compile_test() +{ + local libs="$*" + + cat >$TMPDIR/libbpftest.c <<EOF +#include <bpf/libbpf.h> +int main(int argc, char **argv) { + void *ptr; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = "/path"); + DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, lopts, .old_fd = -1); + (void) bpf_object__open_file("file", &opts); + (void) bpf_program__name(ptr); + (void) bpf_map__set_initial_value(ptr, ptr, 0); + (void) bpf_set_link_xdp_fd_opts(0, 0, 0, &lopts); + return 0; +} +EOF + + libbpf_err=$($CC -o $TMPDIR/libbpftest $TMPDIR/libbpftest.c $LIBBPF_CFLAGS $LIBBPF_LDLIBS $libs 2>&1) +} + +check_custom_libbpf() +{ + LIBBPF_CFLAGS="-I${LIBBPF_DIR}/usr/include" + LIBBPF_LDLIBS="$(find ${LIBBPF_DIR} -name libbpf.a)" + + echo -n "ELF support: " + check_elf || exit 1 + + echo -n "zlib support: " + check_zlib || exit 1 + + libbpf_compile_test "${LDLIBS}" + if [ "$?" -eq "0" ]; then + echo "LIBBPF_DIR=${LIBBPF_DIR}" >> $CONFIG + echo "CFLAGS += $LIBBPF_CFLAGS" >> $CONFIG + echo "LDLIBS += $LIBBPF_LDLIBS $LDLIBS" >> $CONFIG + + echo "libbpf support: custom install" + return 0 + fi + + echo "libbpf support: invalid custom install" + return 1 +} + +check_libbpf() +{ + local libbpf_err + + if [ -n "$LIBBPF_DIR" ]; then + check_custom_libbpf + return $? + fi + + echo -n "libbpf support: " + if [ "${FORCE_SUBMODULE_LIBBPF:-0}" -ne "1" ] && ${PKG_CONFIG} libbpf --exists; then + LIBBPF_CFLAGS=$(${PKG_CONFIG} libbpf --cflags) + LIBBPF_LDLIBS=$(${PKG_CONFIG} libbpf --libs) + + libbpf_compile_test + if [ "$?" -eq "0" ]; then + echo "SYSTEM_LIBBPF:=y" >>$CONFIG + echo 'CFLAGS += ' $LIBBPF_CFLAGS >> $CONFIG + echo 'LDLIBS += ' $LIBBPF_LDLIBS >>$CONFIG + echo 'OBJECT_LIBBPF = ' >>$CONFIG + echo system + + return 0 + fi + else + libbpf_err="${PKG_CONFIG} couldn't find libbpf" + fi + + if [ "${FORCE_SYSTEM_LIBBPF:-0}" -eq "1" ]; then + echo "FORCE_SYSTEM_LIBBPF is set, but no usable libbpf found on system" + echo "error: $libbpf_err" + rm -f "$CONFIG" + exit 1 + fi + + echo submodule + echo "SYSTEM_LIBBPF:=n" >> $CONFIG + echo 'CFLAGS += -I$(LIB_DIR)/libbpf-install/usr/include' >>$CONFIG + echo 'BPF_CFLAGS += -I$(LIB_DIR)/libbpf-install/usr/include' >>$CONFIG + echo 'LDFLAGS += -L$(LIB_DIR)/libbpf/src' >>$CONFIG + echo 'LDLIBS += -l:libbpf.a' >>$CONFIG + echo 'OBJECT_LIBBPF = $(LIB_DIR)/libbpf/src/libbpf.a' >>$CONFIG + if ! [ -d "lib/libbpf/src" ] && [ -f ".gitmodules" ] && [ -e ".git" ]; then + git submodule init && git submodule update + fi + + echo -n "ELF support: " + check_elf || exit 1 + + echo -n "zlib support: " + check_zlib || exit 1 + + echo "LDLIBS += $LDLIBS" >>$CONFIG + + # For the build submodule library we know it does support this API, so we + # hard code it. 
Also due to the fact it's hard to build a test app as + # libbpf.a has not been build at configure time. + echo "HAVE_LIBBPF_PERF_BUFFER__CONSUME:=y" >>"$CONFIG" +} + +find_kernel_path() +{ + local ksrc + local kbld + + : ${KVER:=$(uname -r)} + + if [ "${KSRC}" != "" ]; then + ksrc="${KSRC}" + # Fedora, Redhat and custom kernels + elif [ -e /lib/modules/${KVER}/source ]; then + ksrc="/lib/modules/${KVER}/source" + # Ubuntu, Debian + elif [ -e /usr/src/linux-headers-${KVER} ]; then + ksrc="/usr/src/linux-headers-${KVER}" + else + echo "Failed to find kernel source directory" >&2 + return 1 + fi + + if [ "${KBLD}" != "" ]; then + kbld="${KBLD}" + elif [ -e /lib/modules/${KVER}/build ]; then + kbld="/lib/modules/${KVER}/build" + else + echo "Failed to find kernel build directory" >&2 + return 1 + fi + echo "KSRC=${ksrc}" >> "$CONFIG" + echo "KBLD=${kbld}" >> "$CONFIG" +} + +quiet_config() +{ + cat <<EOF +# user can control verbosity similar to kernel builds (e.g., V=1) +ifeq ("\$(origin V)", "command line") + VERBOSE = \$(V) +endif +ifndef VERBOSE + VERBOSE = 0 +endif +ifeq (\$(VERBOSE),1) + Q = +else + Q = @ +endif +ifeq (\$(VERBOSE),0) +MAKEFLAGS += --no-print-directory +endif + + +ifeq (\$(VERBOSE), 0) + QUIET_CC = @echo ' CC '\$@; + QUIET_CLANG = @echo ' CLANG '\$@; + QUIET_LLC = @echo ' LLC '\$@; + QUIET_LINK = @echo ' LINK '\$@; + QUIET_INSTALL = @echo ' INSTALL '\$@; + QUIET_GEN = @echo ' GEN '\$@; +endif +EOF +} + +################################################################################ +# show help + +usage() +{ + cat <<EOF +Usage: $0 OPTS + +OPTS: + -l LIBBPF Path to libbpf install (if not using system version) + + -k KVER Kernel version to build against (default: $(uname -r)) + -s KSRC Location of kernel source + -b KBLD Location of kernel build directory +EOF +} + +################################################################################ +# main + +while getopts :l:k:s:b: o +do + case "$o" in + l) LIBBPF_DIR="$OPTARG";; + k) KVER="$OPTARG";; + s) KSRC="$OPTARG";; + b) KBLD="$OPTARG";; + *) usage; exit 1;; + esac +done + +echo "# Generated config" >$CONFIG +quiet_config >> $CONFIG + +check_toolchain + +check_libbpf || exit 1 + +find_kernel_path || exit 1 + +mv $CONFIG $CONFIG_FINAL diff --git a/src/bpfswitch/include/bpf/bpf.h b/src/bpfswitch/include/bpf/bpf.h new file mode 100644 index 00000000..dbef24eb --- /dev/null +++ b/src/bpfswitch/include/bpf/bpf.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * common eBPF ELF operations. + * + * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> + * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> + * Copyright (C) 2015 Huawei Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + */ +#ifndef __LIBBPF_BPF_H +#define __LIBBPF_BPF_H + +#include <linux/bpf.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> + +#include "libbpf_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct bpf_create_map_attr { + const char *name; + enum bpf_map_type map_type; + __u32 map_flags; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 numa_node; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 map_ifindex; + union { + __u32 inner_map_fd; + __u32 btf_vmlinux_value_type_id; + }; +}; + +LIBBPF_API int +bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); +LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, + int max_entries, __u32 map_flags, int node); +LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, + int max_entries, __u32 map_flags); +LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size, + int value_size, int max_entries, __u32 map_flags); +LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type, + const char *name, int key_size, + int inner_map_fd, int max_entries, + __u32 map_flags, int node); +LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type, + const char *name, int key_size, + int inner_map_fd, int max_entries, + __u32 map_flags); + +struct bpf_load_program_attr { + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + const char *name; + const struct bpf_insn *insns; + size_t insns_cnt; + const char *license; + union { + __u32 kern_version; + __u32 attach_prog_fd; + }; + union { + __u32 prog_ifindex; + __u32 attach_btf_id; + }; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + const void *func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + const void *line_info; + __u32 line_info_cnt; + __u32 log_level; + __u32 prog_flags; +}; + +/* Flags to direct loading requirements */ +#define MAPS_RELAX_COMPAT 0x01 + +/* Recommend log buffer size */ +#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */ +LIBBPF_API int +bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, + char *log_buf, size_t log_buf_sz); +LIBBPF_API int bpf_load_program(enum bpf_prog_type type, + const struct bpf_insn *insns, size_t insns_cnt, + const char *license, __u32 kern_version, + char *log_buf, size_t log_buf_sz); +LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, + const struct bpf_insn *insns, + size_t insns_cnt, __u32 prog_flags, + const char *license, __u32 kern_version, + char *log_buf, size_t log_buf_sz, + int log_level); + +LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, + __u64 flags); + +LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value); +LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, + __u64 flags); +LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key, + void *value); +LIBBPF_API int bpf_map_delete_elem(int fd, const void *key); +LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key); +LIBBPF_API int bpf_map_freeze(int fd); + +struct bpf_map_batch_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u64 elem_flags; + __u64 flags; +}; +#define 
bpf_map_batch_opts__last_field flags + +LIBBPF_API int bpf_map_delete_batch(int fd, void *keys, + __u32 *count, + const struct bpf_map_batch_opts *opts); +LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, + void *keys, void *values, __u32 *count, + const struct bpf_map_batch_opts *opts); +LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, + void *out_batch, void *keys, + void *values, __u32 *count, + const struct bpf_map_batch_opts *opts); +LIBBPF_API int bpf_map_update_batch(int fd, void *keys, void *values, + __u32 *count, + const struct bpf_map_batch_opts *opts); + +LIBBPF_API int bpf_obj_pin(int fd, const char *pathname); +LIBBPF_API int bpf_obj_get(const char *pathname); + +struct bpf_prog_attach_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + unsigned int flags; + int replace_prog_fd; +}; +#define bpf_prog_attach_opts__last_field replace_prog_fd + +LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, + enum bpf_attach_type type, unsigned int flags); +LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); +LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); +LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, + enum bpf_attach_type type); + +struct bpf_link_create_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ +}; +#define bpf_link_create_opts__last_field sz + +LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type, + const struct bpf_link_create_opts *opts); + +struct bpf_link_update_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; /* extra flags */ + __u32 old_prog_fd; /* expected old program FD */ +}; +#define bpf_link_update_opts__last_field old_prog_fd + +LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd, + const struct bpf_link_update_opts *opts); + +LIBBPF_API int bpf_iter_create(int link_fd); + +struct bpf_prog_test_run_attr { + int prog_fd; + int repeat; + const void *data_in; + __u32 data_size_in; + void *data_out; /* optional */ + __u32 data_size_out; /* in: max length of data_out + * out: length of data_out */ + __u32 retval; /* out: return code of the BPF program */ + __u32 duration; /* out: average per repetition in ns */ + const void *ctx_in; /* optional */ + __u32 ctx_size_in; + void *ctx_out; /* optional */ + __u32 ctx_size_out; /* in: max length of ctx_out + * out: length of cxt_out */ +}; + +LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr); + +/* + * bpf_prog_test_run does not check that data_out is large enough. Consider + * using bpf_prog_test_run_xattr instead. 
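 *
 * For illustration, a sketch of the xattr variant (prog_fd and the buffer
 * sizes here are examples, not part of this header)::
 *
 *	char in[64] = {}, out[256];
 *	struct bpf_prog_test_run_attr tattr = {
 *		.prog_fd = prog_fd,
 *		.repeat = 1,
 *		.data_in = in,
 *		.data_size_in = sizeof(in),
 *		.data_out = out,
 *		.data_size_out = sizeof(out), // in: max length, out: actual
 *	};
 *
 *	if (!bpf_prog_test_run_xattr(&tattr))
 *		printf("retval %u, avg %u ns/run\n", tattr.retval, tattr.duration);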
+ */ +LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data, + __u32 size, void *data_out, __u32 *size_out, + __u32 *retval, __u32 *duration); +LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_map_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_link_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len); +LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type, + __u32 query_flags, __u32 *attach_flags, + __u32 *prog_ids, __u32 *prog_cnt); +LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd); +LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, + __u32 log_buf_size, bool do_log); +LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, + __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, + __u64 *probe_offset, __u64 *probe_addr); + +enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ +LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_BPF_H */ diff --git a/src/bpfswitch/include/bpf/bpf_core_read.h b/src/bpfswitch/include/bpf/bpf_core_read.h new file mode 100644 index 00000000..eae5cccf --- /dev/null +++ b/src/bpfswitch/include/bpf/bpf_core_read.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_CORE_READ_H__ +#define __BPF_CORE_READ_H__ + +/* + * enum bpf_field_info_kind is passed as a second argument into + * __builtin_preserve_field_info() built-in to get a specific aspect of + * a field, captured as a first argument. __builtin_preserve_field_info(field, + * info_kind) returns __u32 integer and produces BTF field relocation, which + * is understood and processed by libbpf during BPF object loading. See + * selftests/bpf for examples. + */ +enum bpf_field_info_kind { + BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_FIELD_BYTE_SIZE = 1, + BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_FIELD_SIGNED = 3, + BPF_FIELD_LSHIFT_U64 = 4, + BPF_FIELD_RSHIFT_U64 = 5, +}; + +#define __CORE_RELO(src, field, info) \ + __builtin_preserve_field_info((src)->field, BPF_FIELD_##info) + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read((void *)dst, \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#else +/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so + * for big-endian we need to adjust destination pointer accordingly, based on + * field byte size + */ +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#endif + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * All this is done in relocatable manner, so bitfield changes such as + * signedness, bit size, offset changes, this will be handled automatically. + * This version of macro is using bpf_probe_read() to read underlying integer + * storage. 
Macro functions as an expression and its return type is
 * bpf_probe_read()'s return value: 0, on success, <0 on error.
 */
+#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({			      \
+	unsigned long long val = 0;					      \
+									      \
+	__CORE_BITFIELD_PROBE_READ(&val, s, field);			      \
+	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+	if (__CORE_RELO(s, field, SIGNED))				      \
+		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
+	else								      \
+		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
+	val;								      \
+})
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * This version of macro is using direct memory reads and should be used from
+ * BPF program types that support such functionality (e.g., typed raw
+ * tracepoints).
+ */
+#define BPF_CORE_READ_BITFIELD(s, field) ({				      \
+	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+	unsigned long long val;						      \
+									      \
+	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
+	case 1: val = *(const unsigned char *)p; break;			      \
+	case 2: val = *(const unsigned short *)p; break;		      \
+	case 4: val = *(const unsigned int *)p; break;			      \
+	case 8: val = *(const unsigned long long *)p; break;		      \
+	}								      \
+	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+	if (__CORE_RELO(s, field, SIGNED))				      \
+		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
+	else								      \
+		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
+	val;								      \
+})
+
+/*
+ * Convenience macro to check that field actually exists in target kernel's BTF.
+ * Returns:
+ *    1, if matching field is present in target kernel;
+ *    0, if no matching field found.
+ */
+#define bpf_core_field_exists(field) \
+	__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+
+/*
+ * Convenience macro to get byte size of a field. Works for integers,
+ * struct/unions, pointers, arrays, and enums.
+ */
+#define bpf_core_field_size(field) \
+	__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
+
+/*
+ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+ * relocation for source address using __builtin_preserve_access_index()
+ * built-in, provided by Clang.
+ *
+ * __builtin_preserve_access_index() takes as an argument an expression of
+ * taking an address of a field within struct/union. It makes compiler emit
+ * a relocation, which records BTF type ID describing root struct/union and an
+ * accessor string which describes exact embedded field that was used to take
+ * an address. See detailed description of this relocation format and
+ * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
+ *
+ * This relocation allows libbpf to adjust BPF instruction to use correct
+ * actual field offset, based on target kernel BTF type that matches original
+ * (local) BTF, used to record relocation.
+ */
+#define bpf_core_read(dst, sz, src) \
+	bpf_probe_read(dst, sz, \
+		       (const void *)__builtin_preserve_access_index(src))
+
+/*
+ * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
+ * additionally emitting BPF CO-RE field relocation for specified source
+ * argument.
+ */
+#define bpf_core_read_str(dst, sz, src) \
+	bpf_probe_read_str(dst, sz, \
+			   (const void *)__builtin_preserve_access_index(src))
+
+#define ___concat(a, b) a ## b
+#define ___apply(fn, n) ___concat(fn, n)
+#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
+
+/*
+ * return number of provided arguments; used for switch-based variadic macro
+ * definitions (see ___last, ___arrow, etc below)
+ */
+#define ___narg(...)
___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +/* + * return 0 if no arguments are passed, N - otherwise; used for + * recursively-defined macros to specify termination (0) case, and generic + * (N) case (e.g., ___read_ptrs, ___core_read) + */ +#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___last1(x) x +#define ___last2(a, x) x +#define ___last3(a, b, x) x +#define ___last4(a, b, c, x) x +#define ___last5(a, b, c, d, x) x +#define ___last6(a, b, c, d, e, x) x +#define ___last7(a, b, c, d, e, f, x) x +#define ___last8(a, b, c, d, e, f, g, x) x +#define ___last9(a, b, c, d, e, f, g, h, x) x +#define ___last10(a, b, c, d, e, f, g, h, i, x) x +#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___nolast2(a, _) a +#define ___nolast3(a, b, _) a, b +#define ___nolast4(a, b, c, _) a, b, c +#define ___nolast5(a, b, c, d, _) a, b, c, d +#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e +#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f +#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g +#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h +#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i +#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___arrow1(a) a +#define ___arrow2(a, b) a->b +#define ___arrow3(a, b, c) a->b->c +#define ___arrow4(a, b, c, d) a->b->c->d +#define ___arrow5(a, b, c, d, e) a->b->c->d->e +#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f +#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g +#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h +#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i +#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j +#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___type(...) typeof(___arrow(__VA_ARGS__)) + +#define ___read(read_fn, dst, src_type, src, accessor) \ + read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) + +/* "recursively" read a sequence of inner pointers using local __t var */ +#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a); +#define ___rd_last(...) \ + ___read(bpf_core_read, &__t, \ + ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__)); +#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__) +#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___read_ptrs(src, ...) \ + ___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__) + +#define ___core_read0(fn, dst, src, a) \ + ___read(fn, dst, ___type(src), src, a); +#define ___core_readN(fn, dst, src, ...) \ + ___read_ptrs(src, ___nolast(__VA_ARGS__)) \ + ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \ + ___last(__VA_ARGS__)); +#define ___core_read(fn, dst, src, a, ...) 
\ + ___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst, \ + src, a, ##__VA_ARGS__) + +/* + * BPF_CORE_READ_INTO() is a more performance-conscious variant of + * BPF_CORE_READ(), in which final field is read into user-provided storage. + * See BPF_CORE_READ() below for more details on general usage. + */ +#define BPF_CORE_READ_INTO(dst, src, a, ...) \ + ({ \ + ___core_read(bpf_core_read, dst, (src), a, ##__VA_ARGS__) \ + }) + +/* + * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as + * BPF_CORE_READ() for intermediate pointers, but then executes (and returns + * corresponding error code) bpf_core_read_str() for final string read. + */ +#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \ + ({ \ + ___core_read(bpf_core_read_str, dst, (src), a, ##__VA_ARGS__)\ + }) + +/* + * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially + * when there are few pointer chasing steps. + * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like: + * int x = s->a.b.c->d.e->f->g; + * can be succinctly achieved using BPF_CORE_READ as: + * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g); + * + * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF + * CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to: + * 1. const void *__t = s->a.b.c; + * 2. __t = __t->d.e; + * 3. __t = __t->f; + * 4. return __t->g; + * + * Equivalence is logical, because there is a heavy type casting/preservation + * involved, as well as all the reads are happening through bpf_probe_read() + * calls using __builtin_preserve_access_index() to emit CO-RE relocations. + * + * N.B. Only up to 9 "field accessors" are supported, which should be more + * than enough for any practical purpose. + */ +#define BPF_CORE_READ(src, a, ...) \ + ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ + }) + +#endif + diff --git a/src/bpfswitch/include/bpf/bpf_endian.h b/src/bpfswitch/include/bpf/bpf_endian.h new file mode 100644 index 00000000..ec9db4fe --- /dev/null +++ b/src/bpfswitch/include/bpf/bpf_endian.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_ENDIAN__ +#define __BPF_ENDIAN__ + +/* + * Isolate byte #n and put it into byte #m, for __u##b type. + * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: + * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx + * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 + * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn + * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 + */ +#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) + +#define ___bpf_swab16(x) ((__u16)( \ + ___bpf_mvb(x, 16, 0, 1) | \ + ___bpf_mvb(x, 16, 1, 0))) + +#define ___bpf_swab32(x) ((__u32)( \ + ___bpf_mvb(x, 32, 0, 3) | \ + ___bpf_mvb(x, 32, 1, 2) | \ + ___bpf_mvb(x, 32, 2, 1) | \ + ___bpf_mvb(x, 32, 3, 0))) + +#define ___bpf_swab64(x) ((__u64)( \ + ___bpf_mvb(x, 64, 0, 7) | \ + ___bpf_mvb(x, 64, 1, 6) | \ + ___bpf_mvb(x, 64, 2, 5) | \ + ___bpf_mvb(x, 64, 3, 4) | \ + ___bpf_mvb(x, 64, 4, 3) | \ + ___bpf_mvb(x, 64, 5, 2) | \ + ___bpf_mvb(x, 64, 6, 1) | \ + ___bpf_mvb(x, 64, 7, 0))) + +/* LLVM's BPF target selects the endianness of the CPU + * it compiles on, or the user specifies (bpfel/bpfeb), + * respectively. 
The used __BYTE_ORDER__ is defined by + * the compiler, we cannot rely on __BYTE_ORDER from + * libc headers, since it doesn't reflect the actual + * requested byte order. + * + * Note, LLVM's BPF target has different __builtin_bswapX() + * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE + * in bpfel and bpfeb case, which means below, that we map + * to cpu_to_be16(). We could use it unconditionally in BPF + * case, but better not rely on it, so that this header here + * can be used from application and BPF program side, which + * use different targets. + */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +# define __bpf_constant_ntohs(x) ___bpf_swab16(x) +# define __bpf_constant_htons(x) ___bpf_swab16(x) +# define __bpf_ntohl(x) __builtin_bswap32(x) +# define __bpf_htonl(x) __builtin_bswap32(x) +# define __bpf_constant_ntohl(x) ___bpf_swab32(x) +# define __bpf_constant_htonl(x) ___bpf_swab32(x) +# define __bpf_be64_to_cpu(x) __builtin_bswap64(x) +# define __bpf_cpu_to_be64(x) __builtin_bswap64(x) +# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) +# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +# define __bpf_constant_ntohs(x) (x) +# define __bpf_constant_htons(x) (x) +# define __bpf_ntohl(x) (x) +# define __bpf_htonl(x) (x) +# define __bpf_constant_ntohl(x) (x) +# define __bpf_constant_htonl(x) (x) +# define __bpf_be64_to_cpu(x) (x) +# define __bpf_cpu_to_be64(x) (x) +# define __bpf_constant_be64_to_cpu(x) (x) +# define __bpf_constant_cpu_to_be64(x) (x) +#else +# error "Fix your compiler's __BYTE_ORDER__?!" +#endif + +#define bpf_htons(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_htons(x) : __bpf_htons(x)) +#define bpf_ntohs(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_ntohs(x) : __bpf_ntohs(x)) +#define bpf_htonl(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_htonl(x) : __bpf_htonl(x)) +#define bpf_ntohl(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_ntohl(x) : __bpf_ntohl(x)) +#define bpf_cpu_to_be64(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x)) +#define bpf_be64_to_cpu(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x)) + +#endif /* __BPF_ENDIAN__ */ diff --git a/src/bpfswitch/include/bpf/bpf_helper_defs.h b/src/bpfswitch/include/bpf/bpf_helper_defs.h new file mode 100644 index 00000000..1c5298d2 --- /dev/null +++ b/src/bpfswitch/include/bpf/bpf_helper_defs.h @@ -0,0 +1,3260 @@ +/* This is auto-generated file. See bpf_helpers_doc.py for details. */ + +/* Forward declarations of BPF structs */ +struct bpf_fib_lookup; +struct bpf_perf_event_data; +struct bpf_perf_event_value; +struct bpf_pidns_info; +struct bpf_sock; +struct bpf_sock_addr; +struct bpf_sock_ops; +struct bpf_sock_tuple; +struct bpf_spin_lock; +struct bpf_sysctl; +struct bpf_tcp_sock; +struct bpf_tunnel_key; +struct bpf_xfrm_state; +struct pt_regs; +struct sk_reuseport_md; +struct sockaddr; +struct tcphdr; +struct seq_file; +struct tcp6_sock; +struct tcp_sock; +struct tcp_timewait_sock; +struct tcp_request_sock; +struct udp6_sock; +struct task_struct; +struct __sk_buff; +struct sk_msg_md; +struct xdp_md; + +/* + * bpf_map_lookup_elem + * + * Perform a lookup in *map* for an entry associated to *key*. + * + * Returns + * Map value associated to *key*, or **NULL** if no entry was + * found. 
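 *
 * For illustration, the usual guarded pattern from BPF program code (the
 * map "counters" and the value type are assumed here, not defined by this
 * header)::
 *
 *	__u32 key = 0;
 *	__u64 *value;
 *
 *	value = bpf_map_lookup_elem(&counters, &key);
 *	if (!value)
 *		return XDP_PASS; // verifier requires the NULL check
 *	__sync_fetch_and_add(value, 1);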
+ */ +static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1; + +/* + * bpf_map_update_elem + * + * Add or update the value of the entry associated to *key* in + * *map* with *value*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * Flag value **BPF_NOEXIST** cannot be used for maps of types + * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all + * elements always exist), the helper would return an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; + +/* + * bpf_map_delete_elem + * + * Delete entry with *key* from *map*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_delete_elem)(void *map, const void *key) = (void *) 3; + +/* + * bpf_probe_read + * + * For tracing programs, safely attempt to read *size* bytes from + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use **bpf_probe_read_user**\ () or + * **bpf_probe_read_kernel**\ () instead. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 4; + +/* + * bpf_ktime_get_ns + * + * Return the time elapsed since system boot, in nanoseconds. + * Does not include time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) + * + * Returns + * Current *ktime*. + */ +static __u64 (*bpf_ktime_get_ns)(void) = (void *) 5; + +/* + * bpf_trace_printk + * + * This helper is a "printk()-like" facility for debugging. It + * prints a message defined by format *fmt* (of size *fmt_size*) + * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * available. It can take up to three additional **u64** + * arguments (as an eBPF helpers, the total number of arguments is + * limited to five). + * + * Each time the helper is called, it appends a line to the trace. + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. + * The format of the trace is customizable, and the exact output + * one will get depends on the options set in + * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *README* file under the same directory). However, it usually + * defaults to something like: + * + * :: + * + * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> + * + * In the above: + * + * * ``telnet`` is the name of the current task. + * * ``470`` is the PID of the current task. + * * ``001`` is the CPU number on which the task is + * running. + * * In ``.N..``, each character refers to a set of + * options (whether irqs are enabled, scheduling + * options, whether hard/softirqs are running, level of + * preempt_disabled respectively). **N** means that + * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** + * are set. + * * ``419421.045894`` is a timestamp. + * * ``0x00000001`` is a fake value used by BPF for the + * instruction pointer register. + * * ``<formatted msg>`` is the message formatted with + * *fmt*. + * + * The conversion specifiers supported by *fmt* are similar, but + * more limited than for printk(). 
They are **%d**, **%i**, + * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, + * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size + * of field, padding with zeroes, etc.) is available, and the + * helper will return **-EINVAL** (but print nothing) if it + * encounters an unknown specifier. + * + * Also, note that **bpf_trace_printk**\ () is slow, and should + * only be used for debugging purposes. For this reason, a notice + * bloc (spanning several lines) is printed to kernel logs and + * states that the helper should not be used "for production use" + * the first time this helper is used (or more precisely, when + * **trace_printk**\ () buffers are allocated). For passing values + * to user space, perf events should be preferred. + * + * Returns + * The number of bytes written to the buffer, or a negative error + * in case of failure. + */ +static long (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6; + +/* + * bpf_get_prandom_u32 + * + * Get a pseudo-random number. + * + * From a security point of view, this helper uses its own + * pseudo-random internal state, and cannot be used to infer the + * seed of other random functions in the kernel. However, it is + * essential to note that the generator used by the helper is not + * cryptographically secure. + * + * Returns + * A random 32-bit unsigned value. + */ +static __u32 (*bpf_get_prandom_u32)(void) = (void *) 7; + +/* + * bpf_get_smp_processor_id + * + * Get the SMP (symmetric multiprocessing) processor id. Note that + * all programs run with preemption disabled, which means that the + * SMP processor id is stable during all the execution of the + * program. + * + * Returns + * The SMP id of the processor running the program. + */ +static __u32 (*bpf_get_smp_processor_id)(void) = (void *) 8; + +/* + * bpf_skb_store_bytes + * + * Store *len* bytes from address *from* into the packet + * associated to *skb*, at *offset*. *flags* are a combination of + * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the + * checksum for the packet after storing the bytes) and + * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ + * **->swhash** and *skb*\ **->l4hash** to 0). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len, __u64 flags) = (void *) 9; + +/* + * bpf_l3_csum_replace + * + * Recompute the layer 3 (e.g. IP) checksum for the packet + * associated to *skb*. Computation is incremental, so the helper + * must know the former value of the header field that was + * modified (*from*), the new value of this field (*to*), and the + * number of bytes (2 or 4) for this field, stored in *size*. + * Alternatively, it is possible to store the difference between + * the previous and the new values of the header field in *to*, by + * setting *from* and *size* to 0. For both methods, *offset* + * indicates the location of the IP checksum within the packet. + * + * This helper works in combination with **bpf_csum_diff**\ (), + * which does not update the checksum in-place, but offers more + * flexibility and can handle sizes larger than 2 or 4 for the + * checksum to update. 
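 *
 * For illustration, rewriting the IPv4 destination address and patching
 * the checksum incrementally (a sketch; the offsets assume an untagged
 * Ethernet frame and the addresses are examples)::
 *
 *	const __u32 dst_off = ETH_HLEN + offsetof(struct iphdr, daddr);
 *	const __u32 csum_off = ETH_HLEN + offsetof(struct iphdr, check);
 *	__be32 old_ip, new_ip = bpf_htonl(0x0a000002); // 10.0.0.2
 *
 *	bpf_skb_load_bytes(skb, dst_off, &old_ip, sizeof(old_ip));
 *	bpf_skb_store_bytes(skb, dst_off, &new_ip, sizeof(new_ip), 0);
 *	bpf_l3_csum_replace(skb, csum_off, old_ip, new_ip, sizeof(new_ip));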
+ * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_l3_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 size) = (void *) 10; + +/* + * bpf_l4_csum_replace + * + * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the + * packet associated to *skb*. Computation is incremental, so the + * helper must know the former value of the header field that was + * modified (*from*), the new value of this field (*to*), and the + * number of bytes (2 or 4) for this field, stored on the lowest + * four bits of *flags*. Alternatively, it is possible to store + * the difference between the previous and the new values of the + * header field in *to*, by setting *from* and the four lowest + * bits of *flags* to 0. For both methods, *offset* indicates the + * location of the IP checksum within the packet. In addition to + * the size of the field, *flags* can be added (bitwise OR) actual + * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left + * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and + * for updates resulting in a null checksum the value is set to + * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates + * the checksum is to be computed against a pseudo-header. + * + * This helper works in combination with **bpf_csum_diff**\ (), + * which does not update the checksum in-place, but offers more + * flexibility and can handle sizes larger than 2 or 4 for the + * checksum to update. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_l4_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 flags) = (void *) 11; + +/* + * bpf_tail_call + * + * This special helper is used to trigger a "tail call", or in + * other words, to jump into another eBPF program. The same stack + * frame is used (but values on stack and in registers for the + * caller are not accessible to the callee). This mechanism allows + * for program chaining, either for raising the maximum number of + * available eBPF instructions, or to execute given programs in + * conditional blocks. For security reasons, there is an upper + * limit to the number of successive tail calls that can be + * performed. + * + * Upon call of this helper, the program attempts to jump into a + * program referenced at index *index* in *prog_array_map*, a + * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes + * *ctx*, a pointer to the context. + * + * If the call succeeds, the kernel immediately runs the first + * instruction of the new program. This is not a function call, + * and it never returns to the previous program. If the call + * fails, then the helper has no effect, and the caller continues + * to run its subsequent instructions. A call can fail if the + * destination program for the jump does not exist (i.e. 
*index* + * is superior to the number of entries in *prog_array_map*), or + * if the maximum number of tail calls has been reached for this + * chain of programs. This limit is defined in the kernel by the + * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), + * which is currently set to 32. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_tail_call)(void *ctx, void *prog_array_map, __u32 index) = (void *) 12; + +/* + * bpf_clone_redirect + * + * Clone and redirect the packet associated to *skb* to another + * net device of index *ifindex*. Both ingress and egress + * interfaces can be used for redirection. The **BPF_F_INGRESS** + * value in *flags* is used to make the distinction (ingress path + * is selected if the flag is present, egress path otherwise). + * This is the only flag supported for now. + * + * In comparison with **bpf_redirect**\ () helper, + * **bpf_clone_redirect**\ () has the associated cost of + * duplicating the packet buffer, but this can be executed out of + * the eBPF program. Conversely, **bpf_redirect**\ () is more + * efficient, but it is handled through an action code where the + * redirection happens only after the eBPF program has returned. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_clone_redirect)(struct __sk_buff *skb, __u32 ifindex, __u64 flags) = (void *) 13; + +/* + * bpf_get_current_pid_tgid + * + * + * Returns + * A 64-bit integer containing the current tgid and pid, and + * created as such: + * *current_task*\ **->tgid << 32 \|** + * *current_task*\ **->pid**. + */ +static __u64 (*bpf_get_current_pid_tgid)(void) = (void *) 14; + +/* + * bpf_get_current_uid_gid + * + * + * Returns + * A 64-bit integer containing the current GID and UID, and + * created as such: *current_gid* **<< 32 \|** *current_uid*. + */ +static __u64 (*bpf_get_current_uid_gid)(void) = (void *) 15; + +/* + * bpf_get_current_comm + * + * Copy the **comm** attribute of the current task into *buf* of + * *size_of_buf*. The **comm** attribute contains the name of + * the executable (excluding the path) for the current task. The + * *size_of_buf* must be strictly positive. On success, the + * helper makes sure that the *buf* is NUL-terminated. On failure, + * it is filled with zeroes. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_get_current_comm)(void *buf, __u32 size_of_buf) = (void *) 16; + +/* + * bpf_get_cgroup_classid + * + * Retrieve the classid for the current task, i.e. for the net_cls + * cgroup to which *skb* belongs. + * + * This helper can be used on TC egress path, but not on ingress. + * + * The net_cls cgroup provides an interface to tag network packets + * based on a user-provided identifier for all traffic coming from + * the tasks belonging to the related cgroup. See also the related + * kernel documentation, available from the Linux sources in file + * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. + * + * The Linux kernel has two versions for cgroups: there are + * cgroups v1 and cgroups v2. Both are available to users, who can + * use a mixture of them, but note that the net_cls cgroup is for + * cgroup v1 only. 
This makes it incompatible with BPF programs + * run on cgroups, which is a cgroup-v2-only feature (a socket can + * only hold data for one version of cgroups at a time). + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to + * "**y**" or to "**m**". + * + * Returns + * The classid, or 0 for the default unconfigured classid. + */ +static __u32 (*bpf_get_cgroup_classid)(struct __sk_buff *skb) = (void *) 17; + +/* + * bpf_skb_vlan_push + * + * Push a *vlan_tci* (VLAN tag control information) of protocol + * *vlan_proto* to the packet associated to *skb*, then update + * the checksum. Note that if *vlan_proto* is different from + * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to + * be **ETH_P_8021Q**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_vlan_push)(struct __sk_buff *skb, __be16 vlan_proto, __u16 vlan_tci) = (void *) 18; + +/* + * bpf_skb_vlan_pop + * + * Pop a VLAN header from the packet associated to *skb*. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_vlan_pop)(struct __sk_buff *skb) = (void *) 19; + +/* + * bpf_skb_get_tunnel_key + * + * Get tunnel metadata. This helper takes a pointer *key* to an + * empty **struct bpf_tunnel_key** of **size**, that will be + * filled with tunnel metadata for the packet associated to *skb*. + * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which + * indicates that the tunnel is based on IPv6 protocol instead of + * IPv4. + * + * The **struct bpf_tunnel_key** is an object that generalizes the + * principal parameters used by various tunneling protocols into a + * single struct. This way, it can be used to easily make a + * decision based on the contents of the encapsulation header, + * "summarized" in this struct. In particular, it holds the IP + * address of the remote end (IPv4 or IPv6, depending on the case) + * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, + * this struct exposes the *key*\ **->tunnel_id**, which is + * generally mapped to a VNI (Virtual Network Identifier), making + * it programmable together with the **bpf_skb_set_tunnel_key**\ + * () helper. 
+ * + * Let's imagine that the following code is part of a program + * attached to the TC ingress interface, on one end of a GRE + * tunnel, and is supposed to filter out all messages coming from + * remote ends with IPv4 address other than 10.0.0.1: + * + * :: + * + * int ret; + * struct bpf_tunnel_key key = {}; + * + * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + * if (ret < 0) + * return TC_ACT_SHOT; // drop packet + * + * if (key.remote_ipv4 != 0x0a000001) + * return TC_ACT_SHOT; // drop packet + * + * return TC_ACT_OK; // accept packet + * + * This interface can also be used with all encapsulation devices + * that can operate in "collect metadata" mode: instead of having + * one network device per specific configuration, the "collect + * metadata" mode only requires a single device where the + * configuration can be extracted from this helper. + * + * This can be used together with various tunnels such as VXLan, + * Geneve, GRE or IP in IP (IPIP). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_get_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 20; + +/* + * bpf_skb_set_tunnel_key + * + * Populate tunnel metadata for packet associated to *skb.* The + * tunnel metadata is set to the contents of *key*, of *size*. The + * *flags* can be set to a combination of the following values: + * + * **BPF_F_TUNINFO_IPV6** + * Indicate that the tunnel is based on IPv6 protocol + * instead of IPv4. + * **BPF_F_ZERO_CSUM_TX** + * For IPv4 packets, add a flag to tunnel metadata + * indicating that checksum computation should be skipped + * and checksum set to zeroes. + * **BPF_F_DONT_FRAGMENT** + * Add a flag to tunnel metadata indicating that the + * packet should not be fragmented. + * **BPF_F_SEQ_NUMBER** + * Add a flag to tunnel metadata indicating that a + * sequence number should be added to tunnel header before + * sending the packet. This flag was added for GRE + * encapsulation, but might be used with other protocols + * as well in the future. + * + * Here is a typical usage on the transmit path: + * + * :: + * + * struct bpf_tunnel_key key; + * populate key ... + * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); + * + * See also the description of the **bpf_skb_get_tunnel_key**\ () + * helper for additional information. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_set_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 21; + +/* + * bpf_perf_event_read + * + * Read the value of a perf event counter. This helper relies on a + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of + * the perf event counter is selected when *map* is updated with + * perf event file descriptors. The *map* is an array whose size + * is the number of available CPUs, and each cell contains a value + * relative to one CPU. The value to retrieve is indicated by + * *flags*, that contains the index of the CPU to look up, masked + * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to + * **BPF_F_CURRENT_CPU** to indicate that the value for the + * current CPU should be retrieved. + * + * Note that before Linux 4.13, only hardware perf event can be + * retrieved. + * + * Also, be aware that the newer helper + * **bpf_perf_event_read_value**\ () is recommended over + * **bpf_perf_event_read**\ () in general. 
The latter has some ABI + * quirks where error and counter value are used as a return code + * (which is wrong to do since ranges may overlap). This issue is + * fixed with **bpf_perf_event_read_value**\ (), which at the same + * time provides more features over the **bpf_perf_event_read**\ + * () interface. Please refer to the description of + * **bpf_perf_event_read_value**\ () for details. + * + * Returns + * The value of the perf event counter read from the map, or a + * negative error code in case of failure. + */ +static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) = (void *) 22; + +/* + * bpf_redirect + * + * Redirect the packet to another net device of index *ifindex*. + * This helper is somewhat similar to **bpf_clone_redirect**\ + * (), except that the packet is not cloned, which provides + * increased performance. + * + * Except for XDP, both ingress and egress interfaces can be used + * for redirection. The **BPF_F_INGRESS** value in *flags* is used + * to make the distinction (ingress path is selected if the flag + * is present, egress path otherwise). Currently, XDP only + * supports redirection to the egress interface, and accepts no + * flag at all. + * + * The same effect can also be attained with the more generic + * **bpf_redirect_map**\ (), which uses a BPF map to store the + * redirect target instead of providing it directly to the helper. + * + * Returns + * For XDP, the helper returns **XDP_REDIRECT** on success or + * **XDP_ABORTED** on error. For other program types, the values + * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on + * error. + */ +static long (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void *) 23; + +/* + * bpf_get_route_realm + * + * Retrieve the realm or the route, that is to say the + * **tclassid** field of the destination for the *skb*. The + * indentifier retrieved is a user-provided tag, similar to the + * one used with the net_cls cgroup (see description for + * **bpf_get_cgroup_classid**\ () helper), but here this tag is + * held by a route (a destination entry), not by a task. + * + * Retrieving this identifier works with the clsact TC egress hook + * (see also **tc-bpf(8)**), or alternatively on conventional + * classful egress qdiscs, but not on TC ingress path. In case of + * clsact TC egress hook, this has the advantage that, internally, + * the destination entry has not been dropped yet in the transmit + * path. Therefore, the destination entry does not need to be + * artificially held via **netif_keep_dst**\ () for a classful + * qdisc until the *skb* is freed. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_IP_ROUTE_CLASSID** configuration option. + * + * Returns + * The realm of the route for the packet associated to *skb*, or 0 + * if none was found. + */ +static __u32 (*bpf_get_route_realm)(struct __sk_buff *skb) = (void *) 24; + +/* + * bpf_perf_event_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. 
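 *
 * For illustration, streaming a small sample from an XDP program, where
 * *ctx* is the **struct xdp_md** context (the "events" map and the sample
 * struct are assumed here)::
 *
 *	struct sample {
 *		__u32 ifindex;
 *		__u32 pkt_len;
 *	} s = {
 *		.ifindex = ctx->ingress_ifindex,
 *		.pkt_len = ctx->data_end - ctx->data,
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &s, sizeof(s));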
+ * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * The context of the program *ctx* needs also be passed to the + * helper. + * + * On user space, a program willing to read the values needs to + * call **perf_event_open**\ () on the perf event (either for + * one or for all CPUs) and to store the file descriptor into the + * *map*. This must be done before the eBPF program can send data + * into it. An example is available in file + * *samples/bpf/trace_output_user.c* in the Linux kernel source + * tree (the eBPF program counterpart is in + * *samples/bpf/trace_output_kern.c*). + * + * **bpf_perf_event_output**\ () achieves better performance + * than **bpf_trace_printk**\ () for sharing data with user + * space, and is much better suitable for streaming data from eBPF + * programs. + * + * Note that this helper is not restricted to tracing use cases + * and can be used with programs attached to TC or XDP as well, + * where it allows for passing data to user space listeners. Data + * can be: + * + * * Only custom structs, + * * Only the packet payload, or + * * A combination of both. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 25; + +/* + * bpf_skb_load_bytes + * + * This helper was provided as an easy way to load data from a + * packet. It can be used to load *len* bytes from *offset* from + * the packet associated to *skb*, into the buffer pointed by + * *to*. + * + * Since Linux 4.7, usage of this helper has mostly been replaced + * by "direct packet access", enabling packet data to be + * manipulated with *skb*\ **->data** and *skb*\ **->data_end** + * pointing respectively to the first byte of packet data and to + * the byte after the last byte of packet data. However, it + * remains useful if one wishes to read large quantities of data + * at once from a packet into the eBPF stack. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_load_bytes)(const void *skb, __u32 offset, void *to, __u32 len) = (void *) 26; + +/* + * bpf_get_stackid + * + * Walk a user or a kernel stack and return its id. To achieve + * this, the helper needs *ctx*, which is a pointer to the context + * on which the tracing program is executed, and a pointer to a + * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * a combination of the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_FAST_STACK_CMP** + * Compare stacks by hash only. + * **BPF_F_REUSE_STACKID** + * If two different stacks hash into the same *stackid*, + * discard the old one. + * + * The stack id retrieved is a 32 bit long integer handle which + * can be further combined with other data (including other stack + * ids) and used as a key into maps. This can be useful for + * generating a variety of graphs (such as flame graphs or off-cpu + * graphs). + * + * For walking a stack, this helper is an improvement over + * **bpf_probe_read**\ (), which can be used with unrolled loops + * but is not efficient and consumes a lot of eBPF instructions. + * Instead, **bpf_get_stackid**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames. 
Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack=<new value> + * + * Returns + * The positive or null stack id on success, or a negative error + * in case of failure. + */ +static long (*bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void *) 27; + +/* + * bpf_csum_diff + * + * Compute a checksum difference, from the raw buffer pointed by + * *from*, of length *from_size* (that must be a multiple of 4), + * towards the raw buffer pointed by *to*, of size *to_size* + * (same remark). An optional *seed* can be added to the value + * (this can be cascaded, the seed may come from a previous call + * to the helper). + * + * This is flexible enough to be used in several ways: + * + * * With *from_size* == 0, *to_size* > 0 and *seed* set to + * checksum, it can be used when pushing new data. + * * With *from_size* > 0, *to_size* == 0 and *seed* set to + * checksum, it can be used when removing data from a packet. + * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it + * can be used to compute a diff. Note that *from_size* and + * *to_size* do not need to be equal. + * + * This helper can be used in combination with + * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to + * which one can feed in the difference computed with + * **bpf_csum_diff**\ (). + * + * Returns + * The checksum result, or a negative error code in case of + * failure. + */ +static __s64 (*bpf_csum_diff)(__be32 *from, __u32 from_size, __be32 *to, __u32 to_size, __wsum seed) = (void *) 28; + +/* + * bpf_skb_get_tunnel_opt + * + * Retrieve tunnel options metadata for the packet associated to + * *skb*, and store the raw tunnel option data to the buffer *opt* + * of *size*. + * + * This helper can be used with encapsulation devices that can + * operate in "collect metadata" mode (please refer to the related + * note in the description of **bpf_skb_get_tunnel_key**\ () for + * more details). A particular example where this can be used is + * in combination with the Geneve encapsulation protocol, where it + * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) + * and retrieving arbitrary TLVs (Type-Length-Value headers) from + * the eBPF program. This allows for full customization of these + * headers. + * + * Returns + * The size of the option data retrieved. + */ +static long (*bpf_skb_get_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 29; + +/* + * bpf_skb_set_tunnel_opt + * + * Set tunnel options metadata for the packet associated to *skb* + * to the option data contained in the raw buffer *opt* of *size*. + * + * See also the description of the **bpf_skb_get_tunnel_opt**\ () + * helper for additional information. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_set_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 30; + +/* + * bpf_skb_change_proto + * + * Change the protocol of the *skb* to *proto*. Currently + * supported are transition from IPv4 to IPv6, and from IPv6 to + * IPv4. The helper takes care of the groundwork for the + * transition, including resizing the socket buffer. 
The eBPF + * program is expected to fill the new headers, if any, via + * **skb_store_bytes**\ () and to recompute the checksums with + * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ + * (). The main case for this helper is to perform NAT64 + * operations out of an eBPF program. + * + * Internally, the GSO type is marked as dodgy so that headers are + * checked and segments are recalculated by the GSO/GRO engine. + * The size for GSO target is adapted as well. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_proto)(struct __sk_buff *skb, __be16 proto, __u64 flags) = (void *) 31; + +/* + * bpf_skb_change_type + * + * Change the packet type for the packet associated to *skb*. This + * comes down to setting *skb*\ **->pkt_type** to *type*, except + * the eBPF program does not have a write access to *skb*\ + * **->pkt_type** beside this helper. Using a helper here allows + * for graceful handling of errors. + * + * The major use case is to change incoming *skb*s to + * **PACKET_HOST** in a programmatic way instead of having to + * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for + * example. + * + * Note that *type* only allows certain values. At this time, they + * are: + * + * **PACKET_HOST** + * Packet is for us. + * **PACKET_BROADCAST** + * Send packet to all. + * **PACKET_MULTICAST** + * Send packet to group. + * **PACKET_OTHERHOST** + * Send packet to someone else. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_type)(struct __sk_buff *skb, __u32 type) = (void *) 32; + +/* + * bpf_skb_under_cgroup + * + * Check whether *skb* is a descendant of the cgroup2 held by + * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. + * + * Returns + * The return value depends on the result of the test, and can be: + * + * * 0, if the *skb* failed the cgroup2 descendant test. + * * 1, if the *skb* succeeded the cgroup2 descendant test. + * * A negative error code, if an error occurred. + */ +static long (*bpf_skb_under_cgroup)(struct __sk_buff *skb, void *map, __u32 index) = (void *) 33; + +/* + * bpf_get_hash_recalc + * + * Retrieve the hash of the packet, *skb*\ **->hash**. If it is + * not set, in particular if the hash was cleared due to mangling, + * recompute this hash. Later accesses to the hash can be done + * directly with *skb*\ **->hash**. + * + * Calling **bpf_set_hash_invalid**\ (), changing a packet + * prototype with **bpf_skb_change_proto**\ (), or calling + * **bpf_skb_store_bytes**\ () with the + * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear + * the hash and to trigger a new computation for the next call to + * **bpf_get_hash_recalc**\ (). + * + * Returns + * The 32-bit hash. + */ +static __u32 (*bpf_get_hash_recalc)(struct __sk_buff *skb) = (void *) 34; + +/* + * bpf_get_current_task + * + * + * Returns + * A pointer to the current task struct. + */ +static __u64 (*bpf_get_current_task)(void) = (void *) 35; + +/* + * bpf_probe_write_user + * + * Attempt in a safe way to write *len* bytes from the buffer + * *src* to *dst* in memory. 
It only works for threads that are in
+ * user context, and *dst* must be a valid user space address.
+ *
+ * This helper should not be used to implement any kind of
+ * security mechanism because of TOC-TOU attacks, but rather to
+ * debug, divert, and manipulate execution of semi-cooperative
+ * processes.
+ *
+ * Keep in mind that this feature is meant for experiments, and it
+ * has a risk of crashing the system and running programs.
+ * Therefore, when an eBPF program using this helper is attached,
+ * a warning including PID and process name is printed to kernel
+ * logs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_write_user)(void *dst, const void *src, __u32 len) = (void *) 36;
+
+/*
+ * bpf_current_task_under_cgroup
+ *
+ * Check whether the probe is being run in the context of a given
+ * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
+ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
+ *
+ * Returns
+ * The return value depends on the result of the test, and can be:
+ *
+ * * 1, if current task belongs to the cgroup2.
+ * * 0, if current task does not belong to the cgroup2.
+ * * A negative error code, if an error occurred.
+ */
+static long (*bpf_current_task_under_cgroup)(void *map, __u32 index) = (void *) 37;
+
+/*
+ * bpf_skb_change_tail
+ *
+ * Resize (trim or grow) the packet associated to *skb* to the
+ * new *len*. The *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * The basic idea is that the helper performs the needed work to
+ * change the size of the packet, then the eBPF program rewrites
+ * the rest via helpers like **bpf_skb_store_bytes**\ (),
+ * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
+ * and others. This helper is a slow path utility intended for
+ * replies with control messages. And because it is targeted for
+ * slow path, the helper itself can afford to be slow: it
+ * implicitly linearizes, unclones and drops offloads from the
+ * *skb*.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 38;
+
+/*
+ * bpf_skb_pull_data
+ *
+ * Pull in non-linear data in case the *skb* is non-linear and not
+ * all of *len* are part of the linear section. Make *len* bytes
+ * from *skb* readable and writable. If a zero value is passed for
+ * *len*, then the whole length of the *skb* is pulled.
+ *
+ * This helper is only needed for reading and writing with direct
+ * packet access.
+ *
+ * For direct packet access, testing that offsets to access
+ * are within packet boundaries (test on *skb*\ **->data_end**) is
+ * susceptible to fail if offsets are invalid, or if the requested
+ * data is in non-linear parts of the *skb*. On failure the
+ * program can just bail out, or in the case of a non-linear
+ * buffer, use a helper to make the data available. The
+ * **bpf_skb_load_bytes**\ () helper is a first solution to access
+ * the data. Another one consists in using **bpf_skb_pull_data**
+ * to pull in the non-linear parts once, then retest and
+ * eventually access the data, as sketched below.
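+ *
+ * A possible shape of that retest pattern on TC (the
+ * Ethernet-sized access is illustrative only):
+ *
+ * ::
+ *
+ *     void *data = (void *)(long)skb->data;
+ *     void *data_end = (void *)(long)skb->data_end;
+ *
+ *     if (data + ETH_HLEN > data_end) {
+ *         if (bpf_skb_pull_data(skb, ETH_HLEN) < 0)
+ *             return TC_ACT_SHOT; // give up
+ *         // pointers were invalidated: reload and retest
+ *         data = (void *)(long)skb->data;
+ *         data_end = (void *)(long)skb->data_end;
+ *         if (data + ETH_HLEN > data_end)
+ *             return TC_ACT_SHOT;
+ *     }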
+ * + * At the same time, this also makes sure the *skb* is uncloned, + * which is a necessary condition for direct write. As this needs + * to be an invariant for the write part only, the verifier + * detects writes and adds a prologue that is calling + * **bpf_skb_pull_data()** to effectively unclone the *skb* from + * the very beginning in case it is indeed cloned. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) = (void *) 39; + +/* + * bpf_csum_update + * + * Add the checksum *csum* into *skb*\ **->csum** in case the + * driver has supplied a checksum for the entire packet into that + * field. Return an error otherwise. This helper is intended to be + * used in combination with **bpf_csum_diff**\ (), in particular + * when the checksum needs to be updated after data has been + * written into the packet through direct packet access. + * + * Returns + * The checksum on success, or a negative error code in case of + * failure. + */ +static __s64 (*bpf_csum_update)(struct __sk_buff *skb, __wsum csum) = (void *) 40; + +/* + * bpf_set_hash_invalid + * + * Invalidate the current *skb*\ **->hash**. It can be used after + * mangling on headers through direct packet access, in order to + * indicate that the hash is outdated and to trigger a + * recalculation the next time the kernel tries to access this + * hash or when the **bpf_get_hash_recalc**\ () helper is called. + * + */ +static void (*bpf_set_hash_invalid)(struct __sk_buff *skb) = (void *) 41; + +/* + * bpf_get_numa_node_id + * + * Return the id of the current NUMA node. The primary use case + * for this helper is the selection of sockets for the local NUMA + * node, when the program is attached to sockets using the + * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), + * but the helper is also available to other eBPF program types, + * similarly to **bpf_get_smp_processor_id**\ (). + * + * Returns + * The id of current NUMA node. + */ +static long (*bpf_get_numa_node_id)(void) = (void *) 42; + +/* + * bpf_skb_change_head + * + * Grows headroom of packet associated to *skb* and adjusts the + * offset of the MAC header accordingly, adding *len* bytes of + * space. It automatically extends and reallocates memory as + * required. + * + * This helper can be used on a layer 3 *skb* to push a MAC header + * for redirection into a layer 2 device. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_head)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 43; + +/* + * bpf_xdp_adjust_head + * + * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that + * it is possible to use a negative value for *delta*. This helper + * can be used to prepare the packet for pushing or popping + * headers. 
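+ *
+ * As a brief sketch (not a complete program), popping an outer
+ * Ethernet header whose presence was validated earlier could
+ * look like:
+ *
+ * ::
+ *
+ *     if (bpf_xdp_adjust_head(xdp_md, (int)sizeof(struct ethhdr)))
+ *         return XDP_DROP;
+ *     // data and data_end must be reloaded and rechecked here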
+ * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_head)(struct xdp_md *xdp_md, int delta) = (void *) 44; + +/* + * bpf_probe_read_str + * + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for + * more details. + * + * Generally, use **bpf_probe_read_user_str**\ () or + * **bpf_probe_read_kernel_str**\ () instead. + * + * Returns + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_probe_read_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 45; + +/* + * bpf_get_socket_cookie + * + * If the **struct sk_buff** pointed by *skb* has a known socket, + * retrieve the cookie (generated by the kernel) of this socket. + * If no cookie has been set yet, generate a new cookie. Once + * generated, the socket cookie remains stable for the life of the + * socket. This helper can be useful for monitoring per socket + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. + * + * Returns + * A 8-byte long non-decreasing number on success, or 0 if the + * socket field is missing inside *skb*. + */ +static __u64 (*bpf_get_socket_cookie)(void *ctx) = (void *) 46; + +/* + * bpf_get_socket_uid + * + * + * Returns + * The owner UID of the socket associated to *skb*. If the socket + * is **NULL**, or if it is not a full socket (i.e. if it is a + * time-wait or a request socket instead), **overflowuid** value + * is returned (note that **overflowuid** might also be the actual + * UID value for the socket). + */ +static __u32 (*bpf_get_socket_uid)(struct __sk_buff *skb) = (void *) 47; + +/* + * bpf_set_hash + * + * Set the full hash for *skb* (set the field *skb*\ **->hash**) + * to value *hash*. + * + * Returns + * 0 + */ +static long (*bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *) 48; + +/* + * bpf_setsockopt + * + * Emulate a call to **setsockopt()** on the socket associated to + * *bpf_socket*, which must be a full socket. The *level* at + * which the option resides and the name *optname* of the option + * must be specified, see **setsockopt(2)** for more information. + * The option value of length *optlen* is pointed by *optval*. + * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * + * This helper actually implements a subset of **setsockopt()**. + * It supports the following *level*\ s: + * + * * **SOL_SOCKET**, which supports the following *optname*\ s: + * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. + * * **IPPROTO_TCP**, which supports the following *optname*\ s: + * **TCP_CONGESTION**, **TCP_BPF_IW**, + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. + * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 
+ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 49; + +/* + * bpf_skb_adjust_room + * + * Grow or shrink the room for data in the packet associated to + * *skb* by *len_diff*, and according to the selected *mode*. + * + * By default, the helper will reset any offloaded checksum + * indicator of the skb to CHECKSUM_NONE. This can be avoided + * by the following flag: + * + * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded + * checksum data of the skb to CHECKSUM_NONE. + * + * There are two supported modes at this time: + * + * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer + * (room space is added or removed below the layer 2 header). + * + * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer + * (room space is added or removed below the layer 3 header). + * + * The following flags are supported at this time: + * + * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. + * Adjusting mss in this way is not allowed for datagrams. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: + * Any new space is reserved to hold a tunnel header. + * Configure skb offsets and other fields accordingly. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, + * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: + * Use with ENCAP_L3 flags to further specify the tunnel type. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): + * Use with ENCAP_L3/L4 flags to further specify the tunnel + * type; *len* is the length of the inner MAC header. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff, __u32 mode, __u64 flags) = (void *) 50; + +/* + * bpf_redirect_map + * + * Redirect the packet to the endpoint referenced by *map* at + * index *key*. Depending on its type, this *map* can contain + * references to net devices (for forwarding packets through other + * ports), or to CPUs (for redirecting XDP frames to another CPU; + * but this is only implemented for native XDP (with driver + * support) as of this writing). + * + * The lower two bits of *flags* are used as the return code if + * the map lookup fails. This is so that the return value can be + * one of the XDP program return codes up to **XDP_TX**, as chosen + * by the caller. Any higher bits in the *flags* argument must be + * unset. + * + * See also **bpf_redirect**\ (), which only supports redirecting + * to an ifindex, but doesn't require a map to do so. + * + * Returns + * **XDP_REDIRECT** on success, or the value of the two lower bits + * of the *flags* argument on error. + */ +static long (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) = (void *) 51; + +/* + * bpf_sk_redirect_map + * + * Redirect the packet to the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). 
This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_sk_redirect_map)(struct __sk_buff *skb, void *map, __u32 key, __u64 flags) = (void *) 52; + +/* + * bpf_sock_map_update + * + * Add an entry to, or update a *map* referencing sockets. The + * *skops* is used as a new value for the entry associated to + * *key*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * If the *map* has eBPF programs (parser and verdict), those will + * be inherited by the socket being added. If the socket is + * already attached to eBPF programs, this results in an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sock_map_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 53; + +/* + * bpf_xdp_adjust_meta + * + * Adjust the address pointed by *xdp_md*\ **->data_meta** by + * *delta* (which can be positive or negative). Note that this + * operation modifies the address stored in *xdp_md*\ **->data**, + * so the latter must be loaded only after the helper has been + * called. + * + * The use of *xdp_md*\ **->data_meta** is optional and programs + * are not required to use it. The rationale is that when the + * packet is processed with XDP (e.g. as DoS filter), it is + * possible to push further meta data along with it before passing + * to the stack, and to give the guarantee that an ingress eBPF + * program attached as a TC classifier on the same device can pick + * this up for further post-processing. Since TC works with socket + * buffers, it remains possible to set from XDP the **mark** or + * **priority** pointers, or other pointers for the socket buffer. + * Having this scratch space generic and programmable allows for + * more flexibility as the user is free to store whatever meta + * data they need. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_meta)(struct xdp_md *xdp_md, int delta) = (void *) 54; + +/* + * bpf_perf_event_read_value + * + * Read the value of a perf event counter, and store it into *buf* + * of size *buf_size*. This helper relies on a *map* of type + * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event + * counter is selected when *map* is updated with perf event file + * descriptors. The *map* is an array whose size is the number of + * available CPUs, and each cell contains a value relative to one + * CPU. The value to retrieve is indicated by *flags*, that + * contains the index of the CPU to look up, masked with + * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to + * **BPF_F_CURRENT_CPU** to indicate that the value for the + * current CPU should be retrieved. + * + * This helper behaves in a way close to + * **bpf_perf_event_read**\ () helper, save that instead of + * just returning the value observed, it fills the *buf* + * structure. 
This allows for additional data to be retrieved: in
+ * particular, the enabled and running times (in *buf*\
+ * **->enabled** and *buf*\ **->running**, respectively) are
+ * copied. In general, **bpf_perf_event_read_value**\ () is
+ * recommended over **bpf_perf_event_read**\ (), which has some
+ * ABI issues and provides fewer functionalities.
+ *
+ * These values are interesting, because hardware PMU (Performance
+ * Monitoring Unit) counters are limited resources. When there are
+ * more PMU based perf events opened than available counters, the
+ * kernel will multiplex these events so each event gets a certain
+ * percentage (but not all) of the PMU time. When multiplexing
+ * happens, the number of samples or the counter value will not
+ * reflect what they would be without multiplexing. This makes
+ * comparison between different runs difficult. Typically, the
+ * counter value should be normalized before comparing to other
+ * experiments. The usual normalization is done as follows.
+ *
+ * ::
+ *
+ *     normalized_counter = counter * t_enabled / t_running
+ *
+ * where *t_enabled* is the time enabled for the event and
+ * *t_running* is the time running for the event since the last
+ * normalization. The enabled and running times are accumulated
+ * since the perf event open. To compute the scaling factor
+ * between two invocations of an eBPF program, users can use the
+ * CPU id as the key (which is typical for the perf array usage
+ * model) to remember the previous value and do the calculation
+ * inside the eBPF program.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 55;
+
+/*
+ * bpf_perf_prog_read_value
+ *
+ * For an eBPF program attached to a perf event, retrieve the
+ * value of the event counter associated to *ctx* and store it in
+ * the structure pointed by *buf* and of size *buf_size*. Enabled
+ * and running times are also stored in the structure (see
+ * description of helper **bpf_perf_event_read_value**\ () for
+ * more details).
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 56;
+
+/*
+ * bpf_getsockopt
+ *
+ * Emulate a call to **getsockopt()** on the socket associated to
+ * *bpf_socket*, which must be a full socket. The *level* at
+ * which the option resides and the name *optname* of the option
+ * must be specified, see **getsockopt(2)** for more information.
+ * The retrieved value is stored in the structure pointed by
+ * *optval* and of length *optlen*.
+ *
+ * *bpf_socket* should be one of the following:
+ *
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
+ * This helper actually implements a subset of **getsockopt()**.
+ * It supports the following *level*\ s:
+ *
+ * * **IPPROTO_TCP**, which supports *optname*
+ * **TCP_CONGESTION**.
+ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
+ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */ +static long (*bpf_getsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 57; + +/* + * bpf_override_return + * + * Used for error injection, this helper uses kprobes to override + * the return value of the probed function, and to set it to *rc*. + * The first argument is the context *regs* on which the kprobe + * works. + * + * This helper works by setting the PC (program counter) + * to an override function which is run in place of the original + * probed function. This means the probed function is not run at + * all. The replacement function just returns with the required + * value. + * + * This helper has security implications, and thus is subject to + * restrictions. It is only available if the kernel was compiled + * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration + * option, and in this case it only works on functions tagged with + * **ALLOW_ERROR_INJECTION** in the kernel code. + * + * Also, the helper is only available for the architectures having + * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, + * x86 architecture is the only one to support this feature. + * + * Returns + * 0 + */ +static long (*bpf_override_return)(struct pt_regs *regs, __u64 rc) = (void *) 58; + +/* + * bpf_sock_ops_cb_flags_set + * + * Attempt to set the value of the **bpf_sock_ops_cb_flags** field + * for the full TCP socket associated to *bpf_sock_ops* to + * *argval*. + * + * The primary use of this field is to determine if there should + * be calls to eBPF programs of type + * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP + * code. A program of the same type can change its value, per + * connection and as necessary, when the connection is + * established. This field is directly accessible for reading, but + * this helper must be used for updates in order to return an + * error if an eBPF program tries to set a callback that is not + * supported in the current kernel. + * + * *argval* is a flag array which can combine these flags: + * + * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) + * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) + * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) + * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) + * + * Therefore, this function can be used to clear a callback flag by + * setting the appropriate bit to zero. e.g. to disable the RTO + * callback: + * + * **bpf_sock_ops_cb_flags_set(bpf_sock,** + * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** + * + * Here are some examples of where one could call such eBPF + * program: + * + * * When RTO fires. + * * When a packet is retransmitted. + * * When the connection terminates. + * * When a packet is sent. + * * When a packet is received. + * + * Returns + * Code **-EINVAL** if the socket is not a full TCP socket; + * otherwise, a positive number containing the bits that could not + * be set is returned (which comes down to 0 if all bits were set + * as required). + */ +static long (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *bpf_sock, int argval) = (void *) 59; + +/* + * bpf_msg_redirect_map + * + * This helper is used in programs implementing policies at the + * socket level. If the message *msg* is allowed to pass (i.e. if + * the verdict eBPF program returns **SK_PASS**), redirect it to + * the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and + * egress interfaces can be used for redirection. 
The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_msg_redirect_map)(struct sk_msg_md *msg, void *map, __u32 key, __u64 flags) = (void *) 60; + +/* + * bpf_msg_apply_bytes + * + * For socket policies, apply the verdict of the eBPF program to + * the next *bytes* (number of bytes) of message *msg*. + * + * For example, this helper can be used in the following cases: + * + * * A single **sendmsg**\ () or **sendfile**\ () system call + * contains multiple logical messages that the eBPF program is + * supposed to read and for which it should apply a verdict. + * * An eBPF program only cares to read the first *bytes* of a + * *msg*. If the message has a large payload, then setting up + * and calling the eBPF program repeatedly for all bytes, even + * though the verdict is already known, would create unnecessary + * overhead. + * + * When called from within an eBPF program, the helper sets a + * counter internal to the BPF infrastructure, that is used to + * apply the last verdict to the next *bytes*. If *bytes* is + * smaller than the current data being processed from a + * **sendmsg**\ () or **sendfile**\ () system call, the first + * *bytes* will be sent and the eBPF program will be re-run with + * the pointer for start of data pointing to byte number *bytes* + * **+ 1**. If *bytes* is larger than the current data being + * processed, then the eBPF verdict will be applied to multiple + * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are + * consumed. + * + * Note that if a socket closes with the internal counter holding + * a non-zero value, this is not a problem because data is not + * being buffered for *bytes* and is sent as it is received. + * + * Returns + * 0 + */ +static long (*bpf_msg_apply_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 61; + +/* + * bpf_msg_cork_bytes + * + * For socket policies, prevent the execution of the verdict eBPF + * program for message *msg* until *bytes* (byte number) have been + * accumulated. + * + * This can be used when one needs a specific number of bytes + * before a verdict can be assigned, even if the data spans + * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme + * case would be a user calling **sendmsg**\ () repeatedly with + * 1-byte long message segments. Obviously, this is bad for + * performance, but it is still valid. If the eBPF program needs + * *bytes* bytes to validate a header, this helper can be used to + * prevent the eBPF program to be called again until *bytes* have + * been accumulated. + * + * Returns + * 0 + */ +static long (*bpf_msg_cork_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 62; + +/* + * bpf_msg_pull_data + * + * For socket policies, pull in non-linear data from user space + * for *msg* and set pointers *msg*\ **->data** and *msg*\ + * **->data_end** to *start* and *end* bytes offsets into *msg*, + * respectively. + * + * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a + * *msg* it can only parse data that the (**data**, **data_end**) + * pointers have already consumed. For **sendmsg**\ () hooks this + * is likely the first scatterlist element. But for calls relying + * on the **sendpage** handler (e.g. 
**sendfile**\ ()) this will + * be the range (**0**, **0**) because the data is shared with + * user space and by default the objective is to avoid allowing + * user space to modify data while (or after) eBPF verdict is + * being decided. This helper can be used to pull in data and to + * set the start and end pointer to given values. Data will be + * copied if necessary (i.e. if data was not linear and if start + * and end pointers do not point to the same chunk). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_msg_pull_data)(struct sk_msg_md *msg, __u32 start, __u32 end, __u64 flags) = (void *) 63; + +/* + * bpf_bind + * + * Bind the socket associated to *ctx* to the address pointed by + * *addr*, of length *addr_len*. This allows for making outgoing + * connection from the desired IP address, which can be useful for + * example when all processes inside a cgroup should use one + * single IP address on a host that has multiple IP configured. + * + * This helper works for IPv4 and IPv6, TCP and UDP sockets. The + * domain (*addr*\ **->sa_family**) must be **AF_INET** (or + * **AF_INET6**). It's advised to pass zero port (**sin_port** + * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like + * behavior and lets the kernel efficiently pick up an unused + * port as long as 4-tuple is unique. Passing non-zero port might + * lead to degraded performance. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_bind)(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) = (void *) 64; + +/* + * bpf_xdp_adjust_tail + * + * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is + * possible to both shrink and grow the packet tail. + * Shrink done via *delta* being a negative integer. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_tail)(struct xdp_md *xdp_md, int delta) = (void *) 65; + +/* + * bpf_skb_get_xfrm_state + * + * Retrieve the XFRM state (IP transform framework, see also + * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. + * + * The retrieved value is stored in the **struct bpf_xfrm_state** + * pointed by *xfrm_state* and of length *size*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_XFRM** configuration option. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct bpf_xfrm_state *xfrm_state, __u32 size, __u64 flags) = (void *) 66; + +/* + * bpf_get_stack + * + * Return a user or a kernel stack in bpf program provided buffer. 
+ * To achieve this, the helper needs *ctx*, which is a pointer + * to the context on which the tracing program is executed. + * To store the stacktrace, the bpf program provides *buf* with + * a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack=<new value> + * + * Returns + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. + */ +static long (*bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void *) 67; + +/* + * bpf_skb_load_bytes_relative + * + * This helper is similar to **bpf_skb_load_bytes**\ () in that + * it provides an easy way to load *len* bytes from *offset* + * from the packet associated to *skb*, into the buffer pointed + * by *to*. The difference to **bpf_skb_load_bytes**\ () is that + * a fifth argument *start_header* exists in order to select a + * base offset to start from. *start_header* can be one of: + * + * **BPF_HDR_START_MAC** + * Base offset to load data from is *skb*'s mac header. + * **BPF_HDR_START_NET** + * Base offset to load data from is *skb*'s network header. + * + * In general, "direct packet access" is the preferred method to + * access packet data, however, this helper is in particular useful + * in socket filters where *skb*\ **->data** does not always point + * to the start of the mac header and where "direct packet access" + * is not available. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void *to, __u32 len, __u32 start_header) = (void *) 68; + +/* + * bpf_fib_lookup + * + * Do FIB lookup in kernel tables using parameters in *params*. + * If lookup is successful and result shows packet is to be + * forwarded, the neighbor tables are searched for the nexthop. + * If successful (ie., FIB lookup shows forwarding and nexthop + * is resolved), the nexthop address is returned in ipv4_dst + * or ipv6_dst based on family, smac is set to mac address of + * egress device, dmac is set to nexthop mac address, rt_metric + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. + * + * *plen* argument is the size of the passed in struct. + * *flags* argument can be a combination of one or more of the + * following values: + * + * **BPF_FIB_LOOKUP_DIRECT** + * Do a direct table lookup vs full lookup using FIB + * rules. + * **BPF_FIB_LOOKUP_OUTPUT** + * Perform lookup from an egress perspective (default is + * ingress). + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** tc cls_act programs. 
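+ *
+ * A minimal XDP sketch (the parsed IPv4 header *iph* and the
+ * shown field setup are assumptions of this example; a real
+ * program fills in more of the struct):
+ *
+ * ::
+ *
+ *     struct bpf_fib_lookup fib = {};
+ *     long rc;
+ *
+ *     fib.family = AF_INET;
+ *     fib.ipv4_dst = iph->daddr;
+ *     fib.ifindex = ctx->ingress_ifindex;
+ *     rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
+ *     if (rc == 0) {
+ *         // rewrite MACs from fib.smac/fib.dmac, then:
+ *         return bpf_redirect(fib.ifindex, 0);
+ *     }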
+ *
+ * Returns
+ * * < 0 if any input argument is invalid
+ * * 0 on success (packet is forwarded, nexthop neighbor exists)
+ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ * packet is not forwarded or needs assist from full stack
+ */
+static long (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, int plen, __u32 flags) = (void *) 69;
+
+/*
+ * bpf_sock_hash_update
+ *
+ * Add an entry to, or update a sockhash *map* referencing sockets.
+ * The *skops* is used as a new value for the entry associated to
+ * *key*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * If the *map* has eBPF programs (parser and verdict), those will
+ * be inherited by the socket being added. If the socket is
+ * already attached to eBPF programs, this results in an error.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sock_hash_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 70;
+
+/*
+ * bpf_msg_redirect_hash
+ *
+ * This helper is used in programs implementing policies at the
+ * socket level. If the message *msg* is allowed to pass (i.e. if
+ * the verdict eBPF program returns **SK_PASS**), redirect it to
+ * the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ *
+ * Returns
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ */
+static long (*bpf_msg_redirect_hash)(struct sk_msg_md *msg, void *map, void *key, __u64 flags) = (void *) 71;
+
+/*
+ * bpf_sk_redirect_hash
+ *
+ * This helper is used in programs implementing policies at the
+ * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
+ * to the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress otherwise). This is the only flag supported for now.
+ *
+ * Returns
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ */
+static long (*bpf_sk_redirect_hash)(struct __sk_buff *skb, void *map, void *key, __u64 flags) = (void *) 72;
+
+/*
+ * bpf_lwt_push_encap
+ *
+ * Encapsulate the packet associated to *skb* within a Layer 3
+ * protocol header. This header is provided in the buffer at
+ * address *hdr*, with *len* its size in bytes. *type* indicates
+ * the protocol of the header and can be one of:
+ *
+ * **BPF_LWT_ENCAP_SEG6**
+ * IPv6 encapsulation with Segment Routing Header
+ * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
+ * the IPv6 header is computed by the kernel.
+ * **BPF_LWT_ENCAP_SEG6_INLINE**
+ * Only works if *skb* contains an IPv6 packet. Insert a
+ * Segment Routing Header (**struct ipv6_sr_hdr**) inside
+ * the IPv6 header.
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc).
The outer header + * must be IPv4 or IPv6, followed by zero or more + * additional headers, up to **LWT_BPF_MAX_HEADROOM** + * total bytes in all prepended headers. Please note that + * if **skb_is_gso**\ (*skb*) is true, no more than two + * headers can be prepended, and the inner header, if + * present, should be either GRE or UDP/GUE. + * + * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs + * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can + * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and + * **BPF_PROG_TYPE_LWT_XMIT**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_push_encap)(struct __sk_buff *skb, __u32 type, void *hdr, __u32 len) = (void *) 73; + +/* + * bpf_lwt_seg6_store_bytes + * + * Store *len* bytes from address *from* into the packet + * associated to *skb*, at *offset*. Only the flags, tag and TLVs + * inside the outermost IPv6 Segment Routing Header can be + * modified through this helper. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len) = (void *) 74; + +/* + * bpf_lwt_seg6_adjust_srh + * + * Adjust the size allocated to TLVs in the outermost IPv6 + * Segment Routing Header contained in the packet associated to + * *skb*, at position *offset* by *delta* bytes. Only offsets + * after the segments are accepted. *delta* can be as well + * positive (growing) as negative (shrinking). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_adjust_srh)(struct __sk_buff *skb, __u32 offset, __s32 delta) = (void *) 75; + +/* + * bpf_lwt_seg6_action + * + * Apply an IPv6 Segment Routing action of type *action* to the + * packet associated to *skb*. Each action takes a parameter + * contained at address *param*, and of length *param_len* bytes. + * *action* can be one of: + * + * **SEG6_LOCAL_ACTION_END_X** + * End.X action: Endpoint with Layer-3 cross-connect. + * Type of *param*: **struct in6_addr**. + * **SEG6_LOCAL_ACTION_END_T** + * End.T action: Endpoint with specific IPv6 table lookup. + * Type of *param*: **int**. + * **SEG6_LOCAL_ACTION_END_B6** + * End.B6 action: Endpoint bound to an SRv6 policy. + * Type of *param*: **struct ipv6_sr_hdr**. + * **SEG6_LOCAL_ACTION_END_B6_ENCAP** + * End.B6.Encap action: Endpoint bound to an SRv6 + * encapsulation policy. + * Type of *param*: **struct ipv6_sr_hdr**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. 
Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_lwt_seg6_action)(struct __sk_buff *skb, __u32 action, void *param, __u32 param_len) = (void *) 76;
+
+/*
+ * bpf_rc_repeat
+ *
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
+ *
+ * Some IR protocols like NEC have a special IR message for
+ * repeating the last button, for when a button is held down.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_rc_repeat)(void *ctx) = (void *) 77;
+
+/*
+ * bpf_rc_keydown
+ *
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
+ *
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void *) 78;
+
+/*
+ * bpf_skb_cgroup_id
+ *
+ * Return the cgroup v2 id of the socket associated with the *skb*.
+ * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
+ * helper for cgroup v1 by providing a tag resp. identifier that
+ * can be matched on or used for map lookups e.g. to implement
+ * policy. The cgroup v2 id of a given path in the hierarchy is
+ * exposed in user space through the f_handle API in order to get
+ * to the same 64-bit id.
+ *
+ * This helper can be used on TC egress path, but not on ingress,
+ * and is available only if the kernel was compiled with the
+ * **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_skb_cgroup_id)(struct __sk_buff *skb) = (void *) 79;
+
+/*
+ * bpf_get_current_cgroup_id
+ *
+ *
+ * Returns
+ * A 64-bit integer containing the current cgroup id based
+ * on the cgroup within which the current task is running.
+ */
+static __u64 (*bpf_get_current_cgroup_id)(void) = (void *) 80;
+
+/*
+ * bpf_get_local_storage
+ *
+ * Get the pointer to the local storage area.
+ * The type and the size of the local storage is defined
+ * by the *map* argument.
+ * The *flags* meaning is specific for each map type,
+ * and has to be 0 for cgroup local storage.
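+ *
+ * For cgroup local storage, a minimal sketch could be (the map
+ * name cg_storage and its __u64 value type are assumptions of
+ * this example):
+ *
+ * ::
+ *
+ *     __u64 *counter;
+ *
+ *     counter = bpf_get_local_storage(&cg_storage, 0);
+ *     __sync_fetch_and_add(counter, 1);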
+ *
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
+ * running simultaneously.
+ *
+ * Users must take care of the synchronization themselves, for
+ * example by using the **BPF_STX_XADD** instruction to alter
+ * the shared data.
+ *
+ * Returns
+ * A pointer to the local storage area.
+ */
+static void *(*bpf_get_local_storage)(void *map, __u64 flags) = (void *) 81;
+
+/*
+ * bpf_sk_select_reuseport
+ *
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * It checks that the selected socket matches the incoming
+ * request in the socket buffer.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sk_select_reuseport)(struct sk_reuseport_md *reuse, void *map, void *key, __u64 flags) = (void *) 82;
+
+/*
+ * bpf_skb_ancestor_cgroup_id
+ *
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_skb_ancestor_cgroup_id)(struct __sk_buff *skb, int ancestor_level) = (void *) 83;
+
+/*
+ * bpf_sk_lookup_tcp
+ *
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ *
+ * Returns
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ */
+static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 84;
+
+/*
+ * bpf_sk_lookup_udp
+ *
+ * Look for UDP socket matching *tuple*, optionally in a child
+ * network namespace *netns*.
The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * 	Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * 	Look for an IPv6 socket.
+ *
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * the **CONFIG_NET** configuration option.
+ *
+ * Returns
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with the reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ */
+static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 85;
+
+/*
+ * bpf_sk_release
+ *
+ * Release the reference held by *sock*. *sock* must be a
+ * non-**NULL** pointer that was returned from
+ * **bpf_sk_lookup_xxx**\ ().
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sk_release)(struct bpf_sock *sock) = (void *) 86;
+
+/*
+ * bpf_map_push_elem
+ *
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * 	If the queue/stack is full, the oldest element is
+ * 	removed to make room for this.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void *) 87;
+
+/*
+ * bpf_map_pop_elem
+ *
+ * Pop an element from *map*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_pop_elem)(void *map, void *value) = (void *) 88;
+
+/*
+ * bpf_map_peek_elem
+ *
+ * Get an element from *map* without removing it.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_peek_elem)(void *map, void *value) = (void *) 89;
+
+/*
+ * bpf_msg_push_data
+ *
+ * For socket policies, insert *len* bytes into *msg* at offset
+ * *start*.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it may want to insert metadata or options into the *msg*.
+ * This can later be read and used by any of the lower layer BPF
+ * hooks.
+ *
+ * This helper may fail under memory pressure (if a malloc
+ * fails); in these cases BPF programs will get an appropriate
+ * error and will need to handle it.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_msg_push_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 90;
+
+/*
+ * bpf_msg_pop_data
+ *
+ * Will remove *len* bytes from a *msg* starting at byte *start*.
+ * This may result in **ENOMEM** errors under certain situations if
+ * an allocation and copy are required due to a full ring buffer.
+ * However, the helper will try to avoid doing the allocation
+ * if possible. Other errors can occur if input parameters are
+ * invalid, either due to the *start* byte not being a valid part
+ * of the *msg* payload and/or the *pop* value being too large.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_msg_pop_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 91;
+
+/*
+ * bpf_rc_pointer_rel
+ *
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded pointer movement.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void *) 92;
+
+/*
+ * bpf_spin_lock
+ *
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a value of a map. Taking the lock allows one
+ * to safely update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ * or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nested lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, does not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ * networking packet (it can only be inside of a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
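+ *
+ * A minimal locking sketch (illustrative only; the map, key and
+ * value layout are placeholders, with a BTF-described value that
+ * contains an **int** counter and a **struct bpf_spin_lock**):
+ *
+ * ::
+ *
+ * 	struct val {
+ * 		int cnt;
+ * 		struct bpf_spin_lock lock;
+ * 	} *v;
+ *
+ * 	v = bpf_map_lookup_elem(&map, &key);
+ * 	if (v) {
+ * 		bpf_spin_lock(&v->lock);
+ * 		v->cnt++;
+ * 		bpf_spin_unlock(&v->lock);
+ * 	}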
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_spin_lock)(struct bpf_spin_lock *lock) = (void *) 93;
+
+/*
+ * bpf_spin_unlock
+ *
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void *) 94;
+
+/*
+ * bpf_sk_fullsock
+ *
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in this **bpf_sock** can be accessed.
+ *
+ * Returns
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ */
+static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) 95;
+
+/*
+ * bpf_tcp_sock
+ *
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ *
+ * Returns
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
+ * case of failure.
+ */
+static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void *) 96;
+
+/*
+ * bpf_skb_ecn_set_ce
+ *
+ * Set the ECN (Explicit Congestion Notification) field of the IP
+ * header to **CE** (Congestion Encountered) if the current value
+ * is **ECT** (ECN Capable Transport). Otherwise, do nothing. Works
+ * with IPv6 and IPv4.
+ *
+ * Returns
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ */
+static long (*bpf_skb_ecn_set_ce)(struct __sk_buff *skb) = (void *) 97;
+
+/*
+ * bpf_get_listener_sock
+ *
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ *
+ * Returns
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ */
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = (void *) 98;
+
+/*
+ * bpf_skc_lookup_tcp
+ *
+ * Look for a TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * This function is identical to **bpf_sk_lookup_tcp**\ (), except
+ * that it also returns timewait or request sockets. Use
+ * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
+ * full structure.
+ *
+ * This helper is available only if the kernel was compiled with
+ * the **CONFIG_NET** configuration option.
+ *
+ * Returns
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with the reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ */
+static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 99;
+
+/*
+ * bpf_tcp_check_syncookie
+ *
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK for
+ * the listening socket in *sk*.
+ *
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ipv6hdr**).
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains **sizeof**\ (**struct tcphdr**).
+ *
+ * Returns
+ * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
+ * error otherwise.
+ */
+static long (*bpf_tcp_check_syncookie)(struct bpf_sock *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 100;
+
+/*
+ * bpf_sysctl_get_name
+ *
+ * Get the name of the sysctl in /proc/sys/ and copy it into the
+ * buffer *buf* of size *buf_len* provided by the program.
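+ *
+ * An illustrative sketch (assumptions: a **cgroup/sysctl**
+ * program, an arbitrary buffer size, and bpf_printk purely for
+ * demonstration):
+ *
+ * ::
+ *
+ * 	SEC("cgroup/sysctl")
+ * 	int sysctl_logger(struct bpf_sysctl *ctx)
+ * 	{
+ * 		char name[64];
+ *
+ * 		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) > 0)
+ * 			bpf_printk("sysctl: %s\n", name);
+ * 		return 1;	/* allow the access */
+ * 	}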
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * If *flags* is zero, the full name (e.g. "net/ipv4/tcp_mem") is
+ * copied. Use the **BPF_F_SYSCTL_BASE_NAME** flag to copy the base
+ * name only (e.g. "tcp_mem").
+ *
+ * Returns
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated name in this case).
+ */
+static long (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len, __u64 flags) = (void *) 101;
+
+/*
+ * bpf_sysctl_get_current_value
+ *
+ * Get the current value of the sysctl as it is presented in
+ * /proc/sys (incl. newline, etc), and copy it as a string into
+ * the buffer *buf* of size *buf_len* provided by the program.
+ *
+ * The whole value is copied, no matter what file position user
+ * space issued e.g. sys_read at.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Returns
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if the current value was unavailable, e.g. because
+ * the sysctl is uninitialized and read returns -EIO for it.
+ */
+static long (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 102;
+
+/*
+ * bpf_sysctl_get_new_value
+ *
+ * Get the new value being written by user space to the sysctl
+ * (before the actual write happens) and copy it as a string into
+ * the buffer *buf* of size *buf_len* provided by the program.
+ *
+ * User space may write the new value at a file position > 0.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Returns
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if the sysctl is being read.
+ */
+static long (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 103;
+
+/*
+ * bpf_sysctl_set_new_value
+ *
+ * Override the new value being written by user space to the
+ * sysctl with the value provided by the program in buffer *buf*
+ * of size *buf_len*.
+ *
+ * *buf* should contain a string in the same form as provided by
+ * user space on sysctl write.
+ *
+ * User space may write the new value at a file position > 0. To
+ * override the whole sysctl value, the file position should be
+ * set to zero.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-E2BIG** if the *buf_len* is too big.
+ *
+ * **-EINVAL** if the sysctl is being read.
+ */
+static long (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, unsigned long buf_len) = (void *) 104;
+
+/*
+ * bpf_strtol
+ *
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to a long integer according to the given base
+ * and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)) followed by a single
+ * optional '**-**' sign.
+ *
+ * The five least significant bits of *flags* encode the base;
+ * other bits are currently unused.
+ *
+ * The base must be either 8, 10, 16 or 0 to detect it
+ * automatically, similar to user space **strtol**\ (3).
+ *
+ * Returns
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or an unsupported
+ * base was provided.
+ *
+ * **-ERANGE** if the resulting value was out of range.
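+ *
+ * An illustrative sketch combining this with the sysctl helpers
+ * above (assumes a sysctl program context *ctx*; *flags* of 0
+ * selects automatic base detection):
+ *
+ * ::
+ *
+ * 	char buf[16];
+ * 	long val;
+ *
+ * 	if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) >= 0 &&
+ * 	    bpf_strtol(buf, sizeof(buf), 0, &val) > 0) {
+ * 		/* val now holds the parsed long */
+ * 	}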
+ */
+static long (*bpf_strtol)(const char *buf, unsigned long buf_len, __u64 flags, long *res) = (void *) 105;
+
+/*
+ * bpf_strtoul
+ *
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to an unsigned long integer according to the
+ * given base and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)).
+ *
+ * The five least significant bits of *flags* encode the base;
+ * other bits are currently unused.
+ *
+ * The base must be either 8, 10, 16 or 0 to detect it
+ * automatically, similar to user space **strtoul**\ (3).
+ *
+ * Returns
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or an unsupported
+ * base was provided.
+ *
+ * **-ERANGE** if the resulting value was out of range.
+ */
+static long (*bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags, unsigned long *res) = (void *) 106;
+
+/*
+ * bpf_sk_storage_get
+ *
+ * Get a bpf-local-storage from a *sk*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *sk* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except that this
+ * helper enforces that the key must be a full socket and the map
+ * must be of type **BPF_MAP_TYPE_SK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *sk* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf-local-storages residing at *sk*.
+ *
+ * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf-local-storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf-local-storage. If *value* is
+ * **NULL**, the new bpf-local-storage will be zero initialized.
+ *
+ * Returns
+ * A bpf-local-storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf-local-storage.
+ */
+static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk, void *value, __u64 flags) = (void *) 107;
+
+/*
+ * bpf_sk_storage_delete
+ *
+ * Delete a bpf-local-storage from a *sk*.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf-local-storage cannot be found.
+ */
+static long (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) = (void *) 108;
+
+/*
+ * bpf_send_signal
+ *
+ * Send signal *sig* to the process of the current task.
+ * The signal may be delivered to any of this process's threads.
+ *
+ * Returns
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if the work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if the bpf program can try again.
+ */
+static long (*bpf_send_signal)(__u32 sig) = (void *) 109;
+
+/*
+ * bpf_tcp_gen_syncookie
+ *
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
+ *
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ipv6hdr**).
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header.
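+ *
+ * Unpacking the result described below (illustrative sketch only;
+ * *sk*, *iph*, *iph_len*, *th* and *th_len* are assumed to have
+ * been validated by the caller):
+ *
+ * ::
+ *
+ * 	__s64 ret = bpf_tcp_gen_syncookie(sk, iph, iph_len,
+ * 					  th, th_len);
+ *
+ * 	if (ret >= 0) {
+ * 		__u32 cookie = (__u32)ret;
+ * 		__u16 mss = (__u16)(ret >> 32);
+ * 	}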
+ *
+ * Returns
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie;
+ * the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** SYN cookie cannot be issued due to error
+ *
+ * **-ENOENT** SYN cookie should not be issued (no SYN flood)
+ *
+ * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
+ *
+ * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ */
+static __s64 (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 110;
+
+/*
+ * bpf_skb_output
+ *
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through the eBPF stack
+ * and pointed to by *data*.
+ *
+ * *ctx* is a pointer to the in-kernel struct sk_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 111;
+
+/*
+ * bpf_probe_read_user
+ *
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_read_user)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 112;
+
+/*
+ * bpf_probe_read_kernel
+ *
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_read_kernel)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 113;
+
+/*
+ * bpf_probe_read_user_str
+ *
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * 	SEC("kprobe/sys_open")
+ * 	void bpf_sys_open(struct pt_regs *ctx)
+ * 	{
+ * 		char buf[PATHLEN]; // PATHLEN is defined to 256
+ * 		int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * 		                                  ctx->di);
+ *
+ * 		// Consume buf, for example push it to
+ * 		// userspace via bpf_perf_event_output(); we
+ * 		// can use res (the string length) as event
+ * 		// size, after checking its boundaries.
+ * 	}
+ *
+ * In comparison, using the **bpf_probe_read_user**\ () helper here
+ * instead to read the string would require estimating the length
+ * at compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another useful use case is when parsing individual process
+ * arguments or individual environment variables navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ *
+ * Returns
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ */
+static long (*bpf_probe_read_user_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 114;
+
+/*
+ * bpf_probe_read_kernel_str
+ *
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
+ *
+ * Returns
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
+ */
+static long (*bpf_probe_read_kernel_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 115;
+
+/*
+ * bpf_tcp_send_ack
+ *
+ * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
+ * *rcv_nxt* is the ack_seq to be sent out.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = (void *) 116;
+
+/*
+ * bpf_send_signal_thread
+ *
+ * Send signal *sig* to the thread corresponding to the current task.
+ *
+ * Returns
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if the work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if the bpf program can try again.
+ */
+static long (*bpf_send_signal_thread)(__u32 sig) = (void *) 117;
+
+/*
+ * bpf_jiffies64
+ *
+ * Obtain the 64-bit jiffies.
+ *
+ * Returns
+ * The 64-bit jiffies.
+ */
+static __u64 (*bpf_jiffies64)(void) = (void *) 118;
+
+/*
+ * bpf_read_branch_records
+ *
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (**struct perf_branch_entry**) associated with
+ * *ctx* and store them in the buffer pointed to by *buf*, up to
+ * *size* bytes.
+ *
+ * Returns
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
+ *
+ * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ * instead return the number of bytes required to store all the
+ * branch entries. If this flag is set, *buf* may be NULL.
+ *
+ * **-EINVAL** if arguments invalid or **size** not a multiple
+ * of **sizeof**\ (**struct perf_branch_entry**\ ).
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ */
+static long (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf, __u32 size, __u64 flags) = (void *) 119;
+
+/*
+ * bpf_get_ns_current_pid_tgid
+ *
+ * On success, the values of *pid* and *tgid* as seen from the
+ * current *namespace* are returned in *nsdata*.
+ *
+ * Returns
+ * 0 on success, or one of the following in case of failure:
+ *
+ * **-EINVAL** if the *dev* and *inum* supplied don't match the
+ * dev_t and inode number with the nsfs of the current task, or if
+ * the *dev* conversion to dev_t lost high bits.
+ *
+ * **-ENOENT** if the pidns does not exist for the current task.
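+ *
+ * Illustrative sketch (the *dev*/*ino* pair identifying the pid
+ * namespace would typically be obtained in user space, e.g. from
+ * stat(2) on /proc/self/ns/pid, and passed in; all names here are
+ * assumptions):
+ *
+ * ::
+ *
+ * 	struct bpf_pidns_info nsinfo = {};
+ *
+ * 	if (!bpf_get_ns_current_pid_tgid(dev, ino, &nsinfo,
+ * 					 sizeof(nsinfo)))
+ * 		/* nsinfo.pid / nsinfo.tgid are namespace-local */ ;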
+ */
+static long (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino, struct bpf_pidns_info *nsdata, __u32 size) = (void *) 120;
+
+/*
+ * bpf_xdp_output
+ *
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through the eBPF stack
+ * and pointed to by *data*.
+ *
+ * *ctx* is a pointer to the in-kernel struct xdp_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_xdp_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 121;
+
+/*
+ * bpf_get_netns_cookie
+ *
+ * Retrieve the cookie (generated by the kernel) of the network
+ * namespace the input *ctx* is associated with. The network
+ * namespace cookie remains stable for its lifetime and provides
+ * a global identifier that can be assumed unique. If *ctx* is
+ * NULL, then the helper returns the cookie for the initial
+ * network namespace. The cookie itself is very similar to that
+ * of the **bpf_get_socket_cookie**\ () helper, but for network
+ * namespaces instead of sockets.
+ *
+ * Returns
+ * An 8-byte long opaque number.
+ */
+static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *) 122;
+
+/*
+ * bpf_get_current_ancestor_cgroup_id
+ *
+ * Return the id of the cgroup v2 that is an ancestor of the
+ * cgroup associated with the current task at the *ancestor_level*.
+ * The root cgroup is at *ancestor_level* zero and each step down
+ * the hierarchy increments the level. If *ancestor_level* == level
+ * of the cgroup associated with the current task, then the return
+ * value will be the same as that of
+ * **bpf_get_current_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with the current task.
+ *
+ * The format of the returned id and the helper limitations are
+ * the same as in **bpf_get_current_cgroup_id**\ ().
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) = (void *) 123;
+
+/*
+ * bpf_sk_assign
+ *
+ * Assign the *sk* to the *skb*. When combined with appropriate
+ * routing configuration to receive the packet towards the socket,
+ * this will cause *skb* to be delivered to the specified socket.
+ * Subsequent redirection of *skb* via **bpf_redirect**\ (),
+ * **bpf_clone_redirect**\ () or other methods outside of BPF may
+ * interfere with successful delivery to the socket.
+ *
+ * This operation is only valid from the TC ingress path.
+ *
+ * The *flags* argument must be zero.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EINVAL** if the specified *flags* are not supported.
+ *
+ * **-ENOENT** if the socket is unavailable for assignment.
+ *
+ * **-ENETUNREACH** if the socket is unreachable (wrong netns).
+ *
+ * **-EOPNOTSUPP** if the operation is not supported, for example
+ * a call from outside of TC ingress.
+ *
+ * **-ESOCKTNOSUPPORT** if the socket type is not supported
+ * (reuseport).
+ */
+static long (*bpf_sk_assign)(struct __sk_buff *skb, struct bpf_sock *sk, __u64 flags) = (void *) 124;
+
+/*
+ * bpf_ktime_get_boot_ns
+ *
+ * Return the time elapsed since system boot, in nanoseconds.
+ * It does include the time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
+ *
+ * Returns
+ * Current *ktime*.
+ */
+static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *) 125;
+
+/*
+ * bpf_seq_printf
+ *
+ * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
+ * out the format string.
+ * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
+ * the format string itself. The *data* and *data_len* are format string
+ * arguments. The *data* are a **u64** array and corresponding format string
+ * values are stored in the array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data* array.
+ * The *data_len* is the size of *data* in bytes.
+ *
+ * Formats **%s**, **%p{i,I}{4,6}** require reading kernel memory.
+ * Reading kernel memory may fail due to either an invalid address or
+ * a valid address but requiring a major memory fault. If reading kernel
+ * memory fails, the string for **%s** will be an empty string, and the ip
+ * address for **%p{i,I}{4,6}** will be 0. Not returning an error to the
+ * bpf program is consistent with what **bpf_trace_printk**\ () does for now.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EBUSY** if the per-CPU memory copy buffer is busy; can try again
+ * by returning 1 from the bpf program.
+ *
+ * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
+ *
+ * **-E2BIG** if *fmt* contains too many format specifiers.
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ */
+static long (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 126;
+
+/*
+ * bpf_seq_write
+ *
+ * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
+ * The *m* represents the seq_file. The *data* and *len* represent the
+ * data to write in bytes.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ */
+static long (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) = (void *) 127;
+
+/*
+ * bpf_sk_cgroup_id
+ *
+ * Return the cgroup v2 id of the socket *sk*.
+ *
+ * *sk* must be a non-**NULL** pointer to a full socket, e.g. one
+ * returned from **bpf_sk_lookup_xxx**\ (),
+ * **bpf_sk_fullsock**\ (), etc. The format of the returned id is
+ * the same as in **bpf_skb_cgroup_id**\ ().
+ *
+ * This helper is available only if the kernel was compiled with
+ * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_sk_cgroup_id)(struct bpf_sock *sk) = (void *) 128;
+
+/*
+ * bpf_sk_ancestor_cgroup_id
+ *
+ * Return the id of the cgroup v2 that is an ancestor of the
+ * cgroup associated with the *sk* at the *ancestor_level*. The
+ * root cgroup is at *ancestor_level* zero and each step down the
+ * hierarchy increments the level.
If *ancestor_level* == level of the cgroup
+ * associated with *sk*, then the return value will be the same
+ * as that of **bpf_sk_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with *sk*.
+ *
+ * The format of the returned id and the helper limitations are
+ * the same as in **bpf_sk_cgroup_id**\ ().
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_sk_ancestor_cgroup_id)(struct bpf_sock *sk, int ancestor_level) = (void *) 129;
+
+/*
+ * bpf_ringbuf_output
+ *
+ * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) = (void *) 130;
+
+/*
+ * bpf_ringbuf_reserve
+ *
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
+ *
+ * Returns
+ * Valid pointer with *size* bytes of memory available; NULL,
+ * otherwise.
+ */
+static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) = (void *) 131;
+
+/*
+ * bpf_ringbuf_submit
+ *
+ * Submit reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ *
+ * Returns
+ * Nothing. Always succeeds.
+ */
+static void (*bpf_ringbuf_submit)(void *data, __u64 flags) = (void *) 132;
+
+/*
+ * bpf_ringbuf_discard
+ *
+ * Discard reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ *
+ * Returns
+ * Nothing. Always succeeds.
+ */
+static void (*bpf_ringbuf_discard)(void *data, __u64 flags) = (void *) 133;
+
+/*
+ * bpf_ringbuf_query
+ *
+ * Query various characteristics of the provided ring buffer. What
+ * exactly is queried is determined by *flags*:
+ *
+ * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ * * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ * Data returned is just a momentary snapshot of actual values
+ * and could be inaccurate, so this facility should be used to
+ * power heuristics and for reporting, not to make 100% correct
+ * calculations.
+ *
+ * Returns
+ * Requested value, or 0, if *flags* are not recognized.
+ */
+static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) = (void *) 134;
+
+/*
+ * bpf_csum_level
+ *
+ * Change the skb's checksum level by one layer up or down, or
+ * reset it entirely to none in order to have the stack perform
+ * checksum validation. The level is applicable to the following
+ * protocols: TCP, UDP, GRE, SCTP, FCOE.
For example, a decap of
+ * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ * through the **bpf_skb_adjust_room**\ () helper, passing in the
+ * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag, would require one call
+ * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
+ * the UDP header is removed. Similarly, an encap of the latter
+ * into the former could be accompanied by a helper call to
+ * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ * skb is still intended to be processed in higher layers of the
+ * stack instead of just egressing at tc.
+ *
+ * The following level settings are supported at this time:
+ *
+ * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ * sets CHECKSUM_NONE to force checksum validation by the stack.
+ * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ * skb->csum_level.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure. In the
+ * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ * is returned or the error code -EACCES in case the skb is not
+ * subject to CHECKSUM_UNNECESSARY.
+ */
+static long (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) = (void *) 135;
+
+/*
+ * bpf_skc_to_tcp6_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or NULL otherwise.
+ */
+static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) = (void *) 136;
+
+/*
+ * bpf_skc_to_tcp_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or NULL otherwise.
+ */
+static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) = (void *) 137;
+
+/*
+ * bpf_skc_to_tcp_timewait_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or NULL otherwise.
+ */
+static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) = (void *) 138;
+
+/*
+ * bpf_skc_to_tcp_request_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or NULL otherwise.
+ */
+static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) = (void *) 139;
+
+/*
+ * bpf_skc_to_udp6_sock
+ *
+ * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or NULL otherwise.
+ */
+static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) = (void *) 140;
+
+/*
+ * bpf_get_task_stack
+ *
+ * Return a user or a kernel stack in the bpf program provided buffer.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to struct task_struct. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * 	Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * 	Collect buildid+offset instead of ips for user stack,
+ * 	only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_task_stack**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ * to a sufficiently large buffer size.
Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack=<new value> + * + * Returns + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. + */ +static long (*bpf_get_task_stack)(struct task_struct *task, void *buf, __u32 size, __u64 flags) = (void *) 141; + + diff --git a/src/bpfswitch/include/bpf/bpf_helpers.h b/src/bpfswitch/include/bpf/bpf_helpers.h new file mode 100644 index 00000000..a510d8ed --- /dev/null +++ b/src/bpfswitch/include/bpf/bpf_helpers.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_HELPERS__ +#define __BPF_HELPERS__ + +/* + * Note that bpf programs need to include either + * vmlinux.h (auto-generated from BTF) or linux/types.h + * in advance since bpf_helper_defs.h uses such types + * as __u64. + */ +#include "bpf_helper_defs.h" + +#define __uint(name, val) int (*name)[val] +#define __type(name, val) typeof(val) *name +#define __array(name, val) typeof(val) *name[] + +/* Helper macro to print out debug messages */ +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +/* + * Helper macro to place programs, maps, license in + * different sections in elf_bpf file. Section names + * are interpreted by elf_bpf loader + */ +#define SEC(NAME) __attribute__((section(NAME), used)) + +#ifndef __always_inline +#define __always_inline __attribute__((always_inline)) +#endif +#ifndef __weak +#define __weak __attribute__((weak)) +#endif + +/* + * Helper macro to manipulate data structures + */ +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) +#endif +#ifndef container_of +#define container_of(ptr, type, member) \ + ({ \ + void *__mptr = (void *)(ptr); \ + ((type *)(__mptr - offsetof(type, member))); \ + }) +#endif + +/* + * Helper structure used by eBPF C program + * to describe BPF map attributes to libbpf loader + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; + unsigned int map_flags; +}; + +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + +#define __kconfig __attribute__((section(".kconfig"))) +#define __ksym __attribute__((section(".ksyms"))) + +#endif diff --git a/src/bpfswitch/include/bpf/bpf_tracing.h b/src/bpfswitch/include/bpf/bpf_tracing.h new file mode 100644 index 00000000..58eceb88 --- /dev/null +++ b/src/bpfswitch/include/bpf/bpf_tracing.h @@ -0,0 +1,432 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_TRACING_H__ +#define __BPF_TRACING_H__ + +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ +#if defined(__TARGET_ARCH_x86) + #define bpf_target_x86 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_s390) + #define bpf_target_s390 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm) + #define bpf_target_arm + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm64) + #define bpf_target_arm64 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_mips) + #define bpf_target_mips + #define bpf_target_defined +#elif defined(__TARGET_ARCH_powerpc) + 
#define bpf_target_powerpc + #define bpf_target_defined +#elif defined(__TARGET_ARCH_sparc) + #define bpf_target_sparc + #define bpf_target_defined +#else + #undef bpf_target_defined +#endif + +/* Fall back to what the compiler says */ +#ifndef bpf_target_defined +#if defined(__x86_64__) + #define bpf_target_x86 +#elif defined(__s390__) + #define bpf_target_s390 +#elif defined(__arm__) + #define bpf_target_arm +#elif defined(__aarch64__) + #define bpf_target_arm64 +#elif defined(__mips__) + #define bpf_target_mips +#elif defined(__powerpc__) + #define bpf_target_powerpc +#elif defined(__sparc__) + #define bpf_target_sparc +#endif +#endif + +#if defined(bpf_target_x86) + +#if defined(__KERNEL__) || defined(__VMLINUX_H__) + +#define PT_REGS_PARM1(x) ((x)->di) +#define PT_REGS_PARM2(x) ((x)->si) +#define PT_REGS_PARM3(x) ((x)->dx) +#define PT_REGS_PARM4(x) ((x)->cx) +#define PT_REGS_PARM5(x) ((x)->r8) +#define PT_REGS_RET(x) ((x)->sp) +#define PT_REGS_FP(x) ((x)->bp) +#define PT_REGS_RC(x) ((x)->ax) +#define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) ((x)->ip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip) + +#else + +#ifdef __i386__ +/* i386 kernel is built with -mregparm=3 */ +#define PT_REGS_PARM1(x) ((x)->eax) +#define PT_REGS_PARM2(x) ((x)->edx) +#define PT_REGS_PARM3(x) ((x)->ecx) +#define PT_REGS_PARM4(x) 0 +#define PT_REGS_PARM5(x) 0 +#define PT_REGS_RET(x) ((x)->esp) +#define PT_REGS_FP(x) ((x)->ebp) +#define PT_REGS_RC(x) ((x)->eax) +#define PT_REGS_SP(x) ((x)->esp) +#define PT_REGS_IP(x) ((x)->eip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx) +#define PT_REGS_PARM4_CORE(x) 0 +#define PT_REGS_PARM5_CORE(x) 0 +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip) + +#else + +#define PT_REGS_PARM1(x) ((x)->rdi) +#define PT_REGS_PARM2(x) ((x)->rsi) +#define PT_REGS_PARM3(x) ((x)->rdx) +#define PT_REGS_PARM4(x) ((x)->rcx) +#define PT_REGS_PARM5(x) ((x)->r8) +#define PT_REGS_RET(x) ((x)->rsp) +#define PT_REGS_FP(x) ((x)->rbp) +#define PT_REGS_RC(x) ((x)->rax) +#define PT_REGS_SP(x) ((x)->rsp) +#define PT_REGS_IP(x) ((x)->rip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip) + +#endif +#endif + +#elif defined(bpf_target_s390) + +/* s390 provides user_pt_regs instead of struct pt_regs to userspace */ +struct 
pt_regs; +#define PT_REGS_S390 const volatile user_pt_regs +#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2]) +#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3]) +#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4]) +#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5]) +#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6]) +#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14]) +/* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11]) +#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2]) +#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15]) +#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr) + +#elif defined(bpf_target_arm) + +#define PT_REGS_PARM1(x) ((x)->uregs[0]) +#define PT_REGS_PARM2(x) ((x)->uregs[1]) +#define PT_REGS_PARM3(x) ((x)->uregs[2]) +#define PT_REGS_PARM4(x) ((x)->uregs[3]) +#define PT_REGS_PARM5(x) ((x)->uregs[4]) +#define PT_REGS_RET(x) ((x)->uregs[14]) +#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_RC(x) ((x)->uregs[0]) +#define PT_REGS_SP(x) ((x)->uregs[13]) +#define PT_REGS_IP(x) ((x)->uregs[12]) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12]) + +#elif defined(bpf_target_arm64) + +/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ +struct pt_regs; +#define PT_REGS_ARM64 const volatile struct user_pt_regs +#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0]) +#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1]) +#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2]) +#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3]) +#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4]) +#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30]) +/* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29]) +#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0]) +#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp) +#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2]) 
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc) + +#elif defined(bpf_target_mips) + +#define PT_REGS_PARM1(x) ((x)->regs[4]) +#define PT_REGS_PARM2(x) ((x)->regs[5]) +#define PT_REGS_PARM3(x) ((x)->regs[6]) +#define PT_REGS_PARM4(x) ((x)->regs[7]) +#define PT_REGS_PARM5(x) ((x)->regs[8]) +#define PT_REGS_RET(x) ((x)->regs[31]) +#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_RC(x) ((x)->regs[1]) +#define PT_REGS_SP(x) ((x)->regs[29]) +#define PT_REGS_IP(x) ((x)->cp0_epc) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc) + +#elif defined(bpf_target_powerpc) + +#define PT_REGS_PARM1(x) ((x)->gpr[3]) +#define PT_REGS_PARM2(x) ((x)->gpr[4]) +#define PT_REGS_PARM3(x) ((x)->gpr[5]) +#define PT_REGS_PARM4(x) ((x)->gpr[6]) +#define PT_REGS_PARM5(x) ((x)->gpr[7]) +#define PT_REGS_RC(x) ((x)->gpr[3]) +#define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) ((x)->nip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip) + +#elif defined(bpf_target_sparc) + +#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) +#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) +#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2]) +#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3]) +#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4]) +#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) +#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) +#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP]) + +/* Should this also be a bpf_target check for the sparc case? 
*/
+#if defined(__arch64__)
+#define PT_REGS_IP(x) ((x)->tpc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
+#else
+#define PT_REGS_IP(x) ((x)->pc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
+#endif
+
+#endif
+
+#if defined(bpf_target_powerpc)
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
+#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+#elif defined(bpf_target_sparc)
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
+#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+#else
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
+ ({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
+ ({ bpf_probe_read(&(ip), sizeof(ip), \
+ (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+#endif
+
+#define ___bpf_concat(a, b) a ## b
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define ___bpf_empty(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
+#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
+#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
+#define ___bpf_ctx_cast(args...) \
+ ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs, that accept input arguments as a single
+ * pointer to untyped u64 array, where each u64 can actually be a typed
+ * pointer or integer of different size. Instead of requiring the user to
+ * write manual casts and work with array elements by index, the BPF_PROG
+ * macro allows the user to declare a list of named and typed input
+ * arguments in the same syntax as for a normal C function. All the casting
+ * is hidden and performed transparently, while user code can just assume
+ * working with function arguments of specified type and name.
+ *
+ * The original raw context argument is preserved as the 'ctx' argument.
+ * This is useful when using BPF helpers that expect the original context
+ * as one of the parameters (e.g., for bpf_perf_event_output()).
+ */
+#define BPF_PROG(name, args...)
\
+name(unsigned long long *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_ctx_cast(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args)
+
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) \
+ ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) \
+ ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) \
+ ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) \
+ ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) \
+ ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args(args...) \
+ ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
+ * low-level way of getting kprobe input arguments from struct pt_regs, and
+ * provides a familiar typed and named function arguments syntax and
+ * semantics of accessing kprobe input parameters.
+ *
+ * The original struct pt_regs* context is preserved as the 'ctx' argument.
+ * This might be necessary when using BPF helpers like
+ * bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) \
+ ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
+#define ___bpf_kretprobe_args(args...) \
+ ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except it only provides an
+ * optional return value (in addition to `struct pt_regs *ctx`), but no
+ * input arguments, because they will be clobbered by the time the probed
+ * function returns.
+ */
+#define BPF_KRETPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kretprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+
+/*
+ * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
+ * in a structure.
+ */
+#define BPF_SEQ_PRINTF(seq, fmt, args...)
\ + ({ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[] = { args }; \ + _Pragma("GCC diagnostic pop") \ + int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \ + ___param, sizeof(___param)); \ + ___ret; \ + }) + +#endif diff --git a/src/bpfswitch/include/bpf/btf.h b/src/bpfswitch/include/bpf/btf.h new file mode 100644 index 00000000..a3b7ef9b --- /dev/null +++ b/src/bpfswitch/include/bpf/btf.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2018 Facebook */ + +#ifndef __LIBBPF_BTF_H +#define __LIBBPF_BTF_H + +#include <stdarg.h> +#include <linux/btf.h> +#include <linux/types.h> + +#include "libbpf_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define BTF_ELF_SEC ".BTF" +#define BTF_EXT_ELF_SEC ".BTF.ext" +#define MAPS_ELF_SEC ".maps" + +struct btf; +struct btf_ext; +struct btf_type; + +struct bpf_object; + +/* + * The .BTF.ext ELF section layout defined as + * struct btf_ext_header + * func_info subsection + * + * The func_info subsection layout: + * record size for struct bpf_func_info in the func_info subsection + * struct btf_sec_func_info for section #1 + * a list of bpf_func_info records for section #1 + * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h + * but may not be identical + * struct btf_sec_func_info for section #2 + * a list of bpf_func_info records for section #2 + * ...... + * + * Note that the bpf_func_info record size in .BTF.ext may not + * be the same as the one defined in include/uapi/linux/bpf.h. + * The loader should ensure that record_size meets minimum + * requirement and pass the record as is to the kernel. The + * kernel will handle the func_info properly based on its contents. 
+ */
+struct btf_ext_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 func_info_off;
+ __u32 func_info_len;
+ __u32 line_info_off;
+ __u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+ __u32 field_reloc_off;
+ __u32 field_reloc_len;
+};
+
+LIBBPF_API void btf__free(struct btf *btf);
+LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
+LIBBPF_API struct btf *btf__parse_elf(const char *path,
+ struct btf_ext **btf_ext);
+LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
+LIBBPF_API int btf__load(struct btf *btf);
+LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
+ const char *type_name);
+LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
+ const char *type_name, __u32 kind);
+LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
+LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
+ __u32 id);
+LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
+LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
+LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
+LIBBPF_API int btf__fd(const struct btf *btf);
+LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
+LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size,
+ __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id);
+
+LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
+LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
+ __u32 *size);
+LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt);
+LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
+LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+
+LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
+
+struct btf_dedup_opts {
+ unsigned int dedup_table_size;
+ bool dont_resolve_fwds;
+};
+
+LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
+
+struct btf_dump;
+
+struct btf_dump_opts {
+ void *ctx;
+};
+
+typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
+
+LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn);
+LIBBPF_API void btf_dump__free(struct btf_dump *d);
+
+LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
+
+struct btf_dump_emit_type_decl_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* optional field name for type declaration, e.g.:
+ * - struct my_struct <FNAME>
+ * - void (*<FNAME>)(int)
+ * - char (*<FNAME>)[123]
+ */
+ const char *field_name;
+ /* extra indentation level (in number of tabs) to emit for multi-line
+ * type declarations (e.g., anonymous struct); applies for lines
lines + * starting from the second one (first line is assumed to have + * necessary indentation already + */ + int indent_level; +}; +#define btf_dump_emit_type_decl_opts__last_field indent_level + +LIBBPF_API int +btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, + const struct btf_dump_emit_type_decl_opts *opts); + +/* + * A set of helpers for easier BTF types handling + */ +static inline __u16 btf_kind(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info); +} + +static inline __u16 btf_vlen(const struct btf_type *t) +{ + return BTF_INFO_VLEN(t->info); +} + +static inline bool btf_kflag(const struct btf_type *t) +{ + return BTF_INFO_KFLAG(t->info); +} + +static inline bool btf_is_void(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNKN; +} + +static inline bool btf_is_int(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_INT; +} + +static inline bool btf_is_ptr(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_PTR; +} + +static inline bool btf_is_array(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ARRAY; +} + +static inline bool btf_is_struct(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_STRUCT; +} + +static inline bool btf_is_union(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNION; +} + +static inline bool btf_is_composite(const struct btf_type *t) +{ + __u16 kind = btf_kind(t); + + return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; +} + +static inline bool btf_is_enum(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ENUM; +} + +static inline bool btf_is_fwd(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FWD; +} + +static inline bool btf_is_typedef(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_TYPEDEF; +} + +static inline bool btf_is_volatile(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_VOLATILE; +} + +static inline bool btf_is_const(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_CONST; +} + +static inline bool btf_is_restrict(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_RESTRICT; +} + +static inline bool btf_is_mod(const struct btf_type *t) +{ + __u16 kind = btf_kind(t); + + return kind == BTF_KIND_VOLATILE || + kind == BTF_KIND_CONST || + kind == BTF_KIND_RESTRICT; +} + +static inline bool btf_is_func(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FUNC; +} + +static inline bool btf_is_func_proto(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FUNC_PROTO; +} + +static inline bool btf_is_var(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_VAR; +} + +static inline bool btf_is_datasec(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_DATASEC; +} + +static inline __u8 btf_int_encoding(const struct btf_type *t) +{ + return BTF_INT_ENCODING(*(__u32 *)(t + 1)); +} + +static inline __u8 btf_int_offset(const struct btf_type *t) +{ + return BTF_INT_OFFSET(*(__u32 *)(t + 1)); +} + +static inline __u8 btf_int_bits(const struct btf_type *t) +{ + return BTF_INT_BITS(*(__u32 *)(t + 1)); +} + +static inline struct btf_array *btf_array(const struct btf_type *t) +{ + return (struct btf_array *)(t + 1); +} + +static inline struct btf_enum *btf_enum(const struct btf_type *t) +{ + return (struct btf_enum *)(t + 1); +} + +static inline struct btf_member *btf_members(const struct btf_type *t) +{ + return (struct btf_member *)(t + 1); +} + +/* Get bit offset of a member with specified index. 
+ */
+static inline __u32 btf_member_bit_offset(const struct btf_type *t,
+ __u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+ bool kflag = btf_kflag(t);
+
+ return kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
+}
+/*
+ * Get bitfield size of a member, assuming t is BTF_KIND_STRUCT or
+ * BTF_KIND_UNION. If member is not a bitfield, zero is returned.
+ */
+static inline __u32 btf_member_bitfield_size(const struct btf_type *t,
+ __u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+ bool kflag = btf_kflag(t);
+
+ return kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
+static inline struct btf_var *btf_var(const struct btf_type *t)
+{
+ return (struct btf_var *)(t + 1);
+}
+
+static inline struct btf_var_secinfo *
+btf_var_secinfos(const struct btf_type *t)
+{
+ return (struct btf_var_secinfo *)(t + 1);
+}
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __LIBBPF_BTF_H */
diff --git a/src/bpfswitch/include/bpf/libbpf.h b/src/bpfswitch/include/bpf/libbpf.h
new file mode 100644
index 00000000..2335971e
--- /dev/null
+++ b/src/bpfswitch/include/bpf/libbpf.h
@@ -0,0 +1,750 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Common eBPF ELF object loading operations.
+ *
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+#ifndef __LIBBPF_LIBBPF_H
+#define __LIBBPF_LIBBPF_H
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h> // for size_t
+#include <linux/bpf.h>
+
+#include "libbpf_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum libbpf_errno {
+ __LIBBPF_ERRNO__START = 4000,
+
+ /* Something wrong in libelf */
+ LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
+ LIBBPF_ERRNO__FORMAT, /* BPF object format invalid */
+ LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */
+ LIBBPF_ERRNO__ENDIAN, /* Endian mismatch */
+ LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */
+ LIBBPF_ERRNO__RELOC, /* Relocation failed */
+ LIBBPF_ERRNO__LOAD, /* Load program failure for unknown reason */
+ LIBBPF_ERRNO__VERIFY, /* Kernel verifier blocks program loading */
+ LIBBPF_ERRNO__PROG2BIG, /* Program too big */
+ LIBBPF_ERRNO__KVER, /* Incorrect kernel version */
+ LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */
+ LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */
+ LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */
+ LIBBPF_ERRNO__NLPARSE, /* netlink parsing error */
+ __LIBBPF_ERRNO__END,
+};
+
+LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
+
+enum libbpf_print_level {
+ LIBBPF_WARN,
+ LIBBPF_INFO,
+ LIBBPF_DEBUG,
+};
+
+typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
+ const char *, va_list ap);
+
+LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
+
+/* Hide internal to user */
+struct bpf_object;
+
+struct bpf_object_open_attr {
+ const char *file;
+ enum bpf_prog_type prog_type;
+};
+
+struct bpf_object_open_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* object name override, if provided:
+ * - for object open from file, this will override setting object
+ * name from file path's base name;
+ * - for object open from memory buffer, this will specify an object
+ * name and will override default "<addr>-<buf-size>" name;
+ */
+ const char *object_name;
+ /* parse map definitions non-strictly, allowing extra attributes/data */
+ bool relaxed_maps;
+ /* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures.
+ * Value is ignored. Relocations always are processed non-strictly.
+ * Non-relocatable instructions are replaced with invalid ones to
+ * prevent accidental errors.
+ */
+ bool relaxed_core_relocs;
+ /* maps that set the 'pinning' attribute in their definition will have
+ * their pin_path attribute set to a file in this directory, and be
+ * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
+ */
+ const char *pin_root_path;
+ __u32 attach_prog_fd;
+ /* Additional kernel config content that augments and overrides
+ * system Kconfig for CONFIG_xxx externs.
+ */
+ const char *kconfig;
+};
+#define bpf_object_open_opts__last_field kconfig
+
+LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+LIBBPF_API struct bpf_object *
+bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);
+LIBBPF_API struct bpf_object *
+bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
+ const struct bpf_object_open_opts *opts);
+
+/* deprecated bpf_object__open variants */
+LIBBPF_API struct bpf_object *
+bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
+ const char *name);
+LIBBPF_API struct bpf_object *
+bpf_object__open_xattr(struct bpf_object_open_attr *attr);
+
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
+/* pin_maps and unpin_maps can both be called with a NULL path, in which case
+ * they will use the pin_path attribute of each map (and ignore all maps that
+ * don't have a pin_path set).
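+ *
+ * A minimal usage sketch (error handling omitted; the object file name and
+ * pin directory below are illustrative, not part of this API):
+ *
+ *	struct bpf_object *obj = bpf_object__open_file("xdp_l2fwd.o", NULL);
+ *
+ *	bpf_object__load(obj);
+ *	bpf_object__pin_maps(obj, "/sys/fs/bpf/bpfswitch");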
+ */ +LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); +LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, + const char *path); +LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj, + const char *path); +LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj, + const char *path); +LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); +LIBBPF_API void bpf_object__close(struct bpf_object *object); + +struct bpf_object_load_attr { + struct bpf_object *obj; + int log_level; + const char *target_btf_path; +}; + +/* Load/unload object into/from kernel */ +LIBBPF_API int bpf_object__load(struct bpf_object *obj); +LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr); +LIBBPF_API int bpf_object__unload(struct bpf_object *obj); + +LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); +LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj); + +struct btf; +LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj); +LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj); + +LIBBPF_API struct bpf_program * +bpf_object__find_program_by_title(const struct bpf_object *obj, + const char *title); +LIBBPF_API struct bpf_program * +bpf_object__find_program_by_name(const struct bpf_object *obj, + const char *name); + +LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev); +#define bpf_object__for_each_safe(pos, tmp) \ + for ((pos) = bpf_object__next(NULL), \ + (tmp) = bpf_object__next(pos); \ + (pos) != NULL; \ + (pos) = (tmp), (tmp) = bpf_object__next(tmp)) + +typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *); +LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv, + bpf_object_clear_priv_t clear_priv); +LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog); + +LIBBPF_API int +libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type); +LIBBPF_API int libbpf_attach_type_by_name(const char *name, + enum bpf_attach_type *attach_type); +LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type); + +/* Accessors of bpf_program */ +struct bpf_program; +LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog, + const struct bpf_object *obj); + +#define bpf_object__for_each_program(pos, obj) \ + for ((pos) = bpf_program__next(NULL, (obj)); \ + (pos) != NULL; \ + (pos) = bpf_program__next((pos), (obj))) + +LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog, + const struct bpf_object *obj); + +typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *); + +LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv, + bpf_program_clear_priv_t clear_priv); + +LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog); +LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, + __u32 ifindex); + +LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog); +LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog, + bool needs_copy); +LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); + +/* returns program size in bytes */ +LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); + +LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license, + __u32 
kern_version); +LIBBPF_API int bpf_program__fd(const struct bpf_program *prog); +LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog, + const char *path, + int instance); +LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog, + const char *path, + int instance); +LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path); +LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path); +LIBBPF_API void bpf_program__unload(struct bpf_program *prog); + +struct bpf_link; + +LIBBPF_API struct bpf_link *bpf_link__open(const char *path); +LIBBPF_API int bpf_link__fd(const struct bpf_link *link); +LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link); +LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path); +LIBBPF_API int bpf_link__unpin(struct bpf_link *link); +LIBBPF_API int bpf_link__update_program(struct bpf_link *link, + struct bpf_program *prog); +LIBBPF_API void bpf_link__disconnect(struct bpf_link *link); +LIBBPF_API int bpf_link__destroy(struct bpf_link *link); + +LIBBPF_API struct bpf_link * +bpf_program__attach(struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_perf_event(struct bpf_program *prog, int pfd); +LIBBPF_API struct bpf_link * +bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe, + const char *func_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe, + pid_t pid, const char *binary_path, + size_t func_offset); +LIBBPF_API struct bpf_link * +bpf_program__attach_tracepoint(struct bpf_program *prog, + const char *tp_category, + const char *tp_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_raw_tracepoint(struct bpf_program *prog, + const char *tp_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_trace(struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_lsm(struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_netns(struct bpf_program *prog, int netns_fd); + +struct bpf_map; + +LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map); + +struct bpf_iter_attach_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ +}; +#define bpf_iter_attach_opts__last_field sz + +LIBBPF_API struct bpf_link * +bpf_program__attach_iter(struct bpf_program *prog, + const struct bpf_iter_attach_opts *opts); + +struct bpf_insn; + +/* + * Libbpf allows callers to adjust BPF programs before being loaded + * into kernel. One program in an object file can be transformed into + * multiple variants to be attached to different hooks. + * + * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd + * form an API for this purpose. + * + * - bpf_program_prep_t: + * Defines a 'preprocessor', which is a caller defined function + * passed to libbpf through bpf_program__set_prep(), and will be + * called before program is loaded. The processor should adjust + * the program one time for each instance according to the instance id + * passed to it. + * + * - bpf_program__set_prep: + * Attaches a preprocessor to a BPF program. The number of instances + * that should be created is also passed through this function. + * + * - bpf_program__nth_fd: + * After the program is loaded, get resulting FD of a given instance + * of the BPF program. 
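+ *
+ * A minimal sketch (the callback name is illustrative; the types involved
+ * are defined right below):
+ *
+ *	static int my_prep(struct bpf_program *prog, int n,
+ *			   struct bpf_insn *insns, int insns_cnt,
+ *			   struct bpf_prog_prep_result *res)
+ *	{
+ *		res->new_insn_ptr = insns;	// load instance n unmodified
+ *		res->new_insn_cnt = insns_cnt;
+ *		return 0;
+ *	}
+ *
+ *	bpf_program__set_prep(prog, 2, my_prep);
+ *	// ... after bpf_object__load(obj):
+ *	int fd = bpf_program__nth_fd(prog, 1);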
+ * + * If bpf_program__set_prep() is not used, the program would be loaded + * without adjustment during bpf_object__load(). The program has only + * one instance. In this case bpf_program__fd(prog) is equal to + * bpf_program__nth_fd(prog, 0). + */ + +struct bpf_prog_prep_result { + /* + * If not NULL, load new instruction array. + * If set to NULL, don't load this instance. + */ + struct bpf_insn *new_insn_ptr; + int new_insn_cnt; + + /* If not NULL, result FD is written to it. */ + int *pfd; +}; + +/* + * Parameters of bpf_program_prep_t: + * - prog: The bpf_program being loaded. + * - n: Index of instance being generated. + * - insns: BPF instructions array. + * - insns_cnt:Number of instructions in insns. + * - res: Output parameter, result of transformation. + * + * Return value: + * - Zero: pre-processing success. + * - Non-zero: pre-processing error, stop loading. + */ +typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n, + struct bpf_insn *insns, int insns_cnt, + struct bpf_prog_prep_result *res); + +LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance, + bpf_program_prep_t prep); + +LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n); + +/* + * Adjust type of BPF program. Default is kprobe. + */ +LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); + +LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); +LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, + enum bpf_prog_type type); + +LIBBPF_API enum bpf_attach_type +bpf_program__get_expected_attach_type(struct bpf_program *prog); +LIBBPF_API void +bpf_program__set_expected_attach_type(struct bpf_program *prog, + enum bpf_attach_type type); + +LIBBPF_API int +bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, + const char *attach_func_name); + +LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); + +/* 
+ * No need for __attribute__((packed)), all members of 'bpf_map_def' + * are all aligned. In addition, using __attribute__((packed)) + * would trigger a -Wpacked warning message, and lead to an error + * if -Werror is set. + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; + unsigned int map_flags; +}; + +/* + * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel, + * so no need to worry about a name clash. + */ +LIBBPF_API struct bpf_map * +bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name); + +LIBBPF_API int +bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name); + +/* + * Get bpf_map through the offset of corresponding struct bpf_map_def + * in the BPF object file. + */ +LIBBPF_API struct bpf_map * +bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset); + +LIBBPF_API struct bpf_map * +bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); +#define bpf_object__for_each_map(pos, obj) \ + for ((pos) = bpf_map__next(NULL, (obj)); \ + (pos) != NULL; \ + (pos) = bpf_map__next((pos), (obj))) +#define bpf_map__for_each bpf_object__for_each_map + +LIBBPF_API struct bpf_map * +bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); + +/* get/set map FD */ +LIBBPF_API int bpf_map__fd(const struct bpf_map *map); +LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); +/* get map definition */ +LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); +/* get map name */ +LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); +/* get/set map type */ +LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); +/* get/set map size (max_entries) */ +LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); +LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); +/* get/set map flags */ +LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); +/* get/set map NUMA node */ +LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); +/* get/set map key size */ +LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); +/* get/set map value size */ +LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size); +/* get map key/value BTF type IDs */ +LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); +LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); +/* get/set map if_index */ +LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); + +typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); +LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, + bpf_map_clear_priv_t clear_priv); +LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size); +LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); +LIBBPF_API bool 
bpf_map__is_internal(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
+LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
+LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
+LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
+
+LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
+
+LIBBPF_API long libbpf_get_error(const void *ptr);
+
+struct bpf_prog_load_attr {
+ const char *file;
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type expected_attach_type;
+ int ifindex;
+ int log_level;
+ int prog_flags;
+};
+
+LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
+ struct bpf_object **pobj, int *prog_fd);
+LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd);
+
+struct xdp_link_info {
+ __u32 prog_id;
+ __u32 drv_prog_id;
+ __u32 hw_prog_id;
+ __u32 skb_prog_id;
+ __u8 attach_mode;
+};
+
+struct bpf_xdp_set_link_opts {
+ size_t sz;
+ int old_fd;
+};
+#define bpf_xdp_set_link_opts__last_field old_fd
+
+LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
+ const struct bpf_xdp_set_link_opts *opts);
+LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
+LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+ size_t info_size, __u32 flags);
+
+/* Ring buffer APIs */
+struct ring_buffer;
+
+typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
+
+struct ring_buffer_opts {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+};
+
+#define ring_buffer_opts__last_field sz
+
+LIBBPF_API struct ring_buffer *
+ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
+ const struct ring_buffer_opts *opts);
+LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
+LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
+ ring_buffer_sample_fn sample_cb, void *ctx);
+LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
+LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
+
+/* Perf buffer APIs */
+struct perf_buffer;
+
+typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
+ void *data, __u32 size);
+typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
+
+/* common use perf buffer options */
+struct perf_buffer_opts {
+ /* if specified, sample_cb is called for each sample */
+ perf_buffer_sample_fn sample_cb;
+ /* if specified, lost_cb is called for each batch of lost samples */
+ perf_buffer_lost_fn lost_cb;
+ /* ctx is provided to sample_cb and lost_cb */
+ void *ctx;
+};
+
+LIBBPF_API struct perf_buffer *
+perf_buffer__new(int map_fd, size_t page_cnt,
+ const struct perf_buffer_opts *opts);
+
+enum bpf_perf_event_ret {
+ LIBBPF_PERF_EVENT_DONE = 0,
+ LIBBPF_PERF_EVENT_ERROR = -1,
+ LIBBPF_PERF_EVENT_CONT = -2,
+};
+
+struct perf_event_header;
+
+typedef enum bpf_perf_event_ret
+(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event);
+
+/* raw perf buffer options, giving most power and control */
+struct perf_buffer_raw_opts {
+ /* perf event attrs passed directly into perf_event_open() */
+ struct perf_event_attr *attr;
+ /* raw event callback */
+ perf_buffer_event_fn event_cb;
+ /* ctx is provided to event_cb */
+ void *ctx;
+ /* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
+ * max_entries of given PERF_EVENT_ARRAY map)
+ */
+ int cpu_cnt;
+ /* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */
+ int *cpus;
+ /* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
+ int *map_keys;
+};
+
+LIBBPF_API struct perf_buffer *
+perf_buffer__new_raw(int map_fd, size_t page_cnt,
+ const struct perf_buffer_raw_opts *opts);
+
+LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
+LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
+
+typedef enum bpf_perf_event_ret
+ (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+ void *private_data);
+LIBBPF_API enum bpf_perf_event_ret
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+ void **copy_mem, size_t *copy_size,
+ bpf_perf_event_print_t fn, void *private_data);
+
+struct bpf_prog_linfo;
+struct bpf_prog_info;
+
+LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
+LIBBPF_API struct bpf_prog_linfo *
+bpf_prog_linfo__new(const struct bpf_prog_info *info);
+LIBBPF_API const struct bpf_line_info *
+bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
+ __u64 addr, __u32 func_idx, __u32 nr_skip);
+LIBBPF_API const struct bpf_line_info *
+bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
+ __u32 insn_off, __u32 nr_skip);
+
+/*
+ * Probe for supported system features
+ *
+ * Note that running many of these probes in a short amount of time can cause
+ * the kernel to reach the maximal size of lockable memory allowed for the
+ * user, causing subsequent probes to fail. In this case, the caller may want
+ * to adjust that limit with setrlimit().
+ */
+LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
+ __u32 ifindex);
+LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
+ enum bpf_prog_type prog_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);
+
+/*
+ * Get bpf_prog_info in continuous memory
+ *
+ * struct bpf_prog_info has multiple arrays. The user has the option to choose
+ * arrays to fetch from kernel. The following APIs provide a uniform way to
+ * fetch these data. All arrays in bpf_prog_info are stored in a single
+ * continuous memory region. This makes it easy to store the info in a
+ * file.
+ *
+ * Before writing bpf_prog_info_linear to files, it is necessary to
+ * translate pointers in bpf_prog_info to offsets. Helper functions
+ * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
+ * are introduced to switch between pointers and offsets.
+ * + * Examples: + * # To fetch map_ids and prog_tags: + * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) | + * (1UL << BPF_PROG_INFO_PROG_TAGS); + * struct bpf_prog_info_linear *info_linear = + * bpf_program__get_prog_info_linear(fd, arrays); + * + * # To save data in file + * bpf_program__bpil_addr_to_offs(info_linear); + * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len); + * + * # To read data from file + * read(f, info_linear, <proper_size>); + * bpf_program__bpil_offs_to_addr(info_linear); + */ +enum bpf_prog_info_array { + BPF_PROG_INFO_FIRST_ARRAY = 0, + BPF_PROG_INFO_JITED_INSNS = 0, + BPF_PROG_INFO_XLATED_INSNS, + BPF_PROG_INFO_MAP_IDS, + BPF_PROG_INFO_JITED_KSYMS, + BPF_PROG_INFO_JITED_FUNC_LENS, + BPF_PROG_INFO_FUNC_INFO, + BPF_PROG_INFO_LINE_INFO, + BPF_PROG_INFO_JITED_LINE_INFO, + BPF_PROG_INFO_PROG_TAGS, + BPF_PROG_INFO_LAST_ARRAY, +}; + +struct bpf_prog_info_linear { + /* size of struct bpf_prog_info, when the tool is compiled */ + __u32 info_len; + /* total bytes allocated for data, round up to 8 bytes */ + __u32 data_len; + /* which arrays are included in data */ + __u64 arrays; + struct bpf_prog_info info; + __u8 data[]; +}; + +LIBBPF_API struct bpf_prog_info_linear * +bpf_program__get_prog_info_linear(int fd, __u64 arrays); + +LIBBPF_API void +bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear); + +LIBBPF_API void +bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); + +/* + * A helper function to get the number of possible CPUs before looking up + * per-CPU maps. Negative errno is returned on failure. + * + * Example usage: + * + * int ncpus = libbpf_num_possible_cpus(); + * if (ncpus < 0) { + * // error handling + * } + * long values[ncpus]; + * bpf_map_lookup_elem(per_cpu_map_fd, key, values); + * + */ +LIBBPF_API int libbpf_num_possible_cpus(void); + +struct bpf_map_skeleton { + const char *name; + struct bpf_map **map; + void **mmaped; +}; + +struct bpf_prog_skeleton { + const char *name; + struct bpf_program **prog; + struct bpf_link **link; +}; + +struct bpf_object_skeleton { + size_t sz; /* size of this struct, for forward/backward compatibility */ + + const char *name; + void *data; + size_t data_sz; + + struct bpf_object **obj; + + int map_cnt; + int map_skel_sz; /* sizeof(struct bpf_skeleton_map) */ + struct bpf_map_skeleton *maps; + + int prog_cnt; + int prog_skel_sz; /* sizeof(struct bpf_skeleton_prog) */ + struct bpf_prog_skeleton *progs; +}; + +LIBBPF_API int +bpf_object__open_skeleton(struct bpf_object_skeleton *s, + const struct bpf_object_open_opts *opts); +LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_LIBBPF_H */ diff --git a/src/bpfswitch/include/bpf/libbpf_common.h b/src/bpfswitch/include/bpf/libbpf_common.h new file mode 100644 index 00000000..a23ae1ac --- /dev/null +++ b/src/bpfswitch/include/bpf/libbpf_common.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Common user-facing libbpf helpers. 
+ * + * Copyright (c) 2019 Facebook + */ + +#ifndef __LIBBPF_LIBBPF_COMMON_H +#define __LIBBPF_LIBBPF_COMMON_H + +#include <string.h> + +#ifndef LIBBPF_API +#define LIBBPF_API __attribute__((visibility("default"))) +#endif + +/* Helper macro to declare and initialize libbpf options struct + * + * This dance with uninitialized declaration, followed by memset to zero, + * followed by assignment using compound literal syntax is done to preserve + * ability to use a nice struct field initialization syntax and **hopefully** + * have all the padding bytes initialized to zero. It's not guaranteed though, + * when copying literal, that compiler won't copy garbage in literal's padding + * bytes, but that's the best way I've found and it seems to work in practice. + * + * Macro declares opts struct of given type and name, zero-initializes, + * including any extra padding, it with memset() and then assigns initial + * values provided by users in struct initializer-syntax as varargs. + */ +#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \ + struct TYPE NAME = ({ \ + memset(&NAME, 0, sizeof(struct TYPE)); \ + (struct TYPE) { \ + .sz = sizeof(struct TYPE), \ + __VA_ARGS__ \ + }; \ + }) + +#endif /* __LIBBPF_LIBBPF_COMMON_H */ diff --git a/src/bpfswitch/include/bpf/libbpf_util.h b/src/bpfswitch/include/bpf/libbpf_util.h new file mode 100644 index 00000000..59c779c5 --- /dev/null +++ b/src/bpfswitch/include/bpf/libbpf_util.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2019 Facebook */ + +#ifndef __LIBBPF_LIBBPF_UTIL_H +#define __LIBBPF_LIBBPF_UTIL_H + +#include <stdbool.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* Use these barrier functions instead of smp_[rw]mb() when they are + * used in a libbpf header file. That way they can be built into the + * application that uses libbpf. + */ +#if defined(__i386__) || defined(__x86_64__) +# define libbpf_smp_rmb() asm volatile("" : : : "memory") +# define libbpf_smp_wmb() asm volatile("" : : : "memory") +# define libbpf_smp_mb() \ + asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc") +/* Hinders stores to be observed before older loads. */ +# define libbpf_smp_rwmb() asm volatile("" : : : "memory") +#elif defined(__aarch64__) +# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory") +# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory") +# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory") +# define libbpf_smp_rwmb() libbpf_smp_mb() +#elif defined(__arm__) +/* These are only valid for armv7 and above */ +# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory") +# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory") +# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory") +# define libbpf_smp_rwmb() libbpf_smp_mb() +#else +/* Architecture missing native barrier functions. */ +# define libbpf_smp_rmb() __sync_synchronize() +# define libbpf_smp_wmb() __sync_synchronize() +# define libbpf_smp_mb() __sync_synchronize() +# define libbpf_smp_rwmb() __sync_synchronize() +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/bpfswitch/include/bpf/xsk.h b/src/bpfswitch/include/bpf/xsk.h new file mode 100644 index 00000000..584f6820 --- /dev/null +++ b/src/bpfswitch/include/bpf/xsk.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * AF_XDP user-space access library. + * + * Copyright(c) 2018 - 2019 Intel Corporation. 
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef __LIBBPF_XSK_H
+#define __LIBBPF_XSK_H
+
+#include <stdio.h>
+#include <stdint.h>
+#include <linux/if_xdp.h>
+
+#include "libbpf.h"
+#include "libbpf_util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Do not access these members directly. Use the functions below. */
+#define DEFINE_XSK_RING(name) \
+struct name { \
+ __u32 cached_prod; \
+ __u32 cached_cons; \
+ __u32 mask; \
+ __u32 size; \
+ __u32 *producer; \
+ __u32 *consumer; \
+ void *ring; \
+ __u32 *flags; \
+}
+
+DEFINE_XSK_RING(xsk_ring_prod);
+DEFINE_XSK_RING(xsk_ring_cons);
+
+/* For a detailed explanation on the memory barriers associated with the
+ * ring, please take a look at net/xdp/xsk_queue.h.
+ */
+
+struct xsk_umem;
+struct xsk_socket;
+
+static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
+ __u32 idx)
+{
+ __u64 *addrs = (__u64 *)fill->ring;
+
+ return &addrs[idx & fill->mask];
+}
+
+static inline const __u64 *
+xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
+{
+ const __u64 *addrs = (const __u64 *)comp->ring;
+
+ return &addrs[idx & comp->mask];
+}
+
+static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
+ __u32 idx)
+{
+ struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
+
+ return &descs[idx & tx->mask];
+}
+
+static inline const struct xdp_desc *
+xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
+{
+ const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
+
+ return &descs[idx & rx->mask];
+}
+
+static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
+{
+ return *r->flags & XDP_RING_NEED_WAKEUP;
+}
+
+static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
+{
+ __u32 free_entries = r->cached_cons - r->cached_prod;
+
+ if (free_entries >= nb)
+ return free_entries;
+
+ /* Refresh the local tail pointer.
+ * cached_cons is r->size bigger than the real consumer pointer so
+ * that this addition can be avoided in the more frequently
+ * executed code that computes free_entries in the beginning of
+ * this function. Without this optimization it would have been
+ * free_entries = r->cached_prod - r->cached_cons + r->size.
+ */
+ r->cached_cons = *r->consumer + r->size;
+
+ return r->cached_cons - r->cached_prod;
+}
+
+static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
+{
+ __u32 entries = r->cached_prod - r->cached_cons;
+
+ if (entries == 0) {
+ r->cached_prod = *r->producer;
+ entries = r->cached_prod - r->cached_cons;
+ }
+
+ return (entries > nb) ? nb : entries;
+}
+
+static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
+ size_t nb, __u32 *idx)
+{
+ if (xsk_prod_nb_free(prod, nb) < nb)
+ return 0;
+
+ *idx = prod->cached_prod;
+ prod->cached_prod += nb;
+
+ return nb;
+}
+
+static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
+{
+ /* Make sure everything has been written to the ring before indicating
+ * this to the kernel by writing the producer pointer.
+ */
+ libbpf_smp_wmb();
+
+ *prod->producer += nb;
+}
+
+static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
+ size_t nb, __u32 *idx)
+{
+ size_t entries = xsk_cons_nb_avail(cons, nb);
+
+ if (entries > 0) {
+ /* Make sure we do not speculatively read the data before
+ * we have received the packet buffers from the ring.
+ */ + libbpf_smp_rmb(); + + *idx = cons->cached_cons; + cons->cached_cons += entries; + } + + return entries; +} + +static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb) +{ + /* Make sure data has been read before indicating we are done + * with the entries by updating the consumer pointer. + */ + libbpf_smp_rwmb(); + + *cons->consumer += nb; +} + +static inline void *xsk_umem__get_data(void *umem_area, __u64 addr) +{ + return &((char *)umem_area)[addr]; +} + +static inline __u64 xsk_umem__extract_addr(__u64 addr) +{ + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; +} + +static inline __u64 xsk_umem__extract_offset(__u64 addr) +{ + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; +} + +static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr) +{ + return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr); +} + +LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem); +LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk); + +#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048 +#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048 +#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */ +#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT) +#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0 +#define XSK_UMEM__DEFAULT_FLAGS 0 + +struct xsk_umem_config { + __u32 fill_size; + __u32 comp_size; + __u32 frame_size; + __u32 frame_headroom; + __u32 flags; +}; + +/* Flags for the libbpf_flags field. */ +#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0) + +struct xsk_socket_config { + __u32 rx_size; + __u32 tx_size; + __u32 libbpf_flags; + __u32 xdp_flags; + __u16 bind_flags; +}; + +/* Set config to NULL to get the default configuration. */ +LIBBPF_API int xsk_umem__create(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk, + const char *ifname, __u32 queue_id, + struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + const struct xsk_socket_config *config); + +/* Returns 0 for success and -EBUSY if the umem is still in use. 
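+ * Any sockets created on the umem are expected to be destroyed first
+ * with xsk_socket__delete(); while any remain, -EBUSY is the expected
+ * result.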
*/ +LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem); +LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_XSK_H */ diff --git a/src/bpfswitch/include/xdp_fdb.h b/src/bpfswitch/include/xdp_fdb.h new file mode 100644 index 00000000..3e2e593a --- /dev/null +++ b/src/bpfswitch/include/xdp_fdb.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XDP_FDB_H_ +#define _XDP_FDB_H_ + +#include <linux/if_ether.h> + +struct xdp_stats { + __u64 bytes_fwd; + __u64 pkts_fwd; + __u64 dropped; +}; + +struct fdb_key +{ + __u8 smac[ETH_ALEN]; + __u8 dmac[ETH_ALEN]; + //__u16 vlan; +}; + +#endif diff --git a/src/bpfswitch/ksrc/Makefile b/src/bpfswitch/ksrc/Makefile new file mode 100644 index 00000000..64bec2f8 --- /dev/null +++ b/src/bpfswitch/ksrc/Makefile @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-2.0 + +include ../config.mk + +ifneq (,$(BUILDDIR)) +OBJDIR = $(BUILDDIR)/ksrc/obj/ +else +OBJDIR = obj/ +endif + +MODS += $(OBJDIR)xdp_l2fwd.o +MODS += $(OBJDIR)xdp_dummy.o + +VPATH := . + +# rule is based on samples/bpf/Makefile +DEFS = -D__KERNEL__ -D__BPF_TRACING__ -D__TARGET_ARCH_x86 $(EXTRA_DEFS) + +CFLAGS += -Wno-unused-value -Wno-pointer-sign +CFLAGS += -Wno-compare-distinct-pointer-types +CFLAGS += -Wno-gnu-variable-sized-type-not-at-end +CFLAGS += -Wno-address-of-packed-member +CFLAGS += -Wno-tautological-compare +CFLAGS += -Wno-unknown-warning-option +CFLAGS += -fno-stack-protector + +INCLUDES = -I../include +INCLUDES += -I$(KSRC)/arch/x86/include +INCLUDES += -I$(KBLD)/arch/x86/include/generated +INCLUDES += -I$(KBLD)/include +INCLUDES += -I$(KSRC)/include +INCLUDES += -I$(KSRC)/arch/x86/include/uapi +INCLUDES += -I$(KBLD)/arch/x86/include/generated/uapi +INCLUDES += -I$(KSRC)/include/uapi +INCLUDES += -I$(KBLD)/include/generated/uapi + +SINCLUDES = -include $(KSRC)/include/linux/kconfig.h +SINCLUDES += -include include/asm_goto_workaround.h + +# this is to find stdarg.h. Ubuntu has this under x86_64-linux-gnu +# and Fedora is under x86_64-redhat-linux. Let's try 'find'. +GCCVER=$(shell gcc -v 2>&1 | awk '{if ($$0 ~ /gcc version/) {ver=split($$3,n,"."); print n[1]}}') +GCC_INC=$(shell find /usr/lib/gcc/x86_64-*linux*/$(GCCVER) -name include) +NOSTDINC_FLAGS = -nostdinc -isystem $(GCC_INC) + +all: build $(MODS) + +build: + @mkdir -p $(OBJDIR) + +$(OBJDIR)%.o: %.c + $(QUIET_CLANG)$(CLANG) $(NOSTDINC_FLAGS) $(INCLUDES) \ + $(SINCLUDES) $(DEFS) $(CFLAGS) \ + -O2 -emit-llvm $(CLANG_FLAGS) -c $< -o $@.cl + $(QUIET_LLC)$(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ $@.cl + @rm $@.cl + +clean: + @rm -rf $(OBJDIR) diff --git a/src/bpfswitch/ksrc/include/asm_goto_workaround.h b/src/bpfswitch/ksrc/include/asm_goto_workaround.h new file mode 100644 index 00000000..7048bb35 --- /dev/null +++ b/src/bpfswitch/ksrc/include/asm_goto_workaround.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Facebook */ +#ifndef __ASM_GOTO_WORKAROUND_H +#define __ASM_GOTO_WORKAROUND_H + +/* + * This will bring in asm_volatile_goto and asm_inline macro definitions + * if enabled by compiler and config options. + */ +#include <linux/types.h> + +#ifdef asm_volatile_goto +#undef asm_volatile_goto +#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto") +#endif + +/* + * asm_inline is defined as asm __inline in "include/linux/compiler_types.h" + * if supported by the kernel's CC (i.e CONFIG_CC_HAS_ASM_INLINE) which is not + * supported by CLANG. 
+ */
+#ifdef asm_inline
+#undef asm_inline
+#define asm_inline asm
+#endif
+
+#define volatile(x...) volatile("")
+#endif
diff --git a/src/bpfswitch/ksrc/xdp_dummy.c b/src/bpfswitch/ksrc/xdp_dummy.c
new file mode 100644
index 00000000..d60c1c07
--- /dev/null
+++ b/src/bpfswitch/ksrc/xdp_dummy.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Dummy XDP program
+ *
+ * David Ahern <dsahern@gmail.com>
+ */
+#define KBUILD_MODNAME "xdp_dummy"
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp_dummy")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+ //bpf_debug("ingress: device %u queue %u\n",
+ // ctx->ingress_ifindex, ctx->rx_queue_index);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/src/bpfswitch/ksrc/xdp_l2fwd.c b/src/bpfswitch/ksrc/xdp_l2fwd.c
new file mode 100644
index 00000000..750b580d
--- /dev/null
+++ b/src/bpfswitch/ksrc/xdp_l2fwd.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Example of L2 forwarding via XDP. The FDB is a <smac,dmac> hash table
+ * returning the device index to which the packet is redirected.
+ *
+ * Copyright (c) 2019-2020 David Ahern <dsahern@gmail.com>
+ * Modified from the original version, which keyed the FDB on mac
+ * address and vlan id, to use the two mac addresses as the key.
+ *
+ */
+#define KBUILD_MODNAME "xdp_l2fwd"
+#include <uapi/linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_vlan.h>
+#include <linux/version.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "xdp_fdb.h"
+
+/* Redirecting TX traffic requires the egress net_device ifindex to be in
+ * this devmap.
+ */
+struct bpf_map_def SEC("maps") xdp_fwd_ports = {
+ .type = BPF_MAP_TYPE_DEVMAP_HASH,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(struct bpf_devmap_val),
+ .max_entries = 512,
+};
+
+/* <smac,dmac> to device index map */
+struct bpf_map_def SEC("maps") fdb_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct fdb_key),
+ .value_size = sizeof(u32),
+ .max_entries = 512,
+};
+
+SEC("xdp_l2fwd")
+int xdp_l2fwd_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct bpf_devmap_val *entry;
+ struct vlan_hdr *vhdr = NULL;
+ struct ethhdr *eth;
+ struct fdb_key key;
+ u8 smac[ETH_ALEN];
+ u16 h_proto = 0;
+ void *nh;
+
+ /* data in context points to ethernet header */
+ eth = data;
+
+ /* set pointer to header after ethernet header */
+ nh = data + sizeof(*eth);
+ if (nh > data_end)
+ return XDP_DROP; // malformed packet
+
+ __builtin_memset(&key, 0, sizeof(key));
+ __builtin_memcpy(key.dmac, eth->h_dest, ETH_ALEN);
+ __builtin_memcpy(key.smac, eth->h_source, ETH_ALEN);
+
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ vhdr = nh;
+ if (vhdr + 1 > data_end)
+ return XDP_DROP; // malformed packet
+
+ //key.vlan = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ }
+
+ entry = bpf_map_lookup_elem(&fdb_map, &key);
+ if (!entry || entry->ifindex == 0)
+ return XDP_PASS;
+
+ /* Verify redirect index exists in port map */
+ if (!bpf_map_lookup_elem(&xdp_fwd_ports, &entry->ifindex))
+ return XDP_PASS;
+
+ if (vhdr) {
+ /* remove VLAN header before hand off to VM */
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ __builtin_memcpy(smac, eth->h_source, ETH_ALEN);
+
+ if (bpf_xdp_adjust_head(ctx, sizeof(*vhdr)))
+ return XDP_PASS;
+
+ /* reset data pointers after adjust */
+ data = (void *)(long)ctx->data;
+ data_end = (void *)(long)ctx->data_end;
+ eth = data;
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ __builtin_memcpy(eth->h_dest, key.dmac, ETH_ALEN);
+ __builtin_memcpy(eth->h_source, smac, ETH_ALEN);
+ eth->h_proto = h_proto;
+ }
+
+ return bpf_redirect_map(&xdp_fwd_ports, entry->ifindex, 0);
+}
+
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/src/bpfswitch/scripts/ifindex.py b/src/bpfswitch/scripts/ifindex.py
new file mode 100755
index 00000000..aff9a3c5
--- /dev/null
+++ b/src/bpfswitch/scripts/ifindex.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import socket
+for if_nidx in socket.if_nameindex():
+    print("Interface: {}, Index: {}".format(if_nidx[1], if_nidx[0]))
diff --git a/src/bpfswitch/scripts/l2fwd.sh b/src/bpfswitch/scripts/l2fwd.sh
new file mode 100755
index 00000000..173f93c1
--- /dev/null
+++ b/src/bpfswitch/scripts/l2fwd.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+
+# Copyright David Ahern and Sridhar K. N. Rao
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
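+
+# Usage sketch (illustrative): after building the tree, set the MAC,
+# interface, and index variables in the configuration section below to match
+# the local setup (ifindex.py in this directory prints the indexes), then
+# run this script as root:
+#   sudo ./l2fwd.sh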
+
+# L2FWD configuration
+
+
+BPFFS=/sys/fs/bpf
+BPFTOOL=/usr/sbin/bpftool
+
+# MAC addresses, e.g., of the traffic-generator ports
+TGENEASTMAC=12:41:da:80:1f:71
+TGENWESTMAC=12:30:6d:9f:6f:42
+
+# Interfaces
+EAST=ens0f0
+WEST=ens0f1
+
+# Indexes of the interfaces (see ifindex.py)
+EASTINDEX=1
+WESTINDEX=2
+
+
+################################################################################
+#
+pr_msg()
+{
+ echo -e "\e[34m$*\e[00m"
+}
+
+run_cmd()
+{
+ local cmd="$*"
+
+ echo
+ echo -e "\e[31m${cmd}\e[00m"
+ sudo $cmd
+}
+
+show_maps()
+{
+ echo
+ echo -e "\e[31m${BPFTOOL} map sh\e[00m"
+ sudo ${BPFTOOL} map sh | \
+ awk 'BEGIN { skip = 0 } {
+ if (skip) {
+ skip--
+ } else if ($2 == "lpm_trie") {
+ skip = 1
+ } else {
+ print
+ }
+ }'
+}
+
+show_progs()
+{
+ echo
+ echo -e "\e[31m${BPFTOOL} prog sh\e[00m"
+ sudo ${BPFTOOL} prog sh | \
+ awk 'BEGIN { skip = 0 } {
+ if (skip) {
+ skip--
+ } else if ($2 == "cgroup_skb") {
+ skip = 2
+ } else {
+ print
+ }
+ }'
+}
+
+show_status()
+{
+ show_maps
+ show_progs
+ run_cmd ${BPFTOOL} net sh
+}
+
+do_reset()
+{
+ sudo rm -rf ${BPFFS}/map
+ sudo rm -rf ${BPFFS}/prog
+ sudo mkdir ${BPFFS}/map
+ sudo mkdir ${BPFFS}/prog
+
+ for d in ${EAST} ${WEST}
+ do
+ sudo ${BPFTOOL} net detach xdp dev ${d}
+ sudo ethtool -K ${d} hw-tc-offload on
+ sudo ethtool -K ${d} rxvlan off
+ done
+}
+
+################################################################################
+# start
+
+do_reset >/dev/null 2>&1
+
+echo
+pr_msg "Create ports map"
+pr_msg "- global map used for bulking redirected packets"
+
+run_cmd ${BPFTOOL} map create ${BPFFS}/map/xdp_fwd_ports \
+ type devmap_hash key 4 value 8 entries 512 name xdp_fwd_ports
+
+echo
+pr_msg "Add entries to the egress port map for the EAST and WEST interfaces"
+run_cmd ${BPFTOOL} map update pinned ${BPFFS}/map/xdp_fwd_ports \
+ key hex ${EASTINDEX} 0 0 0 value hex ${EASTINDEX} 0 0 0 0 0 0 0
+run_cmd ${BPFTOOL} map update pinned ${BPFFS}/map/xdp_fwd_ports \
+ key hex ${WESTINDEX} 0 0 0 value hex ${WESTINDEX} 0 0 0 0 0 0 0
+
+echo
+pr_msg "load l2fwd program and attach to ${EAST} and ${WEST}"
+
+run_cmd ${BPFTOOL} prog load ../ksrc/obj/xdp_l2fwd.o ${BPFFS}/prog/xdp_l2fwd \
+ map name xdp_fwd_ports pinned ${BPFFS}/map/xdp_fwd_ports
+run_cmd ${BPFTOOL} net attach xdp pinned ${BPFFS}/prog/xdp_l2fwd dev ${EAST}
+run_cmd ${BPFTOOL} net attach xdp pinned ${BPFFS}/prog/xdp_l2fwd dev ${WEST}
+
+echo
+pr_msg "Add FDB and port map entries for the traffic-generator MAC pairs"
+run_cmd ../usrc/bin/xdp_l2fwd -s ${TGENEASTMAC} -m ${TGENWESTMAC} -d ${WEST}
+run_cmd ../usrc/bin/xdp_l2fwd -s ${TGENWESTMAC} -m ${TGENEASTMAC} -d ${EAST}
+run_cmd ../usrc/bin/xdp_l2fwd -P
diff --git a/src/bpfswitch/usrc/Makefile b/src/bpfswitch/usrc/Makefile
new file mode 100644
index 00000000..fc15a6b0
--- /dev/null
+++ b/src/bpfswitch/usrc/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+
+include ../config.mk
+
+ifneq (,$(BUILDDIR))
+OBJDIR = $(BUILDDIR)/src/obj/
+else
+OBJDIR = obj/
+endif
+
+ifneq (,$(BUILDDIR))
+BINDIR = $(BUILDDIR)/src/bin/
+else
+BINDIR = bin/
+endif
+
+MODS += $(BINDIR)xdp_l2fwd
+MODS += $(BINDIR)xdp_dummy
+
+VPATH := .
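+
+# Note: when BUILDDIR is set (e.g. "make BUILDDIR=/tmp/bpfswitch", an
+# illustrative path), objects and binaries are placed under
+# $(BUILDDIR)/src/obj/ and $(BUILDDIR)/src/bin/ instead of obj/ and bin/.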
+
+CC = gcc
+CFLAGS += -O2 -g -Wall
+
+INCLUDES = -I../include -I../include/uapi
+INCLUDES += -I../include/tools
+
+ifneq (,$(LIBBPF_DIR))
+BPF_LINK_FEAT := $(shell egrep 'bpf_link_create' $(LIBBPF_DIR)/usr/include/bpf/libbpf.h)
+ifneq (,$(BPF_LINK_FEAT))
+CFLAGS += -DHAVE_BPF_LINK_CREATE
+endif
+else
+LIBBPF=-lbpf
+endif
+
+#COMMON += $(OBJDIR)timestamps.o
+COMMON += $(OBJDIR)libbpf_helpers.o
+COMMON += $(OBJDIR)str_utils.o
+#COMMON += $(OBJDIR)rbtree.o
+#COMMON += $(OBJDIR)parse_pkt.o
+#COMMON += $(OBJDIR)print_pkt.o
+#COMMON += $(OBJDIR)ksyms.o
+
+all: build $(MODS)
+
+build:
+	@mkdir -p $(OBJDIR) $(BINDIR)
+
+$(BINDIR)%: $(OBJDIR)%.o $(COMMON)
+	$(QUIET_LINK)$(CC) $(INCLUDES) $(DEFS) $(CFLAGS) $^ -o $@ $(LDLIBS)
+
+$(BINDIR)xdp_%: $(OBJDIR)xdp_%_user.o $(COMMON)
+	$(QUIET_LINK)$(CC) $(INCLUDES) $(DEFS) $(CFLAGS) $^ -o $@ $(LDLIBS)
+
+$(OBJDIR)%.o: %.c
+	$(QUIET_CC)$(CC) $(INCLUDES) $(DEFS) $(CFLAGS) -c $^ -o $@
+
+clean:
+	@rm -rf $(OBJDIR) $(BINDIR)
diff --git a/src/bpfswitch/usrc/libbpf_helpers.c b/src/bpfswitch/usrc/libbpf_helpers.c
new file mode 100644
index 00000000..522f5586
--- /dev/null
+++ b/src/bpfswitch/usrc/libbpf_helpers.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * convenience wrappers around libbpf functions
+ *
+ * Copyright (c) 2019-2021 David Ahern <dsahern@gmail.com>
+ */
+
+#include <linux/if_link.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <errno.h>
+
+#include "libbpf_helpers.h"
+
+enum bpf_obj_type {
+	BPF_OBJ_UNKNOWN,
+	BPF_OBJ_PROG,
+	BPF_OBJ_MAP,
+	BPF_OBJ_LINK,
+	BPF_OBJ_BTF,
+};
+
+int load_obj_file(struct bpf_prog_load_attr *attr,
+		  struct bpf_object **obj,
+		  const char *objfile, bool user_set)
+{
+	static char *expected_paths[] = {
+		"bin",
+		"ksrc/obj",	/* path in git tree */
+		"bpf-obj",
+		".",		/* cwd */
+		NULL,
+	};
+	char path[PATH_MAX];
+	int prog_fd, i = 0;
+
+	if (user_set) {
+		attr->file = objfile;
+		return bpf_prog_load_xattr(attr, obj, &prog_fd);
+	}
+
+	attr->file = path;
+	while (expected_paths[i]) {
+		struct stat sbuf;
+
+		snprintf(path, sizeof(path), "%s/%s",
+			 expected_paths[i], objfile);
+
+		if (stat(path, &sbuf) == 0) {
+			if (!bpf_prog_load_xattr(attr, obj, &prog_fd))
+				return 0;
+
+			if (errno != ENOENT)
+				break;
+		}
+		++i;
+	}
+
+	fprintf(stderr, "Failed to find object file; nothing to load\n");
+	return 1;
+}
+
+int bpf_map_get_fd_by_name(const char *name)
+{
+	struct bpf_map_info info = {};
+	__u32 len = sizeof(info);
+	__u32 id = 0;
+	int err, fd;
+
+	while (1) {
+		err = bpf_map_get_next_id(id, &id);
+		if (err)
+			break;
+
+		fd = bpf_map_get_fd_by_id(id);
+		if (fd < 0)
+			continue;
+
+		err = bpf_obj_get_info_by_fd(fd, &info, &len);
+		if (!err && strcmp(info.name, name) == 0)
+			return fd;
+
+		close(fd);
+	}
+
+	return -1;
+}
+
+/* from bpftool */
+static int get_fd_type(int fd)
+{
+	char path[PATH_MAX];
+	char buf[512];
+	ssize_t n;
+
+	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
+
+	n = readlink(path, buf, sizeof(buf) - 1);
+	if (n < 0) {
+		fprintf(stderr, "can't read link type: %s\n", strerror(errno));
+		return -1;
+	}
+	if (n == sizeof(buf) - 1) {
+		fprintf(stderr, "can't read link type: path too long!\n");
+		return -1;
+	}
+	/* readlink() does not null terminate */
+	buf[n] = '\0';
+
+	if (strstr(buf, "bpf-map"))
+		return BPF_OBJ_MAP;
+
+	if (strstr(buf, "bpf-prog"))
+		return BPF_OBJ_PROG;
+
+	if (strstr(buf, "bpf-link"))
+		return BPF_OBJ_LINK;
+
+	return BPF_OBJ_UNKNOWN;
+}
+
+int bpf_map_get_fd_by_path(const char *path)
+{
+	enum bpf_obj_type objtype;
+	int fd;
+
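+	/* bpf_obj_get() returns a new fd referencing an object pinned in
+	 * bpffs (e.g. /sys/fs/bpf/map/xdp_fwd_ports as pinned by
+	 * scripts/l2fwd.sh); the caller owns the fd and must close() it.
+	 */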
fd = bpf_obj_get(path); + if (fd < 0) { + fprintf(stderr, "Failed to get bpf object (%s): %s\n", + path, strerror(errno)); + return -1; + } + + objtype = get_fd_type(fd); + if (objtype != BPF_OBJ_MAP) { + fprintf(stderr, "Path is not to a BPF map\n"); + close(fd); + return -1; + } + + return fd; +} + +int bpf_map_get_fd(__u32 id, const char *path, const char *name, + const char *desc) +{ + int fd = -1; + + if (id) { + fd = bpf_map_get_fd_by_id(id); + if (fd < 0 && errno != ENOENT) { + fprintf(stderr, + "Failed to get fd for %s by id: %s: %d\n", + desc, strerror(errno), errno); + return -1; + } + } else if (path) { + fd = bpf_map_get_fd_by_path(path); + if (fd < 0) { + fprintf(stderr, + "Failed to get fd for %s by path: %s: %d\n", + desc, strerror(errno), errno); + return -1; + } + } else if (name) { + fd = bpf_map_get_fd_by_name(name); + if (fd < 0 && errno != ENOENT) { + fprintf(stderr, + "Failed to get fd for %s by expected name: %s: %d\n", + desc, strerror(errno), errno); + return -1; + } + } + + return fd; +} + +int bpf_prog_get_fd_by_path(const char *path) +{ + enum bpf_obj_type objtype; + int fd; + + fd = bpf_obj_get(path); + if (fd < 0) { + fprintf(stderr, "Failed to get bpf object (%s): %s\n", + path, strerror(errno)); + return -1; + } + + objtype = get_fd_type(fd); + if (objtype != BPF_OBJ_PROG) { + fprintf(stderr, "Path is not to a BPF program\n"); + close(fd); + return -1; + } + + return fd; +} + +int bpf_prog_get_fd_by_name(const char *name) +{ + struct bpf_prog_info info = {}; + __u32 len = sizeof(info); + __u32 id = 0; + int err, fd; + + while (1) { + err = bpf_prog_get_next_id(id, &id); + if (err) + break; + + fd = bpf_prog_get_fd_by_id(id); + if (fd < 0) + continue; + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (!err && strcmp(info.name, name) == 0) + return fd; + + close(fd); + } + + return -1; +} + +int bpf_prog_get_fd(__u32 id, const char *path, const char *name, + const char *desc) +{ + int fd = -1; + + if (id) { + fd = bpf_prog_get_fd_by_id(id); + if (fd < 0 && errno != ENOENT) { + fprintf(stderr, + "Failed to get fd for %s by id: %s: %d\n", + desc, strerror(errno), errno); + return -1; + } + } else if (path) { + fd = bpf_prog_get_fd_by_path(path); + if (fd < 0) { + fprintf(stderr, + "Failed to get fd for %s by path: %s: %d\n", + desc, strerror(errno), errno); + return -1; + } + } else if (name) { + fd = bpf_prog_get_fd_by_name(name); + if (fd < 0 && errno != ENOENT) { + fprintf(stderr, + "Failed to get fd for %s by expected name: %s: %d\n", + desc, strerror(errno), errno); + return -1; + } + } + + return fd; +} + +int attach_to_dev_generic(int idx, int prog_fd, const char *dev) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, prog_fd, XDP_FLAGS_SKB_MODE); + if (err < 0) { + printf("ERROR: failed to attach program to %s\n", dev); + return err; + } + + return 0; +} + +int detach_from_dev_generic(int idx, const char *dev) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, -1, XDP_FLAGS_SKB_MODE); + if (err < 0) + printf("ERROR: failed to detach program from %s\n", dev); + + return 0; +} + + +int attach_to_dev(int idx, int prog_fd, const char *dev) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, prog_fd, XDP_FLAGS_DRV_MODE); + if (err < 0) { + printf("ERROR: failed to attach program to %s\n", dev); + return err; + } + + return 0; +} + +int detach_from_dev(int idx, const char *dev) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, -1, 0); + if (err < 0) + printf("ERROR: failed to detach program from %s\n", dev); + + return 0; +} diff --git 
a/src/bpfswitch/usrc/libbpf_helpers.h b/src/bpfswitch/usrc/libbpf_helpers.h new file mode 100644 index 00000000..9be0513c --- /dev/null +++ b/src/bpfswitch/usrc/libbpf_helpers.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBBPF_HELPERS_H +#define __LIBBPF_HELPERS_H + +#include <bpf/libbpf.h> + +int load_obj_file(struct bpf_prog_load_attr *attr, + struct bpf_object **obj, + const char *objfile, bool user_set); + +int bpf_map_get_fd_by_name(const char *name); +int bpf_map_get_fd_by_path(const char *path); +int bpf_map_get_fd(__u32 id, const char *path, const char *name, + const char *desc); + +int bpf_prog_get_fd_by_path(const char *path); +int bpf_prog_get_fd(__u32 id, const char *path, const char *name, + const char *desc); + +int attach_to_dev_generic(int idx, int prog_fd, const char *dev); +int detach_from_dev_generic(int idx, const char *dev); + +int attach_to_dev(int idx, int prog_fd, const char *dev); +int detach_from_dev(int idx, const char *dev); + +int attach_to_dev_tx(int idx, int prog_fd, const char *dev); +int detach_from_dev_tx(int idx, const char *dev); + +#endif diff --git a/src/bpfswitch/usrc/str_utils.c b/src/bpfswitch/usrc/str_utils.c new file mode 100644 index 00000000..8650cdfb --- /dev/null +++ b/src/bpfswitch/usrc/str_utils.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * String conversion and parsing functions. + * + * David Ahern <dsahern@gmail.com> + */ +#include <linux/if_ether.h> +#include <net/if.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <string.h> +#include <errno.h> + +#include "str_utils.h" + +static int str_to_int_base(const char *str, int min, int max, int *value, int base) +{ + int number; + char *end; + + errno = 0; + number = (int) strtol(str, &end, base); + + if ( ((*end == '\0') || (*end == '\n')) && (end != str) && + (errno != ERANGE) && (min <= number) && (number <= max)) { + *value = number; + return 0; + } + + return -1; +} + +int str_to_int(const char *str, int min, int max, int *value) +{ + return str_to_int_base(str, min, max, value, 0); +} + +int str_to_ushort(const char *str, unsigned short *us) +{ + int i; + + if (str_to_int(str, 0, 0xFFFF, &i) != 0) + return -1; + + *us = (unsigned short) (i); + + return 0; +} + +int str_to_ulong_base(const char *str, unsigned long *ul, int base) +{ + char *end; + + *ul= strtoul(str, &end, base); + if (*end != '\0') + return -1; + + return 0; +} + +int str_to_ulong(const char *str, unsigned long *ul) +{ + return str_to_ulong_base(str, ul, 0); +} + +int str_to_ullong(const char *str, unsigned long long *ul) +{ + char *end; + + *ul= strtoull(str, &end, 0); + if (*end != '\0') + return -1; + + return 0; +} + +int str_to_mac(const char *str, unsigned char *mac) +{ + int rc = -1, m, i; + char *s = strdup(str), *p, *d, tmp[3]; + + if (!s) + return -1; + + p = s; + tmp[2] = '\0'; + for (i = 0; i < ETH_ALEN; ++i) { + if (*p == '\0') + goto out; + + d = strchr(p, ':'); + if (d) { + *d = '\0'; + if (strlen(p) > 2) + goto out; + + strcpy(tmp, p); + p = d + 1; + } else { + strncpy(tmp, p, 2); + p += 2; + } + + if (str_to_int_base(tmp, 0, 0xFF, &m, 16) != 0) + goto out; + + mac[i] = m; + } + + if (*p == '\0') + rc = 0; +out: + free(s); + + return rc; +} + +int get_ifidx(const char *arg) +{ + int idx; + + idx = if_nametoindex(arg); + if (!idx) + idx = strtoul(arg, NULL, 0); + + return idx; +} + +/* find parameters in a string -- based on Harbison and Steele, p. 
291 */ +int parsestr(char *str, char *delims, char *fields[], int nmax) +{ + int n; + + if (!str || (*str == '\0')) + return 0; + + n = 0; + fields[0] = strtok(str, delims); + while ((fields[n] != (char *) NULL) && (n < (nmax-1))) { + ++n; + fields[n] = strtok(NULL, delims); + } + + if ((n == (nmax - 1)) && (fields[n] != (char *) NULL)) + ++n; + + return n; +} + +void print_mac(const __u8 *mac, bool reverse) +{ + if (reverse) + printf("%.02x:%.02x:%.02x:%.02x:%.02x:%.02x", + mac[5], mac[4], mac[3], mac[2], mac[1], mac[0]); + else + printf("%.02x:%.02x:%.02x:%.02x:%.02x:%.02x", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); +} diff --git a/src/bpfswitch/usrc/str_utils.h b/src/bpfswitch/usrc/str_utils.h new file mode 100644 index 00000000..817eb5f6 --- /dev/null +++ b/src/bpfswitch/usrc/str_utils.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __STR_UTILS_H +#define __STR_UTILS_H + +int str_to_int(const char *str, int min, int max, int *value); +int str_to_ushort(const char *str, unsigned short *us); +int str_to_ulong(const char *str, unsigned long *ul); +int str_to_ulong_base(const char *str, unsigned long *ul, int base); +int str_to_ullong(const char *str, unsigned long long *ul); +int str_to_mac(const char *str, unsigned char *mac); +int get_ifidx(const char *arg); + +int parsestr(char *str, char *delims, char *fields[], int nmax); +void print_mac(const __u8 *mac, bool reverse); +#endif diff --git a/src/bpfswitch/usrc/xdp_dummy_user.c b/src/bpfswitch/usrc/xdp_dummy_user.c new file mode 100644 index 00000000..7741f330 --- /dev/null +++ b/src/bpfswitch/usrc/xdp_dummy_user.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <linux/if_link.h> +#include <linux/limits.h> +#include <net/if.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <string.h> +#include <unistd.h> +#include <fcntl.h> +#include <libgen.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include "libbpf_helpers.h" + +static void usage(const char *prog) +{ + fprintf(stderr, + "usage: %s [OPTS] interface-list\n" + "\nOPTS:\n" + " -d detach program\n" + " -f bpf-file bpf filename to load\n" + " -g skb mode\n" + , prog); +} + +int main(int argc, char **argv) +{ + int (*attach_fn)(int idx, int prog_fd, const char *dev) = attach_to_dev; + int (*detach_fn)(int idx, const char *dev) = detach_from_dev; + struct bpf_prog_load_attr prog_load_attr = { }; + const char *objfile = "xdp_dummy_kern.o"; + const char *pname = "xdp_dummy"; + bool filename_set = false; + struct bpf_program *prog; + struct bpf_object *obj; + int opt, i, prog_fd; + bool attach = true; + int ret = 0; + + while ((opt = getopt(argc, argv, ":df:g")) != -1) { + switch (opt) { + case 'f': + objfile = optarg; + filename_set = true; + break; + case 'd': + attach = false; + break; + case 'g': + attach_fn = attach_to_dev_generic; + detach_fn = detach_from_dev_generic; + break; + default: + usage(basename(argv[0])); + return 1; + } + } + + if (optind == argc) { + usage(basename(argv[0])); + return 1; + } + + if (!attach) { + for (i = optind; i < argc; ++i) { + int idx, err; + + idx = if_nametoindex(argv[i]); + if (!idx) + idx = strtoul(argv[i], NULL, 0); + + if (!idx) { + fprintf(stderr, "Invalid device argument\n"); + return 1; + } + err = detach_fn(idx, argv[i]); + if (err) + ret = err; + } + return ret; + } + + if (load_obj_file(&prog_load_attr, &obj, objfile, filename_set)) + return 1; + + prog = bpf_object__find_program_by_title(obj, pname); + prog_fd = 
bpf_program__fd(prog);
+	if (prog_fd < 0) {
+		fprintf(stderr, "program not found: %s\n", pname);
+		return 1;
+	}
+
+	for (i = optind; i < argc; ++i) {
+		int idx, err;
+
+		idx = if_nametoindex(argv[i]);
+		if (!idx)
+			idx = strtoul(argv[i], NULL, 0);
+
+		if (!idx) {
+			fprintf(stderr, "Invalid device argument\n");
+			return 1;
+		}
+
+		err = attach_fn(idx, prog_fd, argv[i]);
+		if (err)
+			ret = err;
+	}
+
+	return ret;
+}
diff --git a/src/bpfswitch/usrc/xdp_l2fwd_user.c b/src/bpfswitch/usrc/xdp_l2fwd_user.c
new file mode 100644
index 00000000..13437621
--- /dev/null
+++ b/src/bpfswitch/usrc/xdp_l2fwd_user.c
@@ -0,0 +1,402 @@
+/* Copyright (c) 2019-22 David Ahern <dsahern@gmail.com>
+ *
+ * The code is modified to use two mac addresses as the key instead of
+ * mac-address and vlan-id as the key
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <limits.h>
+#include <net/if.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+
+#include "xdp_fdb.h"
+#include "str_utils.h"
+#include "libbpf_helpers.h"
+
+static bool fdb_map_verify(int map_fd)
+{
+	struct bpf_map_info info = {};
+	__u32 len = sizeof(info);
+
+	if (bpf_obj_get_info_by_fd(map_fd, &info, &len)) {
+		fprintf(stderr, "Failed to get map info: %s: %d\n",
+			strerror(errno), errno);
+		return false;
+	}
+
+	if (info.type != BPF_MAP_TYPE_HASH ||
+	    info.key_size != sizeof(struct fdb_key) ||
+	    info.value_size != sizeof(__u32)) {
+		fprintf(stderr, "Incompatible map\n");
+		return false;
+	}
+
+	return true;
+}
+
+static bool ports_map_verify(int ports_fd, bool *with_prog)
+{
+	struct bpf_map_info info = {};
+	__u32 len = sizeof(info);
+
+	if (bpf_obj_get_info_by_fd(ports_fd, &info, &len)) {
+		fprintf(stderr, "Failed to get map info: %s: %d\n",
+			strerror(errno), errno);
+		return false;
+	}
+
+	if (info.value_size == sizeof(struct bpf_devmap_val))
+		*with_prog = true;
+
+	if (info.type != BPF_MAP_TYPE_DEVMAP_HASH ||
+	    info.key_size != sizeof(__u32) ||
+	    (info.value_size != sizeof(__u32) && !*with_prog)) {
+		fprintf(stderr, "Incompatible map\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int show_entries_cli(int fdb_fd, int ports_fd)
+{
+	struct fdb_key *key, *prev_key = NULL;
+	struct bpf_devmap_val pval;
+	bool with_prog = false;
+	char buf[IFNAMSIZ];
+	int err, i;
+	__u32 fval;
+
+	if (!fdb_map_verify(fdb_fd) ||
+	    !ports_map_verify(ports_fd, &with_prog))
+		return 1;
+
+	key = calloc(1, sizeof(*key));
+	if (!key) {
+		fprintf(stderr, "Failed to allocate memory for key\n");
+		return 1;
+	}
+
+	for (i = 0; ; ++i) {
+		err = bpf_map_get_next_key(fdb_fd, prev_key, key);
+		if (err) {
+			if (errno == ENOENT)
+				err = 0;
+			break;
+		}
+
+		fval = 0;
+		memset(&pval, 0, sizeof(pval));
+		if (bpf_map_lookup_elem(fdb_fd, key, &fval))
+			goto next_key;
+
+		if (if_indextoname(fval, buf) == NULL) {
+			fprintf(stderr, "WARNING: stale device index\n");
+			snprintf(buf, IFNAMSIZ, "-");
+		}
+
+		//printf("-v %u -m ", key->vlan);
+		printf("-s ");
+		print_mac(key->smac, false);
+		printf(" -m ");
+		print_mac(key->dmac, false);
+		printf(" -d %s", buf);
+
+		if (bpf_map_lookup_elem(ports_fd, &fval, &pval)) {
fprintf(stderr,
+				"No ports entry for device %s/%u\n", buf, fval);
+			goto end_entry;
+		}
+
+		if (with_prog)
+			printf(" -p %u", pval.bpf_prog.id);
+end_entry:
+		printf("\n");
+next_key:
+		prev_key = key;
+	}
+
+	free(key);
+	return err;
+}
+
+static int show_ports_entries(int map_fd)
+{
+	__u32 *key, *prev_key = NULL;
+	struct bpf_devmap_val val;
+	bool with_prog = false;
+	int err;
+
+	if (!ports_map_verify(map_fd, &with_prog))
+		return 1;
+
+	key = calloc(1, sizeof(*key));
+	if (!key) {
+		fprintf(stderr, "Failed to allocate memory for key\n");
+		return 1;
+	}
+
+	printf("\nPorts map:\n");
+	while (1) {
+		err = bpf_map_get_next_key(map_fd, prev_key, key);
+		if (err) {
+			if (errno == ENOENT)
+				err = 0;
+			break;
+		}
+
+		memset(&val, 0, sizeof(val));
+		if (!bpf_map_lookup_elem(map_fd, key, &val)) {
+			char buf[IFNAMSIZ];
+
+			if (if_indextoname(val.ifindex, buf) == NULL) {
+				fprintf(stderr, "WARNING: stale device index\n");
+				snprintf(buf, IFNAMSIZ, "-");
+			}
+
+			printf("index %u -> device %s/%u",
+			       *key, buf, val.ifindex);
+			if (with_prog && val.bpf_prog.id)
+				printf(", prog id %u", val.bpf_prog.id);
+			printf("\n");
+		}
+
+		prev_key = key;
+	}
+
+	free(key);
+	return err;
+}
+
+static int show_fdb_entries(int map_fd)
+{
+	struct fdb_key *key, *prev_key = NULL;
+	int err, i;
+	__u32 val;
+
+	if (!fdb_map_verify(map_fd))
+		return 1;
+
+	key = calloc(1, sizeof(*key));
+	if (!key) {
+		fprintf(stderr, "Failed to allocate memory for key\n");
+		return 1;
+	}
+
+	printf("FDB map:\n");
+	for (i = 0; ; ++i) {
+		err = bpf_map_get_next_key(map_fd, prev_key, key);
+		if (err) {
+			if (errno == ENOENT)
+				err = 0;
+			break;
+		}
+
+		val = 0;
+		if (!bpf_map_lookup_elem(map_fd, key, &val)) {
+			char buf[IFNAMSIZ];
+
+			if (if_indextoname(val, buf) == NULL) {
+				fprintf(stderr, "WARNING: stale device index\n");
+				snprintf(buf, IFNAMSIZ, "-");
+			}
+
+			//printf("entry %d: < %u, ", i, key->vlan);
+			printf("entry %d: < ", i);
+			print_mac(key->smac, false);
+			printf(", ");
+			print_mac(key->dmac, false);
+			printf(" > --> device %s/%u\n", buf, val);
+		}
+
+		prev_key = key;
+	}
+
+	free(key);
+	return err;
+}
+
+/* remove from fdb then remove device */
+static int remove_entries(int fdb_fd, struct fdb_key *key,
+			  int ports_fd, int idx)
+{
+	int rc;
+
+	rc = bpf_map_delete_elem(fdb_fd, key);
+	if (rc)
+		fprintf(stderr, "Failed to delete fdb entry\n");
+
+	if (bpf_map_delete_elem(ports_fd, &idx)) {
+		fprintf(stderr, "Failed to delete ports entry\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+static void usage(const char *prog)
+{
+	fprintf(stderr,
+		"usage: %s [OPTS]\n"
+		"\nOPTS:\n"
+		"    -f id          fdb map id or pinned path\n"
+		"    -t id          devmap id or pinned path for tx ports\n"
+		"    -d device      device to redirect\n"
+		"    -m mac         Destination mac address for entry\n"
+		"    -s mac         Source mac address for entry\n"
+		"    -r             remove entries\n"
+		"    -p progid      bpf program id or pinned path to attach to entry\n"
+		"    -P             print map entries\n"
+		"    -C             print map entries in command line format\n"
+		, prog);
+}
+
+int main(int argc, char **argv)
+{
+	struct bpf_devmap_val pval = { .bpf_prog.fd = -1 };
+	__u32 fdb_id = 0, ports_id = 0, bpf_prog_id = 0;
+	const char *ports_map_path = NULL;
+	const char *bpf_prog_path = NULL;
+	const char *fdb_map_path = NULL;
+	bool print_entries = false;
+	struct fdb_key key = {};
+	int fdb_fd, ports_fd;
+	bool delete = false;
+	bool cli_arg = false;
+	unsigned long tmp;
+	int opt, ret;
+
+	while ((opt = getopt(argc, argv, ":f:t:d:m:s:p:rPC")) != -1) {
+		switch (opt) {
+		case 'f':
+			if (str_to_ulong(optarg, &tmp) == 0) {
+				fdb_id = (__u32)tmp;
+			} else if (*optarg == '/') {
+				fdb_map_path
= optarg;
+			} else {
+				fprintf(stderr, "Invalid fdb map id\n");
+				return 1;
+			}
+			break;
+		case 't':
+			if (str_to_ulong(optarg, &tmp) == 0) {
+				ports_id = (__u32)tmp;
+			} else if (*optarg == '/') {
+				ports_map_path = optarg;
+			} else {
+				fprintf(stderr, "Invalid ports map id\n");
+				return 1;
+			}
+			break;
+		case 'd':
+			pval.ifindex = if_nametoindex(optarg);
+			if (!pval.ifindex) {
+				if (str_to_int(optarg, 0, INT_MAX, &ret)) {
+					fprintf(stderr, "Invalid device\n");
+					return 1;
+				}
+				pval.ifindex = (__u32)ret;
+			}
+			break;
+		case 'm':
+			if (str_to_mac(optarg, key.dmac)) {
+				fprintf(stderr, "Invalid Dest-mac address\n");
+				return 1;
+			}
+			break;
+		case 's':
+			/*
+			if (str_to_int(optarg, 0, 4095, &ret)) {
+				fprintf(stderr, "Invalid vlan\n");
+				return 1;
+			}
+			key.vlan = (__u16)ret;
+			*/
+			if (str_to_mac(optarg, key.smac)) {
+				fprintf(stderr, "Invalid Source-mac address\n");
+				return 1;
+			}
+			break;
+		case 'p':
+			if (str_to_ulong(optarg, &tmp) == 0) {
+				bpf_prog_id = (__u32)tmp;
+			} else if (*optarg == '/') {
+				bpf_prog_path = optarg;
+			} else {
+				fprintf(stderr, "Invalid program id\n");
+				return 1;
+			}
+			break;
+		case 'r':
+			delete = true;
+			break;
+		case 'C':
+			cli_arg = true;
+			break;
+		case 'P':
+			print_entries = true;
+			break;
+		default:
+			usage(basename(argv[0]));
+			return 1;
+		}
+	}
+
+	fdb_fd = bpf_map_get_fd(fdb_id, fdb_map_path, "fdb_map", "fdb map");
+	if (fdb_fd < 0)
+		return 1;
+
+	ports_fd = bpf_map_get_fd(ports_id, ports_map_path, "xdp_fwd_ports",
+				  "ports map");
+	if (ports_fd < 0)
+		return 1;
+
+	if (cli_arg)
+		return show_entries_cli(fdb_fd, ports_fd);
+
+	if (print_entries) {
+		ret = show_fdb_entries(fdb_fd);
+		return show_ports_entries(ports_fd) ? : ret;
+	}
+
+	pval.bpf_prog.fd = bpf_prog_get_fd(bpf_prog_id, bpf_prog_path, NULL,
+					   "redirect program");
+	if (pval.bpf_prog.fd < 0)
+		return 1;
+
+	if (!pval.ifindex) {
+		fprintf(stderr, "Device index not given\n");
+		return 1;
+	}
+
+	if (delete)
+		return remove_entries(fdb_fd, &key, ports_fd, pval.ifindex);
+
+	/* add device to port map and then add fdb entry */
+	ret = bpf_map_update_elem(ports_fd, &pval.ifindex, &pval, 0);
+	if (ret) {
+		fprintf(stderr, "Failed to add ports entry: %s: %d\n",
+			strerror(errno), errno);
+		remove_entries(fdb_fd, &key, ports_fd, pval.ifindex);
+		return ret;
+	}
+
+	ret = bpf_map_update_elem(fdb_fd, &key, &pval.ifindex, BPF_ANY);
+	if (ret) {
+		fprintf(stderr, "Failed to add fdb entry: %s: %d\n",
+			strerror(errno), errno);
+		remove_entries(fdb_fd, &key, ports_fd, pval.ifindex);
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/systems/README.md b/systems/README.md
index d0812a81..c4148e41 100644
--- a/systems/README.md
+++ b/systems/README.md
@@ -39,4 +39,31 @@ May need following changes:
     DPDK_LIB = $(DPDK_DIR)/build/lib64
 ```
-
+eBPF
+----
+
+Adding eBPF support may require additional installations. The installation
+script already includes tools like llvm, clang, libbpf-dev, etc.
+For some programs, you may have to build the kernel from source.
+In the folder where you have the kernel source, run:
+```
+make oldconfig && make prepare && make headers_install && make
+```
+While building any program, if you encounter an error with bpf.h in the
+include/linux/ folder, get the latest bcc from the iovisor project using
+the following commands:
+
+```
+apt purge bpfcc-tools libbpfcc python3-bpfcc
+wget https://github.com/iovisor/bcc/releases/download/v0.25.0/bcc-src-with-submodule.tar.gz
+tar xf bcc-src-with-submodule.tar.gz
+cd bcc/
+apt install -y python-is-python3
+apt install -y bison build-essential cmake flex git libedit-dev libllvm11 llvm-11-dev libclang-11-dev zlib1g-dev libelf-dev libfl-dev python3-distutils
+apt install -y checkinstall
+mkdir build
+cd build/
+cmake -DCMAKE_INSTALL_PREFIX=/usr -DPYTHON_CMD=python3 ..
+make
+checkinstall
+```
diff --git a/systems/debian/build_base_machine.sh b/systems/debian/build_base_machine.sh
index e41837a2..0c078140 100755
--- a/systems/debian/build_base_machine.sh
+++ b/systems/debian/build_base_machine.sh
@@ -30,6 +30,8 @@ apt-get -y install python3-venv
 apt-get -y install python3-pyelftools
 apt-get -y install meson
 apt-get -y install ninja-build
+apt-get -y install clang
+apt-get -y install llvm
 
 # Make and Compilers
 apt-get -y install make
@@ -40,3 +42,7 @@ apt-get -y install libssl1.1
 apt-get -y install libxml2
 apt-get -y install zlib1g-dev
 apt-get -y install scapy
+apt-get -y install libbpf-dev
+apt-get -y install libelf-dev
+apt-get -y install linux-tools-$(uname -r)
+apt-get -y install linux-cloud-tools-$(uname -r)
diff --git a/systems/fedora/33/build_base_machine.sh b/systems/fedora/33/build_base_machine.sh
index f3a27644..a6a973ce 100644
--- a/systems/fedora/33/build_base_machine.sh
+++ b/systems/fedora/33/build_base_machine.sh
@@ -51,6 +51,10 @@ cifs-utils
 socat
 sysstat
 sshpass
+llvm
+clang
+libbpf-devel
+
 # install python packages
 python3-virtualenv
diff --git a/systems/fedora/36/build_base_machine.sh b/systems/fedora/36/build_base_machine.sh
index f3a27644..a9a7f63b 100755
--- a/systems/fedora/36/build_base_machine.sh
+++ b/systems/fedora/36/build_base_machine.sh
@@ -51,6 +51,9 @@ cifs-utils
 socat
 sysstat
 sshpass
+llvm
+clang
+libbpf-devel
 
 # install python packages
 python3-virtualenv
diff --git a/systems/ubuntu/build_base_machine.sh b/systems/ubuntu/build_base_machine.sh
index 910cd173..c66fae43 100755
--- a/systems/ubuntu/build_base_machine.sh
+++ b/systems/ubuntu/build_base_machine.sh
@@ -34,6 +34,8 @@ apt-get -y install scapy
 apt-get -y install meson
 apt-get -y install ninja-build
 apt-get -y install python3-pyelftools
+apt-get -y install clang
+apt-get -y install llvm
 
 # Linux Kernel Source
 apt-get -y install linux-source
@@ -50,6 +52,10 @@ apt-get -y install libfuse-dev
 apt-get -y install libnuma1
 apt-get -y install libnuma-dev
 apt-get -y install sshpass
+apt-get -y install libelf-dev
+apt-get -y install libbpf-dev
+apt-get -y install linux-tools-$(uname -r)
+apt-get -y install linux-cloud-tools-$(uname -r)
 
 # Some useful tools you may optionally install
 #apt-get -y install ctags
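+
+# Optional sanity check for the eBPF toolchain used by bpfswitch (assumes
+# the clang/llvm and linux-tools packages above installed cleanly):
+#   clang --version
+#   llc --version | grep -i bpf    # the bpf target should be listed
+#   bpftool version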