author     Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/include/uapi/linux/byteorder
parent     98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base.
It is taken from the linux-4.1.y-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git, and
the base commit is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Importing it this way loses all the git history, which is not good. We
should use a separate OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/include/uapi/linux/byteorder')
-rw-r--r--  kernel/include/uapi/linux/byteorder/Kbuild           |   3
-rw-r--r--  kernel/include/uapi/linux/byteorder/big_endian.h     | 105
-rw-r--r--  kernel/include/uapi/linux/byteorder/little_endian.h  | 105
3 files changed, 213 insertions, 0 deletions
diff --git a/kernel/include/uapi/linux/byteorder/Kbuild b/kernel/include/uapi/linux/byteorder/Kbuild
new file mode 100644
index 000000000..619225b9f
--- /dev/null
+++ b/kernel/include/uapi/linux/byteorder/Kbuild
@@ -0,0 +1,3 @@
+# UAPI Header export list
+header-y += big_endian.h
+header-y += little_endian.h
diff --git a/kernel/include/uapi/linux/byteorder/big_endian.h b/kernel/include/uapi/linux/byteorder/big_endian.h
new file mode 100644
index 000000000..672374450
--- /dev/null
+++ b/kernel/include/uapi/linux/byteorder/big_endian.h
@@ -0,0 +1,105 @@
+#ifndef _UAPI_LINUX_BYTEORDER_BIG_ENDIAN_H
+#define _UAPI_LINUX_BYTEORDER_BIG_ENDIAN_H
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+	return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+	return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+	return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+	return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+	return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+	return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+	return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+	return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+	return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+	return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+	return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+	return (__force __u16)*p;
+}
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do { (void)(x); } while (0)
+#define __be64_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_be32s(x) do { (void)(x); } while (0)
+#define __be32_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_be16s(x) do { (void)(x); } while (0)
+#define __be16_to_cpus(x) do { (void)(x); } while (0)
+
+
+#endif /* _UAPI_LINUX_BYTEORDER_BIG_ENDIAN_H */
diff --git a/kernel/include/uapi/linux/byteorder/little_endian.h b/kernel/include/uapi/linux/byteorder/little_endian.h
new file mode 100644
index 000000000..d876736a0
--- /dev/null
+++ b/kernel/include/uapi/linux/byteorder/little_endian.h
@@ -0,0 +1,105 @@
+#ifndef _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H
+#define _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+	return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+	return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+	return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+	return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+	return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+	return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+	return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+	return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+	return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+	return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+	return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+	return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do { (void)(x); } while (0)
+#define __le64_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_le32s(x) do { (void)(x); } while (0)
+#define __le32_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_le16s(x) do { (void)(x); } while (0)
+#define __le16_to_cpus(x) do { (void)(x); } while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+
+#endif /* _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H */
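As background (not part of the imported diff): the following is a minimal, hypothetical user-space sketch of how these exported conversion helpers can be used once the sanitized kernel headers are installed. <asm/byteorder.h> resolves to little_endian.h or big_endian.h for the build architecture, so the same source works on either byte order. The variable names and the 0x12345678 test value are illustrative only.

/* Illustrative sketch only; assumes installed (sanitized) kernel headers. */
#include <stdio.h>
#include <asm/byteorder.h>	/* selects big_endian.h or little_endian.h */

int main(void)
{
	__u32 host_val = 0x12345678;

	/* On little-endian hosts this swaps bytes; on big-endian it is a no-op. */
	__be32 wire_val = __cpu_to_be32(host_val);

	/* Converting back recovers the original host-order value. */
	__u32 round_trip = __be32_to_cpu(wire_val);

	printf("host=0x%08x wire=0x%08x back=0x%08x\n",
	       host_val, (__u32)wire_val, round_trip);
	return 0;
}

Kernel code normally reaches these helpers through the unprefixed cpu_to_be32()/be32_to_cpu() wrappers; the double-underscore macros defined above are the architecture-selected layer those wrappers are built on.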