mirror of
https://github.com/Qortal/Brooklyn.git
synced 2025-02-07 06:44:18 +00:00
* 0-day exploit mitigation * Memory-corruption prevention * Privilege-escalation prevention * Buffer-overflow prevention * File-system corruption defense * Thread-escape prevention. This may very well be the most intensive inclusion in BrooklynR. This will not be part of an x86 suite, nor will it be released as a toolkit. The security core toolkit will remain part of the kernel base.
68 lines
1.6 KiB
C
68 lines
1.6 KiB
C
#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
|
|
#define _LINUX_UNALIGNED_ACCESS_OK_H
|
|
|
|
#include <linux/kernel.h>
|
|
#include <asm/byteorder.h>
|
|
|
|
/* Load a 16-bit little-endian value from a possibly unaligned address. */
static __always_inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
{
	const __le16 *ptr = (const __le16 *)p;

	return le16_to_cpup(ptr);
}
|
|
|
|
/* Load a 32-bit little-endian value from a possibly unaligned address. */
static __always_inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
{
	const __le32 *ptr = (const __le32 *)p;

	return le32_to_cpup(ptr);
}
|
|
|
|
/* Load a 64-bit little-endian value from a possibly unaligned address. */
static __always_inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
{
	const __le64 *ptr = (const __le64 *)p;

	return le64_to_cpup(ptr);
}
|
|
|
|
/* Load a 16-bit big-endian value from a possibly unaligned address. */
static __always_inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
{
	const __be16 *ptr = (const __be16 *)p;

	return be16_to_cpup(ptr);
}
|
|
|
|
/* Load a 32-bit big-endian value from a possibly unaligned address. */
static __always_inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
{
	const __be32 *ptr = (const __be32 *)p;

	return be32_to_cpup(ptr);
}
|
|
|
|
/* Load a 64-bit big-endian value from a possibly unaligned address. */
static __always_inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
{
	const __be64 *ptr = (const __be64 *)p;

	return be64_to_cpup(ptr);
}
|
|
|
|
/* Store @val at a possibly unaligned address in little-endian byte order. */
static __always_inline void put_unaligned_le16(u16 val, void *p)
{
	__le16 *ptr = (__le16 *)p;

	*ptr = cpu_to_le16(val);
}
|
|
|
|
/* Store @val at a possibly unaligned address in little-endian byte order. */
static __always_inline void put_unaligned_le32(u32 val, void *p)
{
	__le32 *ptr = (__le32 *)p;

	*ptr = cpu_to_le32(val);
}
|
|
|
|
/* Store @val at a possibly unaligned address in little-endian byte order. */
static __always_inline void put_unaligned_le64(u64 val, void *p)
{
	__le64 *ptr = (__le64 *)p;

	*ptr = cpu_to_le64(val);
}
|
|
|
|
/* Store @val at a possibly unaligned address in big-endian byte order. */
static __always_inline void put_unaligned_be16(u16 val, void *p)
{
	__be16 *ptr = (__be16 *)p;

	*ptr = cpu_to_be16(val);
}
|
|
|
|
/* Store @val at a possibly unaligned address in big-endian byte order. */
static __always_inline void put_unaligned_be32(u32 val, void *p)
{
	__be32 *ptr = (__be32 *)p;

	*ptr = cpu_to_be32(val);
}
|
|
|
|
/* Store @val at a possibly unaligned address in big-endian byte order. */
static __always_inline void put_unaligned_be64(u64 val, void *p)
{
	__be64 *ptr = (__be64 *)p;

	*ptr = cpu_to_be64(val);
}
|
|
|
|
#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
|