Mirror of https://github.com/Qortal/Brooklyn.git
Brooklyn/include/linux/cache.h
Scare Crowe 2a709f28fa Auto exploit mitigation feature
* 0day exploit mitigation
* Memory corruption prevention
* Privilege escalation prevention
* Buffer overflow prevention
* Filesystem corruption defense
* Thread escape prevention

This may very well be the most intensive addition to BrooklynR. It will not be part of an x86 suite, nor will it be released as a toolkit; the security core toolkit will remain part of the kernel base.
2021-11-13 09:26:51 +05:00


#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H
#include <uapi/linux/kernel.h>
#include <asm/cache.h>
#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
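/*
 * SMP_CACHE_BYTES is the cache line size assumed for SMP alignment and
 * padding; it defaults to the L1 cache line size unless the architecture
 * overrides it in asm/cache.h.
 */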
#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. If an architecture doesn't support it, ignore the
 * hint.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
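/*
 * With PaX KERNEXEC enabled, __read_only places objects in a dedicated
 * .data..read_only section and __ro_after_init becomes an alias for it;
 * without KERNEXEC, __read_only falls back to __read_mostly and
 * __ro_after_init keeps its own .data..ro_after_init section below.
 */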
#ifdef CONFIG_PAX_KERNEXEC
# ifdef __ro_after_init
# error KERNEXEC requires __read_only
# endif
# define __read_only __attribute__((__section__(".data..read_only")))
# define __ro_after_init __read_only
#else
# define __read_only __read_mostly
#endif
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
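/*
 * ____cacheline_aligned aligns an object to the SMP cache line size in
 * place; __cacheline_aligned additionally moves it into the
 * .data..cacheline_aligned section. The *_in_smp variants expand to
 * nothing on uniprocessor builds.
 */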
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
#ifndef __cacheline_aligned
#define __cacheline_aligned \
	__attribute__((__aligned__(SMP_CACHE_BYTES), \
		       __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */
#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes/L3 cacheline size etc.
 * Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif
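/*
 * ____cacheline_internodealigned_in_smp aligns to the inter-node cache
 * line size (e.g. an L3 line) to avoid false sharing across nodes; it is
 * a no-op on non-SMP builds.
 */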
#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
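/*
 * When the architecture does not provide a runtime cache_line_size()
 * (CONFIG_ARCH_HAS_CACHE_LINE_SIZE unset), fall back to the compile-time
 * L1 cache line size.
 */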
#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size() L1_CACHE_BYTES
#endif
#endif /* __LINUX_CACHE_H */
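
For reference, a minimal sketch of how these annotations are typically applied in kernel code. The identifiers below (example_debug_level, example_feature_mask, struct example_counter) are invented for illustration and are not taken from the Brooklyn tree.

#include <linux/cache.h>

/* Read often, written rarely: keep it off frequently dirtied cachelines. */
static int example_debug_level __read_mostly = 1;

/* Written only during boot, effectively constant afterwards (and placed in
 * the read-only data section when CONFIG_PAX_KERNEXEC provides __read_only). */
static unsigned long example_feature_mask __ro_after_init;

/* Hot, concurrently updated structure: give it its own cache line on SMP
 * builds so writers on different CPUs do not false-share a line. */
struct example_counter {
	unsigned long hits;
	unsigned long misses;
} ____cacheline_aligned_in_smp;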