mirror of https://github.com/Qortal/Brooklyn.git synced 2025-02-01 07:42:18 +00:00
This commit is contained in:
Raziel K. Crowe 2022-04-02 17:10:17 +05:00
parent b530551891
commit c06278f256
589 changed files with 22216 additions and 12186 deletions

lib/.gitignore vendored

@ -4,3 +4,5 @@
/gen_crc32table
/gen_crc64table
/oid_registry_data.c
/test_fortify.log
/test_fortify/*.log

lib/Kconfig

@ -64,9 +64,6 @@ config GENERIC_STRNLEN_USER
config GENERIC_NET_UTILS
bool
config GENERIC_FIND_FIRST_BIT
bool
source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP
@ -121,6 +118,8 @@ config INDIRECT_IOMEM_FALLBACK
mmio accesses when the IO memory address is not a registered
emulated region.
source "lib/crypto/Kconfig"
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@ -670,6 +669,10 @@ config STACKDEPOT
bool
select STACKTRACE
config STACKDEPOT_ALWAYS_INIT
bool
select STACKDEPOT
config STACK_HASH_ORDER
int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
range 12 20
@ -679,6 +682,11 @@ config STACK_HASH_ORDER
Select the hash size as a power of 2 for the stackdepot hash table.
Choose a lower value to reduce the memory impact.
config REF_TRACKER
bool
depends on STACKTRACE_SUPPORT
select STACKDEPOT
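The ref_tracker infrastructure this enables (lib/ref_tracker.c, added elsewhere in this commit) records a stack trace per acquired reference so leaks can be attributed. A minimal usage sketch, hedged since the signatures are taken from the 5.17-era <linux/ref_tracker.h> and may differ here:

	struct ref_tracker_dir dir;
	struct ref_tracker *tracker = NULL;

	ref_tracker_dir_init(&dir, 16);			/* keep up to 16 freed entries for reports */
	ref_tracker_alloc(&dir, &tracker, GFP_KERNEL);	/* pairs with a reference get */
	/* ... use the object ... */
	ref_tracker_free(&dir, &tracker);		/* pairs with the reference put */
	ref_tracker_dir_exit(&dir);			/* warns if references leaked */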
config SBITMAP
bool

lib/Kconfig.debug

@ -316,6 +316,7 @@ config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
@ -346,8 +347,9 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1536 if (!64BIT && (PARISC || XTENSA))
default 1024 if (!64BIT && !PARISC)
default 2048 if PARISC
default 1536 if (!64BIT && XTENSA)
default 1024 if !64BIT
default 2048 if 64BIT
help
Tell gcc to warn at build time for stack frames larger than this.
@ -458,7 +460,7 @@ config STACK_VALIDATION
config VMLINUX_VALIDATION
bool
depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT
depends on STACK_VALIDATION && DEBUG_ENTRY
default y
config VMLINUX_MAP
@ -597,6 +599,11 @@ config DEBUG_MISC
Say Y here if you need to enable miscellaneous debug code that should
be under a more specific debug option but isn't.
menu "Networking Debugging"
source "net/Kconfig.debug"
endmenu # "Networking Debugging"
menu "Memory Debugging"
@ -877,7 +884,7 @@ config DEBUG_MEMORY_INIT
config MEMORY_NOTIFIER_ERROR_INJECT
tristate "Memory hotplug notifier error injection module"
depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
depends on MEMORY_HOTPLUG && NOTIFIER_ERROR_INJECTION
help
This option provides the ability to inject artificial errors to
memory hotplug notifier chain callbacks. It is controlled through
@ -1977,6 +1984,8 @@ config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || STACK_VALIDATION || \
GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
help
@ -2080,9 +2089,10 @@ config TEST_DIV64
If unsure, say N.
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
tristate "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
depends on KUNIT
help
This option provides for testing basic kprobes functionality on
boot. Samples of kprobe and kretprobe are inserted and
@ -2104,6 +2114,16 @@ config BACKTRACE_SELF_TEST
Say N if you are unsure.
config TEST_REF_TRACKER
tristate "Self test for reference tracker"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select REF_TRACKER
help
This option provides a kernel module performing tests
using reference tracker infrastructure.
Say N if you are unsure.
config RBTREE_TEST
tristate "Red-Black tree test"
depends on DEBUG_KERNEL
@ -2204,12 +2224,11 @@ config TEST_RHASHTABLE
If unsure, say N.
config TEST_HASH
tristate "Perform selftest on hash functions"
config TEST_SIPHASH
tristate "Perform selftest on siphash functions"
help
Enable this option to test the kernel's integer (<linux/hash.h>),
string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
hash functions on boot (or module load).
Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
functions on boot (or module load).
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
@ -2353,6 +2372,25 @@ config BITFIELD_KUNIT
If unsure, say N.
config HASH_KUNIT_TEST
tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Enable this option to test the kernel's string (<linux/stringhash.h>) and
integer (<linux/hash.h>) hash functions on boot.
KUnit tests run during boot and output the results to the debug log
in TAP format (https://testanything.org/). Only useful for kernel devs
running the KUnit test harness, and not intended for inclusion into a
production build.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
config RESOURCE_KUNIT_TEST
tristate "KUnit test for resource API"
depends on KUNIT
@ -2452,6 +2490,17 @@ config RATIONAL_KUNIT_TEST
If unsure, say N.
config MEMCPY_KUNIT_TEST
tristate "Test memcpy(), memmove(), and memset() functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Builds unit tests for memcpy(), memmove(), and memset() functions.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config TEST_UDELAY
tristate "udelay test driver"
help
@ -2473,6 +2522,7 @@ config TEST_KMOD
depends on m
depends on NETDEVICES && NET_CORE && INET # for TUN
depends on BLOCK
depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS
select TEST_LKM
select XFS_FS
select TUN

lib/Kconfig.kasan

@ -38,7 +38,7 @@ menuconfig KASAN
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select STACKDEPOT
select STACKDEPOT_ALWAYS_INIT
help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.

lib/Kconfig.kcsan

@ -191,6 +191,26 @@ config KCSAN_STRICT
closely aligns with the rules defined by the Linux-kernel memory
consistency model (LKMM).
config KCSAN_WEAK_MEMORY
bool "Enable weak memory modeling to detect missing memory barriers"
default y
depends on KCSAN_STRICT
# We can either let objtool nop __tsan_func_{entry,exit}() and builtin
# atomics instrumentation in .noinstr.text, or use a compiler that can
# implement __no_kcsan to really remove all instrumentation.
depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000
help
Enable support for modeling a subset of weak memory, which allows
detecting a subset of data races due to missing memory barriers.
Depends on KCSAN_STRICT, because the options strengthening certain
plain accesses by default (depending on !KCSAN_STRICT) reduce the
ability to detect any data races involving reordered accesses, in
particular reordered writes.
Weak memory modeling relies on additional instrumentation and may
affect performance.
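For context, a minimal sketch of the bug class this option targets (hypothetical code; with weak memory modeling, KCSAN can flag the plain write to data being reordered past the flag update):

	int data;
	int ready;

	void producer(void)
	{
		data = 42;		/* missing smp_store_release() ordering */
		WRITE_ONCE(ready, 1);
	}

	void consumer(void)
	{
		if (smp_load_acquire(&ready))
			pr_info("%d\n", data);	/* may observe a stale value */
	}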
config KCSAN_REPORT_VALUE_CHANGE_ONLY
bool "Only report races where watcher observed a data value change"
default y

lib/Kconfig.ubsan

@ -112,19 +112,6 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
config UBSAN_OBJECT_SIZE
bool "Perform checking for accesses beyond the end of objects"
default UBSAN
# gcc hugely expands stack usage with -fsanitize=object-size
# https://lore.kernel.org/lkml/CAHk-=wjPasyJrDuwDnpHJS2TuQfExwe=px-SzLeN8GFMAQJPmQ@mail.gmail.com/
depends on !CC_IS_GCC
depends on $(cc-option,-fsanitize=object-size)
help
This option enables -fsanitize=object-size which checks for accesses
beyond the end of objects where the optimizer can determine both the
object being operated on and its size, usually seen with bad downcasts,
or access to struct members from NULL pointers.
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
default UBSAN

lib/Makefile

@ -61,7 +61,8 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
@ -100,7 +101,8 @@ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
#
# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
@ -269,6 +271,8 @@ obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
obj-$(CONFIG_REF_TRACKER) += ref_tracker.o
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o fdt_addresses.o
$(foreach file, $(libfdt_files), \
@ -358,5 +362,39 @@ obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
# FORTIFY_SOURCE compile-time behavior tests
TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
TEST_FORTIFY_LOG = test_fortify.log
quiet_cmd_test_fortify = TEST $@
cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \
$< $@ "$(NM)" $(CC) $(c_flags) \
$(call cc-disable-warning,fortify-source)
targets += $(TEST_FORTIFY_LOGS)
clean-files += $(TEST_FORTIFY_LOGS)
clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS))
$(obj)/test_fortify/%.log: $(src)/test_fortify/%.c \
$(src)/test_fortify/test_fortify.h \
$(srctree)/include/linux/fortify-string.h \
$(srctree)/scripts/test_fortify.sh \
FORCE
$(call if_changed,test_fortify)
quiet_cmd_gen_fortify_log = GEN $@
cmd_gen_fortify_log = cat </dev/null $(filter-out FORCE,$^) 2>/dev/null > $@ || true
targets += $(TEST_FORTIFY_LOG)
clean-files += $(TEST_FORTIFY_LOG)
$(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
$(call if_changed,gen_fortify_log)
# Fake dependency to trigger the fortify tests.
ifeq ($(CONFIG_FORTIFY_SOURCE),y)
$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
endif

lib/asn1_encoder.c

@ -164,8 +164,6 @@ asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
data_len -= 3;
ret = 0;
for (i = 2; i < oid_len; i++) {
ret = asn1_encode_oid_digit(&d, &data_len, oid[i]);
if (ret < 0)

lib/assoc_array.c

@ -741,8 +741,7 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
new_s0 = kzalloc(struct_size(new_s0, index_key, keylen), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
@ -849,8 +848,8 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
new_s0 = kzalloc(struct_size(new_s0, index_key, keylen),
GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
@ -864,7 +863,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
new_n0->parent_slot = 0;
memcpy(new_s0->index_key, shortcut->index_key,
keylen * sizeof(unsigned long));
flex_array_size(new_s0, index_key, keylen));
blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
@ -899,8 +898,8 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
new_s1 = kzalloc(struct_size(new_s1, index_key, keylen),
GFP_KERNEL);
if (!new_s1)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
@ -913,7 +912,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
memcpy(new_s1->index_key, shortcut->index_key,
keylen * sizeof(unsigned long));
flex_array_size(new_s1, index_key, keylen));
edit->set[1].ptr = &side->back_pointer;
edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
@ -1490,13 +1489,12 @@ int assoc_array_gc(struct assoc_array *array,
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
new_s = kmalloc(struct_size(new_s, index_key, keylen),
GFP_KERNEL);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long)));
memcpy(new_s, shortcut, struct_size(new_s, index_key, keylen));
new_s->back_pointer = new_parent;
new_s->parent_slot = shortcut->parent_slot;
*new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
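The conversions above trade open-coded sizeof(struct) + n * sizeof(elem) arithmetic for the struct_size()/flex_array_size() helpers from <linux/overflow.h>, which saturate to SIZE_MAX on overflow so the allocation fails instead of coming back undersized. A sketch of the pattern with a hypothetical struct:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct demo {				/* hypothetical, for illustration */
		unsigned long nr;
		unsigned long index_key[];	/* flexible array member */
	};

	static struct demo *demo_alloc(size_t keylen)
	{
		/* Same byte count as sizeof(struct demo) + keylen * sizeof(unsigned long),
		 * but overflow-safe; struct_size() only inspects the type of 'd'. */
		struct demo *d = kzalloc(struct_size(d, index_key, keylen), GFP_KERNEL);

		if (d)
			d->nr = keylen;
		return d;
	}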

lib/atomic64.c

@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &=)
@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
s64 generic_atomic64_dec_if_positive(atomic64_t *v)

lib/audit.c

@ -45,23 +45,27 @@ int audit_classify_syscall(int abi, unsigned syscall)
switch(syscall) {
#ifdef __NR_open
case __NR_open:
return 2;
return AUDITSC_OPEN;
#endif
#ifdef __NR_openat
case __NR_openat:
return 3;
return AUDITSC_OPENAT;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
return 4;
return AUDITSC_SOCKETCALL;
#endif
#ifdef __NR_execveat
case __NR_execveat:
#endif
case __NR_execve:
return 5;
return AUDITSC_EXECVE;
#ifdef __NR_openat2
case __NR_openat2:
return AUDITSC_OPENAT2;
#endif
default:
return 0;
return AUDITSC_NATIVE;
}
}
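The magic numbers are replaced by enum auditsc_class_t from the new <linux/audit_arch.h> (included by lib/compat_audit.c below); the enum order matches the old constants, as the diff itself shows:

	enum auditsc_class_t {
		AUDITSC_NATIVE = 0,
		AUDITSC_COMPAT,		/* was 1 */
		AUDITSC_OPEN,		/* was 2 */
		AUDITSC_OPENAT,		/* was 3 */
		AUDITSC_SOCKETCALL,	/* was 4 */
		AUDITSC_EXECVE,		/* was 5 */
		AUDITSC_OPENAT2,	/* new in this commit */
		AUDITSC_NVALS		/* count */
	};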

lib/bitmap.c

@ -1398,6 +1398,19 @@ unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
}
EXPORT_SYMBOL(bitmap_zalloc);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
{
return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags, node);
}
EXPORT_SYMBOL(bitmap_alloc_node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
{
return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(bitmap_zalloc_node);
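A short usage sketch of the new node-aware variants (dev_to_node() is the usual way a driver picks the node; error handling elided to the NULL check):

	unsigned long *map;

	/* zeroed 1024-bit bitmap, allocated on the device's NUMA node */
	map = bitmap_zalloc_node(1024, GFP_KERNEL, dev_to_node(dev));
	if (!map)
		return -ENOMEM;
	/* ... */
	bitmap_free(map);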
void bitmap_free(const unsigned long *bitmap)
{
kfree(bitmap);

lib/bootconfig.c

@ -4,16 +4,24 @@
* Masami Hiramatsu <mhiramat@kernel.org>
*/
#define pr_fmt(fmt) "bootconfig: " fmt
#ifdef __KERNEL__
#include <linux/bootconfig.h>
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/string.h>
#else /* !__KERNEL__ */
/*
* NOTE: This is only for tools/bootconfig, because tools/bootconfig will
* run the parser sanity test.
* This does NOT mean lib/bootconfig.c is available in user space.
* However, if you change this file, please make sure tools/bootconfig
* still builds and runs without issue.
*/
#include <linux/bootconfig.h>
#endif
/*
* Extra Boot Config (XBC) is given as tree-structured ascii text of
@ -34,6 +42,50 @@ static int xbc_err_pos __initdata;
static int open_brace[XBC_DEPTH_MAX] __initdata;
static int brace_index __initdata;
#ifdef __KERNEL__
static inline void * __init xbc_alloc_mem(size_t size)
{
return memblock_alloc(size, SMP_CACHE_BYTES);
}
static inline void __init xbc_free_mem(void *addr, size_t size)
{
memblock_free(addr, size);
}
#else /* !__KERNEL__ */
static inline void *xbc_alloc_mem(size_t size)
{
return malloc(size);
}
static inline void xbc_free_mem(void *addr, size_t size)
{
free(addr);
}
#endif
/**
* xbc_get_info() - Get the information of loaded boot config
* @node_size: A pointer to store the number of nodes.
* @data_size: A pointer to store the size of bootconfig data.
*
* Get the number of used nodes in @node_size if it is not NULL,
* and the size of bootconfig data in @data_size if it is not NULL.
* Return 0 if the boot config is initialized, or return -ENODEV.
*/
int __init xbc_get_info(int *node_size, size_t *data_size)
{
if (!xbc_data)
return -ENODEV;
if (node_size)
*node_size = xbc_node_num;
if (data_size)
*data_size = xbc_data_size;
return 0;
}
static int __init xbc_parse_error(const char *msg, const char *p)
{
xbc_err_msg = msg;
@ -226,7 +278,7 @@ int __init xbc_node_compose_key_after(struct xbc_node *root,
struct xbc_node *node,
char *buf, size_t size)
{
u16 keys[XBC_DEPTH_MAX];
uint16_t keys[XBC_DEPTH_MAX];
int depth = 0, ret = 0, total = 0;
if (!node || node == root)
@ -341,21 +393,21 @@ const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
/* XBC parse and tree build */
static int __init xbc_init_node(struct xbc_node *node, char *data, u32 flag)
static int __init xbc_init_node(struct xbc_node *node, char *data, uint32_t flag)
{
unsigned long offset = data - xbc_data;
if (WARN_ON(offset >= XBC_DATA_MAX))
return -EINVAL;
node->data = (u16)offset | flag;
node->data = (uint16_t)offset | flag;
node->child = 0;
node->next = 0;
return 0;
}
static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
static struct xbc_node * __init xbc_add_node(char *data, uint32_t flag)
{
struct xbc_node *node;
@ -385,7 +437,7 @@ static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
return node;
}
static struct xbc_node * __init __xbc_add_sibling(char *data, u32 flag, bool head)
static struct xbc_node * __init __xbc_add_sibling(char *data, uint32_t flag, bool head)
{
struct xbc_node *sib, *node = xbc_add_node(data, flag);
@ -412,17 +464,17 @@ static struct xbc_node * __init __xbc_add_sibling(char *data, u32 flag, bool hea
return node;
}
static inline struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
static inline struct xbc_node * __init xbc_add_sibling(char *data, uint32_t flag)
{
return __xbc_add_sibling(data, flag, false);
}
static inline struct xbc_node * __init xbc_add_head_sibling(char *data, u32 flag)
static inline struct xbc_node * __init xbc_add_head_sibling(char *data, uint32_t flag)
{
return __xbc_add_sibling(data, flag, true);
}
static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
static inline __init struct xbc_node *xbc_add_child(char *data, uint32_t flag)
{
struct xbc_node *node = xbc_add_sibling(data, flag);
@ -780,72 +832,14 @@ static int __init xbc_verify_tree(void)
return 0;
}
/**
* xbc_destroy_all() - Clean up all parsed bootconfig
*
* This clears all data structures of parsed bootconfig on memory.
* If you need to reuse xbc_init() with new boot config, you can
* use this.
*/
void __init xbc_destroy_all(void)
{
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_nodes = NULL;
brace_index = 0;
}
/**
* xbc_init() - Parse given XBC file and build XBC internal tree
* @buf: boot config text
* @emsg: A pointer of const char * to store the error message
* @epos: A pointer of int to store the error position
*
* This parses the boot config text in @buf. @buf must be a
* null terminated string and smaller than XBC_DATA_MAX.
* Return the number of stored nodes (>0) if succeeded, or -errno
* if there is any error.
* In error cases, @emsg will be updated with an error message and
* @epos will be updated with the error position which is the byte offset
* of @buf. If the error is not a parser error, @epos will be -1.
*/
int __init xbc_init(char *buf, const char **emsg, int *epos)
/* Need to set up xbc_data and xbc_nodes before calling this. */
static int __init xbc_parse_tree(void)
{
char *p, *q;
int ret, c;
int ret = 0, c;
if (epos)
*epos = -1;
if (xbc_data) {
if (emsg)
*emsg = "Bootconfig is already initialized";
return -EBUSY;
}
ret = strlen(buf);
if (ret > XBC_DATA_MAX - 1 || ret == 0) {
if (emsg)
*emsg = ret ? "Config data is too big" :
"Config data is empty";
return -ERANGE;
}
xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
SMP_CACHE_BYTES);
if (!xbc_nodes) {
if (emsg)
*emsg = "Failed to allocate bootconfig nodes";
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_data = buf;
xbc_data_size = ret + 1;
last_parent = NULL;
p = buf;
p = xbc_data;
do {
q = strpbrk(p, "{}=+;:\n#");
if (!q) {
@ -887,6 +881,81 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
}
} while (!ret);
return ret;
}
/**
* xbc_exit() - Clean up all parsed bootconfig
*
* This clears all data structures of parsed bootconfig on memory.
* If you need to reuse xbc_init() with new boot config, you can
* use this.
*/
void __init xbc_exit(void)
{
xbc_free_mem(xbc_data, xbc_data_size);
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_nodes = NULL;
brace_index = 0;
}
/**
* xbc_init() - Parse given XBC file and build XBC internal tree
* @data: The boot config text original data
* @size: The size of @data
* @emsg: A pointer of const char * to store the error message
* @epos: A pointer of int to store the error position
*
* This parses the boot config text in @data. @size must be smaller
* than XBC_DATA_MAX.
* Return the number of stored nodes (>0) if succeeded, or -errno
* if there is any error.
* In error cases, @emsg will be updated with an error message and
* @epos will be updated with the error position which is the byte offset
of @data. If the error is not a parser error, @epos will be -1.
*/
int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
{
int ret;
if (epos)
*epos = -1;
if (xbc_data) {
if (emsg)
*emsg = "Bootconfig is already initialized";
return -EBUSY;
}
if (size > XBC_DATA_MAX || size == 0) {
if (emsg)
*emsg = size ? "Config data is too big" :
"Config data is empty";
return -ERANGE;
}
xbc_data = xbc_alloc_mem(size + 1);
if (!xbc_data) {
if (emsg)
*emsg = "Failed to allocate bootconfig data";
return -ENOMEM;
}
memcpy(xbc_data, data, size);
xbc_data[size] = '\0';
xbc_data_size = size + 1;
xbc_nodes = xbc_alloc_mem(sizeof(struct xbc_node) * XBC_NODE_MAX);
if (!xbc_nodes) {
if (emsg)
*emsg = "Failed to allocate bootconfig nodes";
xbc_exit();
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
ret = xbc_parse_tree();
if (!ret)
ret = xbc_verify_tree();
@ -895,27 +964,9 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
*epos = xbc_err_pos;
if (emsg)
*emsg = xbc_err_msg;
xbc_destroy_all();
xbc_exit();
} else
ret = xbc_node_num;
return ret;
}
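With the new signature the caller passes an explicit length and xbc_init() keeps its own copy, so the source buffer need not stay alive. A usage sketch (bootconfig_data/bootconfig_size are placeholders):

	const char *emsg;
	int epos, ret;

	ret = xbc_init(bootconfig_data, bootconfig_size, &emsg, &epos);
	if (ret < 0)
		pr_err("bootconfig: parse error at offset %d: %s\n", epos, emsg);
	else
		pr_info("bootconfig: loaded %d nodes\n", ret);
	/* ... later, to release the copied data and the node array: */
	xbc_exit();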
/**
* xbc_debug_dump() - Dump current XBC node list
*
* Dump the current XBC node list on printk buffer for debug.
*/
void __init xbc_debug_dump(void)
{
int i;
for (i = 0; i < xbc_node_num; i++) {
pr_debug("[%d] %s (%s) .next=%d, .child=%d .parent=%d\n", i,
xbc_node_get_data(xbc_nodes + i),
xbc_node_is_value(xbc_nodes + i) ? "value" : "key",
xbc_nodes[i].next, xbc_nodes[i].child,
xbc_nodes[i].parent);
}
}

lib/compat_audit.c

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit_arch.h>
#include <asm/unistd32.h>
unsigned compat_dir_class[] = {
@ -33,19 +34,23 @@ int audit_classify_compat_syscall(int abi, unsigned syscall)
switch (syscall) {
#ifdef __NR_open
case __NR_open:
return 2;
return AUDITSC_OPEN;
#endif
#ifdef __NR_openat
case __NR_openat:
return 3;
return AUDITSC_OPENAT;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
return 4;
return AUDITSC_SOCKETCALL;
#endif
case __NR_execve:
return 5;
return AUDITSC_EXECVE;
#ifdef __NR_openat2
case __NR_openat2:
return AUDITSC_OPENAT2;
#endif
default:
return 1;
return AUDITSC_COMPAT;
}
}

lib/cpumask.c

@ -188,7 +188,7 @@ EXPORT_SYMBOL(free_cpumask_var);
*/
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
memblock_free_early(__pa(mask), cpumask_size());
memblock_free(mask, cpumask_size());
}
#endif

lib/crypto/Kconfig

@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
comment "Crypto library routines"
menu "Crypto library routines"
config CRYPTO_LIB_AES
tristate
@ -9,14 +9,14 @@ config CRYPTO_LIB_ARC4
tristate
config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
tristate
bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Blake2s library interface,
either builtin or as a module.
config CRYPTO_LIB_BLAKE2S_GENERIC
tristate
def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
help
This symbol can be depended upon by arch implementations of the
Blake2s library interface that require the generic code as a
@ -24,15 +24,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_BLAKE2S.
config CRYPTO_LIB_BLAKE2S
tristate "BLAKE2s hash function library"
depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n
help
Enable the Blake2s library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
is available and enabled.
config CRYPTO_ARCH_HAVE_LIB_CHACHA
tristate
help
@ -42,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
tristate
select CRYPTO_ALGAPI
select XOR_BLOCKS
help
This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a
@ -52,6 +43,7 @@ config CRYPTO_LIB_CHACHA_GENERIC
config CRYPTO_LIB_CHACHA
tristate "ChaCha library interface"
depends on CRYPTO
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
help
@ -123,11 +115,15 @@ config CRYPTO_LIB_CHACHA20POLY1305
tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
depends on CRYPTO
select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_POLY1305
select CRYPTO_ALGAPI
config CRYPTO_LIB_SHA256
tristate
config CRYPTO_LIB_SM4
tristate
endmenu

lib/crypto/Makefile

@ -10,11 +10,10 @@ libaes-y := aes.o
obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
libarc4-y := arc4.o
obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o
libblake2s-generic-y += blake2s-generic.o
obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o
libblake2s-y += blake2s.o
# blake2s is used by the /dev/random driver which is always builtin
obj-y += libblake2s.o
libblake2s-y := blake2s.o
libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
libchacha20poly1305-y += chacha20poly1305.o

lib/crypto/blake2s-generic.c

@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state,
state->t[1] += (state->t[0] < inc);
}
void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
__weak __alias(blake2s_compress_generic);
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
u32 m[16];
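The two declarations above make blake2s_compress() a weak alias of the generic compressor, so an architecture provides acceleration simply by defining a strong blake2s_compress() of its own; no indirection or Kconfig plumbing is needed. The same GCC/Clang mechanism in isolation (hypothetical names):

	void impl_generic(int x) { /* portable fallback */ }

	/* weak alias: resolves to impl_generic unless a strong impl() exists */
	void impl(int x) __attribute__((weak, alias("impl_generic")));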

lib/crypto/blake2s-selftest.c

@ -15,7 +15,6 @@
* #include <stdio.h>
*
* #include <openssl/evp.h>
* #include <openssl/hmac.h>
*
* #define BLAKE2S_TESTVEC_COUNT 256
*
@ -58,16 +57,6 @@
* }
* printf("};\n\n");
*
* printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
*
* HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL);
* print_vec(hash, BLAKE2S_OUTBYTES);
*
* HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL);
* print_vec(hash, BLAKE2S_OUTBYTES);
*
* printf("};\n");
*
* return 0;
*}
*/
@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
};
static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
{ 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70,
0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79,
0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, },
{ 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9,
0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f,
0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, },
};
bool __init blake2s_selftest(void)
{
u8 key[BLAKE2S_KEY_SIZE];
@ -607,16 +587,5 @@ bool __init blake2s_selftest(void)
}
}
if (success) {
blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key));
success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE);
blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf));
success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE);
if (!success)
pr_err("blake2s256_hmac self-test: FAIL\n");
}
return success;
}

lib/crypto/blake2s.c

@ -16,63 +16,20 @@
#include <linux/init.h>
#include <linux/bug.h>
#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
# define blake2s_compress blake2s_compress_arch
#else
# define blake2s_compress blake2s_compress_generic
#endif
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
__blake2s_update(state, in, inlen, blake2s_compress);
__blake2s_update(state, in, inlen, false);
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
__blake2s_final(state, out, blake2s_compress);
__blake2s_final(state, out, false);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
const size_t keylen)
{
struct blake2s_state state;
u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
int i;
if (keylen > BLAKE2S_BLOCK_SIZE) {
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, key, keylen);
blake2s_final(&state, x_key);
} else
memcpy(x_key, key, keylen);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, in, inlen);
blake2s_final(&state, i_hash);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x5c ^ 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
blake2s_final(&state, i_hash);
memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
}
EXPORT_SYMBOL(blake2s256_hmac);
static int __init blake2s_mod_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&

lib/decompress_unzstd.c

@ -68,11 +68,7 @@
#ifdef STATIC
# define UNZSTD_PREBOOT
# include "xxhash.c"
# include "zstd/entropy_common.c"
# include "zstd/fse_decompress.c"
# include "zstd/huf_decompress.c"
# include "zstd/zstd_common.c"
# include "zstd/decompress.c"
# include "zstd/decompress_sources.h"
#endif
#include <linux/decompress/mm.h>
@ -91,11 +87,15 @@
static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
{
const int err = ZSTD_getErrorCode(ret);
const zstd_error_code err = zstd_get_error_code(ret);
if (!ZSTD_isError(ret))
if (!zstd_is_error(ret))
return 0;
/*
* zstd_get_error_name() cannot be used because the error() callback takes
* a char *, not a const char *
*/
switch (err) {
case ZSTD_error_memory_allocation:
error("ZSTD decompressor ran out of memory");
@ -124,28 +124,28 @@ static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
long out_len, long *in_pos,
void (*error)(char *x))
{
const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
const size_t wksp_size = zstd_dctx_workspace_bound();
void *wksp = large_malloc(wksp_size);
ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
int err;
size_t ret;
if (dctx == NULL) {
error("Out of memory while allocating ZSTD_DCtx");
error("Out of memory while allocating zstd_dctx");
err = -1;
goto out;
}
/*
* Find out how large the frame actually is, there may be junk at
* the end of the frame that ZSTD_decompressDCtx() can't handle.
* the end of the frame that zstd_decompress_dctx() can't handle.
*/
ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
ret = zstd_find_frame_compressed_size(in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
in_len = (long)ret;
ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
@ -167,14 +167,14 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
long *in_pos,
void (*error)(char *x))
{
ZSTD_inBuffer in;
ZSTD_outBuffer out;
ZSTD_frameParams params;
zstd_in_buffer in;
zstd_out_buffer out;
zstd_frame_header header;
void *in_allocated = NULL;
void *out_allocated = NULL;
void *wksp = NULL;
size_t wksp_size;
ZSTD_DStream *dstream;
zstd_dstream *dstream;
int err;
size_t ret;
@ -238,13 +238,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
out.size = out_len;
/*
* We need to know the window size to allocate the ZSTD_DStream.
* We need to know the window size to allocate the zstd_dstream.
* Since we are streaming, we need to allocate a buffer for the sliding
* window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
* (8 MB), so it is important to use the actual value so as not to
* waste memory when it is smaller.
*/
ret = ZSTD_getFrameParams(&params, in.src, in.size);
ret = zstd_get_frame_header(&header, in.src, in.size);
err = handle_zstd_error(ret, error);
if (err)
goto out;
@ -253,19 +253,19 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
err = -1;
goto out;
}
if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
error("ZSTD-compressed data has too large a window size");
err = -1;
goto out;
}
/*
* Allocate the ZSTD_DStream now that we know how much memory is
* Allocate the zstd_dstream now that we know how much memory is
* required.
*/
wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
wksp_size = zstd_dstream_workspace_bound(header.windowSize);
wksp = large_malloc(wksp_size);
dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
if (dstream == NULL) {
error("Out of memory while allocating ZSTD_DStream");
err = -1;
@ -298,7 +298,7 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
in.size = in_len;
}
/* Returns zero when the frame is complete. */
ret = ZSTD_decompressStream(dstream, &out, &in);
ret = zstd_decompress_stream(dstream, &out, &in);
err = handle_zstd_error(ret, error);
if (err)
goto out;

lib/devres.c

@ -528,3 +528,85 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */
static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
arch_phys_wc_del(*((int *)res));
}
/**
* devm_arch_phys_wc_add - Managed arch_phys_wc_add()
* @dev: Managed device
* @base: Memory base address
* @size: Size of memory range
*
* Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
* See arch_phys_wc_add() for more information.
*/
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
int *mtrr;
int ret;
mtrr = devres_alloc(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL);
if (!mtrr)
return -ENOMEM;
ret = arch_phys_wc_add(base, size);
if (ret < 0) {
devres_free(mtrr);
return ret;
}
*mtrr = ret;
devres_add(dev, mtrr);
return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);
struct arch_io_reserve_memtype_wc_devres {
resource_size_t start;
resource_size_t size;
};
static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
const struct arch_io_reserve_memtype_wc_devres *this = res;
arch_io_free_memtype_wc(this->start, this->size);
}
/**
* devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
* @dev: Managed device
* @start: Memory base address
* @size: Size of memory range
*
* Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
and sets up a release callback. See arch_io_reserve_memtype_wc() for more
* information.
*/
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
resource_size_t size)
{
struct arch_io_reserve_memtype_wc_devres *dr;
int ret;
dr = devres_alloc(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
ret = arch_io_reserve_memtype_wc(start, size);
if (ret < 0) {
devres_free(dr);
return ret;
}
dr->start = start;
dr->size = size;
devres_add(dev, dr);
return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
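A usage sketch from a hypothetical PCI driver probe path; because both helpers are devres-managed, the MTRR and the memtype reservation are released automatically on detach, with no unwind gotos:

	static int demo_probe(struct pci_dev *pdev)
	{
		resource_size_t base = pci_resource_start(pdev, 0);
		resource_size_t size = pci_resource_len(pdev, 0);
		int ret;

		ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, base, size);
		if (ret < 0)
			return ret;

		ret = devm_arch_phys_wc_add(&pdev->dev, base, size);
		return ret < 0 ? ret : 0;
	}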

lib/dynamic_debug.c

@ -71,6 +71,8 @@ static DEFINE_MUTEX(ddebug_lock);
static LIST_HEAD(ddebug_tables);
static int verbose;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, " dynamic_debug/control processing "
"( 0 = off (default), 1 = module add/rm, 2 = >control summary, 3 = parsing, 4 = per-site changes)");
/* Return the path relative to source root */
static inline const char *trim_prefix(const char *path)
@ -118,6 +120,8 @@ do { \
#define vpr_info(fmt, ...) vnpr_info(1, fmt, ##__VA_ARGS__)
#define v2pr_info(fmt, ...) vnpr_info(2, fmt, ##__VA_ARGS__)
#define v3pr_info(fmt, ...) vnpr_info(3, fmt, ##__VA_ARGS__)
#define v4pr_info(fmt, ...) vnpr_info(4, fmt, ##__VA_ARGS__)
static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
{
@ -130,7 +134,7 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
fmtlen--;
}
vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
msg,
query->function ?: "",
query->filename ?: "",
@ -213,7 +217,7 @@ static int ddebug_change(const struct ddebug_query *query,
static_branch_enable(&dp->key.dd_key_true);
#endif
dp->flags = newflags;
v2pr_info("changed %s:%d [%s]%s =%s\n",
v4pr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
dt->mod_name, dp->function,
ddebug_describe_flags(dp->flags, &fbuf));
@ -273,7 +277,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
buf = end;
}
if (verbose) {
if (verbose >= 3) {
int i;
pr_info("split into words:");
for (i = 0; i < nwords; i++)
@ -333,7 +337,7 @@ static int parse_linerange(struct ddebug_query *query, const char *first)
} else {
query->last_lineno = query->first_lineno;
}
vpr_info("parsed line %d-%d\n", query->first_lineno,
v3pr_info("parsed line %d-%d\n", query->first_lineno,
query->last_lineno);
return 0;
}
@ -447,7 +451,7 @@ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
pr_err("bad flag-op %c, at start of %s\n", *str, str);
return -EINVAL;
}
vpr_info("op='%c'\n", op);
v3pr_info("op='%c'\n", op);
for (; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@ -461,7 +465,7 @@ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
return -EINVAL;
}
}
vpr_info("flags=0x%x\n", modifiers->flags);
v3pr_info("flags=0x%x\n", modifiers->flags);
/* calculate final flags, mask based upon op */
switch (op) {
@ -477,7 +481,7 @@ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
modifiers->flags = 0;
break;
}
vpr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
v3pr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
return 0;
}
@ -529,7 +533,7 @@ static int ddebug_exec_queries(char *query, const char *modname)
if (!query || !*query || *query == '#')
continue;
vpr_info("query %d: \"%s\"\n", i, query);
vpr_info("query %d: \"%s\" mod:%s\n", i, query, modname ?: "*");
rc = ddebug_exec_query(query, modname);
if (rc < 0) {
@ -540,8 +544,9 @@ static int ddebug_exec_queries(char *query, const char *modname)
}
i++;
}
vpr_info("processed %d queries, with %d matches, %d errs\n",
i, nfound, errs);
if (i)
v2pr_info("processed %d queries, with %d matches, %d errs\n",
i, nfound, errs);
if (exitcode)
return exitcode;
@ -746,21 +751,6 @@ EXPORT_SYMBOL(__dynamic_ibdev_dbg);
#endif
#define DDEBUG_STRING_SIZE 1024
static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE];
static __init int ddebug_setup_query(char *str)
{
if (strlen(str) >= DDEBUG_STRING_SIZE) {
pr_warn("ddebug boot param string too large\n");
return 0;
}
strlcpy(ddebug_setup_string, str, DDEBUG_STRING_SIZE);
return 1;
}
__setup("ddebug_query=", ddebug_setup_query);
/*
* Install a noop handler to make dyndbg look like a normal kernel cli param.
* This avoids warnings about dyndbg being an unknown cli param when supplied
@ -793,7 +783,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
tmpbuf = memdup_user_nul(ubuf, len);
if (IS_ERR(tmpbuf))
return PTR_ERR(tmpbuf);
vpr_info("read %d bytes from userspace\n", (int)len);
v2pr_info("read %zu bytes from userspace\n", len);
ret = ddebug_exec_queries(tmpbuf, NULL);
kfree(tmpbuf);
@ -981,7 +971,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
list_add(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
v2pr_info("%3u debug prints in module %s\n", n, dt->mod_name);
vpr_info("%3u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
@ -1040,8 +1030,6 @@ int ddebug_remove_module(const char *mod_name)
struct ddebug_table *dt, *nextdt;
int ret = -ENOENT;
v2pr_info("removing module \"%s\"\n", mod_name);
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
if (dt->mod_name == mod_name) {
@ -1051,6 +1039,8 @@ int ddebug_remove_module(const char *mod_name)
}
}
mutex_unlock(&ddebug_lock);
if (!ret)
v2pr_info("removed module \"%s\"\n", mod_name);
return ret;
}
@ -1133,16 +1123,6 @@ static int __init dynamic_debug_init(void)
entries, modct, (int)((modct * sizeof(struct ddebug_table)) >> 10),
(int)((entries * sizeof(struct _ddebug)) >> 10));
/* apply ddebug_query boot param, dont unload tables on err */
if (ddebug_setup_string[0] != '\0') {
pr_warn("ddebug_query param name is deprecated, change it to dyndbg\n");
ret = ddebug_exec_queries(ddebug_setup_string, NULL);
if (ret < 0)
pr_warn("Invalid ddebug boot param %s\n",
ddebug_setup_string);
else
pr_info("%d changes by ddebug_query\n", ret);
}
/* now that ddebug tables are loaded, process all boot args
* again to find and activate queries given in dyndbg params.
* While this has already been done for known boot params, it

lib/error-inject.c

@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
@ -64,7 +65,7 @@ static void populate_error_injection_list(struct error_injection_entry *start,
mutex_lock(&ei_mutex);
for (iter = start; iter < end; iter++) {
entry = arch_deref_entry_point((void *)iter->addr);
entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);
if (!kernel_text_address(entry) ||
!kallsyms_lookup_size_offset(entry, &size, &offset)) {

lib/find_bit.c

@ -89,6 +89,27 @@ unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
EXPORT_SYMBOL(_find_first_bit);
#endif
#ifndef find_first_and_bit
/*
* Find the first set bit in two memory regions.
*/
unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
unsigned long idx, val;
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
val = addr1[idx] & addr2[idx];
if (val)
return min(idx * BITS_PER_LONG + __ffs(val), size);
}
return size;
}
EXPORT_SYMBOL(_find_first_and_bit);
#endif
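A usage sketch via the find_first_and_bit() wrapper (declared in the corresponding find-bit header); it returns the bitmap size when the intersection is empty:

	DECLARE_BITMAP(active, 64);
	DECLARE_BITMAP(allowed, 64);
	unsigned long bit;

	/* first index set in both maps, or 64 if no common bit is set */
	bit = find_first_and_bit(active, allowed, 64);
	if (bit < 64)
		handle(bit);	/* handle() is a placeholder */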
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.

lib/find_bit_benchmark.c

@ -49,6 +49,25 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len)
return 0;
}
static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len)
{
static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata;
unsigned long i, cnt;
ktime_t time;
bitmap_copy(cp, bitmap, BITMAP_LEN);
time = ktime_get();
for (cnt = i = 0; i < len; cnt++) {
i = find_first_and_bit(cp, bitmap2, len);
__clear_bit(i, cp);
}
time = ktime_get() - time;
pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
@ -129,6 +148,7 @@ static int __init find_bit_test(void)
* traverse only part of bitmap to avoid soft lockup.
*/
test_find_first_bit(bitmap, BITMAP_LEN / 10);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
@ -145,6 +165,7 @@ static int __init find_bit_test(void)
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
/*

lib/flex_proportions.c

@ -217,11 +217,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
}
/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
long nr)
{
fprop_reflect_period_percpu(p, pl);
percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
percpu_counter_add(&p->events, nr);
}
void fprop_fraction_percpu(struct fprop_global *p,
@ -253,20 +254,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
}
/*
* Like __fprop_inc_percpu() except that event is counted only if the given
* Like __fprop_add_percpu() except that event is counted only if the given
* type has fraction smaller than @max_frac/FPROP_FRAC_BASE
*/
void __fprop_inc_percpu_max(struct fprop_global *p,
struct fprop_local_percpu *pl, int max_frac)
void __fprop_add_percpu_max(struct fprop_global *p,
struct fprop_local_percpu *pl, int max_frac, long nr)
{
if (unlikely(max_frac < FPROP_FRAC_BASE)) {
unsigned long numerator, denominator;
s64 tmp;
fprop_fraction_percpu(p, pl, &numerator, &denominator);
if (numerator >
(((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
tmp = (u64)denominator * max_frac -
((u64)numerator << FPROP_FRAC_SHIFT);
if (tmp < 0) {
/* Maximum fraction already exceeded? */
return;
} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
/* Add just enough for the fraction to saturate */
nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
FPROP_FRAC_BASE - max_frac);
}
}
__fprop_inc_percpu(p, pl);
__fprop_add_percpu(p, pl, nr);
}
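A worked instance of the clamping above, assuming FPROP_FRAC_SHIFT = 10 so FPROP_FRAC_BASE = 1024: with numerator = 400, denominator = 1024 and max_frac = 512 (a 50% cap), tmp = 1024 * 512 - (400 << 10) = 114688. A request of nr = 300 trips the second branch because 300 * (1024 - 512) = 153600 exceeds tmp, so nr is clamped to DIV_ROUND_UP(114688, 512) = 224; the resulting fraction (400 + 224) / (1024 + 224) = 624 / 1248 lands exactly on the cap.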

lib/genalloc.c

@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool)
list_del(&chunk->next_chunk);
end_bit = chunk_size(chunk) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
bit = find_first_bit(chunk->bits, end_bit);
BUG_ON(bit < end_bit);
vfree(chunk);

lib/iov_iter.c

@ -69,42 +69,40 @@
#define iterate_xarray(i, n, base, len, __off, STEP) { \
__label__ __out; \
size_t __off = 0; \
struct page *head = NULL; \
struct folio *folio; \
loff_t start = i->xarray_start + i->iov_offset; \
unsigned offset = start % PAGE_SIZE; \
pgoff_t index = start / PAGE_SIZE; \
int j; \
\
XA_STATE(xas, i->xarray, index); \
\
len = PAGE_SIZE - offset_in_page(start); \
rcu_read_lock(); \
xas_for_each(&xas, head, ULONG_MAX) { \
xas_for_each(&xas, folio, ULONG_MAX) { \
unsigned left; \
if (xas_retry(&xas, head)) \
size_t offset; \
if (xas_retry(&xas, folio)) \
continue; \
if (WARN_ON(xa_is_value(head))) \
if (WARN_ON(xa_is_value(folio))) \
break; \
if (WARN_ON(PageHuge(head))) \
if (WARN_ON(folio_test_hugetlb(folio))) \
break; \
for (j = (head->index < index) ? index - head->index : 0; \
j < thp_nr_pages(head); j++) { \
void *kaddr = kmap_local_page(head + j); \
base = kaddr + offset; \
len = PAGE_SIZE - offset; \
offset = offset_in_folio(folio, start + __off); \
while (offset < folio_size(folio)) { \
base = kmap_local_folio(folio, offset); \
len = min(n, len); \
left = (STEP); \
kunmap_local(kaddr); \
kunmap_local(base); \
len -= left; \
__off += len; \
n -= len; \
if (left || n == 0) \
goto __out; \
offset = 0; \
offset += len; \
len = PAGE_SIZE; \
} \
} \
__out: \
rcu_read_unlock(); \
i->iov_offset += __off; \
i->iov_offset += __off; \
n = __off; \
}
@ -191,7 +189,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
buf = iov->iov_base + skip;
copy = min(bytes, iov->iov_len - skip);
if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
kaddr = kmap_atomic(page);
from = kaddr + offset;
@ -275,7 +273,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
buf = iov->iov_base + skip;
copy = min(bytes, iov->iov_len - skip);
if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
kaddr = kmap_atomic(page);
to = kaddr + offset;
@ -431,35 +429,81 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
}
/*
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* bytes. For each iovec, fault in each page that constitutes the iovec.
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
* @size: maximum length
*
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
* because it is an invalid address).
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* @size. For each iovec, fault in each page that constitutes the iovec.
*
* Returns the number of bytes not faulted in (like copy_to_user() and
* copy_from_user()).
*
* Always returns 0 for non-userspace iterators.
*/
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
if (bytes > i->count)
bytes = i->count;
for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
size_t len = min(bytes, p->iov_len - skip);
int err;
size -= count;
for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
if (unlikely(!len))
continue;
err = fault_in_pages_readable(p->iov_base + skip, len);
if (unlikely(err))
return err;
bytes -= len;
ret = fault_in_readable(p->iov_base + skip, len);
count -= len - ret;
if (ret)
break;
}
return count + size;
}
return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
EXPORT_SYMBOL(fault_in_iov_iter_readable);
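The byte-granular return value lets callers distinguish total from partial failure. A hypothetical caller loop under the new contract:

	size_t copied;
retry:
	copied = copy_from_iter(buf, bytes, i);	/* may fault and copy short */
	if (unlikely(!copied)) {
		/* give up only if nothing at all could be faulted in */
		if (fault_in_iov_iter_readable(i, bytes) == bytes)
			return -EFAULT;
		goto retry;
	}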
/*
* fault_in_iov_iter_writeable - fault in iov iterator for writing
* @i: iterator
* @size: maximum length
*
* Faults in the iterator using get_user_pages(), i.e., without triggering
* hardware page faults. This is primarily useful when we already know that
* some or all of the pages in @i aren't in memory.
*
* Returns the number of bytes not faulted in, like copy_to_user() and
* copy_from_user().
*
* Always returns 0 for non-user-space iterators.
*/
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
size -= count;
for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
if (unlikely(!len))
continue;
ret = fault_in_safe_writeable(p->iov_base + skip, len);
count -= len - ret;
if (ret)
break;
}
return count + size;
}
return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
@ -468,6 +512,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
.nofault = false,
.data_source = direction,
.iov = iov,
.nr_segs = nr_segs,
@ -1483,13 +1528,17 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
return 0;
if (likely(iter_is_iovec(i))) {
unsigned int gup_flags = 0;
unsigned long addr;
if (iov_iter_rw(i) != WRITE)
gup_flags |= FOLL_WRITE;
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
n = DIV_ROUND_UP(len, PAGE_SIZE);
res = get_user_pages_fast(addr, n,
iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
pages);
res = get_user_pages_fast(addr, n, gup_flags, pages);
if (unlikely(res <= 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
@ -1605,15 +1654,20 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
return 0;
if (likely(iter_is_iovec(i))) {
unsigned int gup_flags = 0;
unsigned long addr;
if (iov_iter_rw(i) != WRITE)
gup_flags |= FOLL_WRITE;
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
n = DIV_ROUND_UP(len, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
return -ENOMEM;
res = get_user_pages_fast(addr, n,
iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
res = get_user_pages_fast(addr, n, gup_flags, p);
if (unlikely(res <= 0)) {
kvfree(p);
*pages = NULL;

lib/kobject.c

@ -65,7 +65,7 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
*/
static int populate_dir(struct kobject *kobj)
{
struct kobj_type *t = get_ktype(kobj);
const struct kobj_type *t = get_ktype(kobj);
struct attribute *attr;
int error = 0;
int i;
@ -346,7 +346,7 @@ EXPORT_SYMBOL(kobject_set_name);
* to kobject_put(), not by a call to kfree directly to ensure that all of
* the memory is cleaned up properly.
*/
void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
{
char *err_str;
@ -461,7 +461,7 @@ EXPORT_SYMBOL(kobject_add);
* same type of error handling after a call to kobject_add() and kobject
* lifetime rules are the same here.
*/
int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
struct kobject *parent, const char *fmt, ...)
{
va_list args;
@ -679,7 +679,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero);
static void kobject_cleanup(struct kobject *kobj)
{
struct kobject *parent = kobj->parent;
struct kobj_type *t = get_ktype(kobj);
const struct kobj_type *t = get_ktype(kobj);
const char *name = kobj->name;
pr_debug("kobject: '%s' (%p): %s, parent %p\n",
@ -777,7 +777,7 @@ static struct kobj_type dynamic_kobj_ktype = {
* call to kobject_put() and not kfree(), as kobject_init() has
* already been called on this structure.
*/
struct kobject *kobject_create(void)
static struct kobject *kobject_create(void)
{
struct kobject *kobj;
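With the constified API, a driver's type descriptor can live in rodata. A sketch (struct demo_obj and demo_release() are placeholders):

	struct demo_obj {
		struct kobject kobj;
		/* ... */
	};

	static void demo_release(struct kobject *kobj)
	{
		kfree(container_of(kobj, struct demo_obj, kobj));
	}

	static const struct kobj_type demo_ktype = {	/* const now accepted */
		.release	= demo_release,
		.sysfs_ops	= &kobj_sysfs_ops,
	};

	/* ... */
	ret = kobject_init_and_add(&obj->kobj, &demo_ktype, NULL, "demo%d", id);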

lib/kobject_uevent.c

@ -501,7 +501,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
}
/* skip the event, if the filter returns zero. */
if (uevent_ops && uevent_ops->filter)
if (!uevent_ops->filter(kset, kobj)) {
if (!uevent_ops->filter(kobj)) {
pr_debug("kobject: '%s' (%p): %s: filter function "
"caused the event to drop!\n",
kobject_name(kobj), kobj, __func__);
@ -510,7 +510,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
/* originating subsystem */
if (uevent_ops && uevent_ops->name)
subsystem = uevent_ops->name(kset, kobj);
subsystem = uevent_ops->name(kobj);
else
subsystem = kobject_name(&kset->kobj);
if (!subsystem) {
@ -554,7 +554,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
/* let the kset specific function add its stuff */
if (uevent_ops && uevent_ops->uevent) {
retval = uevent_ops->uevent(kset, kobj, env);
retval = uevent_ops->uevent(kobj, env);
if (retval) {
pr_debug("kobject: '%s' (%p): %s: uevent() returned "
"%d\n", kobject_name(kobj), kobj,

View File

@ -22,6 +22,7 @@
#include "kstrtox.h"
noinline
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
{
if (*base == 0) {
@ -47,6 +48,7 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
*
* Don't you dare use this function.
*/
noinline
unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
size_t max_chars)
{
@ -85,6 +87,7 @@ unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned lon
return rv;
}
noinline
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
{
return _parse_integer_limit(s, base, p, INT_MAX);
@ -125,6 +128,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtoull(). Return code must be checked.
*/
noinline
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
if (s[0] == '+')
@ -148,6 +152,7 @@ EXPORT_SYMBOL(kstrtoull);
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtoll(). Return code must be checked.
*/
noinline
int kstrtoll(const char *s, unsigned int base, long long *res)
{
unsigned long long tmp;
@ -219,6 +224,7 @@ EXPORT_SYMBOL(_kstrtol);
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtoul(). Return code must be checked.
*/
noinline
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
unsigned long long tmp;
@ -249,6 +255,7 @@ EXPORT_SYMBOL(kstrtouint);
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtol(). Return code must be checked.
*/
noinline
int kstrtoint(const char *s, unsigned int base, int *res)
{
long long tmp;
@ -264,6 +271,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
}
EXPORT_SYMBOL(kstrtoint);
noinline
int kstrtou16(const char *s, unsigned int base, u16 *res)
{
unsigned long long tmp;
@ -279,6 +287,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
}
EXPORT_SYMBOL(kstrtou16);
noinline
int kstrtos16(const char *s, unsigned int base, s16 *res)
{
long long tmp;
@ -294,6 +303,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
}
EXPORT_SYMBOL(kstrtos16);
noinline
int kstrtou8(const char *s, unsigned int base, u8 *res)
{
unsigned long long tmp;
@ -309,6 +319,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
}
EXPORT_SYMBOL(kstrtou8);
noinline
int kstrtos8(const char *s, unsigned int base, s8 *res)
{
long long tmp;
@ -333,6 +344,7 @@ EXPORT_SYMBOL(kstrtos8);
* [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
* pointed to by res is updated upon finding a match.
*/
noinline
int kstrtobool(const char *s, bool *res)
{
if (!s)
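The noinline annotations change code generation only; the calling convention the comments above insist on stays the same. A minimal usage sketch (buf is assumed to be a NUL-terminated string):

	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 0, &val);	/* base 0 auto-detects 0x/0 prefixes */
	if (ret)			/* -ERANGE on overflow, -EINVAL otherwise */
		return ret;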

View File

@ -15,23 +15,89 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
#if IS_BUILTIN(CONFIG_KUNIT)
static char *filter_glob_param;
static char *action_param;
module_param_named(filter_glob, filter_glob_param, charp, 0);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites run at boot-time, e.g. list*");
"Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
module_param_named(action, action_param, charp, 0);
MODULE_PARM_DESC(action,
"Changes KUnit executor behavior, valid values are:\n"
"<none>: run the tests like normal\n"
"'list' to list test names instead of running them.\n");
/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
struct kunit_test_filter {
char *suite_glob;
char *test_glob;
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
const char *period = strchr(filter_glob, '.');
if (!period) {
parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
return;
}
parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
}
/* Create a copy of suite with only tests that match test_glob. */
static struct kunit_suite *
kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
{
int n = 0;
struct kunit_case *filtered, *test_case;
struct kunit_suite *copy;
kunit_suite_for_each_test_case(suite, test_case) {
if (!test_glob || glob_match(test_glob, test_case->name))
++n;
}
if (n == 0)
return NULL;
/* Use memcpy to work around copy->name being const. */
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
memcpy(copy, suite, sizeof(*copy));
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
n = 0;
kunit_suite_for_each_test_case(suite, test_case) {
if (!test_glob || glob_match(test_glob, test_case->name))
filtered[n++] = *test_case;
}
copy->test_cases = filtered;
return copy;
}
static char *kunit_shutdown;
core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
static struct kunit_suite * const *
kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
const char *filter_glob)
struct kunit_test_filter *filter)
{
int i, n = 0;
struct kunit_suite **filtered;
struct kunit_suite **filtered, *filtered_suite;
n = 0;
for (i = 0; subsuite[i] != NULL; ++i) {
if (glob_match(filter_glob, subsuite[i]->name))
for (i = 0; subsuite[i]; ++i) {
if (glob_match(filter->suite_glob, subsuite[i]->name))
++n;
}
@ -44,8 +110,11 @@ kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
n = 0;
for (i = 0; subsuite[i] != NULL; ++i) {
if (glob_match(filter_glob, subsuite[i]->name))
filtered[n++] = subsuite[i];
if (!glob_match(filter->suite_glob, subsuite[i]->name))
continue;
filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob);
if (filtered_suite)
filtered[n++] = filtered_suite;
}
filtered[n] = NULL;
@ -57,12 +126,32 @@ struct suite_set {
struct kunit_suite * const * const *end;
};
static void kunit_free_subsuite(struct kunit_suite * const *subsuite)
{
unsigned int i;
for (i = 0; subsuite[i]; i++)
kfree(subsuite[i]);
kfree(subsuite);
}
static void kunit_free_suite_set(struct suite_set suite_set)
{
struct kunit_suite * const * const *suites;
for (suites = suite_set.start; suites < suite_set.end; suites++)
kunit_free_subsuite(*suites);
kfree(suite_set.start);
}
static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
const char *filter_glob)
{
int i;
struct kunit_suite * const **copy, * const *filtered_subsuite;
struct suite_set filtered;
struct kunit_test_filter filter;
const size_t max = suite_set->end - suite_set->start;
@ -73,12 +162,17 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
return filtered;
}
kunit_parse_filter_glob(&filter, filter_glob);
for (i = 0; i < max; ++i) {
filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], filter_glob);
filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter);
if (filtered_subsuite)
*copy++ = filtered_subsuite;
}
filtered.end = copy;
kfree(filter.suite_glob);
kfree(filter.test_glob);
return filtered;
}
@ -109,9 +203,35 @@ static void kunit_print_tap_header(struct suite_set *suite_set)
pr_info("1..%d\n", num_of_suites);
}
int kunit_run_all_tests(void)
static void kunit_exec_run_tests(struct suite_set *suite_set)
{
struct kunit_suite * const * const *suites;
kunit_print_tap_header(suite_set);
for (suites = suite_set->start; suites < suite_set->end; suites++)
__kunit_test_suites_init(*suites);
}
static void kunit_exec_list_tests(struct suite_set *suite_set)
{
unsigned int i;
struct kunit_suite * const * const *suites;
struct kunit_case *test_case;
/* Hack: print a TAP header so kunit.py can find the start of KUnit output. */
pr_info("TAP version 14\n");
for (suites = suite_set->start; suites < suite_set->end; suites++)
for (i = 0; (*suites)[i] != NULL; i++) {
kunit_suite_for_each_test_case((*suites)[i], test_case) {
pr_info("%s.%s\n", (*suites)[i]->name, test_case->name);
}
}
}
int kunit_run_all_tests(void)
{
struct suite_set suite_set = {
.start = __kunit_suites_start,
.end = __kunit_suites_end,
@ -120,15 +240,15 @@ int kunit_run_all_tests(void)
if (filter_glob_param)
suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
kunit_print_tap_header(&suite_set);
for (suites = suite_set.start; suites < suite_set.end; suites++)
__kunit_test_suites_init(*suites);
if (!action_param)
kunit_exec_run_tests(&suite_set);
else if (strcmp(action_param, "list") == 0)
kunit_exec_list_tests(&suite_set);
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
if (filter_glob_param) { /* a copy was made of each array */
for (suites = suite_set.start; suites < suite_set.end; suites++)
kfree(*suites);
kfree(suite_set.start);
kunit_free_suite_set(suite_set);
}
kunit_handle_shutdown();
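A hedged sketch of the split kunit_parse_filter_glob() performs, mirroring parse_filter_test further down; on a real kernel command line this corresponds to booting with kunit.filter_glob=list*.*del_test and kunit.action=list:

	struct kunit_test_filter f;

	kunit_parse_filter_glob(&f, "list*.*del_test");
	/* f.suite_glob == "list*", f.test_glob == "*del_test" */
	kfree(f.suite_glob);
	kfree(f.test_glob);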

View File

@ -9,38 +9,103 @@
#include <kunit/test.h>
static void kfree_at_end(struct kunit *test, const void *to_free);
static void free_subsuite_at_end(struct kunit *test,
struct kunit_suite *const *to_free);
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
const char *suite_name);
const char *suite_name,
struct kunit_case *test_cases);
static void dummy_test(struct kunit *test) {}
static struct kunit_case dummy_test_cases[] = {
/* .run_case is not important, just needs to be non-NULL */
{ .name = "test1", .run_case = dummy_test },
{ .name = "test2", .run_case = dummy_test },
{},
};
static void parse_filter_test(struct kunit *test)
{
struct kunit_test_filter filter = {NULL, NULL};
kunit_parse_filter_glob(&filter, "suite");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_FALSE(test, filter.test_glob);
kfree(filter.suite_glob);
kfree(filter.test_glob);
kunit_parse_filter_glob(&filter, "suite.test");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
kfree(filter.suite_glob);
kfree(filter.test_glob);
}
static void filter_subsuite_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
struct kunit_suite * const *filtered;
struct kunit_test_filter filter = {
.suite_glob = "suite2",
.test_glob = NULL,
};
subsuite[0] = alloc_fake_suite(test, "suite1");
subsuite[1] = alloc_fake_suite(test, "suite2");
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2, NULL */
filtered = kunit_filter_subsuite(subsuite, "suite2*");
filtered = kunit_filter_subsuite(subsuite, &filter);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
kfree_at_end(test, filtered);
free_subsuite_at_end(test, filtered);
/* Validate we just have suite2 */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
KUNIT_EXPECT_FALSE(test, filtered[1]);
}
static void filter_subsuite_test_glob_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
struct kunit_suite * const *filtered;
struct kunit_test_filter filter = {
.suite_glob = "suite2",
.test_glob = "test2",
};
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2 (just test2), NULL */
filtered = kunit_filter_subsuite(subsuite, &filter);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
free_subsuite_at_end(test, filtered);
/* Validate we just have suite2 */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
KUNIT_EXPECT_FALSE(test, filtered[1]);
/* Now validate we just have test2 */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]->test_cases);
KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->test_cases[0].name, "test2");
KUNIT_EXPECT_FALSE(test, filtered[0]->test_cases[1].name);
}
static void filter_subsuite_to_empty_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
struct kunit_suite * const *filtered;
struct kunit_test_filter filter = {
.suite_glob = "not_found",
.test_glob = NULL,
};
subsuite[0] = alloc_fake_suite(test, "suite1");
subsuite[1] = alloc_fake_suite(test, "suite2");
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
filtered = kunit_filter_subsuite(subsuite, "not_found");
kfree_at_end(test, filtered); /* just in case */
filtered = kunit_filter_subsuite(subsuite, &filter);
free_subsuite_at_end(test, filtered); /* just in case */
KUNIT_EXPECT_FALSE_MSG(test, filtered,
"should be NULL to indicate no match");
@ -52,7 +117,7 @@ static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_s
kfree_at_end(test, suite_set->start);
for (suites = suite_set->start; suites < suite_set->end; suites++)
kfree_at_end(test, *suites);
free_subsuite_at_end(test, *suites);
}
static void filter_suites_test(struct kunit *test)
@ -74,8 +139,8 @@ static void filter_suites_test(struct kunit *test)
struct suite_set filtered = {.start = NULL, .end = NULL};
/* Emulate two files, each having one suite */
subsuites[0][0] = alloc_fake_suite(test, "suite0");
subsuites[1][0] = alloc_fake_suite(test, "suite1");
subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases);
subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
/* Filter out suite1 */
filtered = kunit_filter_suites(&suite_set, "suite0");
@ -84,11 +149,14 @@ static void filter_suites_test(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]);
KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
}
static struct kunit_case executor_test_cases[] = {
KUNIT_CASE(parse_filter_test),
KUNIT_CASE(filter_subsuite_test),
KUNIT_CASE(filter_subsuite_test_glob_test),
KUNIT_CASE(filter_subsuite_to_empty_test),
KUNIT_CASE(filter_suites_test),
{}
@ -120,14 +188,30 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
(void *)to_free);
}
static void free_subsuite_res_free(struct kunit_resource *res)
{
kunit_free_subsuite(res->data);
}
static void free_subsuite_at_end(struct kunit *test,
struct kunit_suite *const *to_free)
{
if (IS_ERR_OR_NULL(to_free))
return;
kunit_alloc_resource(test, NULL, free_subsuite_res_free,
GFP_KERNEL, (void *)to_free);
}
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
const char *suite_name)
const char *suite_name,
struct kunit_case *test_cases)
{
struct kunit_suite *suite;
/* We normally never expect to allocate suites, hence the non-const cast. */
suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
suite->test_cases = test_cases;
return suite;
}

View File

@ -415,12 +415,15 @@ static struct kunit_suite kunit_log_test_suite = {
static void kunit_log_test(struct kunit *test)
{
struct kunit_suite *suite = &kunit_log_test_suite;
struct kunit_suite suite;
suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
kunit_log(KERN_INFO, test, "put this in log.");
kunit_log(KERN_INFO, test, "this too.");
kunit_log(KERN_INFO, suite, "add to suite log.");
kunit_log(KERN_INFO, suite, "along with this.");
kunit_log(KERN_INFO, &suite, "add to suite log.");
kunit_log(KERN_INFO, &suite, "along with this.");
#ifdef CONFIG_KUNIT_DEBUGFS
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
@ -428,12 +431,11 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(test->log, "this too."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(suite->log, "add to suite log."));
strstr(suite.log, "add to suite log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(suite->log, "along with this."));
strstr(suite.log, "along with this."));
#else
KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
#endif
}

View File

@ -190,10 +190,10 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static size_t kunit_suite_counter = 1;
static void kunit_print_subtest_end(struct kunit_suite *suite)
{
static size_t kunit_suite_counter = 1;
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
@ -512,6 +512,8 @@ int kunit_run_tests(struct kunit_suite *suite)
/* Get initial param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"# Subtest: %s", test_case->name);
while (test.param_value) {
kunit_run_case_catch_errors(suite, test_case, &test);
@ -522,9 +524,8 @@ int kunit_run_tests(struct kunit_suite *suite)
}
kunit_log(KERN_INFO, &test,
KUNIT_SUBTEST_INDENT
"# %s: %s %d - %s",
test_case->name,
KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"%s %d - %s",
kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);
@ -585,6 +586,8 @@ void __kunit_test_suites_exit(struct kunit_suite **suites)
for (i = 0; suites[i] != NULL; i++)
kunit_exit_suite(suites[i]);
kunit_suite_counter = 1;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);

View File

@ -17,7 +17,7 @@
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
{
try_catch->try_result = -EFAULT;
complete_and_exit(try_catch->try_completion, -EFAULT);
kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
@ -27,7 +27,7 @@ static int kunit_generic_run_threadfn_adapter(void *data)
try_catch->try(try_catch->context);
complete_and_exit(try_catch->try_completion, 0);
kthread_complete_and_exit(try_catch->try_completion, 0);
}
static unsigned long kunit_test_timeout(void)

View File

@ -49,11 +49,11 @@ bool __list_del_entry_valid(struct list_head *entry)
"list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
CHECK_DATA_CORRUPTION(prev->next != entry,
"list_del corruption. prev->next should be %px, but was %px\n",
entry, prev->next) ||
"list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
entry, prev->next, prev) ||
CHECK_DATA_CORRUPTION(next->prev != entry,
"list_del corruption. next->prev should be %px, but was %px\n",
entry, next->prev))
"list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
entry, next->prev, next))
return false;
return true;

View File

@ -26,6 +26,12 @@
#include <linux/rtmutex.h>
#include <linux/local_lock.h>
#ifdef CONFIG_PREEMPT_RT
# define NON_RT(...)
#else
# define NON_RT(...) __VA_ARGS__
#endif
/*
* Change this to 1 if you want to see the failure printouts:
*/
@ -139,7 +145,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
#endif
static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
static DEFINE_PER_CPU(local_lock_t, local_A);
/*
* non-inlined runtime initializers, to let separate locks share
@ -258,7 +264,7 @@ static void init_shared_classes(void)
#define WWAF(x) ww_acquire_fini(x)
#define WWL(x, c) ww_mutex_lock(x, c)
#define WWT(x) ww_mutex_trylock(x)
#define WWT(x) ww_mutex_trylock(x, NULL)
#define WWL1(x) ww_mutex_lock(x, NULL)
#define WWU(x) ww_mutex_unlock(x)
@ -712,12 +718,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex);
#undef E
#ifdef CONFIG_PREEMPT_RT
# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); }
#else
# define RT_PREPARE_DBL_UNLOCK()
#endif
/*
* Double unlock:
*/
#define E() \
\
LOCK(A); \
RT_PREPARE_DBL_UNLOCK(); \
UNLOCK(A); \
UNLOCK(A); /* fail */
@ -802,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
@ -810,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
#endif
#undef E1
#undef E2
#ifndef CONFIG_PREEMPT_RT
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
@ -846,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#undef E1
#undef E2
#endif
/*
* Enabling irqs with an irq-safe lock held:
*/
@ -875,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
@ -883,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
#endif
#undef E1
#undef E2
@ -921,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
@ -929,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
#endif
#undef E1
#undef E2
@ -969,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
@ -977,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
#endif
#undef E1
#undef E2
@ -1031,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
@ -1039,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#endif
#undef E1
#undef E2
@ -1206,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-softirq.h"
#include "locking-selftest-rlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
#endif
#undef E1
#undef E2
@ -1252,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-softirq.h"
#include "locking-selftest-rlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
#endif
#undef E1
#undef E2
@ -1306,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-softirq.h"
#include "locking-selftest-rlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
@ -1320,7 +1351,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
@ -1380,7 +1411,7 @@ static void reset_locks(void)
init_shared_classes();
raw_spin_lock_init(&raw_lock_A);
raw_spin_lock_init(&raw_lock_B);
local_lock_init(&local_A);
local_lock_init(this_cpu_ptr(&local_A));
ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@ -1398,7 +1429,13 @@ static int unexpected_testcase_failures;
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
{
unsigned long saved_preempt_count = preempt_count();
int saved_preempt_count = preempt_count();
#ifdef CONFIG_PREEMPT_RT
#ifdef CONFIG_SMP
int saved_mgd_count = current->migration_disabled;
#endif
int saved_rcu_count = current->rcu_read_lock_nesting;
#endif
WARN_ON(irqs_disabled());
@ -1432,6 +1469,18 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
* count, so restore it:
*/
preempt_count_set(saved_preempt_count);
#ifdef CONFIG_PREEMPT_RT
#ifdef CONFIG_SMP
while (current->migration_disabled > saved_mgd_count)
migrate_enable();
#endif
while (current->rcu_read_lock_nesting > saved_rcu_count)
rcu_read_unlock();
WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
if (softirq_count())
current->softirqs_enabled = 0;
@ -1499,7 +1548,7 @@ static inline void print_testname(const char *testname)
#define DO_TESTCASE_2x2RW(desc, name, nr) \
DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \
NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
#define DO_TESTCASE_6x2x2RW(desc, name) \
DO_TESTCASE_2x2RW(desc, name, 123); \
@ -1547,19 +1596,19 @@ static inline void print_testname(const char *testname)
#define DO_TESTCASE_2I(desc, name, nr) \
DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
DO_TESTCASE_1("soft-"desc, name##_soft, nr);
NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_2IB(desc, name, nr) \
DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_6I(desc, name, nr) \
DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
DO_TESTCASE_3("soft-"desc, name##_soft, nr);
NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_6IRW(desc, name, nr) \
DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_2x3(desc, name) \
DO_TESTCASE_3(desc, name, 12); \
@ -1651,6 +1700,22 @@ static void ww_test_fail_acquire(void)
#endif
}
#ifdef CONFIG_PREEMPT_RT
#define ww_mutex_base_lock(b) rt_mutex_lock(b)
#define ww_mutex_base_trylock(b) rt_mutex_trylock(b)
#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2)
#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b)
#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b)
#define ww_mutex_base_unlock(b) rt_mutex_unlock(b)
#else
#define ww_mutex_base_lock(b) mutex_lock(b)
#define ww_mutex_base_trylock(b) mutex_trylock(b)
#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2)
#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b)
#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b)
#define ww_mutex_base_unlock(b) mutex_unlock(b)
#endif
static void ww_test_normal(void)
{
int ret;
@ -1665,50 +1730,50 @@ static void ww_test_normal(void)
/* mutex_lock (and indirectly, mutex_lock_nested) */
o.ctx = (void *)~0UL;
mutex_lock(&o.base);
mutex_unlock(&o.base);
ww_mutex_base_lock(&o.base);
ww_mutex_base_unlock(&o.base);
WARN_ON(o.ctx != (void *)~0UL);
/* mutex_lock_interruptible (and *_nested) */
o.ctx = (void *)~0UL;
ret = mutex_lock_interruptible(&o.base);
ret = ww_mutex_base_lock_interruptible(&o.base);
if (!ret)
mutex_unlock(&o.base);
ww_mutex_base_unlock(&o.base);
else
WARN_ON(1);
WARN_ON(o.ctx != (void *)~0UL);
/* mutex_lock_killable (and *_nested) */
o.ctx = (void *)~0UL;
ret = mutex_lock_killable(&o.base);
ret = ww_mutex_base_lock_killable(&o.base);
if (!ret)
mutex_unlock(&o.base);
ww_mutex_base_unlock(&o.base);
else
WARN_ON(1);
WARN_ON(o.ctx != (void *)~0UL);
/* trylock, succeeding */
o.ctx = (void *)~0UL;
ret = mutex_trylock(&o.base);
ret = ww_mutex_base_trylock(&o.base);
WARN_ON(!ret);
if (ret)
mutex_unlock(&o.base);
ww_mutex_base_unlock(&o.base);
else
WARN_ON(1);
WARN_ON(o.ctx != (void *)~0UL);
/* trylock, failing */
o.ctx = (void *)~0UL;
mutex_lock(&o.base);
ret = mutex_trylock(&o.base);
ww_mutex_base_lock(&o.base);
ret = ww_mutex_base_trylock(&o.base);
WARN_ON(ret);
mutex_unlock(&o.base);
ww_mutex_base_unlock(&o.base);
WARN_ON(o.ctx != (void *)~0UL);
/* nest_lock */
o.ctx = (void *)~0UL;
mutex_lock_nest_lock(&o.base, &t);
mutex_unlock(&o.base);
ww_mutex_base_lock_nest_lock(&o.base, &t);
ww_mutex_base_unlock(&o.base);
WARN_ON(o.ctx != (void *)~0UL);
}
@ -1721,7 +1786,7 @@ static void ww_test_two_contexts(void)
static void ww_test_diff_class(void)
{
WWAI(&t);
#ifdef CONFIG_DEBUG_MUTEXES
#ifdef DEBUG_WW_MUTEXES
t.ww_class = NULL;
#endif
WWL(&o, &t);
@ -1785,7 +1850,7 @@ static void ww_test_edeadlk_normal(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
o2.ctx = &t2;
mutex_release(&o2.base.dep_map, _THIS_IP_);
@ -1801,7 +1866,7 @@ static void ww_test_edeadlk_normal(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
mutex_unlock(&o2.base);
ww_mutex_base_unlock(&o2.base);
WWU(&o);
WWL(&o2, &t);
@ -1811,7 +1876,7 @@ static void ww_test_edeadlk_normal_slow(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
@ -1827,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
mutex_unlock(&o2.base);
ww_mutex_base_unlock(&o2.base);
WWU(&o);
ww_mutex_lock_slow(&o2, &t);
@ -1837,7 +1902,7 @@ static void ww_test_edeadlk_no_unlock(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
o2.ctx = &t2;
mutex_release(&o2.base.dep_map, _THIS_IP_);
@ -1853,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
mutex_unlock(&o2.base);
ww_mutex_base_unlock(&o2.base);
WWL(&o2, &t);
}
@ -1862,7 +1927,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
@ -1878,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
mutex_unlock(&o2.base);
ww_mutex_base_unlock(&o2.base);
ww_mutex_lock_slow(&o2, &t);
}
@ -1887,7 +1952,7 @@ static void ww_test_edeadlk_acquire_more(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
@ -1908,7 +1973,7 @@ static void ww_test_edeadlk_acquire_more_slow(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
@ -1929,11 +1994,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
mutex_lock(&o3.base);
ww_mutex_base_lock(&o3.base);
mutex_release(&o3.base.dep_map, _THIS_IP_);
o3.ctx = &t2;
@ -1955,11 +2020,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
mutex_lock(&o3.base);
ww_mutex_base_lock(&o3.base);
mutex_release(&o3.base.dep_map, _THIS_IP_);
o3.ctx = &t2;
@ -1980,7 +2045,7 @@ static void ww_test_edeadlk_acquire_wrong(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
@ -2005,7 +2070,7 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
{
int ret;
mutex_lock(&o2.base);
ww_mutex_base_lock(&o2.base);
mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
@ -2646,8 +2711,8 @@ static void wait_context_tests(void)
static void local_lock_2(void)
{
local_lock_acquire(&local_A); /* IRQ-ON */
local_lock_release(&local_A);
local_lock(&local_A); /* IRQ-ON */
local_unlock(&local_A);
HARDIRQ_ENTER();
spin_lock(&lock_A); /* IN-IRQ */
@ -2656,18 +2721,18 @@ static void local_lock_2(void)
HARDIRQ_DISABLE();
spin_lock(&lock_A);
local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
local_lock_release(&local_A);
local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
local_unlock(&local_A);
spin_unlock(&lock_A);
HARDIRQ_ENABLE();
}
static void local_lock_3A(void)
{
local_lock_acquire(&local_A); /* IRQ-ON */
local_lock(&local_A); /* IRQ-ON */
spin_lock(&lock_B); /* IRQ-ON */
spin_unlock(&lock_B);
local_lock_release(&local_A);
local_unlock(&local_A);
HARDIRQ_ENTER();
spin_lock(&lock_A); /* IN-IRQ */
@ -2676,18 +2741,18 @@ static void local_lock_3A(void)
HARDIRQ_DISABLE();
spin_lock(&lock_A);
local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
local_lock_release(&local_A);
local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
local_unlock(&local_A);
spin_unlock(&lock_A);
HARDIRQ_ENABLE();
}
static void local_lock_3B(void)
{
local_lock_acquire(&local_A); /* IRQ-ON */
local_lock(&local_A); /* IRQ-ON */
spin_lock(&lock_B); /* IRQ-ON */
spin_unlock(&lock_B);
local_lock_release(&local_A);
local_unlock(&local_A);
HARDIRQ_ENTER();
spin_lock(&lock_A); /* IN-IRQ */
@ -2696,8 +2761,8 @@ static void local_lock_3B(void)
HARDIRQ_DISABLE();
spin_lock(&lock_A);
local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
local_lock_release(&local_A);
local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
local_unlock(&local_A);
spin_unlock(&lock_A);
HARDIRQ_ENABLE();
@ -2812,7 +2877,7 @@ void locking_selftest(void)
printk("------------------------\n");
printk("| Locking API testsuite:\n");
printk("----------------------------------------------------------------------------\n");
printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
printk(" --------------------------------------------------------------------------\n");
init_shared_classes();
@ -2885,12 +2950,11 @@ void locking_selftest(void)
DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
printk(" --------------------------------------------------------------------------\n");
/*
* irq-context testcases:
*/
DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);

View File

@ -76,7 +76,7 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
return NULL;
}
static void real_iounmap(void __iomem *addr)
static void real_iounmap(volatile void __iomem *addr)
{
WARN(1, "invalid iounmap for addr 0x%llx\n",
(unsigned long long)(uintptr_t __force)addr);
@ -149,7 +149,7 @@ get_area(const volatile void __iomem *addr)
return NULL;
}
void iounmap(void __iomem *addr)
void iounmap(volatile void __iomem *addr)
{
struct logic_iomem_area *area = get_area(addr);

View File

@ -36,6 +36,8 @@
*/
#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/string.h> /* memset, memcpy */
#define FORCE_INLINE __always_inline

View File

@ -781,7 +781,6 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
struct objagg_tmp_node *node;
struct objagg_tmp_node *pnode;
struct objagg_obj *objagg_obj;
size_t alloc_size;
int i, j;
graph = kzalloc(sizeof(*graph), GFP_KERNEL);
@ -793,9 +792,7 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
goto err_nodes_alloc;
graph->nodes_count = nodes_count;
alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) *
sizeof(unsigned long);
graph->edges = kzalloc(alloc_size, GFP_KERNEL);
graph->edges = bitmap_zalloc(nodes_count * nodes_count, GFP_KERNEL);
if (!graph->edges)
goto err_edges_alloc;
@ -833,7 +830,7 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph)
{
kfree(graph->edges);
bitmap_free(graph->edges);
kfree(graph->nodes);
kfree(graph);
}
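A hedged equivalence note on the conversion above: for n * n bits, bitmap_zalloc() allocates exactly the BITS_TO_LONGS(n * n) * sizeof(unsigned long) zeroed bytes the open-coded version computed, and pairs with bitmap_free(). Sketch, with n standing in for nodes_count:

	unsigned long *edges = bitmap_zalloc(n * n, GFP_KERNEL);

	if (!edges)
		return -ENOMEM;
	/* ... __set_bit(i * n + j, edges) ... */
	bitmap_free(edges);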

View File

@ -145,13 +145,13 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
static inline const struct raid6_calls *raid6_choose_gen(
void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
unsigned long perf, bestgenperf, bestxorperf, j0, j1;
unsigned long perf, bestgenperf, j0, j1;
int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
const struct raid6_calls *const *algo;
const struct raid6_calls *best;
for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
if (!best || (*algo)->prefer >= best->prefer) {
for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
if (!best || (*algo)->priority >= best->priority) {
if ((*algo)->valid && !(*algo)->valid())
continue;
@ -180,50 +180,48 @@ static inline const struct raid6_calls *raid6_choose_gen(
pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
(perf * HZ * (disks-2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
if (!(*algo)->xor_syndrome)
continue;
perf = 0;
preempt_disable();
j0 = jiffies;
while ((j1 = jiffies) == j0)
cpu_relax();
while (time_before(jiffies,
j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
(*algo)->xor_syndrome(disks, start, stop,
PAGE_SIZE, *dptrs);
perf++;
}
preempt_enable();
if (best == *algo)
bestxorperf = perf;
pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
(perf * HZ * (disks-2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
}
}
if (best) {
if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
best->name,
(bestgenperf * HZ * (disks-2)) >>
(20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2));
if (best->xor_syndrome)
pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
(bestxorperf * HZ * (disks-2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
} else
pr_info("raid6: skip pq benchmark and using algorithm %s\n",
best->name);
raid6_call = *best;
} else
pr_err("raid6: Yikes! No algorithm found!\n");
if (!best) {
pr_err("raid6: Yikes! No algorithm found!\n");
goto out;
}
raid6_call = *best;
if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
pr_info("raid6: skipped pq benchmark and selected %s\n",
best->name);
goto out;
}
pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
best->name,
(bestgenperf * HZ * (disks - 2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
if (best->xor_syndrome) {
perf = 0;
preempt_disable();
j0 = jiffies;
while ((j1 = jiffies) == j0)
cpu_relax();
while (time_before(jiffies,
j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
best->xor_syndrome(disks, start, stop,
PAGE_SIZE, *dptrs);
perf++;
}
preempt_enable();
pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
(perf * HZ * (disks - 2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
}
out:
return best;
}
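A hedged derivation of the MB/s figures printed above; the arithmetic is unchanged by the refactor. Each benchmark loop counts perf syndrome generations over (1 << RAID6_TIME_JIFFIES_LG2) jiffies, each covering (disks - 2) pages:

	/*
	 * bytes/sec = perf * (disks - 2) * PAGE_SIZE * HZ
	 *             / (1 << RAID6_TIME_JIFFIES_LG2)
	 * MB/s      = bytes/sec >> 20
	 *           = (perf * HZ * (disks - 2))
	 *             >> (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2)
	 */

The xor() print carries one extra right shift, halving the reported figure, matching the convention the old code already used for the rmw path.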

View File

@ -132,7 +132,7 @@ const struct raid6_calls raid6_avx2x1 = {
raid6_avx21_xor_syndrome,
raid6_have_avx2,
"avx2x1",
1 /* Has cache hints */
.priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
/*
@ -262,7 +262,7 @@ const struct raid6_calls raid6_avx2x2 = {
raid6_avx22_xor_syndrome,
raid6_have_avx2,
"avx2x2",
1 /* Has cache hints */
.priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#ifdef CONFIG_X86_64
@ -465,6 +465,6 @@ const struct raid6_calls raid6_avx2x4 = {
raid6_avx24_xor_syndrome,
raid6_have_avx2,
"avx2x4",
1 /* Has cache hints */
.priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#endif
#endif /* CONFIG_X86_64 */

View File

@ -162,7 +162,7 @@ const struct raid6_calls raid6_avx512x1 = {
raid6_avx5121_xor_syndrome,
raid6_have_avx512,
"avx512x1",
1 /* Has cache hints */
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
/*
@ -319,7 +319,7 @@ const struct raid6_calls raid6_avx512x2 = {
raid6_avx5122_xor_syndrome,
raid6_have_avx512,
"avx512x2",
1 /* Has cache hints */
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#ifdef CONFIG_X86_64
@ -557,7 +557,7 @@ const struct raid6_calls raid6_avx512x4 = {
raid6_avx5124_xor_syndrome,
raid6_have_avx512,
"avx512x4",
1 /* Has cache hints */
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#endif

View File

@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <trace/events/random.h>

View File

@ -457,10 +457,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
unsigned int wake_batch)
{
unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
int i;
if (sbq->wake_batch != wake_batch) {
@ -476,6 +475,30 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
}
}
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
{
unsigned int wake_batch;
wake_batch = sbq_calc_wake_batch(sbq, depth);
__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
unsigned int users)
{
unsigned int wake_batch;
unsigned int min_batch;
unsigned int depth = (sbq->sb.depth + users - 1) / users;
min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;
wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
min_batch, SBQ_WAKE_BATCH);
__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
sbitmap_queue_update_wake_batch(sbq, depth);
@ -489,6 +512,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
unsigned int *offset)
{
struct sbitmap *sb = &sbq->sb;
unsigned int hint, depth;
unsigned long index, nr;
int i;
if (unlikely(sb->round_robin))
return 0;
depth = READ_ONCE(sb->depth);
hint = update_alloc_hint_before_get(sb, depth);
index = SB_NR_TO_INDEX(sb, hint);
for (i = 0; i < sb->map_nr; i++) {
struct sbitmap_word *map = &sb->map[index];
unsigned long get_mask;
sbitmap_deferred_clear(map);
if (map->word == (1UL << (map->depth - 1)) - 1)
continue;
nr = find_first_zero_bit(&map->word, map->depth);
if (nr + nr_tags <= map->depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
int map_tags = min_t(int, nr_tags, map->depth);
unsigned long val, ret;
get_mask = ((1UL << map_tags) - 1) << nr;
do {
val = READ_ONCE(map->word);
ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
} while (ret != val);
get_mask = (get_mask & ~ret) >> nr;
if (get_mask) {
*offset = nr + (index << sb->shift);
update_alloc_hint_after_get(sb, depth, hint,
*offset + map_tags - 1);
return get_mask;
}
}
/* Jump to next index. */
if (++index >= sb->map_nr)
index = 0;
}
return 0;
}
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth)
{
@ -577,6 +651,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
if (likely(!sb->round_robin && tag < sb->depth))
data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
int *tags, int nr_tags)
{
struct sbitmap *sb = &sbq->sb;
unsigned long *addr = NULL;
unsigned long mask = 0;
int i;
smp_mb__before_atomic();
for (i = 0; i < nr_tags; i++) {
const int tag = tags[i] - offset;
unsigned long *this_addr;
/* since we're clearing a batch, skip the deferred map */
this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
if (!addr) {
addr = this_addr;
} else if (addr != this_addr) {
atomic_long_andnot(mask, (atomic_long_t *) addr);
mask = 0;
addr = this_addr;
}
mask |= (1UL << SB_NR_TO_BIT(sb, tag));
}
if (mask)
atomic_long_andnot(mask, (atomic_long_t *) addr);
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq);
sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
tags[nr_tags - 1] - offset);
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
@ -601,9 +715,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
*/
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq);
if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
*per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
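A hedged usage sketch of the new batch API; sbq and nr_tags are assumed to come from the caller, as in blk-mq-style tag allocation:

	unsigned int offset;
	unsigned long mask = __sbitmap_queue_get_batch(sbq, nr_tags, &offset);
	int tags[BITS_PER_LONG], n = 0;

	while (mask) {
		tags[n++] = offset + __ffs(mask);	/* absolute tag numbers */
		mask &= mask - 1;			/* clear lowest set bit */
	}
	/* ... hand the tags out, then release them in one atomic pass ... */
	if (n)		/* clear_batch dereferences tags[nr_tags - 1] */
		sbitmap_queue_clear_batch(sbq, 0, tags, n);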

View File

@ -828,8 +828,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
* stops @miter.
*
* Context:
* Don't care if @miter is stopped, or not proceeded yet.
* Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
* Don't care.
*
* Returns:
* true if @miter contains the valid mapping. false if end of sg
@ -865,8 +864,7 @@ EXPORT_SYMBOL(sg_miter_skip);
* @miter->addr and @miter->length point to the current mapping.
*
* Context:
* Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
* till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
* May sleep if !SG_MITER_ATOMIC.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
@ -906,8 +904,7 @@ EXPORT_SYMBOL(sg_miter_next);
* need to be released during iteration.
*
* Context:
* Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
* otherwise.
* Don't care otherwise.
*/
void sg_miter_stop(struct sg_mapping_iter *miter)
{
@ -922,7 +919,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON_ONCE(preemptible());
WARN_ON_ONCE(!pagefault_disabled());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);

View File

@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <crypto/sha1.h>
#include <asm/unaligned.h>
@ -55,7 +56,8 @@
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \
B = ror32(B, 2); } while (0)
B = ror32(B, 2); \
TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
@ -84,6 +86,7 @@
void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
unsigned int i = 0;
A = digest[0];
B = digest[1];
@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array)
E = digest[4];
/* Round 1 - iterations 0-16 take their input from 'data' */
T_0_15( 0, A, B, C, D, E);
T_0_15( 1, E, A, B, C, D);
T_0_15( 2, D, E, A, B, C);
T_0_15( 3, C, D, E, A, B);
T_0_15( 4, B, C, D, E, A);
T_0_15( 5, A, B, C, D, E);
T_0_15( 6, E, A, B, C, D);
T_0_15( 7, D, E, A, B, C);
T_0_15( 8, C, D, E, A, B);
T_0_15( 9, B, C, D, E, A);
T_0_15(10, A, B, C, D, E);
T_0_15(11, E, A, B, C, D);
T_0_15(12, D, E, A, B, C);
T_0_15(13, C, D, E, A, B);
T_0_15(14, B, C, D, E, A);
T_0_15(15, A, B, C, D, E);
for (; i < 16; ++i)
T_0_15(i, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */
T_16_19(16, E, A, B, C, D);
T_16_19(17, D, E, A, B, C);
T_16_19(18, C, D, E, A, B);
T_16_19(19, B, C, D, E, A);
for (; i < 20; ++i)
T_16_19(i, A, B, C, D, E);
/* Round 2 */
T_20_39(20, A, B, C, D, E);
T_20_39(21, E, A, B, C, D);
T_20_39(22, D, E, A, B, C);
T_20_39(23, C, D, E, A, B);
T_20_39(24, B, C, D, E, A);
T_20_39(25, A, B, C, D, E);
T_20_39(26, E, A, B, C, D);
T_20_39(27, D, E, A, B, C);
T_20_39(28, C, D, E, A, B);
T_20_39(29, B, C, D, E, A);
T_20_39(30, A, B, C, D, E);
T_20_39(31, E, A, B, C, D);
T_20_39(32, D, E, A, B, C);
T_20_39(33, C, D, E, A, B);
T_20_39(34, B, C, D, E, A);
T_20_39(35, A, B, C, D, E);
T_20_39(36, E, A, B, C, D);
T_20_39(37, D, E, A, B, C);
T_20_39(38, C, D, E, A, B);
T_20_39(39, B, C, D, E, A);
for (; i < 40; ++i)
T_20_39(i, A, B, C, D, E);
/* Round 3 */
T_40_59(40, A, B, C, D, E);
T_40_59(41, E, A, B, C, D);
T_40_59(42, D, E, A, B, C);
T_40_59(43, C, D, E, A, B);
T_40_59(44, B, C, D, E, A);
T_40_59(45, A, B, C, D, E);
T_40_59(46, E, A, B, C, D);
T_40_59(47, D, E, A, B, C);
T_40_59(48, C, D, E, A, B);
T_40_59(49, B, C, D, E, A);
T_40_59(50, A, B, C, D, E);
T_40_59(51, E, A, B, C, D);
T_40_59(52, D, E, A, B, C);
T_40_59(53, C, D, E, A, B);
T_40_59(54, B, C, D, E, A);
T_40_59(55, A, B, C, D, E);
T_40_59(56, E, A, B, C, D);
T_40_59(57, D, E, A, B, C);
T_40_59(58, C, D, E, A, B);
T_40_59(59, B, C, D, E, A);
for (; i < 60; ++i)
T_40_59(i, A, B, C, D, E);
/* Round 4 */
T_60_79(60, A, B, C, D, E);
T_60_79(61, E, A, B, C, D);
T_60_79(62, D, E, A, B, C);
T_60_79(63, C, D, E, A, B);
T_60_79(64, B, C, D, E, A);
T_60_79(65, A, B, C, D, E);
T_60_79(66, E, A, B, C, D);
T_60_79(67, D, E, A, B, C);
T_60_79(68, C, D, E, A, B);
T_60_79(69, B, C, D, E, A);
T_60_79(70, A, B, C, D, E);
T_60_79(71, E, A, B, C, D);
T_60_79(72, D, E, A, B, C);
T_60_79(73, C, D, E, A, B);
T_60_79(74, B, C, D, E, A);
T_60_79(75, A, B, C, D, E);
T_60_79(76, E, A, B, C, D);
T_60_79(77, D, E, A, B, C);
T_60_79(78, C, D, E, A, B);
T_60_79(79, B, C, D, E, A);
for (; i < 80; ++i)
T_60_79(i, A, B, C, D, E);
digest[0] += A;
digest[1] += B;

View File

@ -20,10 +20,10 @@
*/
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
@ -102,8 +102,8 @@ static bool init_stack_slab(void **prealloc)
}
/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
u32 hash, void **prealloc, gfp_t alloc_flags)
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
size_t required_size = struct_size(stack, entries, size);
@ -162,18 +162,40 @@ static int __init is_stack_depot_disabled(char *str)
}
early_param("stack_depot_disable", is_stack_depot_disabled);
int __init stack_depot_init(void)
/*
* __ref because of memblock_alloc(), which will never actually be called once
* the __init code is gone, because by that point slab_is_available() is true
*/
__ref int stack_depot_init(void)
{
if (!stack_depot_disable) {
static DEFINE_MUTEX(stack_depot_init_mutex);
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disable && !stack_table) {
size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
int i;
stack_table = memblock_alloc(size, size);
for (i = 0; i < STACK_HASH_SIZE; i++)
stack_table[i] = NULL;
if (slab_is_available()) {
pr_info("Stack Depot allocating hash table with kvmalloc\n");
stack_table = kvmalloc(size, GFP_KERNEL);
} else {
pr_info("Stack Depot allocating hash table with memblock_alloc\n");
stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
}
if (stack_table) {
for (i = 0; i < STACK_HASH_SIZE; i++)
stack_table[i] = NULL;
} else {
pr_err("Stack Depot hash table allocation failed, disabling\n");
stack_depot_disable = true;
mutex_unlock(&stack_depot_init_mutex);
return -ENOMEM;
}
}
mutex_unlock(&stack_depot_init_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
@ -214,6 +236,49 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
/**
* stack_depot_snprint - print stack entries from a depot into a buffer
*
* @handle: Stack depot handle which was returned from
* stack_depot_save().
* @buf: Pointer to the print buffer
*
* @size: Size of the print buffer
*
* @spaces: Number of leading spaces to print
*
* Return: Number of bytes printed.
*/
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(handle, &entries);
return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
/**
* stack_depot_print - print stack entries from a depot
*
* @stack: Stack depot handle which was returned from
* stack_depot_save().
*
*/
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(stack, &entries);
if (nr_entries > 0)
stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
* stack_depot_fetch - Fetch stack entries from a depot
*
@ -232,6 +297,9 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
struct stack_record *stack;
*entries = NULL;
if (!handle)
return 0;
if (parts.slabindex > depot_index) {
WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
parts.slabindex, depot_index, handle);
@ -248,17 +316,31 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
* stack_depot_save - Save a stack trace from an array
* __stack_depot_save - Save a stack trace from an array
*
* @entries: Pointer to storage array
* @nr_entries: Size of the storage array
* @alloc_flags: Allocation gfp flags
* @can_alloc: Allocate stack slabs (increased chance of failure if false)
*
* Return: The handle of the stack struct stored in depot
* Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
* %true, the function is allowed to replenish the stack slab pool in case no
* space is left (allocating with the GFP flags of @alloc_flags). If @can_alloc
* is %false, it avoids any allocations and will fail if no space is left to
* store the stack trace.
*
* If the stack trace in @entries is from an interrupt, only the portion up to
* interrupt entry is saved.
*
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case from contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
* Return: The handle of the stack struct stored in depot, 0 on failure.
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags, bool can_alloc)
{
struct stack_record *found = NULL, **bucket;
depot_stack_handle_t retval = 0;
@ -267,6 +349,16 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned long flags;
u32 hash;
/*
* If this stack trace is from an interrupt, including anything before
* interrupt entry usually leads to unbounded stackdepot growth.
*
* Because use of filter_irq_stacks() is a requirement to ensure
* stackdepot can efficiently deduplicate interrupt stacks, always
* filter_irq_stacks() to simplify all callers' use of stackdepot.
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
if (unlikely(nr_entries == 0) || stack_depot_disable)
goto fast_exit;
@ -291,7 +383,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
* The smp_load_acquire() here pairs with smp_store_release() to
* |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
*/
if (unlikely(!smp_load_acquire(&next_slab_inited))) {
if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@ -309,9 +401,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
depot_alloc_stack(entries, nr_entries,
hash, &prealloc, alloc_flags);
struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
new->next = *bucket;
/*
@ -340,27 +431,24 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
fast_exit:
return retval;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
/**
* stack_depot_save - Save a stack trace from an array
*
* @entries: Pointer to storage array
* @nr_entries: Size of the storage array
* @alloc_flags: Allocation gfp flags
*
* Context: Contexts where allocations via alloc_pages() are allowed.
* See __stack_depot_save() for more details.
*
* Return: The handle of the stack struct stored in depot, 0 on failure.
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
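For orientation, a minimal sketch of how a caller combines these helpers; stack_trace_save() is the usual producer of the entries array, while the 64-entry buffer and the GFP_KERNEL flag are assumptions of the example, not part of this diff:

	/* Hedged usage sketch: capture, deduplicate, and later print a trace. */
	static depot_stack_handle_t record_current_stack(void)
	{
		unsigned long entries[64];	/* assumed depth for the example */
		unsigned int nr_entries;

		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		/* Returns 0 if the depot is full and no new slab can be allocated. */
		return __stack_depot_save(entries, nr_entries, GFP_KERNEL, true);
	}

	/* Later, e.g. from an error report path: stack_depot_print(handle); */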
static inline int in_irqentry_text(unsigned long ptr)
{
return (ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end) ||
(ptr >= (unsigned long)&__softirqentry_text_start &&
ptr < (unsigned long)&__softirqentry_text_end);
}
unsigned int filter_irq_stacks(unsigned long *entries,
unsigned int nr_entries)
{
unsigned int i;
for (i = 0; i < nr_entries; i++) {
if (in_irqentry_text(entries[i])) {
/* Include the irqentry function into the stack. */
return i + 1;
}
}
return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);


@ -6,20 +6,15 @@
*/
/*
* stupid library routines.. The optimized versions should generally be found
* as inline code in <asm-xx/string.h>
* This file should be used only for "library" routines that may have
* alternative implementations on specific architectures (generally
* found in <asm-xx/string.h>), or get overloaded by FORTIFY_SOURCE.
* (Specifically, this file is built with __NO_FORTIFY.)
*
* These are buggy as well..
*
* * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de>
* - Added strsep() which will replace strtok() soon (because strsep() is
* reentrant and should be faster). Use only strsep() in new code, please.
*
* * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>,
* Matthew Hawkins <matt@mh.dropbear.id.au>
* - Kissed strtok() goodbye
* Other helper functions should live in string_helpers.c.
*/
#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
@ -238,40 +233,6 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strscpy);
#endif
/**
* strscpy_pad() - Copy a C-string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: Size of destination buffer
*
* Copy the string, or as much of it as fits, into the dest buffer. The
* behavior is undefined if the string buffers overlap. The destination
* buffer is always %NUL terminated, unless it's zero-sized.
*
* If the source string is shorter than the destination buffer, zeros
* the tail of the destination buffer.
*
* For full explanation of why you may want to consider using the
* 'strscpy' functions please see the function docstring for strscpy().
*
* Returns:
* * The number of characters copied (not including the trailing %NUL)
* * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy_pad(char *dest, const char *src, size_t count)
{
ssize_t written;
written = strscpy(dest, src, count);
if (written < 0 || written == count - 1)
return written;
memset(dest + written + 1, 0, count - written - 1);
return written;
}
EXPORT_SYMBOL(strscpy_pad);
/**
* stpcpy - copy a string from src to dest returning a pointer to the new end
* of dest, including src's %NUL-terminator. May overrun dest.
@ -514,46 +475,6 @@ char *strnchr(const char *s, size_t count, int c)
EXPORT_SYMBOL(strnchr);
#endif
/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
* Returns a pointer to the first non-whitespace character in @str.
*/
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
EXPORT_SYMBOL(skip_spaces);
/**
* strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
char *strim(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
return skip_spaces(s);
}
EXPORT_SYMBOL(strim);
#ifndef __HAVE_ARCH_STRLEN
/**
* strlen - Find the length of a string
@ -688,101 +609,6 @@ char *strsep(char **s, const char *ct)
EXPORT_SYMBOL(strsep);
#endif
/**
* sysfs_streq - return true if strings are equal, modulo trailing newline
* @s1: one string
* @s2: another string
*
* This routine returns true iff two strings are equal, treating both
* NUL and newline-then-NUL as equivalent string terminations. It's
* geared for use with sysfs input strings, which generally terminate
* with newlines but are compared against values without newlines.
*/
bool sysfs_streq(const char *s1, const char *s2)
{
while (*s1 && *s1 == *s2) {
s1++;
s2++;
}
if (*s1 == *s2)
return true;
if (!*s1 && *s2 == '\n' && !s2[1])
return true;
if (*s1 == '\n' && !s1[1] && !*s2)
return true;
return false;
}
EXPORT_SYMBOL(sysfs_streq);
/**
* match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @string: string to match with
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
* Historically the value of -1 for @n, was used to search in arrays that
* are NULL terminated. However, the function does not make a distinction
* when finishing the search: either @n elements have been compared OR
* the first NULL element was found.
*
* Return:
* index of a @string in the @array if matches, or %-EINVAL otherwise.
*/
int match_string(const char * const *array, size_t n, const char *string)
{
int index;
const char *item;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (!strcmp(item, string))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(match_string);
/**
* __sysfs_match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @str: string to match with
*
* Returns index of @str in the @array or -EINVAL, just like match_string().
* Uses sysfs_streq instead of strcmp for matching.
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
* Historically the value of -1 for @n, was used to search in arrays that
* are NULL terminated. However, the function does not make a distinction
* when finishing the search: either @n elements have been compared OR
* the first NULL element was found.
*/
int __sysfs_match_string(const char * const *array, size_t n, const char *str)
{
const char *item;
int index;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (sysfs_streq(item, str))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(__sysfs_match_string);
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
@ -1141,27 +967,3 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes % 8);
}
EXPORT_SYMBOL(memchr_inv);
/**
* strreplace - Replace all occurrences of character in string.
* @s: The string to operate on.
* @old: The character being replaced.
* @new: The character @old is replaced with.
*
* Returns pointer to the nul byte at the end of @s.
*/
char *strreplace(char *s, char old, char new)
{
for (; *s; ++s)
if (*s == old)
*s = new;
return s;
}
EXPORT_SYMBOL(strreplace);
void fortify_panic(const char *name)
{
pr_emerg("detected buffer overflow in %s\n", name);
BUG();
}
EXPORT_SYMBOL(fortify_panic);


@ -10,6 +10,7 @@
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/limits.h>
@ -674,6 +675,39 @@ char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
/**
* kasprintf_strarray - allocate and fill array of sequential strings
* @gfp: flags for the slab allocator
* @prefix: prefix to be used
* @n: amount of lines to be allocated and filled
*
 * Allocates and fills @n strings using the pattern "%s-%zu", where the
 * prefix is supplied by the caller. The caller is responsible for freeing
 * the strings with kfree_strarray() after use.
*
* Returns array of strings or NULL when memory can't be allocated.
*/
char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n)
{
char **names;
size_t i;
names = kcalloc(n + 1, sizeof(char *), gfp);
if (!names)
return NULL;
for (i = 0; i < n; i++) {
names[i] = kasprintf(gfp, "%s-%zu", prefix, i);
if (!names[i]) {
kfree_strarray(names, i);
return NULL;
}
}
return names;
}
EXPORT_SYMBOL_GPL(kasprintf_strarray);
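A hypothetical usage sketch (the "led" prefix and the count of 3 are invented for the example):

	char **names;

	names = kasprintf_strarray(GFP_KERNEL, "led", 3);
	if (!names)
		return -ENOMEM;
	/* names is now { "led-0", "led-1", "led-2", NULL } */

	/* ... use the strings ... */
	kfree_strarray(names, 3);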
/**
* kfree_strarray - free a number of dynamically allocated strings contained
* in an array and the array itself
@ -697,6 +731,222 @@ void kfree_strarray(char **array, size_t n)
}
EXPORT_SYMBOL_GPL(kfree_strarray);
struct strarray {
char **array;
size_t n;
};
static void devm_kfree_strarray(struct device *dev, void *res)
{
struct strarray *array = res;
kfree_strarray(array->array, array->n);
}
char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
{
struct strarray *ptr;
ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n);
if (!ptr->array) {
devres_free(ptr);
return ERR_PTR(-ENOMEM);
}
return ptr->array;
}
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
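Note the differing failure conventions: kasprintf_strarray() returns NULL on failure, while the device-managed variant returns an ERR_PTR(). A hedged sketch (the "pin" prefix is invented):

	char **names = devm_kasprintf_strarray(dev, "pin", 4);

	if (IS_ERR(names))
		return PTR_ERR(names);
	/* Freed automatically when @dev is released - no kfree_strarray() here. */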
/**
* strscpy_pad() - Copy a C-string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: Size of destination buffer
*
* Copy the string, or as much of it as fits, into the dest buffer. The
* behavior is undefined if the string buffers overlap. The destination
* buffer is always %NUL terminated, unless it's zero-sized.
*
* If the source string is shorter than the destination buffer, zeros
* the tail of the destination buffer.
*
* For full explanation of why you may want to consider using the
* 'strscpy' functions please see the function docstring for strscpy().
*
* Returns:
* * The number of characters copied (not including the trailing %NUL)
* * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy_pad(char *dest, const char *src, size_t count)
{
ssize_t written;
written = strscpy(dest, src, count);
if (written < 0 || written == count - 1)
return written;
memset(dest + written + 1, 0, count - written - 1);
return written;
}
EXPORT_SYMBOL(strscpy_pad);
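A small illustration of the padding guarantee (buffer contents spelled out for clarity; this is an example, not part of the change):

	char buf[8];
	ssize_t n;

	n = strscpy_pad(buf, "abc", sizeof(buf));
	/* n == 3; buf is "abc\0\0\0\0\0" - the tail beyond the NUL is zeroed. */

	n = strscpy_pad(buf, "a longer string", sizeof(buf));
	/* n == -E2BIG; buf holds a truncated, NUL-terminated prefix. */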
/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
* Returns a pointer to the first non-whitespace character in @str.
*/
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
EXPORT_SYMBOL(skip_spaces);
/**
* strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
char *strim(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
return skip_spaces(s);
}
EXPORT_SYMBOL(strim);
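For example (in place, no allocation):

	char buf[] = "  hello world\n";
	char *trimmed = strim(buf);
	/* trimmed points at "hello world" inside buf; the trailing
	 * whitespace was cut by writing a '\0' over it. */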
/**
* sysfs_streq - return true if strings are equal, modulo trailing newline
* @s1: one string
* @s2: another string
*
* This routine returns true iff two strings are equal, treating both
* NUL and newline-then-NUL as equivalent string terminations. It's
* geared for use with sysfs input strings, which generally terminate
* with newlines but are compared against values without newlines.
*/
bool sysfs_streq(const char *s1, const char *s2)
{
while (*s1 && *s1 == *s2) {
s1++;
s2++;
}
if (*s1 == *s2)
return true;
if (!*s1 && *s2 == '\n' && !s2[1])
return true;
if (*s1 == '\n' && !s1[1] && !*s2)
return true;
return false;
}
EXPORT_SYMBOL(sysfs_streq);
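That makes comparisons like the following come out as sysfs users expect (illustrative only):

	sysfs_streq("enable\n", "enable");	/* true */
	sysfs_streq("enable", "enable\n");	/* true */
	sysfs_streq("enable\n", "enabled");	/* false */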
/**
* match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @string: string to match with
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
 * Historically, the value of -1 for @n was used to search in arrays that
 * are NULL terminated. However, the function makes no distinction when
 * finishing the search: either @n elements have been compared OR the
 * first NULL element was found.
*
* Return:
* index of a @string in the @array if matches, or %-EINVAL otherwise.
*/
int match_string(const char * const *array, size_t n, const char *string)
{
int index;
const char *item;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (!strcmp(item, string))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(match_string);
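A hypothetical example (the modes table is invented):

	static const char * const modes[] = { "off", "auto", "manual" };
	int idx = match_string(modes, ARRAY_SIZE(modes), "auto");
	/* idx == 1; an unmatched string would return -EINVAL. */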
/**
* __sysfs_match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @str: string to match with
*
* Returns index of @str in the @array or -EINVAL, just like match_string().
* Uses sysfs_streq instead of strcmp for matching.
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
 * Historically, the value of -1 for @n was used to search in arrays that
 * are NULL terminated. However, the function makes no distinction when
 * finishing the search: either @n elements have been compared OR the
 * first NULL element was found.
*/
int __sysfs_match_string(const char * const *array, size_t n, const char *str)
{
const char *item;
int index;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (sysfs_streq(item, str))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(__sysfs_match_string);
/**
* strreplace - Replace all occurrences of character in string.
* @s: The string to operate on.
* @old: The character being replaced.
* @new: The character @old is replaced with.
*
* Returns pointer to the nul byte at the end of @s.
*/
char *strreplace(char *s, char old, char new)
{
for (; *s; ++s)
if (*s == old)
*s = new;
return s;
}
EXPORT_SYMBOL(strreplace);
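For instance:

	char path[] = "a/b/c";
	char *end = strreplace(path, '/', '!');
	/* path is now "a!b!c"; end points at its terminating NUL. */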
/**
* memcpy_and_pad - Copy one buffer to another with padding
* @dest: Where to copy to
@ -716,3 +966,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
}
}
EXPORT_SYMBOL(memcpy_and_pad);
#ifdef CONFIG_FORTIFY_SOURCE
void fortify_panic(const char *name)
{
pr_emerg("detected buffer overflow in %s\n", name);
BUG();
}
EXPORT_SYMBOL(fortify_panic);
#endif /* CONFIG_FORTIFY_SOURCE */


@ -446,6 +446,42 @@ static void __init test_bitmap_parselist(void)
}
}
static void __init test_bitmap_printlist(void)
{
unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
char expected[256];
int ret, slen;
ktime_t time;
if (!buf || !bmap)
goto out;
memset(bmap, -1, PAGE_SIZE);
slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
if (slen < 0)
goto out;
time = ktime_get();
ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
time = ktime_get() - time;
if (ret != slen + 1) {
pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
goto out;
}
if (strncmp(buf, expected, slen)) {
pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
goto out;
}
pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
out:
kfree(buf);
kfree(bmap);
}
static const unsigned long parse_test[] __initconst = {
BITMAP_FROM_U64(0),
BITMAP_FROM_U64(1),
@ -818,6 +854,7 @@ static void __init selftest(void)
test_bitmap_arr32();
test_bitmap_parse();
test_bitmap_parselist();
test_bitmap_printlist();
test_mem_optimisations();
test_for_each_set_clump8();
test_bitmap_cut();

File diff suppressed because it is too large.


@ -14,17 +14,15 @@
* and hash_64().
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/printk.h>
#include <kunit/test.h>
/* 32-bit XORSHIFT generator. Seed must not be zero. */
static u32 __init __attribute_const__
static u32 __attribute_const__
xorshift(u32 seed)
{
seed ^= seed << 13;
@ -34,7 +32,7 @@ xorshift(u32 seed)
}
/* Given a non-zero x, returns a non-zero byte. */
static u8 __init __attribute_const__
static u8 __attribute_const__
mod255(u32 x)
{
x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
@ -45,8 +43,7 @@ mod255(u32 x)
}
/* Fill the buffer with non-zero bytes. */
static void __init
fill_buf(char *buf, size_t len, u32 seed)
static void fill_buf(char *buf, size_t len, u32 seed)
{
size_t i;
@ -56,6 +53,50 @@ fill_buf(char *buf, size_t len, u32 seed)
}
}
/* Holds most testing variables for the int test. */
struct test_hash_params {
/* Pointer to integer to be hashed. */
unsigned long long *h64;
/* Low 32-bits of integer to be hashed. */
u32 h0;
/* Arch-specific hash result. */
u32 h1;
/* Generic hash result. */
u32 h2;
/* ORed hashes of given size (in bits). */
u32 (*hash_or)[33];
};
#ifdef HAVE_ARCH__HASH_32
static void
test_int__hash_32(struct kunit *test, struct test_hash_params *params)
{
params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
#if HAVE_ARCH__HASH_32 == 1
KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
"__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
params->h0, params->h1, params->h2);
#endif
}
#endif
#ifdef HAVE_ARCH_HASH_64
static void
test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
{
params->h2 = hash_64_generic(*params->h64, *k);
#if HAVE_ARCH_HASH_64 == 1
KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
"hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
*params->h64, *k, params->h1, params->h2);
#else
KUNIT_EXPECT_LE_MSG(test, params->h1, params->h2,
"hash_64_generic(%#llx, %d) = %#x > %#x",
*params->h64, *k, params->h1, *m);
#endif
}
#endif
/*
* Test the various integer hash functions. h64 (or its low-order bits)
* is the integer to hash. hash_or accumulates the OR of the hash values,
@ -65,23 +106,16 @@ fill_buf(char *buf, size_t len, u32 seed)
* inline, the code being tested is actually in the module, and you can
* recompile and re-test the module without rebooting.
*/
static bool __init
test_int_hash(unsigned long long h64, u32 hash_or[2][33])
static void
test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
{
int k;
u32 h0 = (u32)h64, h1, h2;
struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
/* Test __hash32 */
hash_or[0][0] |= h1 = __hash_32(h0);
hash_or[0][0] |= params.h1 = __hash_32(params.h0);
#ifdef HAVE_ARCH__HASH_32
hash_or[1][0] |= h2 = __hash_32_generic(h0);
#if HAVE_ARCH__HASH_32 == 1
if (h1 != h2) {
pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
h0, h1, h2);
return false;
}
#endif
test_int__hash_32(test, &params);
#endif
/* Test k = 1..32 bits */
@ -89,63 +123,53 @@ test_int_hash(unsigned long long h64, u32 hash_or[2][33])
u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
/* Test hash_32 */
hash_or[0][k] |= h1 = hash_32(h0, k);
if (h1 > m) {
pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
return false;
}
#ifdef HAVE_ARCH_HASH_32
h2 = hash_32_generic(h0, k);
#if HAVE_ARCH_HASH_32 == 1
if (h1 != h2) {
pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
" = %#x", h0, k, h1, h2);
return false;
}
#else
if (h2 > m) {
pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
h0, k, h1, m);
return false;
}
#endif
#endif
hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
KUNIT_EXPECT_LE_MSG(test, params.h1, m,
"hash_32(%#x, %d) = %#x > %#x",
params.h0, k, params.h1, m);
/* Test hash_64 */
hash_or[1][k] |= h1 = hash_64(h64, k);
if (h1 > m) {
pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
return false;
}
hash_or[1][k] |= params.h1 = hash_64(h64, k);
KUNIT_EXPECT_LE_MSG(test, params.h1, m,
"hash_64(%#llx, %d) = %#x > %#x",
h64, k, params.h1, m);
#ifdef HAVE_ARCH_HASH_64
h2 = hash_64_generic(h64, k);
#if HAVE_ARCH_HASH_64 == 1
if (h1 != h2) {
pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
"= %#x", h64, k, h1, h2);
return false;
}
#else
if (h2 > m) {
pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
h64, k, h1, m);
return false;
}
#endif
test_int_hash_64(test, &params, &m, &k);
#endif
}
(void)h2; /* Suppress unused variable warning */
return true;
}
#define SIZE 256 /* Run time is cubic in SIZE */
static int __init
test_hash_init(void)
static void test_string_or(struct kunit *test)
{
char buf[SIZE+1];
u32 string_or = 0, hash_or[2][33] = { { 0, } };
unsigned tests = 0;
u32 string_or = 0;
int i, j;
fill_buf(buf, SIZE, 1);
/* Test every possible non-empty substring in the buffer. */
for (j = SIZE; j > 0; --j) {
buf[j] = '\0';
for (i = 0; i <= j; i++) {
u32 h0 = full_name_hash(buf+i, buf+i, j-i);
string_or |= h0;
} /* i */
} /* j */
/* The OR of all the hash values should cover all the bits */
KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
"OR of all string hash results = %#x != %#x",
string_or, -1u);
}
static void test_hash_or(struct kunit *test)
{
char buf[SIZE+1];
u32 hash_or[2][33] = { { 0, } };
unsigned long long h64 = 0;
int i, j;
@ -160,46 +184,27 @@ test_hash_init(void)
u32 h0 = full_name_hash(buf+i, buf+i, j-i);
/* Check that hashlen_string gets the length right */
if (hashlen_len(hashlen) != j-i) {
pr_err("hashlen_string(%d..%d) returned length"
" %u, expected %d",
i, j, hashlen_len(hashlen), j-i);
return -EINVAL;
}
KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
"hashlen_string(%d..%d) returned length %u, expected %d",
i, j, hashlen_len(hashlen), j-i);
/* Check that the hashes match */
if (hashlen_hash(hashlen) != h0) {
pr_err("hashlen_string(%d..%d) = %08x != "
"full_name_hash() = %08x",
i, j, hashlen_hash(hashlen), h0);
return -EINVAL;
}
KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
"hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
i, j, hashlen_hash(hashlen), h0);
string_or |= h0;
h64 = h64 << 32 | h0; /* For use with hash_64 */
if (!test_int_hash(h64, hash_or))
return -EINVAL;
tests++;
test_int_hash(test, h64, hash_or);
} /* i */
} /* j */
/* The OR of all the hash values should cover all the bits */
if (~string_or) {
pr_err("OR of all string hash results = %#x != %#x",
string_or, -1u);
return -EINVAL;
}
if (~hash_or[0][0]) {
pr_err("OR of all __hash_32 results = %#x != %#x",
hash_or[0][0], -1u);
return -EINVAL;
}
KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
"OR of all __hash_32 results = %#x != %#x",
hash_or[0][0], -1u);
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
if (~hash_or[1][0]) {
pr_err("OR of all __hash_32_generic results = %#x != %#x",
hash_or[1][0], -1u);
return -EINVAL;
}
KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
"OR of all __hash_32_generic results = %#x != %#x",
hash_or[1][0], -1u);
#endif
#endif
@ -207,51 +212,27 @@ test_hash_init(void)
for (i = 1; i <= 32; i++) {
u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
if (hash_or[0][i] != m) {
pr_err("OR of all hash_32(%d) results = %#x "
"(%#x expected)", i, hash_or[0][i], m);
return -EINVAL;
}
if (hash_or[1][i] != m) {
pr_err("OR of all hash_64(%d) results = %#x "
"(%#x expected)", i, hash_or[1][i], m);
return -EINVAL;
}
KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
"OR of all hash_32(%d) results = %#x (%#x expected)",
i, hash_or[0][i], m);
KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
"OR of all hash_64(%d) results = %#x (%#x expected)",
i, hash_or[1][i], m);
}
/* Issue notices about skipped tests. */
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1
pr_info("__hash_32() is arch-specific; not compared to generic.");
#endif
#else
pr_info("__hash_32() has no arch implementation to test.");
#endif
#ifdef HAVE_ARCH_HASH_32
#if HAVE_ARCH_HASH_32 != 1
pr_info("hash_32() is arch-specific; not compared to generic.");
#endif
#else
pr_info("hash_32() has no arch implementation to test.");
#endif
#ifdef HAVE_ARCH_HASH_64
#if HAVE_ARCH_HASH_64 != 1
pr_info("hash_64() is arch-specific; not compared to generic.");
#endif
#else
pr_info("hash_64() has no arch implementation to test.");
#endif
pr_notice("%u tests passed.", tests);
return 0;
}
static void __exit test_hash_exit(void)
{
}
static struct kunit_case hash_test_cases[] __refdata = {
KUNIT_CASE(test_string_or),
KUNIT_CASE(test_hash_or),
{}
};
module_init(test_hash_init); /* Does everything */
module_exit(test_hash_exit); /* Does nothing */
static struct kunit_suite hash_test_suite = {
.name = "hash",
.test_cases = hash_test_cases,
};
kunit_test_suite(hash_test_suite);
MODULE_LICENSE("GPL");


@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
*/
rpage->zone_device_data = dmirror;
*dst = migrate_pfn(page_to_pfn(dpage)) |
MIGRATE_PFN_LOCKED;
*dst = migrate_pfn(page_to_pfn(dpage));
if ((*src & MIGRATE_PFN_WRITE) ||
(!spage && args->vma->vm_flags & VM_WRITE))
*dst |= MIGRATE_PFN_WRITE;
@ -1161,7 +1160,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
lock_page(dpage);
xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
copy_highpage(dpage, spage);
*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
*dst = migrate_pfn(page_to_pfn(dpage));
if (*src & MIGRATE_PFN_WRITE)
*dst |= MIGRATE_PFN_WRITE;
}


@ -88,7 +88,7 @@ static void kasan_test_exit(struct kunit *test)
*/
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) \
kasan_sync_fault_possible()) \
migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
barrier(); \
@ -440,6 +440,7 @@ static void kmalloc_oob_memset_2(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
kfree(ptr);
}
@ -452,6 +453,7 @@ static void kmalloc_oob_memset_4(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
kfree(ptr);
}
@ -464,6 +466,7 @@ static void kmalloc_oob_memset_8(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
kfree(ptr);
}
@ -476,6 +479,7 @@ static void kmalloc_oob_memset_16(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
kfree(ptr);
}
@ -488,16 +492,18 @@ static void kmalloc_oob_in_memset(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test,
memset(ptr, 0, size + KASAN_GRANULE_SIZE));
kfree(ptr);
}
static void kmalloc_memmove_invalid_size(struct kunit *test)
static void kmalloc_memmove_negative_size(struct kunit *test)
{
char *ptr;
size_t size = 64;
volatile size_t invalid_size = -2;
size_t invalid_size = -2;
/*
* Hardware tag-based mode doesn't check memmove for negative size.
@ -510,6 +516,24 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(invalid_size);
KUNIT_EXPECT_KASAN_FAIL(test,
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
}
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
char *ptr;
size_t size = 64;
volatile size_t invalid_size = size;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test,
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
@ -679,7 +703,7 @@ static void kmem_cache_bulk(struct kunit *test)
static char global_array[10];
static void kasan_global_oob(struct kunit *test)
static void kasan_global_oob_right(struct kunit *test)
{
/*
* Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
@ -702,6 +726,20 @@ static void kasan_global_oob(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_global_oob_left(struct kunit *test)
{
char *volatile array = global_array;
char *p = array - 3;
/*
* GCC is known to fail this test, skip it.
* See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
*/
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
@ -831,6 +869,19 @@ static void kmem_cache_invalid_free(struct kunit *test)
kmem_cache_destroy(cache);
}
static void empty_cache_ctor(void *object) { }
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
/* Provide a constructor to prevent cache merging. */
cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
kmem_cache_destroy(cache);
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}
static void kasan_memchr(struct kunit *test)
{
char *ptr;
@ -848,6 +899,8 @@ static void kasan_memchr(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test,
kasan_ptr_result = memchr(ptr, '1', size + 1));
@ -873,6 +926,8 @@ static void kasan_memcmp(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset(arr, 0, sizeof(arr));
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test,
kasan_int_result = memcmp(ptr, arr, size+1));
kfree(ptr);
@ -1129,6 +1184,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_memset_4),
KUNIT_CASE(kmalloc_oob_memset_8),
KUNIT_CASE(kmalloc_oob_memset_16),
KUNIT_CASE(kmalloc_memmove_negative_size),
KUNIT_CASE(kmalloc_memmove_invalid_size),
KUNIT_CASE(kmalloc_uaf),
KUNIT_CASE(kmalloc_uaf_memset),
@ -1138,7 +1194,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmem_cache_oob),
KUNIT_CASE(kmem_cache_accounted),
KUNIT_CASE(kmem_cache_bulk),
KUNIT_CASE(kasan_global_oob),
KUNIT_CASE(kasan_global_oob_right),
KUNIT_CASE(kasan_global_oob_left),
KUNIT_CASE(kasan_stack_oob),
KUNIT_CASE(kasan_alloca_oob_left),
KUNIT_CASE(kasan_alloca_oob_right),
@ -1146,6 +1203,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(ksize_uaf),
KUNIT_CASE(kmem_cache_double_free),
KUNIT_CASE(kmem_cache_invalid_free),
KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kasan_memchr),
KUNIT_CASE(kasan_memcmp),
KUNIT_CASE(kasan_strings),


@ -35,6 +35,8 @@ static noinline void __init copy_user_test(void)
return;
}
OPTIMIZER_HIDE_VAR(size);
pr_info("out-of-bounds in copy_from_user()\n");
unused = copy_from_user(kmem, usermem, size + 1);


@ -586,70 +586,59 @@ struct page_flags_test {
int width;
int shift;
int mask;
unsigned long value;
const char *fmt;
const char *name;
};
static struct page_flags_test pft[] = {
static const struct page_flags_test pft[] = {
{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
0, "%d", "section"},
"%d", "section"},
{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
0, "%d", "node"},
"%d", "node"},
{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
0, "%d", "zone"},
"%d", "zone"},
{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
0, "%#x", "lastcpupid"},
"%#x", "lastcpupid"},
{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
0, "%#x", "kasantag"},
"%#x", "kasantag"},
};
static void __init
page_flags_test(int section, int node, int zone, int last_cpupid,
int kasan_tag, int flags, const char *name, char *cmp_buf)
int kasan_tag, unsigned long flags, const char *name,
char *cmp_buf)
{
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
unsigned long page_flags = 0;
unsigned long size = 0;
unsigned long size;
bool append = false;
int i;
flags &= PAGEFLAGS_MASK;
if (flags) {
page_flags |= flags;
snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
size = strlen(cmp_buf);
#if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \
LAST_CPUPID_WIDTH || KASAN_TAG_WIDTH
/* Other information also included in page flags */
snprintf(cmp_buf + size, BUF_SIZE - size, "|");
size = strlen(cmp_buf);
#endif
}
for (i = 0; i < ARRAY_SIZE(values); i++)
flags |= (values[i] & pft[i].mask) << pft[i].shift;
/* Set the test value */
for (i = 0; i < ARRAY_SIZE(pft); i++)
pft[i].value = values[i];
size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags);
if (flags & PAGEFLAGS_MASK) {
size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
append = true;
}
for (i = 0; i < ARRAY_SIZE(pft); i++) {
if (!pft[i].width)
continue;
if (append) {
snprintf(cmp_buf + size, BUF_SIZE - size, "|");
size = strlen(cmp_buf);
}
if (append)
size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|");
page_flags |= (pft[i].value & pft[i].mask) << pft[i].shift;
snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name);
size = strlen(cmp_buf);
snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
pft[i].value & pft[i].mask);
size = strlen(cmp_buf);
size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=",
pft[i].name);
size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
values[i] & pft[i].mask);
append = true;
}
test(cmp_buf, "%pGp", &page_flags);
snprintf(cmp_buf + size, BUF_SIZE - size, ")");
test(cmp_buf, "%pGp", &flags);
}
static void __init


@ -128,26 +128,6 @@ static struct ctl_table test_table[] = {
{ }
};
static struct ctl_table test_sysctl_table[] = {
{
.procname = "test_sysctl",
.maxlen = 0,
.mode = 0555,
.child = test_table,
},
{ }
};
static struct ctl_table test_sysctl_root_table[] = {
{
.procname = "debug",
.maxlen = 0,
.mode = 0555,
.child = test_sysctl_table,
},
{ }
};
static struct ctl_table_header *test_sysctl_header;
static int __init test_sysctl_init(void)
@ -155,7 +135,7 @@ static int __init test_sysctl_init(void)
test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
if (!test_data.bitmap_0001)
return -ENOMEM;
test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
test_sysctl_header = register_sysctl("debug/test_sysctl", test_table);
if (!test_sysctl_header) {
kfree(test_data.bitmap_0001);
return -ENOMEM;


@ -79,15 +79,6 @@ static void test_ubsan_load_invalid_value(void)
eval2 = eval;
}
static void test_ubsan_null_ptr_deref(void)
{
volatile int *ptr = NULL;
int val;
UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
val = *ptr;
}
static void test_ubsan_misaligned_access(void)
{
volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
@ -98,29 +89,16 @@ static void test_ubsan_misaligned_access(void)
*ptr = val;
}
static void test_ubsan_object_size_mismatch(void)
{
/* "((aligned(8)))" helps this not into be misaligned for ptr-access. */
volatile int val __aligned(8) = 4;
volatile long long *ptr, val2;
UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
ptr = (long long *)&val;
val2 = *ptr;
}
static const test_ubsan_fp test_ubsan_array[] = {
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
test_ubsan_misaligned_access,
test_ubsan_object_size_mismatch,
};
/* Excluded because they Oops the module. */
static const test_ubsan_fp skip_ubsan_array[] = {
test_ubsan_divrem_overflow,
test_ubsan_null_ptr_deref,
};
static int __init test_ubsan_init(void)


@ -393,7 +393,7 @@ static struct test_driver {
static void shuffle_array(int *arr, int n)
{
unsigned int rnd;
int i, j, x;
int i, j;
for (i = n - 1; i > 0; i--) {
get_random_bytes(&rnd, sizeof(rnd));
@ -402,9 +402,7 @@ static void shuffle_array(int *arr, int n)
j = rnd % i;
/* Swap indexes. */
x = arr[i];
arr[i] = arr[j];
arr[j] = x;
swap(arr[i], arr[j]);
}
}


@ -53,8 +53,7 @@
#include <linux/string_helpers.h>
#include "kstrtox.h"
static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
char **endp, unsigned int base)
static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
{
const char *cp;
unsigned long long result = 0ULL;
@ -408,8 +407,9 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
static_assert(SIGN == 1);
static_assert(ZEROPAD == ('0' - ' '));
static_assert(SMALL == ' ');
static_assert(SMALL == ('a' ^ 'A'));
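These assertions pin down values that number() relies on arithmetically rather than symbolically; numerically ' ' and 'a' ^ 'A' are both 0x20, but the new spelling states the intent. A simplified sketch of the two idioms being protected (not the literal code):

	/* Padding: ' ' + ZEROPAD must yield '0', hence ZEROPAD == '0' - ' '. */
	char pad = ' ' + (flags & ZEROPAD);	/* ' ' or '0' */

	/* Case: SMALL must be the ASCII case bit, hence SMALL == ('a' ^ 'A'). */
	char digit = "0123456789ABCDEF"[val & 0xf] | (flags & SMALL);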
enum format_type {
FORMAT_TYPE_NONE, /* Just a string part */
@ -1241,20 +1241,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
bool first = true;
int rbot, rtop;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
rbot = cur = find_first_bit(bitmap, nr_bits);
while (cur < nr_bits) {
rtop = cur;
cur = find_next_bit(bitmap, nr_bits, cur + 1);
if (cur < nr_bits && cur <= rtop + 1)
continue;
for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
if (!first) {
if (buf < end)
*buf = ',';
@ -1263,15 +1256,12 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
first = false;
buf = number(buf, end, rbot, default_dec_spec);
if (rbot < rtop) {
if (buf < end)
*buf = '-';
buf++;
if (rtop == rbot + 1)
continue;
buf = number(buf, end, rtop, default_dec_spec);
}
rbot = cur;
if (buf < end)
*buf = '-';
buf = number(++buf, end, rtop - 1, default_dec_spec);
}
return buf;
}
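The rewrite keeps the "%*pbl" output format unchanged; an illustrative caller (bitmap contents invented):

	DECLARE_BITMAP(mask, 16);

	bitmap_zero(mask, 16);
	bitmap_set(mask, 0, 3);		/* bits 0-2 */
	bitmap_set(mask, 5, 1);		/* bit 5 */
	pr_info("%*pbl\n", 16, mask);	/* prints "0-2,5" */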
@ -2023,6 +2013,11 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
bool append = false;
int i;
buf = number(buf, end, flags, default_flag_spec);
if (buf < end)
*buf = '(';
buf++;
/* Page flags from the main area. */
if (main_flags) {
buf = format_flags(buf, end, main_flags, pageflag_names);
@ -2051,6 +2046,9 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
append = true;
}
if (buf < end)
*buf = ')';
buf++;
return buf;
}
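With the added number() call and parentheses, %pGp now prints the raw flags value ahead of the decoded fields; an illustrative line (the exact hex depends on the kernel configuration):

	/* pr_alert("%pGp\n", &page->flags) might print something like:
	 *   0x17ffffc0010200(slab|node=0|zone=2|lastcpupid=0x1fffff)
	 * where previously only the part inside the parentheses appeared.
	 */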
@ -3556,7 +3554,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
++fmt;
for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
set_bit((u8)*fmt, set);
__set_bit((u8)*fmt, set);
/* no ']' or no character set found */
if (!*fmt || !len)
@ -3566,7 +3564,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
if (negate) {
bitmap_complement(set, set, 256);
/* exclude null '\0' byte */
clear_bit(0, set);
__clear_bit(0, set);
}
/* match must be non-empty */


@ -157,7 +157,7 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
xas->xa_index += offset << shift;
}
static void xas_advance(struct xa_state *xas)
static void xas_next_offset(struct xa_state *xas)
{
xas->xa_offset++;
xas_move_index(xas, xas->xa_offset);
@ -1250,7 +1250,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
}
xas_advance(xas);
xas_next_offset(xas);
while (xas->xa_node && (xas->xa_index <= max)) {
if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
@ -1268,7 +1268,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
if (entry && !xa_is_sibling(entry))
return entry;
xas_advance(xas);
xas_next_offset(xas);
}
if (!xas->xa_node)


@ -39,6 +39,19 @@ config XZ_DEC_SPARC
default y
select XZ_DEC_BCJ
config XZ_DEC_MICROLZMA
bool "MicroLZMA decoder"
default n
help
MicroLZMA is a header format variant where the first byte
of a raw LZMA stream (without the end of stream marker) has
been replaced with a bitwise-negation of the lc/lp/pb
	  properties byte. MicroLZMA was created for use in EROFS but
	  can be used elsewhere too, wherever keeping header overhead
	  to a minimum is important.
Unless you know that you need this, say N.
endif
config XZ_DEC_BCJ


@ -248,6 +248,10 @@ struct lzma2_dec {
* before the first LZMA chunk.
*/
bool need_props;
#ifdef XZ_DEC_MICROLZMA
bool pedantic_microlzma;
#endif
};
struct xz_dec_lzma2 {
@ -419,6 +423,12 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
}
}
#ifdef XZ_DEC_MICROLZMA
# define DICT_FLUSH_SUPPORTS_SKIPPING true
#else
# define DICT_FLUSH_SUPPORTS_SKIPPING false
#endif
/*
* Flush pending data from dictionary to b->out. It is assumed that there is
* enough space in b->out. This is guaranteed because caller uses dict_limit()
@ -437,9 +447,14 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
* decompression because in multi-call mode dict->buf
* has been allocated by us in this file; it's not
* provided by the caller like in single-call mode.
*
* With MicroLZMA, b->out can be NULL to skip bytes that
* the caller doesn't need. This cannot be done with XZ
* because it would break BCJ filters.
*/
memcpy(b->out + b->out_pos, dict->buf + dict->start,
copy_size);
if (!DICT_FLUSH_SUPPORTS_SKIPPING || b->out != NULL)
memcpy(b->out + b->out_pos, dict->buf + dict->start,
copy_size);
}
dict->start = dict->pos;
@ -505,7 +520,7 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
* on x86). Using a non-splitted version results in nicer looking code too.
* on x86). Using a non-split version results in nicer looking code too.
*
* NOTE: This must return an int. Do not make it return a bool or the speed
* of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
@ -791,6 +806,7 @@ static void lzma_reset(struct xz_dec_lzma2 *s)
s->lzma.rep1 = 0;
s->lzma.rep2 = 0;
s->lzma.rep3 = 0;
s->lzma.len = 0;
/*
* All probabilities are initialized to the same value. This hack
@ -1174,8 +1190,6 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
}
}
s->lzma.len = 0;
s->lzma2.sequence = SEQ_CONTROL;
s->lzma2.need_dict_reset = true;
@ -1191,3 +1205,140 @@ XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
kfree(s);
}
#ifdef XZ_DEC_MICROLZMA
/* This is a wrapper struct to have a nice struct name in the public API. */
struct xz_dec_microlzma {
struct xz_dec_lzma2 s;
};
enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s_ptr,
struct xz_buf *b)
{
struct xz_dec_lzma2 *s = &s_ptr->s;
/*
* sequence is SEQ_PROPERTIES before the first input byte,
* SEQ_LZMA_PREPARE until a total of five bytes have been read,
* and SEQ_LZMA_RUN for the rest of the input stream.
*/
if (s->lzma2.sequence != SEQ_LZMA_RUN) {
if (s->lzma2.sequence == SEQ_PROPERTIES) {
/* One byte is needed for the props. */
if (b->in_pos >= b->in_size)
return XZ_OK;
/*
* Don't increment b->in_pos here. The same byte is
* also passed to rc_read_init() which will ignore it.
*/
if (!lzma_props(s, ~b->in[b->in_pos]))
return XZ_DATA_ERROR;
s->lzma2.sequence = SEQ_LZMA_PREPARE;
}
/*
* xz_dec_microlzma_reset() doesn't validate the compressed
* size so we do it here. We have to limit the maximum size
* to avoid integer overflows in lzma2_lzma(). 3 GiB is a nice
* round number and much more than users of this code should
* ever need.
*/
if (s->lzma2.compressed < RC_INIT_BYTES
|| s->lzma2.compressed > (3U << 30))
return XZ_DATA_ERROR;
if (!rc_read_init(&s->rc, b))
return XZ_OK;
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
dict_reset(&s->dict, b);
}
/* This is to allow increasing b->out_size between calls. */
if (DEC_IS_SINGLE(s->dict.mode))
s->dict.end = b->out_size - b->out_pos;
while (true) {
dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos,
s->lzma2.uncompressed));
if (!lzma2_lzma(s, b))
return XZ_DATA_ERROR;
s->lzma2.uncompressed -= dict_flush(&s->dict, b);
if (s->lzma2.uncompressed == 0) {
if (s->lzma2.pedantic_microlzma) {
if (s->lzma2.compressed > 0 || s->lzma.len > 0
|| !rc_is_finished(&s->rc))
return XZ_DATA_ERROR;
}
return XZ_STREAM_END;
}
if (b->out_pos == b->out_size)
return XZ_OK;
if (b->in_pos == b->in_size
&& s->temp.size < s->lzma2.compressed)
return XZ_OK;
}
}
struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
uint32_t dict_size)
{
struct xz_dec_microlzma *s;
/* Restrict dict_size to the same range as in the LZMA2 code. */
if (dict_size < 4096 || dict_size > (3U << 30))
return NULL;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
return NULL;
s->s.dict.mode = mode;
s->s.dict.size = dict_size;
if (DEC_IS_MULTI(mode)) {
s->s.dict.end = dict_size;
s->s.dict.buf = vmalloc(dict_size);
if (s->s.dict.buf == NULL) {
kfree(s);
return NULL;
}
}
return s;
}
void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
uint32_t uncomp_size, int uncomp_size_is_exact)
{
/*
* comp_size is validated in xz_dec_microlzma_run().
* uncomp_size can safely be anything.
*/
s->s.lzma2.compressed = comp_size;
s->s.lzma2.uncompressed = uncomp_size;
s->s.lzma2.pedantic_microlzma = uncomp_size_is_exact;
s->s.lzma2.sequence = SEQ_PROPERTIES;
s->s.temp.size = 0;
}
void xz_dec_microlzma_end(struct xz_dec_microlzma *s)
{
if (DEC_IS_MULTI(s->s.dict.mode))
vfree(s->s.dict.buf);
kfree(s);
}
#endif
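A hedged single-call decompression sketch using the new API; comp_buf, comp_size, out_buf and uncomp_size are assumptions of the example, and the 1 MiB dictionary is an arbitrary choice within the allowed 4 KiB..3 GiB range:

	struct xz_dec_microlzma *s;
	enum xz_ret ret;
	struct xz_buf b = {
		.in = comp_buf, .in_pos = 0, .in_size = comp_size,
		.out = out_buf, .out_pos = 0, .out_size = uncomp_size,
	};

	s = xz_dec_microlzma_alloc(XZ_SINGLE, 1024 * 1024);
	if (s == NULL)
		return -ENOMEM;

	/* true: uncomp_size is exact, enabling pedantic end-of-stream checks. */
	xz_dec_microlzma_reset(s, comp_size, uncomp_size, true);
	ret = xz_dec_microlzma_run(s, &b);	/* XZ_STREAM_END on success */
	xz_dec_microlzma_end(s);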


@ -15,8 +15,15 @@ EXPORT_SYMBOL(xz_dec_reset);
EXPORT_SYMBOL(xz_dec_run);
EXPORT_SYMBOL(xz_dec_end);
#ifdef CONFIG_XZ_DEC_MICROLZMA
EXPORT_SYMBOL(xz_dec_microlzma_alloc);
EXPORT_SYMBOL(xz_dec_microlzma_reset);
EXPORT_SYMBOL(xz_dec_microlzma_run);
EXPORT_SYMBOL(xz_dec_microlzma_end);
#endif
MODULE_DESCRIPTION("XZ decompressor");
MODULE_VERSION("1.0");
MODULE_VERSION("1.1");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
/*


@ -37,6 +37,9 @@
# ifdef CONFIG_XZ_DEC_SPARC
# define XZ_DEC_SPARC
# endif
# ifdef CONFIG_XZ_DEC_MICROLZMA
# define XZ_DEC_MICROLZMA
# endif
# define memeq(a, b, size) (memcmp(a, b, size) == 0)
# define memzero(buf, size) memset(buf, 0, size)
# endif


@ -1,10 +1,44 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
# ################################################################
# Copyright (c) Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
# ################################################################
obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
ccflags-y += -O3
zstd_compress-y := \
zstd_compress_module.o \
common/debug.o \
common/entropy_common.o \
common/error_private.o \
common/fse_decompress.o \
common/zstd_common.o \
compress/fse_compress.o \
compress/hist.o \
compress/huf_compress.o \
compress/zstd_compress.o \
compress/zstd_compress_literals.o \
compress/zstd_compress_sequences.o \
compress/zstd_compress_superblock.o \
compress/zstd_double_fast.o \
compress/zstd_fast.o \
compress/zstd_lazy.o \
compress/zstd_ldm.o \
compress/zstd_opt.o \
zstd_compress-y := fse_compress.o huf_compress.o compress.o \
entropy_common.o fse_decompress.o zstd_common.o
zstd_decompress-y := huf_decompress.o decompress.o \
entropy_common.o fse_decompress.o zstd_common.o
zstd_decompress-y := \
zstd_decompress_module.o \
common/debug.o \
common/entropy_common.o \
common/error_private.o \
common/fse_decompress.o \
common/zstd_common.o \
decompress/huf_decompress.o \
decompress/zstd_ddict.o \
decompress/zstd_decompress.o \
decompress/zstd_decompress_block.o \

1
scripts/.gitignore vendored

@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
/asn1_compiler
/bin2c
/extract-cert
/insert-sys-cert
/kallsyms
/module.lds


@ -195,53 +195,6 @@ why = \
echo-why = $(call escsq, $(strip $(why)))
endif
###############################################################################
#
# When a Kconfig string contains a filename, it is suitable for
# passing to shell commands. It is surrounded by double-quotes, and
# any double-quotes or backslashes within it are escaped by
# backslashes.
#
# This is no use for dependencies or $(wildcard). We need to strip the
# surrounding quotes and the escaping from quotes and backslashes, and
# we *do* need to escape any spaces in the string. So, for example:
#
# Usage: $(eval $(call config_filename,FOO))
#
# Defines FOO_FILENAME based on the contents of the CONFIG_FOO option,
# transformed as described above to be suitable for use within the
# makefile.
#
# Also, if the filename is a relative filename and exists in the source
# tree but not the build tree, define FOO_SRCPREFIX as $(srctree)/ to
# be prefixed to *both* command invocation and dependencies.
#
# Note: We also print the filenames in the quiet_cmd_foo text, and
# perhaps ought to have a version specially escaped for that purpose.
# But it's only cosmetic, and $(patsubst "%",%,$(CONFIG_FOO)) is good
# enough. It'll strip the quotes in the common case where there's no
# space and it's a simple filename, and it'll retain the quotes when
# there's a space. There are some esoteric cases in which it'll print
# the wrong thing, but we don't really care. The actual dependencies
# and commands *do* get it right, with various combinations of single
# and double quotes, backslashes and spaces in the filenames.
#
###############################################################################
#
define config_filename
ifneq ($$(CONFIG_$(1)),"")
$(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1)))))))
ifneq ($$(patsubst /%,%,$$(firstword $$($(1)_FILENAME))),$$(firstword $$($(1)_FILENAME)))
else
ifeq ($$(wildcard $$($(1)_FILENAME)),)
ifneq ($$(wildcard $$(srctree)/$$($(1)_FILENAME)),)
$(1)_SRCPREFIX := $(srctree)/
endif
endif
endif
endif
endef
#
###############################################################################
# delete partially updated (i.e. corrupted) files on error


@ -3,25 +3,19 @@
# scripts contains sources for various helper programs used throughout
# the kernel for the build process.
CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
hostprogs-always-$(CONFIG_BUILD_BIN2C) += bin2c
hostprogs-always-$(CONFIG_KALLSYMS) += kallsyms
hostprogs-always-$(BUILD_C_RECORDMCOUNT) += recordmcount
hostprogs-always-$(CONFIG_BUILDTIME_TABLE_SORT) += sorttable
hostprogs-always-$(CONFIG_ASN1) += asn1_compiler
hostprogs-always-$(CONFIG_MODULE_SIG_FORMAT) += sign-file
hostprogs-always-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += extract-cert
hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
hostprogs-always-$(CONFIG_SYSTEM_REVOCATION_LIST) += extract-cert
HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
HOSTLDLIBS_sorttable = -lpthread
HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS)
HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
HOSTCFLAGS_sign-file.o = $(shell pkg-config --cflags libcrypto 2> /dev/null)
HOSTLDLIBS_sign-file = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
ifdef CONFIG_UNWINDER_ORC
ifeq ($(ARCH),x86_64)
@ -29,7 +23,10 @@ ARCH := x86
endif
HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include
HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED
HOSTLDLIBS_sorttable = -lpthread
endif
ifdef CONFIG_BUILDTIME_MCOUNT_SORT
HOSTCFLAGS_sorttable.o += -DMCOUNT_SORT_ENABLED
endif
# The following programs are only built on demand


@ -155,7 +155,7 @@ $(obj)/%.ll: $(src)/%.c FORCE
# (See cmd_cc_o_c + relevant part of rule_cc_o_c)
quiet_cmd_cc_o_c = CC $(quiet_modtag) $@
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< $(cmd_objtool)
ifdef CONFIG_MODVERSIONS
# When module versioning is enabled the following steps are executed:
@ -224,27 +224,39 @@ cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),
endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
ifdef CONFIG_STACK_VALIDATION
ifndef CONFIG_LTO_CLANG
__objtool_obj := $(objtree)/tools/objtool/objtool
objtool := $(objtree)/tools/objtool/objtool
objtool_args = \
$(if $(CONFIG_UNWINDER_ORC),orc generate,check) \
$(if $(part-of-module), --module) \
$(if $(CONFIG_FRAME_POINTER),, --no-fp) \
$(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\
$(if $(CONFIG_RETPOLINE), --retpoline) \
$(if $(CONFIG_X86_SMAP), --uaccess) \
$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
$(if $(CONFIG_SLS), --sls)
cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $@)
cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
endif # CONFIG_STACK_VALIDATION
ifdef CONFIG_LTO_CLANG
# Skip objtool for LLVM bitcode
$(obj)/%.o: objtool-enabled :=
else
# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
cmd_objtool = $(if $(patsubst y%,, \
$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
$(__objtool_obj) $(objtool_args) $@)
objtool_obj = $(if $(patsubst y%,, \
$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
$(__objtool_obj))
endif # CONFIG_LTO_CLANG
endif # CONFIG_STACK_VALIDATION
$(obj)/%.o: objtool-enabled = $(if $(filter-out y%, \
$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n),y)
# Rebuild all objects when objtool changes, or is enabled/disabled.
objtool_dep = $(objtool_obj) \
$(wildcard include/config/ORC_UNWINDER \
include/config/STACK_VALIDATION)
endif
ifdef CONFIG_TRIM_UNUSED_KSYMS
cmd_gen_ksymdeps = \
@ -259,7 +271,7 @@ define rule_cc_o_c
$(call cmd,gen_ksymdeps)
$(call cmd,checksrc)
$(call cmd,checkdoc)
$(call cmd,objtool)
$(call cmd,gen_objtooldep)
$(call cmd,modversions_c)
$(call cmd,record_mcount)
endef
@ -267,13 +279,12 @@ endef
define rule_as_o_S
$(call cmd_and_fixdep,as_o_S)
$(call cmd,gen_ksymdeps)
$(call cmd,objtool)
$(call cmd,gen_objtooldep)
$(call cmd,modversions_S)
endef
# Built-in and composite module parts
.SECONDEXPANSION:
$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
$(call if_changed_rule,cc_o_c)
$(call cmd,force_checksrc)
@ -285,14 +296,13 @@ cmd_cc_lto_link_modules = \
$(LD) $(ld_flags) -r -o $@ \
$(shell [ -s $(@:.lto.o=.o.symversions) ] && \
echo -T $(@:.lto.o=.o.symversions)) \
--whole-archive $(filter-out FORCE,$^)
--whole-archive $(filter-out FORCE,$^) \
$(cmd_objtool)
ifdef CONFIG_STACK_VALIDATION
# objtool was skipped for LLVM bitcode, run it now that we have compiled
# modules into native code
cmd_cc_lto_link_modules += ; \
$(objtree)/tools/objtool/objtool $(objtool_args) --module $@
endif
$(obj)/%.lto.o: objtool-enabled = y
$(obj)/%.lto.o: part-of-module := y
$(obj)/%.lto.o: $(obj)/%.o FORCE
$(call if_changed,cc_lto_link_modules)
@ -356,7 +366,7 @@ $(obj)/%.s: $(src)/%.S FORCE
$(call if_changed_dep,cpp_s_S)
quiet_cmd_as_o_S = AS $(quiet_modtag) $@
cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< $(cmd_objtool)
ifdef CONFIG_ASM_MODVERSIONS
@ -375,7 +385,7 @@ cmd_modversions_S = \
fi
endif
$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
$(obj)/%.o: $(src)/%.S FORCE
$(call if_changed_rule,as_o_S)
targets += $(filter-out $(subdir-builtin), $(real-obj-y))


@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
gcc-plugin-$(CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) += cyc_complexity_plugin.so
gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) \
+= -DLATENT_ENTROPY_PLUGIN


@ -9,7 +9,18 @@ endif
# Keep most options here optional, to allow enabling more compilers if absence
# of some options does not break KCSAN nor causes false positive reports.
export CFLAGS_KCSAN := -fsanitize=thread \
$(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
kcsan-cflags := -fsanitize=thread -fno-optimize-sibling-calls \
$(call cc-option,$(call cc-param,tsan-compound-read-before-write=1),$(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1))) \
$(call cc-param,tsan-distinguish-volatile=1)
ifdef CONFIG_CC_IS_GCC
# GCC started warning about operations unsupported by the TSan runtime. But
# KCSAN != TSan, so just ignore these warnings.
kcsan-cflags += -Wno-tsan
endif
ifndef CONFIG_KCSAN_WEAK_MEMORY
kcsan-cflags += $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0))
endif
export CFLAGS_KCSAN := $(kcsan-cflags)


@ -182,6 +182,11 @@ ifeq ($(CONFIG_KCSAN),y)
_c_flags += $(if $(patsubst n%,, \
$(KCSAN_SANITIZE_$(basetarget).o)$(KCSAN_SANITIZE)y), \
$(CFLAGS_KCSAN))
# Some uninstrumented files provide implied barriers required to avoid false
# positives: set KCSAN_INSTRUMENT_BARRIERS for barrier instrumentation only.
_c_flags += $(if $(patsubst n%,, \
$(KCSAN_INSTRUMENT_BARRIERS_$(basetarget).o)$(KCSAN_INSTRUMENT_BARRIERS)n), \
-D__KCSAN_INSTRUMENT_BARRIERS__)
endif
# $(srctree)/$(src) for including checkin headers from generated source files
@ -232,17 +237,6 @@ ifeq ($(CONFIG_LTO_CLANG),y)
mod-prelink-ext := .lto
endif
# Objtool arguments are also needed for modfinal with LTO, so we define
# then here to avoid duplication.
objtool_args = \
$(if $(CONFIG_UNWINDER_ORC),orc generate,check) \
$(if $(part-of-module), --module) \
$(if $(CONFIG_FRAME_POINTER),, --no-fp) \
$(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\
$(if $(CONFIG_RETPOLINE), --retpoline) \
$(if $(CONFIG_X86_SMAP), --uaccess) \
$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)
# Useful for describing the dependency of composite objects
# Usage:
# $(call multi_depend, multi_used_targets, suffix_to_remove, suffix_to_add)
@ -310,7 +304,6 @@ DTC_FLAGS += -Wno-interrupt_provider
# Disable noisy checks by default
ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
DTC_FLAGS += -Wno-unit_address_vs_reg \
-Wno-unit_address_format \
-Wno-gpios_property \
-Wno-avoid_unnecessary_addr_size \
-Wno-alias_paths \
@ -425,20 +418,35 @@ printf "%08x\n" $$dec_size | \
} \
)
quiet_cmd_file_size = GEN $@
cmd_file_size = $(size_append) > $@
quiet_cmd_bzip2 = BZIP2 $@
cmd_bzip2 = { cat $(real-prereqs) | $(KBZIP2) -9; $(size_append); } > $@
cmd_bzip2 = cat $(real-prereqs) | $(KBZIP2) -9 > $@
quiet_cmd_bzip2_with_size = BZIP2 $@
cmd_bzip2_with_size = { cat $(real-prereqs) | $(KBZIP2) -9; $(size_append); } > $@
# Lzma
# ---------------------------------------------------------------------------
quiet_cmd_lzma = LZMA $@
cmd_lzma = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@
cmd_lzma = cat $(real-prereqs) | $(LZMA) -9 > $@
quiet_cmd_lzma_with_size = LZMA $@
cmd_lzma_with_size = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@
quiet_cmd_lzo = LZO $@
cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
cmd_lzo = cat $(real-prereqs) | $(KLZOP) -9 > $@
quiet_cmd_lzo_with_size = LZO $@
cmd_lzo_with_size = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
quiet_cmd_lz4 = LZ4 $@
cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout > $@
quiet_cmd_lz4_with_size = LZ4 $@
cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
$(size_append); } > $@
# U-Boot mkimage
@ -481,7 +489,10 @@ quiet_cmd_uimage = UIMAGE $@
# big dictionary would increase the memory usage too much in the multi-call
# decompression mode. A BCJ filter isn't used either.
quiet_cmd_xzkern = XZKERN $@
cmd_xzkern = { cat $(real-prereqs) | sh $(srctree)/scripts/xz_wrap.sh; \
cmd_xzkern = cat $(real-prereqs) | sh $(srctree)/scripts/xz_wrap.sh > $@
quiet_cmd_xzkern_with_size = XZKERN $@
cmd_xzkern_with_size = { cat $(real-prereqs) | sh $(srctree)/scripts/xz_wrap.sh; \
$(size_append); } > $@
quiet_cmd_xzmisc = XZMISC $@
@ -504,10 +515,13 @@ quiet_cmd_xzmisc = XZMISC $@
# be used because it would require zstd to allocate a 128 MB buffer.
quiet_cmd_zstd = ZSTD $@
cmd_zstd = { cat $(real-prereqs) | $(ZSTD) -19; $(size_append); } > $@
cmd_zstd = cat $(real-prereqs) | $(ZSTD) -19 > $@
quiet_cmd_zstd22 = ZSTD22 $@
cmd_zstd22 = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@
cmd_zstd22 = cat $(real-prereqs) | $(ZSTD) -22 --ultra > $@
quiet_cmd_zstd22_with_size = ZSTD22 $@
cmd_zstd22_with_size = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@
# ASM offsets
# ---------------------------------------------------------------------------


@ -40,7 +40,8 @@ quiet_cmd_ld_ko_o = LD [M] $@
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
if [ -f vmlinux ]; then \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
else \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
fi;


@ -66,9 +66,9 @@ endif
# Don't stop modules_install even if we can't sign external modules.
#
ifeq ($(CONFIG_MODULE_SIG_ALL),y)
sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(srctree)/)$(CONFIG_MODULE_SIG_KEY)
quiet_cmd_sign = SIGN $@
$(eval $(call config_filename,MODULE_SIG_KEY))
cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY) certs/signing_key.x509 $@ \
cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(sig-key) certs/signing_key.x509 $@ \
$(if $(KBUILD_EXTMOD),|| true)
else
quiet_cmd_sign :=


@ -103,7 +103,7 @@ snap-pkg:
# tarball targets
# ---------------------------------------------------------------------------
tar-pkgs := dir-pkg tar-pkg targz-pkg tarbz2-pkg tarxz-pkg
tar-pkgs := dir-pkg tar-pkg targz-pkg tarbz2-pkg tarxz-pkg tarzst-pkg
PHONY += $(tar-pkgs)
$(tar-pkgs):
$(MAKE) -f $(srctree)/Makefile
@ -130,10 +130,12 @@ $(if $(findstring tar-src,$@),, \
$(if $(findstring bz2,$@),$(KBZIP2), \
$(if $(findstring gz,$@),$(KGZIP), \
$(if $(findstring xz,$@),$(XZ), \
$(error unknown target $@)))) \
$(if $(findstring zst,$@),$(ZSTD), \
$(error unknown target $@))))) \
-f -9 $(perf-tar).tar)
perf-tar-pkgs := perf-tar-src-pkg perf-targz-src-pkg perf-tarbz2-src-pkg perf-tarxz-src-pkg
perf-tar-pkgs := perf-tar-src-pkg perf-targz-src-pkg perf-tarbz2-src-pkg \
perf-tarxz-src-pkg perf-tarzst-src-pkg
PHONY += $(perf-tar-pkgs)
$(perf-tar-pkgs):
$(call cmd,perf_tar)
@ -153,9 +155,11 @@ help:
@echo ' targz-pkg - Build the kernel as a gzip compressed tarball'
@echo ' tarbz2-pkg - Build the kernel as a bzip2 compressed tarball'
@echo ' tarxz-pkg - Build the kernel as a xz compressed tarball'
@echo ' tarzst-pkg - Build the kernel as a zstd compressed tarball'
@echo ' perf-tar-src-pkg - Build $(perf-tar).tar source tarball'
@echo ' perf-targz-src-pkg - Build $(perf-tar).tar.gz source tarball'
@echo ' perf-tarbz2-src-pkg - Build $(perf-tar).tar.bz2 source tarball'
@echo ' perf-tarxz-src-pkg - Build $(perf-tar).tar.xz source tarball'
@echo ' perf-tarzst-src-pkg - Build $(perf-tar).tar.zst source tarball'
.PHONY: $(PHONY)


@ -8,7 +8,6 @@ ubsan-cflags-$(CONFIG_UBSAN_LOCAL_BOUNDS) += -fsanitize=local-bounds
ubsan-cflags-$(CONFIG_UBSAN_SHIFT) += -fsanitize=shift
ubsan-cflags-$(CONFIG_UBSAN_DIV_ZERO) += -fsanitize=integer-divide-by-zero
ubsan-cflags-$(CONFIG_UBSAN_UNREACHABLE) += -fsanitize=unreachable
ubsan-cflags-$(CONFIG_UBSAN_OBJECT_SIZE) += -fsanitize=object-size
ubsan-cflags-$(CONFIG_UBSAN_BOOL) += -fsanitize=bool
ubsan-cflags-$(CONFIG_UBSAN_ENUM) += -fsanitize=enum
ubsan-cflags-$(CONFIG_UBSAN_TRAP) += -fsanitize-undefined-trap-on-error


@ -34,6 +34,14 @@ gen_param_check()
gen_params_checks()
{
local meta="$1"; shift
local order="$1"; shift
if [ "${order}" = "_release" ]; then
printf "\tkcsan_release();\n"
elif [ -z "${order}" ] && ! meta_in "$meta" "slv"; then
# RMW with return value is fully ordered
printf "\tkcsan_mb();\n"
fi
while [ "$#" -gt 0 ]; do
gen_param_check "$meta" "$1"
@ -56,7 +64,7 @@ gen_proto_order_variant()
local ret="$(gen_ret_type "${meta}" "${int}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local checks="$(gen_params_checks "${meta}" "$@")"
local checks="$(gen_params_checks "${meta}" "${order}" "$@")"
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
@ -75,29 +83,44 @@ EOF
gen_xchg()
{
local xchg="$1"; shift
local order="$1"; shift
local mult="$1"; shift
kcsan_barrier=""
if [ "${xchg%_local}" = "${xchg}" ]; then
case "$order" in
_release) kcsan_barrier="kcsan_release()" ;;
"") kcsan_barrier="kcsan_mb()" ;;
esac
fi
if [ "${xchg%${xchg#try_cmpxchg}}" = "try_cmpxchg" ] ; then
cat <<EOF
#define ${xchg}(ptr, oldp, ...) \\
#define ${xchg}${order}(ptr, oldp, ...) \\
({ \\
typeof(ptr) __ai_ptr = (ptr); \\
typeof(oldp) __ai_oldp = (oldp); \\
EOF
[ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
cat <<EOF
instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
instrument_atomic_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\
arch_${xchg}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
})
EOF
else
cat <<EOF
#define ${xchg}(ptr, ...) \\
#define ${xchg}${order}(ptr, ...) \\
({ \\
typeof(ptr) __ai_ptr = (ptr); \\
EOF
[ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
cat <<EOF
instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
arch_${xchg}(__ai_ptr, __VA_ARGS__); \\
arch_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
})
EOF
@ -145,21 +168,21 @@ done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg "${xchg}${order}" ""
gen_xchg "${xchg}" "${order}" ""
printf "\n"
done
done
for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
gen_xchg "${xchg}" ""
gen_xchg "${xchg}" "" ""
printf "\n"
done
gen_xchg "cmpxchg_double" "2 * "
gen_xchg "cmpxchg_double" "" "2 * "
printf "\n\n"
gen_xchg "cmpxchg_double_local" "2 * "
gen_xchg "cmpxchg_double_local" "" "2 * "
cat <<EOF
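For reference, the templates above should now expand the fully ordered xchg() wrapper roughly as follows (a reconstruction from the heredocs in this script, not copied from a generated header; _release variants would emit kcsan_release() instead, and _acquire/_relaxed variants no barrier at all):

#define xchg(ptr, ...) \
({ \
	typeof(ptr) __ai_ptr = (ptr); \
	kcsan_mb(); /* RMW with return value is fully ordered */ \
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
	arch_xchg(__ai_ptr, __VA_ARGS__); \
})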


@ -537,6 +537,7 @@ class PrinterHelpers(Printer):
'struct tcp_timewait_sock',
'struct tcp_request_sock',
'struct udp6_sock',
'struct unix_sock',
'struct task_struct',
'struct __sk_buff',
@ -589,6 +590,7 @@ class PrinterHelpers(Printer):
'struct tcp_timewait_sock',
'struct tcp_request_sock',
'struct udp6_sock',
'struct unix_sock',
'struct task_struct',
'struct path',
'struct btf_ptr',


@ -63,6 +63,7 @@ my $min_conf_desc_length = 4;
my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
my $user_codespellfile = "";
my $conststructsfile = "$D/const_structs.checkpatch";
my $docsfile = "$D/../Documentation/dev-tools/checkpatch.rst";
my $typedefsfile;
@ -130,7 +131,7 @@ Options:
--ignore-perl-version override checking of perl version. expect
runtime errors.
--codespell Use the codespell dictionary for spelling/typos
(default:/usr/share/codespell/dictionary.txt)
(default:$codespellfile)
--codespellfile Use this codespell dictionary
--typedefsfile Read additional types from this file
--color[=WHEN] Use colors 'always', 'never', or only when output
@ -317,7 +318,7 @@ GetOptions(
'debug=s' => \%debug,
'test-only=s' => \$tst_only,
'codespell!' => \$codespell,
'codespellfile=s' => \$codespellfile,
'codespellfile=s' => \$user_codespellfile,
'typedefsfile=s' => \$typedefsfile,
'color=s' => \$color,
'no-color' => \$color, #keep old behaviors of -nocolor
@ -325,9 +326,32 @@ GetOptions(
'kconfig-prefix=s' => \${CONFIG_},
'h|help' => \$help,
'version' => \$help
) or help(1);
) or $help = 2;
help(0) if ($help);
if ($user_codespellfile) {
# Use the user provided codespell file unconditionally
$codespellfile = $user_codespellfile;
} elsif (!(-f $codespellfile)) {
# If /usr/share/codespell/dictionary.txt is not present, try to find it
# under codespell's install directory: <codespell_root>/data/dictionary.txt
if (($codespell || $help) && which("codespell") ne "" && which("python") ne "") {
my $python_codespell_dict = << "EOF";
import os.path as op
import codespell_lib
codespell_dir = op.dirname(codespell_lib.__file__)
codespell_file = op.join(codespell_dir, 'data', 'dictionary.txt')
print(codespell_file, end='')
EOF
my $codespell_dict = `python -c "$python_codespell_dict" 2> /dev/null`;
$codespellfile = $codespell_dict if (-f $codespell_dict);
}
}
# $help is 1 if either -h, --help or --version is passed as option - exitcode: 0
# $help is 2 if invalid option is passed - exitcode: 1
help($help - 1) if ($help);
die "$P: --git cannot be used with --file or --fix\n" if ($git && ($file || $fix));
die "$P: --verbose cannot be used with --terse\n" if ($verbose && $terse);
@ -489,7 +513,8 @@ our $Attribute = qr{
____cacheline_aligned|
____cacheline_aligned_in_smp|
____cacheline_internodealigned_in_smp|
__weak
__weak|
__alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\)
}x;
our $Modifier;
our $Inline = qr{inline|__always_inline|noinline|__inline|__inline__};
@ -3147,7 +3172,7 @@ sub process {
length($line) > 75 &&
!($line =~ /^\s*[a-zA-Z0-9_\/\.]+\s+\|\s+\d+/ ||
# file delta changes
$line =~ /^\s*(?:[\w\.\-]+\/)++[\w\.\-]+:/ ||
$line =~ /^\s*(?:[\w\.\-\+]*\/)++[\w\.\-\+]+:/ ||
# filename then :
$line =~ /^\s*(?:Fixes:|Link:|$signature_tags)/i ||
# A Fixes: or Link: line or signature tag line
@ -3454,47 +3479,47 @@ sub process {
# Kconfig supports named choices), so use a word boundary
# (\b) rather than a whitespace character (\s)
$line =~ /^\+\s*(?:config|menuconfig|choice)\b/) {
my $length = 0;
my $cnt = $realcnt;
my $ln = $linenr + 1;
my $f;
my $is_start = 0;
my $is_end = 0;
for (; $cnt > 0 && defined $lines[$ln - 1]; $ln++) {
$f = $lines[$ln - 1];
$cnt-- if ($lines[$ln - 1] !~ /^-/);
$is_end = $lines[$ln - 1] =~ /^\+/;
my $ln = $linenr;
my $needs_help = 0;
my $has_help = 0;
my $help_length = 0;
while (defined $lines[$ln]) {
my $f = $lines[$ln++];
next if ($f =~ /^-/);
last if (!$file && $f =~ /^\@\@/);
last if ($f !~ /^[\+ ]/); # !patch context
if ($lines[$ln - 1] =~ /^\+\s*(?:bool|tristate|prompt)\s*["']/) {
$is_start = 1;
} elsif ($lines[$ln - 1] =~ /^\+\s*(?:---)?help(?:---)?$/) {
$length = -1;
if ($f =~ /^\+\s*(?:bool|tristate|prompt)\s*["']/) {
$needs_help = 1;
next;
}
if ($f =~ /^\+\s*help\s*$/) {
$has_help = 1;
next;
}
$f =~ s/^.//;
$f =~ s/#.*//;
$f =~ s/^\s+//;
next if ($f =~ /^$/);
$f =~ s/^.//; # strip patch context [+ ]
$f =~ s/#.*//; # strip # directives
$f =~ s/^\s+//; # strip leading blanks
next if ($f =~ /^$/); # skip blank lines
# At the end of this Kconfig block:
# This only checks context lines in the patch
# and so hopefully shouldn't trigger false
# positives, even though some of these are
# common words in help texts
if ($f =~ /^\s*(?:config|menuconfig|choice|endchoice|
if|endif|menu|endmenu|source)\b/x) {
$is_end = 1;
if ($f =~ /^(?:config|menuconfig|choice|endchoice|
if|endif|menu|endmenu|source)\b/x) {
last;
}
$length++;
$help_length++ if ($has_help);
}
if ($is_start && $is_end && $length < $min_conf_desc_length) {
if ($needs_help &&
$help_length < $min_conf_desc_length) {
my $stat_real = get_stat_real($linenr, $ln - 1);
WARN("CONFIG_DESCRIPTION",
"please write a paragraph that describes the config symbol fully\n" . $herecurr);
"please write a help paragraph that fully describes the config symbol\n" . "$here\n$stat_real\n");
}
#print "is_start<$is_start> is_end<$is_end> length<$length>\n";
}
# check MAINTAINERS entries
@ -4448,6 +4473,7 @@ sub process {
# XXX(foo);
# EXPORT_SYMBOL(something_foo);
my $name = $1;
$name =~ s/^\s*($Ident).*/$1/;
if ($stat =~ /^(?:.\s*}\s*\n)?.([A-Z_]+)\s*\(\s*($Ident)/ &&
$name =~ /^${Ident}_$2/) {
#print "FOO C name<$name>\n";


@ -12,19 +12,27 @@ driver_info
drm_connector_funcs
drm_encoder_funcs
drm_encoder_helper_funcs
dvb_frontend_ops
dvb_tuner_ops
ethtool_ops
extent_io_ops
fb_ops
file_lock_operations
file_operations
hv_ops
hwmon_ops
ib_device_ops
ide_dma_ops
ide_port_ops
ieee80211_ops
iio_buffer_setup_ops
inode_operations
intel_dvo_dev_ops
irq_domain_ops
item_operations
iwl_cfg
iwl_ops
kernel_param_ops
kgdb_arch
kgdb_io
kset_uevent_ops
@ -32,29 +40,41 @@ lock_manager_operations
machine_desc
microcode_ops
mlxsw_reg_info
mtd_ooblayout_ops
mtrr_ops
nand_controller_ops
neigh_ops
net_device_ops
nft_expr_ops
nlmsvc_binding
nvkm_device_chip
of_device_id
pci_raw_ops
phy_ops
pinconf_ops
pinctrl_ops
pinmux_ops
pipe_buf_operations
platform_hibernation_ops
platform_suspend_ops
proc_ops
proto_ops
pwm_ops
regmap_access_table
regulator_ops
reset_control_ops
rpc_pipe_ops
rtc_class_ops
sd_desc
sdhci_ops
seq_operations
sirfsoc_padmux
snd_ac97_build_ops
snd_pcm_ops
snd_rawmidi_ops
snd_soc_component_driver
snd_soc_dai_ops
snd_soc_ops
soc_pcmcia_socket_ops
stacktrace_ops
sysfs_ops
@ -63,6 +83,13 @@ uart_ops
usb_mon_operations
v4l2_ctrl_ops
v4l2_ioctl_ops
v4l2_subdev_core_ops
v4l2_subdev_internal_ops
v4l2_subdev_ops
v4l2_subdev_pad_ops
v4l2_subdev_video_ops
vb2_ops
vm_operations_struct
wacom_features
watchdog_ops
wd_ops


@ -126,7 +126,7 @@ if [ $marker -ne 0 ]; then
fi
echo Code starting with the faulting instruction > $T.aa
echo =========================================== >> $T.aa
code=`echo $code | sed -e 's/ [<(]/ /;s/[>)] / /;s/ /,0x/g; s/[>)]$//'`
code=`echo $code | sed -e 's/\r//;s/ [<(]/ /;s/[>)] / /;s/ /,0x/g; s/[>)]$//'`
echo -n " .$type 0x" > $T.s
echo $code >> $T.s
disas $T 0


@ -94,6 +94,9 @@ while (<IN>) {
# Makefiles and scripts contain nasty expressions to parse docs
next if ($f =~ m/Makefile/ || $f =~ m/\.sh$/);
# It doesn't make sense to parse hidden files
next if ($f =~ m#/\.#);
# Skip this script
next if ($f eq $scriptname);
@ -144,6 +147,7 @@ while (<IN>) {
if ($f =~ m/tools/) {
my $path = $f;
$path =~ s,(.*)/.*,$1,;
$path =~ s,testing/selftests/bpf,bpf/bpftool,;
next if (grep -e, glob("$path/$ref $path/../$ref $path/$fulref"));
}


@ -143,6 +143,14 @@ static void check_nodes_props(struct check *c, struct dt_info *dti, struct node
check_nodes_props(c, dti, child);
}
static bool is_multiple_of(int multiple, int divisor)
{
if (divisor == 0)
return multiple == 0;
else
return (multiple % divisor) == 0;
}
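A minimal standalone check of the helper's edge cases (assumes nothing beyond the function as written above; treating 0 as the only multiple of 0 lets callers drop their separate zero-divisor guards):

#include <assert.h>
#include <stdbool.h>

static bool is_multiple_of(int multiple, int divisor)
{
	if (divisor == 0)
		return multiple == 0;	/* avoids dividing by zero */
	else
		return (multiple % divisor) == 0;
}

int main(void)
{
	assert(is_multiple_of(8, 4));
	assert(!is_multiple_of(8, 3));
	assert(is_multiple_of(0, 0));	/* 0 is the only multiple of 0 */
	assert(!is_multiple_of(8, 0));
	return 0;
}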
static bool run_check(struct check *c, struct dt_info *dti)
{
struct node *dt = dti->dt;
@ -297,19 +305,20 @@ ERROR(duplicate_property_names, check_duplicate_property_names, NULL);
#define LOWERCASE "abcdefghijklmnopqrstuvwxyz"
#define UPPERCASE "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#define DIGITS "0123456789"
#define PROPNODECHARS LOWERCASE UPPERCASE DIGITS ",._+*#?-"
#define NODECHARS LOWERCASE UPPERCASE DIGITS ",._+-@"
#define PROPCHARS LOWERCASE UPPERCASE DIGITS ",._+*#?-"
#define PROPNODECHARSSTRICT LOWERCASE UPPERCASE DIGITS ",-"
static void check_node_name_chars(struct check *c, struct dt_info *dti,
struct node *node)
{
int n = strspn(node->name, c->data);
size_t n = strspn(node->name, c->data);
if (n < strlen(node->name))
FAIL(c, dti, node, "Bad character '%c' in node name",
node->name[n]);
}
ERROR(node_name_chars, check_node_name_chars, PROPNODECHARS "@");
ERROR(node_name_chars, check_node_name_chars, NODECHARS);
static void check_node_name_chars_strict(struct check *c, struct dt_info *dti,
struct node *node)
@ -330,6 +339,20 @@ static void check_node_name_format(struct check *c, struct dt_info *dti,
}
ERROR(node_name_format, check_node_name_format, NULL, &node_name_chars);
static void check_node_name_vs_property_name(struct check *c,
struct dt_info *dti,
struct node *node)
{
if (!node->parent)
return;
if (get_property(node->parent, node->name)) {
FAIL(c, dti, node, "node name and property name conflict");
}
}
WARNING(node_name_vs_property_name, check_node_name_vs_property_name,
NULL, &node_name_chars);
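To make the new check concrete, here is a hypothetical fragment that would now be flagged (shown as a comment, since the example is device-tree source rather than C):

/*
 * Hypothetical .dts fragment flagged by node_name_vs_property_name:
 *
 *	/ {
 *		bus-frequency;		// property of the root node
 *		bus-frequency { };	// sibling node with the same name
 *	};
 *
 * get_property(node->parent, node->name) finds the property, so the
 * node is reported as a conflict.
 */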
static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti,
struct node *node)
{
@ -363,14 +386,14 @@ static void check_property_name_chars(struct check *c, struct dt_info *dti,
struct property *prop;
for_each_property(node, prop) {
int n = strspn(prop->name, c->data);
size_t n = strspn(prop->name, c->data);
if (n < strlen(prop->name))
FAIL_PROP(c, dti, node, prop, "Bad character '%c' in property name",
prop->name[n]);
}
}
ERROR(property_name_chars, check_property_name_chars, PROPNODECHARS);
ERROR(property_name_chars, check_property_name_chars, PROPCHARS);
static void check_property_name_chars_strict(struct check *c,
struct dt_info *dti,
@ -380,7 +403,7 @@ static void check_property_name_chars_strict(struct check *c,
for_each_property(node, prop) {
const char *name = prop->name;
int n = strspn(name, c->data);
size_t n = strspn(name, c->data);
if (n == strlen(prop->name))
continue;
@ -497,7 +520,7 @@ static cell_t check_phandle_prop(struct check *c, struct dt_info *dti,
phandle = propval_cell(prop);
if ((phandle == 0) || (phandle == -1)) {
if (!phandle_is_valid(phandle)) {
FAIL_PROP(c, dti, node, prop, "bad value (0x%x) in %s property",
phandle, prop->name);
return 0;
@ -556,7 +579,7 @@ static void check_name_properties(struct check *c, struct dt_info *dti,
if (!prop)
return; /* No name property, that's fine */
if ((prop->val.len != node->basenamelen+1)
if ((prop->val.len != node->basenamelen + 1U)
|| (memcmp(prop->val.val, node->name, node->basenamelen) != 0)) {
FAIL(c, dti, node, "\"name\" property is incorrect (\"%s\" instead"
" of base node name)", prop->val.val);
@ -657,7 +680,6 @@ ERROR(omit_unused_nodes, fixup_omit_unused_nodes, NULL, &phandle_references, &pa
*/
WARNING_IF_NOT_CELL(address_cells_is_cell, "#address-cells");
WARNING_IF_NOT_CELL(size_cells_is_cell, "#size-cells");
WARNING_IF_NOT_CELL(interrupt_cells_is_cell, "#interrupt-cells");
WARNING_IF_NOT_STRING(device_type_is_string, "device_type");
WARNING_IF_NOT_STRING(model_is_string, "model");
@ -672,8 +694,7 @@ static void check_names_is_string_list(struct check *c, struct dt_info *dti,
struct property *prop;
for_each_property(node, prop) {
const char *s = strrchr(prop->name, '-');
if (!s || !streq(s, "-names"))
if (!strends(prop->name, "-names"))
continue;
c->data = prop->name;
@ -753,7 +774,7 @@ static void check_reg_format(struct check *c, struct dt_info *dti,
size_cells = node_size_cells(node->parent);
entrylen = (addr_cells + size_cells) * sizeof(cell_t);
if (!entrylen || (prop->val.len % entrylen) != 0)
if (!is_multiple_of(prop->val.len, entrylen))
FAIL_PROP(c, dti, node, prop, "property has invalid length (%d bytes) "
"(#address-cells == %d, #size-cells == %d)",
prop->val.len, addr_cells, size_cells);
@ -794,7 +815,7 @@ static void check_ranges_format(struct check *c, struct dt_info *dti,
"#size-cells (%d) differs from %s (%d)",
ranges, c_size_cells, node->parent->fullpath,
p_size_cells);
} else if ((prop->val.len % entrylen) != 0) {
} else if (!is_multiple_of(prop->val.len, entrylen)) {
FAIL_PROP(c, dti, node, prop, "\"%s\" property has invalid length (%d bytes) "
"(parent #address-cells == %d, child #address-cells == %d, "
"#size-cells == %d)", ranges, prop->val.len,
@ -871,7 +892,7 @@ static void check_pci_device_bus_num(struct check *c, struct dt_info *dti, struc
} else {
cells = (cell_t *)prop->val.val;
min_bus = fdt32_to_cpu(cells[0]);
max_bus = fdt32_to_cpu(cells[0]);
max_bus = fdt32_to_cpu(cells[1]);
}
if ((bus_num < min_bus) || (bus_num > max_bus))
FAIL_PROP(c, dti, node, prop, "PCI bus number %d out of range, expected (%d - %d)",
@ -1367,9 +1388,9 @@ static void check_property_phandle_args(struct check *c,
const struct provider *provider)
{
struct node *root = dti->dt;
int cell, cellsize = 0;
unsigned int cell, cellsize = 0;
if (prop->val.len % sizeof(cell_t)) {
if (!is_multiple_of(prop->val.len, sizeof(cell_t))) {
FAIL_PROP(c, dti, node, prop,
"property size (%d) is invalid, expected multiple of %zu",
prop->val.len, sizeof(cell_t));
@ -1379,14 +1400,14 @@ static void check_property_phandle_args(struct check *c,
for (cell = 0; cell < prop->val.len / sizeof(cell_t); cell += cellsize + 1) {
struct node *provider_node;
struct property *cellprop;
int phandle;
cell_t phandle;
phandle = propval_cell_n(prop, cell);
/*
* Some bindings use a cell value 0 or -1 to skip over optional
* entries when each index position has a specific definition.
*/
if (phandle == 0 || phandle == -1) {
if (!phandle_is_valid(phandle)) {
/* Give up if this is an overlay with external references */
if (dti->dtsflags & DTSF_PLUGIN)
break;
@ -1452,7 +1473,8 @@ static void check_provider_cells_property(struct check *c,
}
#define WARNING_PROPERTY_PHANDLE_CELLS(nm, propname, cells_name, ...) \
static struct provider nm##_provider = { (propname), (cells_name), __VA_ARGS__ }; \
WARNING(nm##_property, check_provider_cells_property, &nm##_provider, &phandle_references);
WARNING_IF_NOT_CELL(nm##_is_cell, cells_name); \
WARNING(nm##_property, check_provider_cells_property, &nm##_provider, &nm##_is_cell, &phandle_references);
WARNING_PROPERTY_PHANDLE_CELLS(clocks, "clocks", "#clock-cells");
WARNING_PROPERTY_PHANDLE_CELLS(cooling_device, "cooling-device", "#cooling-cells");
@ -1473,24 +1495,17 @@ WARNING_PROPERTY_PHANDLE_CELLS(thermal_sensors, "thermal-sensors", "#thermal-sen
static bool prop_is_gpio(struct property *prop)
{
char *str;
/*
* *-gpios and *-gpio can appear in property names,
* so skip over any false matches (only one known ATM)
*/
if (strstr(prop->name, "nr-gpio"))
if (strends(prop->name, ",nr-gpios"))
return false;
str = strrchr(prop->name, '-');
if (str)
str++;
else
str = prop->name;
if (!(streq(str, "gpios") || streq(str, "gpio")))
return false;
return true;
return strends(prop->name, "-gpios") ||
streq(prop->name, "gpios") ||
strends(prop->name, "-gpio") ||
streq(prop->name, "gpio");
}
static void check_gpios_property(struct check *c,
@ -1525,13 +1540,10 @@ static void check_deprecated_gpio_property(struct check *c,
struct property *prop;
for_each_property(node, prop) {
char *str;
if (!prop_is_gpio(prop))
continue;
str = strstr(prop->name, "gpio");
if (!streq(str, "gpio"))
if (!strends(prop->name, "gpio"))
continue;
FAIL_PROP(c, dti, node, prop,
@ -1561,21 +1573,106 @@ static void check_interrupt_provider(struct check *c,
struct node *node)
{
struct property *prop;
if (!node_is_interrupt_provider(node))
return;
bool irq_provider = node_is_interrupt_provider(node);
prop = get_property(node, "#interrupt-cells");
if (!prop)
if (irq_provider && !prop) {
FAIL(c, dti, node,
"Missing #interrupt-cells in interrupt provider");
"Missing '#interrupt-cells' in interrupt provider");
return;
}
prop = get_property(node, "#address-cells");
if (!prop)
if (!irq_provider && prop) {
FAIL(c, dti, node,
"Missing #address-cells in interrupt provider");
"'#interrupt-cells' found, but node is not an interrupt provider");
return;
}
}
WARNING(interrupt_provider, check_interrupt_provider, NULL);
WARNING(interrupt_provider, check_interrupt_provider, NULL, &interrupts_extended_is_cell);
static void check_interrupt_map(struct check *c,
struct dt_info *dti,
struct node *node)
{
struct node *root = dti->dt;
struct property *prop, *irq_map_prop;
size_t cellsize, cell, map_cells;
irq_map_prop = get_property(node, "interrupt-map");
if (!irq_map_prop)
return;
if (node->addr_cells < 0) {
FAIL(c, dti, node,
"Missing '#address-cells' in interrupt-map provider");
return;
}
cellsize = node_addr_cells(node);
cellsize += propval_cell(get_property(node, "#interrupt-cells"));
prop = get_property(node, "interrupt-map-mask");
if (prop && (prop->val.len != (cellsize * sizeof(cell_t))))
FAIL_PROP(c, dti, node, prop,
"property size (%d) is invalid, expected %zu",
prop->val.len, cellsize * sizeof(cell_t));
if (!is_multiple_of(irq_map_prop->val.len, sizeof(cell_t))) {
FAIL_PROP(c, dti, node, irq_map_prop,
"property size (%d) is invalid, expected multiple of %zu",
irq_map_prop->val.len, sizeof(cell_t));
return;
}
map_cells = irq_map_prop->val.len / sizeof(cell_t);
for (cell = 0; cell < map_cells; ) {
struct node *provider_node;
struct property *cellprop;
int phandle;
size_t parent_cellsize;
if ((cell + cellsize) >= map_cells) {
FAIL_PROP(c, dti, node, irq_map_prop,
"property size (%d) too small, expected > %zu",
irq_map_prop->val.len, (cell + cellsize) * sizeof(cell_t));
break;
}
cell += cellsize;
phandle = propval_cell_n(irq_map_prop, cell);
if (!phandle_is_valid(phandle)) {
/* Give up if this is an overlay with external references */
if (!(dti->dtsflags & DTSF_PLUGIN))
FAIL_PROP(c, dti, node, irq_map_prop,
"Cell %zu is not a phandle(%d)",
cell, phandle);
break;
}
provider_node = get_node_by_phandle(root, phandle);
if (!provider_node) {
FAIL_PROP(c, dti, node, irq_map_prop,
"Could not get phandle(%d) node for (cell %zu)",
phandle, cell);
break;
}
cellprop = get_property(provider_node, "#interrupt-cells");
if (cellprop) {
parent_cellsize = propval_cell(cellprop);
} else {
FAIL(c, dti, node, "Missing property '#interrupt-cells' in node %s or bad phandle (referred from interrupt-map[%zu])",
provider_node->fullpath, cell);
break;
}
cellprop = get_property(provider_node, "#address-cells");
if (cellprop)
parent_cellsize += propval_cell(cellprop);
cell += 1 + parent_cellsize;
}
}
WARNING(interrupt_map, check_interrupt_map, NULL, &phandle_references, &addr_size_cells, &interrupt_provider);
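The loop above steps through interrupt-map entries whose per-entry layout is as follows (all values cell_t; the field names here are descriptive, not taken from the source):

/*
 * One interrupt-map entry, as consumed by check_interrupt_map():
 *
 *   child unit address     node_addr_cells(node) cells
 *   child interrupt spec   this node's #interrupt-cells cells
 *   interrupt parent       1 phandle cell (read at 'cell')
 *   parent unit address    parent's #address-cells cells, if present
 *   parent interrupt spec  parent's #interrupt-cells cells
 *
 * Hence the walker advances cell += cellsize to reach the phandle,
 * then cell += 1 + parent_cellsize to reach the next entry.
 */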
static void check_interrupts_property(struct check *c,
struct dt_info *dti,
@ -1584,13 +1681,13 @@ static void check_interrupts_property(struct check *c,
struct node *root = dti->dt;
struct node *irq_node = NULL, *parent = node;
struct property *irq_prop, *prop = NULL;
int irq_cells, phandle;
cell_t irq_cells, phandle;
irq_prop = get_property(node, "interrupts");
if (!irq_prop)
return;
if (irq_prop->val.len % sizeof(cell_t))
if (!is_multiple_of(irq_prop->val.len, sizeof(cell_t)))
FAIL_PROP(c, dti, node, irq_prop, "size (%d) is invalid, expected multiple of %zu",
irq_prop->val.len, sizeof(cell_t));
@ -1603,7 +1700,7 @@ static void check_interrupts_property(struct check *c,
prop = get_property(parent, "interrupt-parent");
if (prop) {
phandle = propval_cell(prop);
if ((phandle == 0) || (phandle == -1)) {
if (!phandle_is_valid(phandle)) {
/* Give up if this is an overlay with
* external references */
if (dti->dtsflags & DTSF_PLUGIN)
@ -1639,7 +1736,7 @@ static void check_interrupts_property(struct check *c,
}
irq_cells = propval_cell(prop);
if (irq_prop->val.len % (irq_cells * sizeof(cell_t))) {
if (!is_multiple_of(irq_prop->val.len, irq_cells * sizeof(cell_t))) {
FAIL_PROP(c, dti, node, prop,
"size is (%d), expected multiple of %d",
irq_prop->val.len, (int)(irq_cells * sizeof(cell_t)));
@ -1750,7 +1847,7 @@ WARNING(graph_port, check_graph_port, NULL, &graph_nodes);
static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti,
struct node *endpoint)
{
int phandle;
cell_t phandle;
struct node *node;
struct property *prop;
@ -1760,7 +1857,7 @@ static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti,
phandle = propval_cell(prop);
/* Give up if this is an overlay with external references */
if (phandle == 0 || phandle == -1)
if (!phandle_is_valid(phandle))
return NULL;
node = get_node_by_phandle(dti->dt, phandle);
@ -1796,7 +1893,7 @@ WARNING(graph_endpoint, check_graph_endpoint, NULL, &graph_nodes);
static struct check *check_table[] = {
&duplicate_node_names, &duplicate_property_names,
&node_name_chars, &node_name_format, &property_name_chars,
&name_is_string, &name_properties,
&name_is_string, &name_properties, &node_name_vs_property_name,
&duplicate_label,
@ -1804,7 +1901,7 @@ static struct check *check_table[] = {
&phandle_references, &path_references,
&omit_unused_nodes,
&address_cells_is_cell, &size_cells_is_cell, &interrupt_cells_is_cell,
&address_cells_is_cell, &size_cells_is_cell,
&device_type_is_string, &model_is_string, &status_is_string,
&label_is_string,
@ -1839,26 +1936,43 @@ static struct check *check_table[] = {
&chosen_node_is_root, &chosen_node_bootargs, &chosen_node_stdout_path,
&clocks_property,
&clocks_is_cell,
&cooling_device_property,
&cooling_device_is_cell,
&dmas_property,
&dmas_is_cell,
&hwlocks_property,
&hwlocks_is_cell,
&interrupts_extended_property,
&interrupts_extended_is_cell,
&io_channels_property,
&io_channels_is_cell,
&iommus_property,
&iommus_is_cell,
&mboxes_property,
&mboxes_is_cell,
&msi_parent_property,
&msi_parent_is_cell,
&mux_controls_property,
&mux_controls_is_cell,
&phys_property,
&phys_is_cell,
&power_domains_property,
&power_domains_is_cell,
&pwms_property,
&pwms_is_cell,
&resets_property,
&resets_is_cell,
&sound_dai_property,
&sound_dai_is_cell,
&thermal_sensors_property,
&thermal_sensors_is_cell,
&deprecated_gpio_property,
&gpios_property,
&interrupts_property,
&interrupt_provider,
&interrupt_map,
&alias_paths,
@ -1882,7 +1996,7 @@ static void enable_warning_error(struct check *c, bool warn, bool error)
static void disable_warning_error(struct check *c, bool warn, bool error)
{
int i;
unsigned int i;
/* Lowering level, also lower it for things this is the prereq
* for */
@ -1903,7 +2017,7 @@ static void disable_warning_error(struct check *c, bool warn, bool error)
void parse_checks_option(bool warn, bool error, const char *arg)
{
int i;
unsigned int i;
const char *name = arg;
bool enable = true;
@ -1930,7 +2044,7 @@ void parse_checks_option(bool warn, bool error, const char *arg)
void process_checks(bool force, struct dt_info *dti)
{
int i;
unsigned int i;
int error = 0;
for (i = 0; i < ARRAY_SIZE(check_table); i++) {

View File

@ -12,7 +12,7 @@
* Command line options
*/
int quiet; /* Level of quietness */
int reservenum; /* Number of memory reservation slots */
unsigned int reservenum;/* Number of memory reservation slots */
int minsize; /* Minimum blob size */
int padsize; /* Additional padding to blob */
int alignsize; /* Additional padding to blob according to the alignsize */
@ -197,7 +197,7 @@ int main(int argc, char *argv[])
depname = optarg;
break;
case 'R':
reservenum = strtol(optarg, NULL, 0);
reservenum = strtoul(optarg, NULL, 0);
break;
case 'S':
minsize = strtol(optarg, NULL, 0);
@ -359,8 +359,6 @@ int main(int argc, char *argv[])
#endif
} else if (streq(outform, "dtb")) {
dt_to_blob(outf, dti, outversion);
} else if (streq(outform, "dtbo")) {
dt_to_blob(outf, dti, outversion);
} else if (streq(outform, "asm")) {
dt_to_asm(outf, dti, outversion);
} else if (streq(outform, "null")) {


@ -35,7 +35,7 @@
* Command line options
*/
extern int quiet; /* Level of quietness */
extern int reservenum; /* Number of memory reservation slots */
extern unsigned int reservenum; /* Number of memory reservation slots */
extern int minsize; /* Minimum blob size */
extern int padsize; /* Additional padding to blob */
extern int alignsize; /* Additional padding to blob according to the alignsize */
@ -51,6 +51,11 @@ extern int annotate; /* annotate .dts with input source location */
typedef uint32_t cell_t;
static inline bool phandle_is_valid(cell_t phandle)
{
return phandle != 0 && phandle != ~0U;
}
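A standalone sketch of the new helper's contract; using cell_t (uint32_t) also sidesteps the signed comparison against -1 that the old open-coded tests relied on:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t cell_t;

static bool phandle_is_valid(cell_t phandle)
{
	return phandle != 0 && phandle != ~0U;
}

int main(void)
{
	assert(!phandle_is_valid(0));	/* "no phandle" placeholder */
	assert(!phandle_is_valid(~0U));	/* unresolved reference marker */
	assert(phandle_is_valid(1));	/* first value dtc hands out */
	return 0;
}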
static inline uint16_t dtb_ld16(const void *p)
{
const uint8_t *bp = (const uint8_t *)p;
@ -86,6 +91,16 @@ static inline uint64_t dtb_ld64(const void *p)
#define streq(a, b) (strcmp((a), (b)) == 0)
#define strstarts(s, prefix) (strncmp((s), (prefix), strlen(prefix)) == 0)
#define strprefixeq(a, n, b) (strlen(b) == (n) && (memcmp(a, b, n) == 0))
static inline bool strends(const char *str, const char *suffix)
{
unsigned int len, suffix_len;
len = strlen(str);
suffix_len = strlen(suffix);
if (len < suffix_len)
return false;
return streq(str + len - suffix_len, suffix);
}
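A few illustrative results for strends(), shown as a self-contained test (streq is restated from the macro above so the snippet compiles on its own):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define streq(a, b) (strcmp((a), (b)) == 0)

static bool strends(const char *str, const char *suffix)
{
	unsigned int len = strlen(str);
	unsigned int suffix_len = strlen(suffix);

	if (len < suffix_len)
		return false;
	return streq(str + len - suffix_len, suffix);
}

int main(void)
{
	assert(strends("enable-gpios", "-gpios"));	/* matched by prop_is_gpio() */
	assert(strends("nvidia,nr-gpios", ",nr-gpios"));	/* explicitly excluded there */
	assert(!strends("gpio", "-gpio"));	/* suffix longer than the string */
	return 0;
}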
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
@ -101,6 +116,12 @@ enum markertype {
TYPE_UINT64,
TYPE_STRING,
};
static inline bool is_type_marker(enum markertype type)
{
return type >= TYPE_UINT8;
}
extern const char *markername(enum markertype markertype);
struct marker {
@ -125,7 +146,22 @@ struct data {
for_each_marker(m) \
if ((m)->type == (t))
size_t type_marker_length(struct marker *m);
static inline struct marker *next_type_marker(struct marker *m)
{
for_each_marker(m)
if (is_type_marker(m->type))
break;
return m;
}
static inline size_t type_marker_length(struct marker *m)
{
struct marker *next = next_type_marker(m->next);
if (next)
return next->offset - m->offset;
return 0;
}
void data_free(struct data d);


@ -124,7 +124,8 @@ static void asm_emit_cell(void *e, cell_t val)
{
FILE *f = e;
fprintf(f, "\t.byte 0x%02x; .byte 0x%02x; .byte 0x%02x; .byte 0x%02x\n",
fprintf(f, "\t.byte\t0x%02x\n" "\t.byte\t0x%02x\n"
"\t.byte\t0x%02x\n" "\t.byte\t0x%02x\n",
(val >> 24) & 0xff, (val >> 16) & 0xff,
(val >> 8) & 0xff, val & 0xff);
}
@ -134,9 +135,9 @@ static void asm_emit_string(void *e, const char *str, int len)
FILE *f = e;
if (len != 0)
fprintf(f, "\t.string\t\"%.*s\"\n", len, str);
fprintf(f, "\t.asciz\t\"%.*s\"\n", len, str);
else
fprintf(f, "\t.string\t\"%s\"\n", str);
fprintf(f, "\t.asciz\t\"%s\"\n", str);
}
static void asm_emit_align(void *e, int a)
@ -295,7 +296,7 @@ static struct data flatten_reserve_list(struct reserve_info *reservelist,
{
struct reserve_info *re;
struct data d = empty_data;
int j;
unsigned int j;
for (re = reservelist; re; re = re->next) {
d = data_append_re(d, re->address, re->size);
@ -438,7 +439,7 @@ static void dump_stringtable_asm(FILE *f, struct data strbuf)
while (p < (strbuf.val + strbuf.len)) {
len = strlen(p);
fprintf(f, "\t.string \"%s\"\n", p);
fprintf(f, "\t.asciz \"%s\"\n", p);
p += len+1;
}
}


@ -90,6 +90,10 @@ int fdt_check_header(const void *fdt)
{
size_t hdrsize;
/* The device tree must be at an 8-byte aligned address */
if ((uintptr_t)fdt & 7)
return -FDT_ERR_ALIGNMENT;
if (fdt_magic(fdt) != FDT_MAGIC)
return -FDT_ERR_BADMAGIC;
if (!can_assume(LATEST)) {
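The new precondition in isolation (a sketch; fdt_is_8byte_aligned is a hypothetical name for illustration, and FDT_ERR_ALIGNMENT's numeric value is defined elsewhere in libfdt, so this returns a plain bool):

#include <stdbool.h>
#include <stdint.h>

/* A flattened device tree must start at an 8-byte aligned address
 * before any multi-byte field may be read from it. */
static bool fdt_is_8byte_aligned(const void *fdt)
{
	return ((uintptr_t)fdt & 7) == 0;
}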


@ -349,7 +349,10 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset,
return offset;
/* Try to place the new node after the parent's properties */
fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */
tag = fdt_next_tag(fdt, parentoffset, &nextoffset);
/* the fdt_subnode_offset_namelen() should ensure this never hits */
if (!can_assume(LIBFDT_FLAWLESS) && (tag != FDT_BEGIN_NODE))
return -FDT_ERR_INTERNAL;
do {
offset = nextoffset;
tag = fdt_next_tag(fdt, offset, &nextoffset);
@ -391,7 +394,9 @@ int fdt_del_node(void *fdt, int nodeoffset)
}
static void fdt_packblocks_(const char *old, char *new,
int mem_rsv_size, int struct_size)
int mem_rsv_size,
int struct_size,
int strings_size)
{
int mem_rsv_off, struct_off, strings_off;
@ -406,8 +411,7 @@ static void fdt_packblocks_(const char *old, char *new,
fdt_set_off_dt_struct(new, struct_off);
fdt_set_size_dt_struct(new, struct_size);
memmove(new + strings_off, old + fdt_off_dt_strings(old),
fdt_size_dt_strings(old));
memmove(new + strings_off, old + fdt_off_dt_strings(old), strings_size);
fdt_set_off_dt_strings(new, strings_off);
fdt_set_size_dt_strings(new, fdt_size_dt_strings(old));
}
@ -467,7 +471,8 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
return -FDT_ERR_NOSPACE;
}
fdt_packblocks_(fdt, tmp, mem_rsv_size, struct_size);
fdt_packblocks_(fdt, tmp, mem_rsv_size, struct_size,
fdt_size_dt_strings(fdt));
memmove(buf, tmp, newsize);
fdt_set_magic(buf, FDT_MAGIC);
@ -487,7 +492,8 @@ int fdt_pack(void *fdt)
mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
* sizeof(struct fdt_reserve_entry);
fdt_packblocks_(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt));
fdt_packblocks_(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt),
fdt_size_dt_strings(fdt));
fdt_set_totalsize(fdt, fdt_data_size_(fdt));
return 0;


@ -39,6 +39,7 @@ static struct fdt_errtabent fdt_errtable[] = {
FDT_ERRTABENT(FDT_ERR_BADOVERLAY),
FDT_ERRTABENT(FDT_ERR_NOPHANDLES),
FDT_ERRTABENT(FDT_ERR_BADFLAGS),
FDT_ERRTABENT(FDT_ERR_ALIGNMENT),
};
#define FDT_ERRTABSIZE ((int)(sizeof(fdt_errtable) / sizeof(fdt_errtable[0])))


@ -131,6 +131,13 @@ uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
* to work even with unaligned pointers on platforms (such as ARMv5) that don't
* like unaligned loads and stores.
*/
static inline uint16_t fdt16_ld(const fdt16_t *p)
{
const uint8_t *bp = (const uint8_t *)p;
return ((uint16_t)bp[0] << 8) | bp[1];
}
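A quick standalone check that the byte-wise load really is endian- and alignment-independent (fdt16_t is restated so the snippet compiles alone):

#include <assert.h>
#include <stdint.h>

typedef uint16_t fdt16_t;	/* stored big-endian in the blob */

static inline uint16_t fdt16_ld(const fdt16_t *p)
{
	const uint8_t *bp = (const uint8_t *)p;

	return ((uint16_t)bp[0] << 8) | bp[1];
}

int main(void)
{
	uint8_t raw[2] = { 0x12, 0x34 };	/* bytes as they appear on disk */

	/* No host byte-swap, no alignment requirement on 'raw'. */
	assert(fdt16_ld((const fdt16_t *)raw) == 0x1234);
	return 0;
}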
static inline uint32_t fdt32_ld(const fdt32_t *p)
{
const uint8_t *bp = (const uint8_t *)p;


@ -526,7 +526,7 @@ struct node *get_node_by_path(struct node *tree, const char *path)
p = strchr(path, '/');
for_each_child(tree, child) {
if (p && strprefixeq(path, p - path, child->name))
if (p && strprefixeq(path, (size_t)(p - path), child->name))
return get_node_by_path(child, p+1);
else if (!p && streq(path, child->name))
return child;
@ -559,7 +559,7 @@ struct node *get_node_by_phandle(struct node *tree, cell_t phandle)
{
struct node *child, *node;
if ((phandle == 0) || (phandle == -1)) {
if (!phandle_is_valid(phandle)) {
assert(generate_fixups);
return NULL;
}
@ -594,7 +594,7 @@ cell_t get_node_phandle(struct node *root, struct node *node)
static cell_t phandle = 1; /* FIXME: ick, static local */
struct data d = empty_data;
if ((node->phandle != 0) && (node->phandle != -1))
if (phandle_is_valid(node->phandle))
return node->phandle;
while (get_node_by_phandle(root, phandle))


@ -124,27 +124,6 @@ static void write_propval_int(FILE *f, const char *p, size_t len, size_t width)
}
}
static bool has_data_type_information(struct marker *m)
{
return m->type >= TYPE_UINT8;
}
static struct marker *next_type_marker(struct marker *m)
{
while (m && !has_data_type_information(m))
m = m->next;
return m;
}
size_t type_marker_length(struct marker *m)
{
struct marker *next = next_type_marker(m->next);
if (next)
return next->offset - m->offset;
return 0;
}
static const char *delim_start[] = {
[TYPE_UINT8] = "[",
[TYPE_UINT16] = "/bits/ 16 <",
@ -229,26 +208,39 @@ static void write_propval(FILE *f, struct property *prop)
size_t chunk_len = (m->next ? m->next->offset : len) - m->offset;
size_t data_len = type_marker_length(m) ? : len - m->offset;
const char *p = &prop->val.val[m->offset];
struct marker *m_phandle;
if (has_data_type_information(m)) {
if (is_type_marker(m->type)) {
emit_type = m->type;
fprintf(f, " %s", delim_start[emit_type]);
} else if (m->type == LABEL)
fprintf(f, " %s:", m->ref);
else if (m->offset)
fputc(' ', f);
if (emit_type == TYPE_NONE) {
assert(chunk_len == 0);
if (emit_type == TYPE_NONE || chunk_len == 0)
continue;
}
switch(emit_type) {
case TYPE_UINT16:
write_propval_int(f, p, chunk_len, 2);
break;
case TYPE_UINT32:
write_propval_int(f, p, chunk_len, 4);
m_phandle = prop->val.markers;
for_each_marker_of_type(m_phandle, REF_PHANDLE)
if (m->offset == m_phandle->offset)
break;
if (m_phandle) {
if (m_phandle->ref[0] == '/')
fprintf(f, "&{%s}", m_phandle->ref);
else
fprintf(f, "&%s", m_phandle->ref);
if (chunk_len > 4) {
fputc(' ', f);
write_propval_int(f, p + 4, chunk_len - 4, 4);
}
} else {
write_propval_int(f, p, chunk_len, 4);
}
break;
case TYPE_UINT64:
write_propval_int(f, p, chunk_len, 8);

View File

@ -13,10 +13,10 @@
*/
#ifdef __GNUC__
#ifdef __clang__
#define PRINTF(i, j) __attribute__((format (printf, i, j)))
#else
#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
#define PRINTF(i, j) __attribute__((format (gnu_printf, i, j)))
#else
#define PRINTF(i, j) __attribute__((format (printf, i, j)))
#endif
#define NORETURN __attribute__((noreturn))
#else


@ -29,11 +29,12 @@ char *yaml_error_name[] = {
(emitter)->problem, __func__, __LINE__); \
})
static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, char *data, unsigned int len, int width)
static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers,
char *data, unsigned int seq_offset, unsigned int len, int width)
{
yaml_event_t event;
void *tag;
unsigned int off, start_offset = markers->offset;
unsigned int off;
switch(width) {
case 1: tag = "!u8"; break;
@ -66,7 +67,7 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
m = markers;
is_phandle = false;
for_each_marker_of_type(m, REF_PHANDLE) {
if (m->offset == (start_offset + off)) {
if (m->offset == (seq_offset + off)) {
is_phandle = true;
break;
}
@ -114,6 +115,7 @@ static void yaml_propval(yaml_emitter_t *emitter, struct property *prop)
yaml_event_t event;
unsigned int len = prop->val.len;
struct marker *m = prop->val.markers;
struct marker *markers = prop->val.markers;
/* Emit the property name */
yaml_scalar_event_initialize(&event, NULL,
@ -151,19 +153,19 @@ static void yaml_propval(yaml_emitter_t *emitter, struct property *prop)
switch(m->type) {
case TYPE_UINT16:
yaml_propval_int(emitter, m, data, chunk_len, 2);
yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 2);
break;
case TYPE_UINT32:
yaml_propval_int(emitter, m, data, chunk_len, 4);
yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 4);
break;
case TYPE_UINT64:
yaml_propval_int(emitter, m, data, chunk_len, 8);
yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 8);
break;
case TYPE_STRING:
yaml_propval_string(emitter, data, chunk_len);
break;
default:
yaml_propval_int(emitter, m, data, chunk_len, 1);
yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 1);
break;
}
}


@ -19,24 +19,10 @@ menuconfig GCC_PLUGINS
if GCC_PLUGINS
config GCC_PLUGIN_CYC_COMPLEXITY
bool "Compute the cyclomatic complexity of a function" if EXPERT
depends on !COMPILE_TEST # too noisy
help
The complexity M of a function's control flow graph is defined as:
M = E - N + 2P
where
E = the number of edges
N = the number of nodes
P = the number of connected components (exit nodes).
Enabling this plugin reports the complexity to stderr during the
build. It mainly serves as a simple example of how to create a
gcc plugin for the kernel.
config GCC_PLUGIN_SANCOV
bool
# Plugin can be removed once the kernel only supports GCC 6+
depends on !CC_HAS_SANCOV_TRACE_PC
help
This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
basic blocks. It supports all gcc versions with plugin support (from
@ -83,8 +69,6 @@ config GCC_PLUGIN_RANDSTRUCT
the existing seed and will be removed by a make mrproper or
make distclean.
Note that the implementation requires gcc 4.7 or newer.
This plugin was ported from grsecurity/PaX. More information at:
* https://grsecurity.net/
* https://pax.grsecurity.net/


@ -4,7 +4,7 @@
__visible int plugin_is_GPL_compatible;
static unsigned int sp_mask, canary_offset;
static unsigned int canary_offset;
static unsigned int arm_pertask_ssp_rtl_execute(void)
{
@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
const char *sym;
rtx body;
rtx mask, masked_sp;
rtx current;
/*
* Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
@ -30,19 +30,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
/*
* Replace the source of the SET insn with an expression that
* produces the address of the copy of the stack canary value
* stored in struct thread_info
* produces the address of the current task's stack canary value
*/
mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
masked_sp = gen_reg_rtx(Pmode);
current = gen_reg_rtx(Pmode);
emit_insn_before(gen_rtx_set(masked_sp,
gen_rtx_AND(Pmode,
stack_pointer_rtx,
mask)),
insn);
emit_insn_before(gen_load_tp_hard(current), insn);
SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
SET_SRC(body) = gen_rtx_PLUS(Pmode, current,
GEN_INT(canary_offset));
}
return 0;
@ -72,7 +66,6 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
const char * const plugin_name = plugin_info->base_name;
const int argc = plugin_info->argc;
const struct plugin_argument *argv = plugin_info->argv;
int tso = 0;
int i;
if (!plugin_default_version_check(version, &gcc_version)) {
@ -91,11 +84,6 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
return 1;
}
if (!strcmp(argv[i].key, "tso")) {
tso = atoi(argv[i].value);
continue;
}
if (!strcmp(argv[i].key, "offset")) {
canary_offset = atoi(argv[i].value);
continue;
@ -105,9 +93,6 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
return 1;
}
/* create the mask that produces the base of the stack */
sp_mask = ~((1U << (12 + tso)) - 1);
PASS_INFO(arm_pertask_ssp_rtl, "expand", 1, PASS_POS_INSERT_AFTER);
register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
