Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-01-30 23:02:18 +00:00)
Brooklyn/tools/perf/util/unwind.h
Commit a94b3d14aa by crowetic: Brooklyn+ (PLUS) changes
Changes included (and more):

1. Dynamic RAM merge
2. Real-time page scan and allocation
3. Cache compression
4. Real-time IRQ checks
5. Dynamic I/O allocation for the Java heap
6. Java page migration
7. Contiguous memory allocation
8. Idle page tracking (see the sketch after this commit message)
9. Per-CPU RAM usage tracking
10. ARM NEON scalar multiplication library
11. NEON/ARMv8 crypto extensions
12. NEON SHA, BLAKE, and RIPEMD crypto extensions
13. Parallel NEON crypto engine for multi-algorithm CPU stress reduction
Committed: 2022-05-12 10:47:00 -07:00
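Several of these items appear to build on stock kernel facilities. For item 8, the sketch below uses the mainline idle-page-tracking interface (CONFIG_IDLE_PAGE_TRACKING) from userspace: writing set bits to /sys/kernel/mm/page_idle/bitmap marks page frames idle, and reading the word back shows which frames went untouched in between. The PFN here is hypothetical (real tools derive PFNs from /proc/<pid>/pagemap), root is required, and this illustrates the standard interface rather than Brooklyn's specific changes.

#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

#define PAGE_IDLE_BITMAP "/sys/kernel/mm/page_idle/bitmap"

int main(void)
{
        uint64_t word = ~0ULL;              /* one 64-bit word covers 64 PFNs */
        uint64_t pfn = 0x100000;            /* hypothetical page frame number */
        off_t off = (off_t)(pfn / 64) * 8;  /* bitmap is accessed in 8-byte words */
        int fd = open(PAGE_IDLE_BITMAP, O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Set bits to mark all 64 frames in this word idle. */
        if (pwrite(fd, &word, sizeof(word), off) != sizeof(word)) {
                perror("pwrite");
                return 1;
        }
        sleep(10); /* let the workload run */
        /* Bits still set mean the frame was not accessed while marked idle. */
        if (pread(fd, &word, sizeof(word), off) != sizeof(word)) {
                perror("pread");
                return 1;
        }
        printf("idle mask for pfn %#" PRIx64 "..+63: %#018" PRIx64 "\n", pfn, word);
        close(fd);
        return 0;
}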

91 lines | 2.5 KiB | C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UNWIND_H
#define __UNWIND_H

#include <linux/compiler.h>
#include <linux/types.h>
#include "util/map_symbol.h"

struct maps;
struct perf_sample;
struct thread;

struct unwind_entry {
        struct map_symbol ms;
        u64 ip;
};

typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);

struct unwind_libunwind_ops {
        int (*prepare_access)(struct maps *maps);
        void (*flush_access)(struct maps *maps);
        void (*finish_access)(struct maps *maps);
        int (*get_entries)(unwind_entry_cb_t cb, void *arg,
                           struct thread *thread,
                           struct perf_sample *data, int max_stack,
                           bool best_effort);
};

#ifdef HAVE_DWARF_UNWIND_SUPPORT
/*
 * When best_effort is set, don't report errors and fail silently. This could
 * be expanded in the future to be more permissive about things other than
 * error messages.
 */
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
                        struct perf_sample *data, int max_stack,
                        bool best_effort);

/* libunwind specific */
#ifdef HAVE_LIBUNWIND_SUPPORT
#ifndef LIBUNWIND__ARCH_REG_ID
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
#endif

#ifndef LIBUNWIND__ARCH_REG_SP
#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
#endif

#ifndef LIBUNWIND__ARCH_REG_IP
#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
#endif

int LIBUNWIND__ARCH_REG_ID(int regnum);
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
void unwind__flush_access(struct maps *maps);
void unwind__finish_access(struct maps *maps);
#else
static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
                                         struct map *map __maybe_unused,
                                         bool *initialized __maybe_unused)
{
        return 0;
}

static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
#endif
#else
static inline int
unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
                    void *arg __maybe_unused,
                    struct thread *thread __maybe_unused,
                    struct perf_sample *data __maybe_unused,
                    int max_stack __maybe_unused,
                    bool best_effort __maybe_unused)
{
        return 0;
}

static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
                                         struct map *map __maybe_unused,
                                         bool *initialized __maybe_unused)
{
        return 0;
}

static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */
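For orientation, here is a minimal sketch of how a consumer built inside the perf source tree might drive this API when HAVE_DWARF_UNWIND_SUPPORT is enabled. resolve_and_print and dump_callchain are illustrative names, not part of perf; the max_stack value of 127 simply mirrors the kernel's default perf_event_max_stack sysctl.

/* Illustrative consumer; assumes it is compiled inside the perf source tree. */
#include <inttypes.h>
#include <stdio.h>
#include "util/unwind.h"

/* Matches unwind_entry_cb_t above; invoked once per recovered frame. */
static int resolve_and_print(struct unwind_entry *entry, void *arg)
{
        unsigned int *depth = arg;

        /* entry->ms holds the map/symbol pair, entry->ip the frame address. */
        printf("#%-2u %#" PRIx64 "\n", (*depth)++, (uint64_t)entry->ip);
        return 0; /* returning non-zero stops the walk early */
}

static void dump_callchain(struct thread *thread, struct perf_sample *sample)
{
        unsigned int depth = 0;

        /*
         * best_effort = false: unwind errors are reported rather than
         * swallowed (see the comment above unwind__get_entries).
         */
        unwind__get_entries(resolve_and_print, &depth, thread,
                            sample, 127, false);
}

In perf itself the callback conventionally appends each frame to a callchain cursor rather than printing it; the printing here is only to keep the sketch self-contained.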