Brooklyn/include/linux/perf/riscv_pmu.h
Commit a94b3d14aa by crowetic: Brooklyn+ (PLUS) changes
Changes include (among others):

1. Dynamic RAM merge

2. Real-time page scan and allocation

3. Cache compression

4. Real-time IRQ checks

5. Dynamic I/O allocation for Java heap

6. Java page migration

7. Contiguous memory allocation

8. Idle page tracking

9. Per-CPU RAM usage tracking

10. ARM NEON scalar multiplication library

11. NEON/ARMv8 crypto extensions

12. NEON SHA, Blake, RIPEMD crypto extensions

13. Parallel NEON crypto engine to reduce CPU load under multi-algorithm workloads

Committed 2022-05-12 10:47:00 -07:00


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 SiFive
 * Copyright (C) 2018 Andes Technology Corporation
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#ifndef _ASM_RISCV_PERF_EVENT_H
#define _ASM_RISCV_PERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#ifdef CONFIG_RISCV_PMU
/*
 * Upper bound on the number of counters (hardware or firmware) that
 * this framework can track; it sizes the arrays and bitmaps below.
 */
#define RISCV_MAX_COUNTERS		64
#define RISCV_OP_UNSUPP			(-EOPNOTSUPP)
#define RISCV_PMU_PDEV_NAME		"riscv-pmu"
#define RISCV_PMU_LEGACY_PDEV_NAME	"riscv-pmu-legacy"

#define RISCV_PMU_STOP_FLAG_RESET	1

struct cpu_hw_events {
	/* number of currently enabled events */
	int			n_events;
	/* counter overflow interrupt */
	int			irq;
	/* currently enabled events */
	struct perf_event	*events[RISCV_MAX_COUNTERS];
	/* currently enabled hardware counters */
	DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
	/* currently enabled firmware counters */
	DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
};

struct riscv_pmu {
	struct pmu	pmu;
	char		*name;

	/* counter overflow interrupt handler */
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);

	int		num_counters;
	/* read the current value of a counter */
	u64		(*ctr_read)(struct perf_event *event);
	/* allocate a counter index for an event */
	int		(*ctr_get_idx)(struct perf_event *event);
	/* width in bits of the counter at @idx */
	int		(*ctr_get_width)(int idx);
	/* release the counter index held by an event */
	void		(*ctr_clear_idx)(struct perf_event *event);
	/* start/stop counting for an event */
	void		(*ctr_start)(struct perf_event *event, u64 init_val);
	void		(*ctr_stop)(struct perf_event *event, unsigned long flag);
	/* map a generic perf event to a hardware event configuration */
	int		(*event_map)(struct perf_event *event, u64 *config);

	struct cpu_hw_events __percpu *hw_events;
	struct hlist_node	node;
};

#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))

unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
int riscv_pmu_event_set_period(struct perf_event *event);
uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
u64 riscv_pmu_event_update(struct perf_event *event);

#ifdef CONFIG_RISCV_PMU_LEGACY
void riscv_pmu_legacy_skip_init(void);
#else
static inline void riscv_pmu_legacy_skip_init(void) {}
#endif

struct riscv_pmu *riscv_pmu_alloc(void);

#endif /* CONFIG_RISCV_PMU */
#endif /* _ASM_RISCV_PERF_EVENT_H */
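
To make the callback table above concrete, here is a minimal sketch (not part of the Brooklyn tree) of how a platform driver might fill in a struct riscv_pmu and hand it to the core perf subsystem. All demo_* names and the counter count are hypothetical; only riscv_pmu_alloc(), riscv_pmu_ctr_read_csr(), the struct fields, and perf_pmu_register() come from this header and the generic perf API.

#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/perf/riscv_pmu.h>
#include <asm/csr.h>

/* Hypothetical callback: read the CSR backing this event's counter.
 * CSR_CYCLE + idx follows the usual cycle/time/instret/hpmcounterN layout. */
static u64 demo_ctr_read(struct perf_event *event)
{
	return riscv_pmu_ctr_read_csr(CSR_CYCLE + event->hw.idx);
}

static int demo_pmu_probe(struct platform_device *pdev)
{
	struct riscv_pmu *rpmu = riscv_pmu_alloc();

	if (!rpmu)
		return -ENOMEM;

	rpmu->num_counters = 4;		/* hypothetical platform value */
	rpmu->ctr_read = demo_ctr_read;
	/* A real driver also fills ctr_get_idx, ctr_get_width,
	 * ctr_clear_idx, ctr_start, ctr_stop, event_map and handle_irq. */

	/* Register the embedded struct pmu; -1 requests a dynamic type id. */
	return perf_pmu_register(&rpmu->pmu, "demo_riscv_pmu", -1);
}

The shared driver core behind riscv_pmu_alloc() allocates the per-CPU cpu_hw_events instances and installs generic struct pmu callbacks; those callbacks recover the wrapper from the embedded pmu member with to_riscv_pmu() (a container_of()) and then dispatch through the function pointers set above.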