mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-02-01 07:42:18 +00:00)

commit b530551891 (parent 4b3a56d965): phase 2
@@ -205,6 +205,8 @@ static void print_delayacct(struct taskstats *t)
	       "RECLAIM %12s%15s%15s\n"
	       " %15llu%15llu%15llums\n"
	       "THRASHING%12s%15s%15s\n"
	       " %15llu%15llu%15llums\n"
	       "COMPACT %12s%15s%15s\n"
	       " %15llu%15llu%15llums\n",
	       "count", "real total", "virtual total",
	       "delay total", "delay average",
@@ -228,7 +230,11 @@ static void print_delayacct(struct taskstats *t)
	       "count", "delay total", "delay average",
	       (unsigned long long)t->thrashing_count,
	       (unsigned long long)t->thrashing_delay_total,
	       average_ms(t->thrashing_delay_total, t->thrashing_count));
	       average_ms(t->thrashing_delay_total, t->thrashing_count),
	       "count", "delay total", "delay average",
	       (unsigned long long)t->compact_count,
	       (unsigned long long)t->compact_delay_total,
	       average_ms(t->compact_delay_total, t->compact_count));
}

static void task_context_switch_counts(struct taskstats *t)

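Note on the format strings above: each accounting row prints a count, a total delay, and a per-event average in milliseconds via average_ms(). The macro itself is outside this hunk; a minimal sketch of the idiom, assuming the taskstats delay totals are in nanoseconds as used throughout getdelays.c:

	/* ns total -> ms average per event, guarding against count == 0 */
	#define average_ms(total_ns, count) \
		((total_ns) / 1000000ULL / ((count) ? (count) : 1))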
@@ -15,9 +15,9 @@ CFLAGS = -Wall -g -I$(CURDIR)/include
ALL_TARGETS := bootconfig
ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))

all: $(ALL_PROGRAMS)
all: $(ALL_PROGRAMS) test

$(OUTPUT)bootconfig: main.c $(LIBSRC)
$(OUTPUT)bootconfig: main.c include/linux/bootconfig.h $(LIBSRC)
	$(CC) $(filter %.c,$^) $(CFLAGS) -o $@

test: $(ALL_PROGRAMS) test-bootconfig.sh
@@ -2,10 +2,53 @@
#ifndef _BOOTCONFIG_LINUX_BOOTCONFIG_H
#define _BOOTCONFIG_LINUX_BOOTCONFIG_H

#include "../../../../include/linux/bootconfig.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <ctype.h>
#include <errno.h>
#include <string.h>


#ifndef fallthrough
# define fallthrough
#endif

#define WARN_ON(cond)	\
	((cond) ? printf("Internal warning(%s:%d, %s): %s\n",	\
			__FILE__, __LINE__, __func__, #cond) : 0)

#define unlikely(cond)	(cond)

/* Copied from lib/string.c */
static inline char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}

static inline char *strim(char *s)
{
	size_t size;
	char *end;

	size = strlen(s);
	if (!size)
		return s;

	end = s + size - 1;
	while (end >= s && isspace(*end))
		end--;
	*(end + 1) = '\0';

	return skip_spaces(s);
}

#define __init
#define __initdata

#include "../../../../include/linux/bootconfig.h"

#endif

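Note: this userspace shim pulls in the libc headers and defines the kernel-style helpers (fallthrough, WARN_ON, unlikely, skip_spaces, strim, __init/__initdata) before including the in-tree include/linux/bootconfig.h, so the tool compiles the same parser source as the kernel. A small, hypothetical usage of the two string helpers defined above (not part of the diff):

	char buf[] = "  key = value  ";
	char *line = strim(buf);	/* trims trailing blanks, skips leading ones */
	/* line now points at "key = value" inside buf */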
@@ -12,9 +12,10 @@
#include <errno.h>
#include <endian.h>

#include <linux/kernel.h>
#include <linux/bootconfig.h>

#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

static int xbc_show_value(struct xbc_node *node, bool semicolon)
{
	const char *val, *eol;
@@ -176,7 +177,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
{
	struct stat stat;
	int ret;
	u32 size = 0, csum = 0, rcsum;
	uint32_t size = 0, csum = 0, rcsum;
	char magic[BOOTCONFIG_MAGIC_LEN];
	const char *msg;

@@ -200,11 +201,11 @@ static int load_xbc_from_initrd(int fd, char **buf)
	if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0)
		return pr_errno("Failed to lseek for size", -errno);

	if (read(fd, &size, sizeof(u32)) < 0)
	if (read(fd, &size, sizeof(uint32_t)) < 0)
		return pr_errno("Failed to read size", -errno);
	size = le32toh(size);

	if (read(fd, &csum, sizeof(u32)) < 0)
	if (read(fd, &csum, sizeof(uint32_t)) < 0)
		return pr_errno("Failed to read checksum", -errno);
	csum = le32toh(csum);

@@ -229,7 +230,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
		return -EINVAL;
	}

	ret = xbc_init(*buf, &msg, NULL);
	ret = xbc_init(*buf, size, &msg, NULL);
	/* Wrong data */
	if (ret < 0) {
		pr_err("parse error: %s.\n", msg);
@@ -269,7 +270,7 @@ static int init_xbc_with_error(char *buf, int len)
	if (!copy)
		return -ENOMEM;

	ret = xbc_init(buf, &msg, &pos);
	ret = xbc_init(buf, len, &msg, &pos);
	if (ret < 0)
		show_xbc_error(copy, msg, pos);
	free(copy);
@@ -362,7 +363,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
	size_t total_size;
	struct stat stat;
	const char *msg;
	u32 size, csum;
	uint32_t size, csum;
	int pos, pad;
	int ret, fd;

@@ -376,13 +377,13 @@ static int apply_xbc(const char *path, const char *xbc_path)

	/* Backup the bootconfig data */
	data = calloc(size + BOOTCONFIG_ALIGN +
		      sizeof(u32) + sizeof(u32) + BOOTCONFIG_MAGIC_LEN, 1);
		      sizeof(uint32_t) + sizeof(uint32_t) + BOOTCONFIG_MAGIC_LEN, 1);
	if (!data)
		return -ENOMEM;
	memcpy(data, buf, size);

	/* Check the data format */
	ret = xbc_init(buf, &msg, &pos);
	ret = xbc_init(buf, size, &msg, &pos);
	if (ret < 0) {
		show_xbc_error(data, msg, pos);
		free(data);
@@ -391,12 +392,13 @@ static int apply_xbc(const char *path, const char *xbc_path)
		return ret;
	}
	printf("Apply %s to %s\n", xbc_path, path);
	xbc_get_info(&ret, NULL);
	printf("\tNumber of nodes: %d\n", ret);
	printf("\tSize: %u bytes\n", (unsigned int)size);
	printf("\tChecksum: %d\n", (unsigned int)csum);

	/* TODO: Check the options by schema */
	xbc_destroy_all();
	xbc_exit();
	free(buf);

	/* Remove old boot config if exists */
@@ -423,17 +425,17 @@ static int apply_xbc(const char *path, const char *xbc_path)
	}

	/* To align up the total size to BOOTCONFIG_ALIGN, get padding size */
	total_size = stat.st_size + size + sizeof(u32) * 2 + BOOTCONFIG_MAGIC_LEN;
	total_size = stat.st_size + size + sizeof(uint32_t) * 2 + BOOTCONFIG_MAGIC_LEN;
	pad = ((total_size + BOOTCONFIG_ALIGN - 1) & (~BOOTCONFIG_ALIGN_MASK)) - total_size;
	size += pad;

	/* Add a footer */
	p = data + size;
	*(u32 *)p = htole32(size);
	p += sizeof(u32);
	*(uint32_t *)p = htole32(size);
	p += sizeof(uint32_t);

	*(u32 *)p = htole32(csum);
	p += sizeof(u32);
	*(uint32_t *)p = htole32(csum);
	p += sizeof(uint32_t);

	memcpy(p, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
	p += BOOTCONFIG_MAGIC_LEN;

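Note: the thread running through this file is an API change: xbc_init() now takes the buffer length explicitly instead of relying on a NUL-terminated string, and the kernel-only u32 type gives way to uint32_t for userspace builds. A sketch of the updated calling convention, using only names already present above:

	const char *msg;
	int pos, ret;

	ret = xbc_init(buf, size, &msg, &pos);	/* size is now an explicit argument */
	if (ret < 0)
		show_xbc_error(data, msg, pos);	/* msg/pos describe the parse failure */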
tools/bpf/bpftool/.gitignore (vendored, 2 changes)
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
*.d
/bootstrap/
/bpftool

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../../scripts/Makefile.include

INSTALL ?= install
@@ -24,7 +24,7 @@ man: man8
man8: $(DOC_MAN8)

RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
RST2MAN_OPTS += --verbose
RST2MAN_OPTS += --verbose --strip-comments

list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST))))
see_also = $(subst " ",, \
@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-btf
================
@@ -7,13 +9,14 @@ tool for inspection of BTF data

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **btf** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | {**-d** | **--debug** } |
	{ **-B** | **--base-btf** } }
*OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } }

*COMMANDS* := { **dump** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-cgroup
================
@@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF progs

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **cgroup** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
	{ **-f** | **--bpffs** } }
*OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } }

*COMMANDS* :=
	{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
@@ -30,9 +33,9 @@ CGROUP COMMANDS
|	*PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
|	*ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
|		**bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
|		**getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
|		**sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
|		**sock_release** }
|		**getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
|		**sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
|		**sock_release** }
|	*ATTACH_FLAGS* := { **multi** | **override** }

DESCRIPTION
@@ -98,9 +101,9 @@ DESCRIPTION
	**sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
		unconnected udp6 socket (since 4.18);
	**recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
		an unconnected udp4 socket (since 5.2);
		an unconnected udp4 socket (since 5.2);
	**recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
		an unconnected udp6 socket (since 5.2);
		an unconnected udp6 socket (since 5.2);
	**sysctl** sysctl access (since 5.2);
	**getsockopt** call to getsockopt (since 5.3);
	**setsockopt** call to setsockopt (since 5.3);

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

===============
bpftool-feature
===============
@@ -7,12 +9,14 @@ tool for inspection of eBPF-related parameters for Linux kernel or net device

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **feature** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*OPTIONS* := { |COMMON_OPTIONS| }

*COMMANDS* := { **probe** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-gen
================
@@ -7,13 +9,14 @@ tool for BPF code-generation

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **gen** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
	{ **-L** | **--use-loader** } }
*OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } }

*COMMAND* := { **object** | **skeleton** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

============
bpftool-iter
============
@@ -7,12 +9,14 @@ tool to create BPF iterators

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **iter** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*OPTIONS* := { |COMMON_OPTIONS| }

*COMMANDS* := { **pin** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-link
================
@@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF links

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **link** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
	{ **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } }

*COMMANDS* := { **show** | **list** | **pin** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-map
================
@@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF maps

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **map** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
	{ **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } }

*COMMANDS* :=
	{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
@@ -52,7 +55,7 @@ MAP COMMANDS
|	| **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
|	| **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
|	| **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
|	**task_storage** }
|	| **task_storage** | **bloom_filter** }

DESCRIPTION
===========

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-net
================
@@ -7,12 +9,14 @@ tool for inspection of netdev/tc related bpf prog attachments

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **net** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*OPTIONS* := { |COMMON_OPTIONS| }

*COMMANDS* :=
	{ **show** | **list** | **attach** | **detach** | **help** }
@@ -31,44 +35,44 @@ NET COMMANDS
DESCRIPTION
===========
	**bpftool net { show | list }** [ **dev** *NAME* ]
		List bpf program attachments in the kernel networking subsystem.
		List bpf program attachments in the kernel networking subsystem.

		Currently, only device driver xdp attachments and tc filter
		classification/action attachments are implemented, i.e., for
		program types **BPF_PROG_TYPE_SCHED_CLS**,
		**BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
		For programs attached to a particular cgroup, e.g.,
		**BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
		**BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
		users can use **bpftool cgroup** to dump cgroup attachments.
		For sk_{filter, skb, msg, reuseport} and lwt/seg6
		bpf programs, users should consult other tools, e.g., iproute2.
		Currently, only device driver xdp attachments and tc filter
		classification/action attachments are implemented, i.e., for
		program types **BPF_PROG_TYPE_SCHED_CLS**,
		**BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
		For programs attached to a particular cgroup, e.g.,
		**BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
		**BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
		users can use **bpftool cgroup** to dump cgroup attachments.
		For sk_{filter, skb, msg, reuseport} and lwt/seg6
		bpf programs, users should consult other tools, e.g., iproute2.

		The current output will start with all xdp program attachments, followed by
		all tc class/qdisc bpf program attachments. Both xdp programs and
		tc programs are ordered based on ifindex number. If multiple bpf
		programs attached to the same networking device through **tc filter**,
		the order will be first all bpf programs attached to tc classes, then
		all bpf programs attached to non clsact qdiscs, and finally all
		bpf programs attached to root and clsact qdisc.
		The current output will start with all xdp program attachments, followed by
		all tc class/qdisc bpf program attachments. Both xdp programs and
		tc programs are ordered based on ifindex number. If multiple bpf
		programs attached to the same networking device through **tc filter**,
		the order will be first all bpf programs attached to tc classes, then
		all bpf programs attached to non clsact qdiscs, and finally all
		bpf programs attached to root and clsact qdisc.

	**bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
		Attach bpf program *PROG* to network interface *NAME* with
		type specified by *ATTACH_TYPE*. Previously attached bpf program
		can be replaced by the command used with **overwrite** option.
		Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
		Attach bpf program *PROG* to network interface *NAME* with
		type specified by *ATTACH_TYPE*. Previously attached bpf program
		can be replaced by the command used with **overwrite** option.
		Currently, only XDP-related modes are supported for *ATTACH_TYPE*.

		*ATTACH_TYPE* can be of:
		**xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
		**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
		**xdpdrv** - Native XDP. runs earliest point in driver's receive path;
		**xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
		*ATTACH_TYPE* can be of:
		**xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
		**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
		**xdpdrv** - Native XDP. runs earliest point in driver's receive path;
		**xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;

	**bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
		Detach bpf program attached to network interface *NAME* with
		type specified by *ATTACH_TYPE*. To detach bpf program, same
		*ATTACH_TYPE* previously used for attach must be specified.
		Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
		Detach bpf program attached to network interface *NAME* with
		type specified by *ATTACH_TYPE*. To detach bpf program, same
		*ATTACH_TYPE* previously used for attach must be specified.
		Currently, only XDP-related modes are supported for *ATTACH_TYPE*.

	**bpftool net help**
		Print short help message.

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-perf
================
@@ -7,12 +9,14 @@ tool for inspection of perf related bpf prog attachments

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **perf** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*OPTIONS* := { |COMMON_OPTIONS| }

*COMMANDS* :=
	{ **show** | **list** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
bpftool-prog
================
@@ -7,12 +9,14 @@ tool for inspection and simple manipulation of eBPF progs

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **prog** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
*OPTIONS* := { |COMMON_OPTIONS| |
	{ **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
	{ **-L** | **--use-loader** } }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

==================
bpftool-struct_ops
==================
@@ -7,12 +9,14 @@ tool to register/unregister/introspect BPF struct_ops

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

**bpftool** [*OPTIONS*] **struct_ops** *COMMAND*

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*OPTIONS* := { |COMMON_OPTIONS| }

*COMMANDS* :=
	{ **show** | **list** | **dump** | **register** | **unregister** | **help** }

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

================
BPFTOOL
================
@@ -7,6 +9,8 @@ tool for inspection and simple manipulation of eBPF programs and maps

:Manual section: 8

.. include:: substitutions.rst

SYNOPSIS
========

@@ -18,8 +22,7 @@ SYNOPSIS

*OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }

*OPTIONS* := { { **-V** | **--version** } |
	{ **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| }

*MAP-COMMANDS* :=
	{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |

@@ -1,3 +1,5 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

-h, --help
	Print short help message (similar to **bpftool help**).

@@ -20,3 +22,12 @@
	Print all logs available, even debug-level information. This includes
	logs from libbpf as well as from the verifier, when attempting to
	load programs.

-l, --legacy
	Use legacy libbpf mode which has more relaxed BPF program
	requirements. By default, bpftool has more strict requirements
	about section names, changes pinning logic and doesn't support
	some of the older non-BTF map declarations.

	See https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0
	for details.

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../scripts/Makefile.include

ifeq ($(srctree),)
@@ -13,35 +13,55 @@ else
Q = @
endif

BPF_DIR = $(srctree)/tools/lib/bpf/
BPF_DIR = $(srctree)/tools/lib/bpf

ifneq ($(OUTPUT),)
LIBBPF_OUTPUT = $(OUTPUT)/libbpf/
LIBBPF_PATH = $(LIBBPF_OUTPUT)
BOOTSTRAP_OUTPUT = $(OUTPUT)/bootstrap/
_OUTPUT := $(OUTPUT)
else
LIBBPF_OUTPUT =
LIBBPF_PATH = $(BPF_DIR)
BOOTSTRAP_OUTPUT = $(CURDIR)/bootstrap/
_OUTPUT := $(CURDIR)
endif
BOOTSTRAP_OUTPUT := $(_OUTPUT)/bootstrap/

LIBBPF = $(LIBBPF_PATH)libbpf.a
LIBBPF_BOOTSTRAP_OUTPUT = $(BOOTSTRAP_OUTPUT)libbpf/
LIBBPF_BOOTSTRAP = $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
LIBBPF_OUTPUT := $(_OUTPUT)/libbpf/
LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf
LIBBPF := $(LIBBPF_OUTPUT)libbpf.a

LIBBPF_BOOTSTRAP_OUTPUT := $(BOOTSTRAP_OUTPUT)libbpf/
LIBBPF_BOOTSTRAP_DESTDIR := $(LIBBPF_BOOTSTRAP_OUTPUT)
LIBBPF_BOOTSTRAP_INCLUDE := $(LIBBPF_BOOTSTRAP_DESTDIR)/include
LIBBPF_BOOTSTRAP_HDRS_DIR := $(LIBBPF_BOOTSTRAP_INCLUDE)/bpf
LIBBPF_BOOTSTRAP := $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a

# We need to copy hashmap.h and nlattr.h which is not otherwise exported by
# libbpf, but still required by bpftool.
LIBBPF_INTERNAL_HDRS := $(addprefix $(LIBBPF_HDRS_DIR)/,hashmap.h nlattr.h)
LIBBPF_BOOTSTRAP_INTERNAL_HDRS := $(addprefix $(LIBBPF_BOOTSTRAP_HDRS_DIR)/,hashmap.h)

ifeq ($(BPFTOOL_VERSION),)
BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion)
endif

$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT):
$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR) $(LIBBPF_BOOTSTRAP_HDRS_DIR):
	$(QUIET_MKDIR)mkdir -p $@

$(LIBBPF): FORCE | $(LIBBPF_OUTPUT)
	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a
$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT)
	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \
		DESTDIR=$(LIBBPF_DESTDIR) prefix= $(LIBBPF) install_headers

$(LIBBPF_BOOTSTRAP): FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR)
	$(call QUIET_INSTALL, $@)
	$(Q)install -m 644 -t $(LIBBPF_HDRS_DIR) $<

$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
		ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@
		DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR) prefix= \
		ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers

$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
	$(call QUIET_INSTALL, $@)
	$(Q)install -m 644 -t $(LIBBPF_BOOTSTRAP_HDRS_DIR) $<

$(LIBBPF)-clean: FORCE | $(LIBBPF_OUTPUT)
	$(call QUIET_CLEAN, libbpf)
@@ -59,11 +79,10 @@ CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
	-I$(if $(OUTPUT),$(OUTPUT),.) \
	-I$(LIBBPF_INCLUDE) \
	-I$(srctree)/kernel/bpf/ \
	-I$(srctree)/tools/include \
	-I$(srctree)/tools/include/uapi \
	-I$(srctree)/tools/lib \
	-I$(srctree)/tools/perf
	-I$(srctree)/tools/include/uapi
CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
ifneq ($(EXTRA_CFLAGS),)
CFLAGS += $(EXTRA_CFLAGS)
@@ -133,10 +152,16 @@ CFLAGS += -DHAVE_LIBBFD_SUPPORT
SRCS += $(BFD_SRCS)
endif

HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
	$(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS)))

BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool

BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)

OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
$(OBJS): $(LIBBPF) $(LIBBPF_INTERNAL_HDRS)

VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
	$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
@@ -159,13 +184,13 @@ else
	$(Q)cp "$(VMLINUX_H)" $@
endif

$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF)
$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
	$(QUIET_CLANG)$(CLANG) \
		-I$(if $(OUTPUT),$(OUTPUT),.) \
		-I$(srctree)/tools/include/uapi/ \
		-I$(LIBBPF_PATH) \
		-I$(srctree)/tools/lib \
		-g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@
		-I$(LIBBPF_BOOTSTRAP_INCLUDE) \
		-g -O2 -Wall -target bpf -c $< -o $@
	$(Q)$(LLVM_STRIP) -g $@

$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
	$(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@
@@ -180,25 +205,27 @@ endif
CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)

$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
	$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
	$(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@

$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@

$(OUTPUT)feature.o: | zdep
$(OUTPUT)feature.o:
ifneq ($(feature-zlib), 1)
	$(error "No zlib found")
endif

$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
	$(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \
		$(LIBS_BOOTSTRAP)
	$(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@

$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@

$(BOOTSTRAP_OUTPUT)%.o: %.c | $(BOOTSTRAP_OUTPUT)
	$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
	$(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@

$(OUTPUT)%.o: %.c
	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@

feature-detect-clean:
	$(call QUIET_CLEAN, feature-detect)
@@ -213,10 +240,12 @@ clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean
	$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
	$(Q)$(RM) -r -- $(OUTPUT)feature/

install: $(OUTPUT)bpftool
install-bin: $(OUTPUT)bpftool
	$(call QUIET_INSTALL, bpftool)
	$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
	$(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool

install: install-bin
	$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
	$(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)

@@ -239,10 +268,7 @@ doc-uninstall:

FORCE:

zdep:
	@if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi

.SECONDARY:
.PHONY: all FORCE clean install uninstall zdep
.PHONY: all FORCE bootstrap clean install-bin install uninstall
.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all

@@ -261,7 +261,7 @@ _bpftool()
    # Deal with options
    if [[ ${words[cword]} == -* ]]; then
        local c='--version --json --pretty --bpffs --mapcompat --debug \
            --use-loader --base-btf'
            --use-loader --base-btf --legacy'
        COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
        return 0
    fi
@@ -710,7 +710,8 @@ _bpftool()
                hash_of_maps devmap devmap_hash sockmap cpumap \
                xskmap sockhash cgroup_storage reuseport_sockarray \
                percpu_cgroup_storage queue stack sk_storage \
                struct_ops inode_storage task_storage ringbuf'
                struct_ops ringbuf inode_storage task_storage \
                bloom_filter'
            COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
            return 0
            ;;

@@ -8,14 +8,15 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/hashtable.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

#include "json_writer.h"
#include "main.h"

@@ -37,16 +38,13 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
};

struct btf_attach_table {
	DECLARE_HASHTABLE(table, 16);
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
};

struct btf_attach_point {
	__u32 obj_id;
	__u32 btf_id;
	struct hlist_node hash;
};

static const char *btf_int_enc_str(__u8 encoding)
@@ -145,6 +143,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_TYPE_TAG:
		if (json_output)
			jsonw_uint_field(w, "type_id", t->type);
		else
@@ -328,7 +327,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
			printf("\n\ttype_id=%u offset=%u size=%u",
			       v->type, v->offset, v->size);

			if (v->type <= btf__get_nr_types(btf)) {
			if (v->type < btf__type_cnt(btf)) {
				vt = btf__type_by_id(btf, v->type);
				printf(" (%s '%s')",
				       btf_kind_str[btf_kind_safe(btf_kind(vt))],
@@ -347,6 +346,17 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
			printf(" size=%u", t->size);
		break;
	}
	case BTF_KIND_DECL_TAG: {
		const struct btf_decl_tag *tag = (const void *)(t + 1);

		if (json_output) {
			jsonw_uint_field(w, "type_id", t->type);
			jsonw_int_field(w, "component_idx", tag->component_idx);
		} else {
			printf(" type_id=%u component_idx=%d", t->type, tag->component_idx);
		}
		break;
	}
	default:
		break;
	}
@@ -378,14 +388,14 @@ static int dump_btf_raw(const struct btf *btf,
		}
	} else {
		const struct btf *base;
		int cnt = btf__get_nr_types(btf);
		int cnt = btf__type_cnt(btf);
		int start_id = 1;

		base = btf__base_btf(btf);
		if (base)
			start_id = btf__get_nr_types(base) + 1;
			start_id = btf__type_cnt(base);

		for (i = start_id; i <= cnt; i++) {
		for (i = start_id; i < cnt; i++) {
			t = btf__type_by_id(btf, i);
			dump_btf_type(btf, i, t);
		}
@@ -410,9 +420,10 @@ static int dump_btf_c(const struct btf *btf,
	struct btf_dump *d;
	int err = 0, i;

	d = btf_dump__new(btf, NULL, NULL, btf_dump_printf);
	if (IS_ERR(d))
		return PTR_ERR(d);
	d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	printf("#ifndef __VMLINUX_H__\n");
	printf("#define __VMLINUX_H__\n");
@@ -428,9 +439,9 @@ static int dump_btf_c(const struct btf *btf,
				goto done;
		}
	} else {
		int cnt = btf__get_nr_types(btf);
		int cnt = btf__type_cnt(btf);

		for (i = 1; i <= cnt; i++) {
		for (i = 1; i < cnt; i++) {
			err = btf_dump__dump_type(d, i);
			if (err)
				goto done;
@@ -539,8 +550,8 @@ static int do_dump(int argc, char **argv)
	}

	btf = btf__parse_split(*argv, base ?: base_btf);
	if (IS_ERR(btf)) {
		err = -PTR_ERR(btf);
	err = libbpf_get_error(btf);
	if (err) {
		btf = NULL;
		p_err("failed to load BTF from %s: %s",
		      *argv, strerror(err));
@@ -633,21 +644,8 @@ static int btf_parse_fd(int *argc, char ***argv)
	return fd;
}

static void delete_btf_table(struct btf_attach_table *tab)
{
	struct btf_attach_point *obj;
	struct hlist_node *tmp;

	unsigned int bkt;

	hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
		hash_del(&obj->hash);
		free(obj);
	}
}

static int
build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
		     void *info, __u32 *len)
{
	static const char * const names[] = {
@@ -655,7 +653,6 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
		[BPF_OBJ_PROG]		= "prog",
		[BPF_OBJ_MAP]		= "map",
	};
	struct btf_attach_point *obj_node;
	__u32 btf_id, id = 0;
	int err;
	int fd;
@@ -729,28 +726,25 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
		if (!btf_id)
			continue;

		obj_node = calloc(1, sizeof(*obj_node));
		if (!obj_node) {
			p_err("failed to allocate memory: %s", strerror(errno));
			err = -ENOMEM;
		err = hashmap__append(tab, u32_as_hash_field(btf_id),
				      u32_as_hash_field(id));
		if (err) {
			p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
			      btf_id, id, strerror(errno));
			goto err_free;
		}

		obj_node->obj_id = id;
		obj_node->btf_id = btf_id;
		hash_add(tab->table, &obj_node->hash, obj_node->btf_id);
	}

	return 0;

err_free:
	delete_btf_table(tab);
	hashmap__free(tab);
	return err;
}

static int
build_btf_tables(struct btf_attach_table *btf_prog_table,
		 struct btf_attach_table *btf_map_table)
build_btf_tables(struct hashmap *btf_prog_table,
		 struct hashmap *btf_map_table)
{
	struct bpf_prog_info prog_info;
	__u32 prog_len = sizeof(prog_info);
@@ -766,7 +760,7 @@ build_btf_tables(struct btf_attach_table *btf_prog_table,
	err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info,
				   &map_len);
	if (err) {
		delete_btf_table(btf_prog_table);
		hashmap__free(btf_prog_table);
		return err;
	}

@@ -775,10 +769,10 @@

static void
show_btf_plain(struct bpf_btf_info *info, int fd,
	       struct btf_attach_table *btf_prog_table,
	       struct btf_attach_table *btf_map_table)
	       struct hashmap *btf_prog_table,
	       struct hashmap *btf_map_table)
{
	struct btf_attach_point *obj;
	struct hashmap_entry *entry;
	const char *name = u64_to_ptr(info->name);
	int n;

@@ -792,29 +786,30 @@ show_btf_plain(struct bpf_btf_info *info, int fd,
	printf("size %uB", info->btf_size);

	n = 0;
	hash_for_each_possible(btf_prog_table->table, obj, hash, info->id) {
		if (obj->btf_id == info->id)
			printf("%s%u", n++ == 0 ? "  prog_ids " : ",",
			       obj->obj_id);
	hashmap__for_each_key_entry(btf_prog_table, entry,
				    u32_as_hash_field(info->id)) {
		printf("%s%u", n++ == 0 ? "  prog_ids " : ",",
		       hash_field_as_u32(entry->value));
	}

	n = 0;
	hash_for_each_possible(btf_map_table->table, obj, hash, info->id) {
		if (obj->btf_id == info->id)
			printf("%s%u", n++ == 0 ? "  map_ids " : ",",
			       obj->obj_id);
	hashmap__for_each_key_entry(btf_map_table, entry,
				    u32_as_hash_field(info->id)) {
		printf("%s%u", n++ == 0 ? "  map_ids " : ",",
		       hash_field_as_u32(entry->value));
	}
	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");

	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");
}

static void
show_btf_json(struct bpf_btf_info *info, int fd,
	      struct btf_attach_table *btf_prog_table,
	      struct btf_attach_table *btf_map_table)
	      struct hashmap *btf_prog_table,
	      struct hashmap *btf_map_table)
{
	struct btf_attach_point *obj;
	struct hashmap_entry *entry;
	const char *name = u64_to_ptr(info->name);

	jsonw_start_object(json_wtr);	/* btf object */
@@ -823,23 +818,21 @@ show_btf_json(struct bpf_btf_info *info, int fd,

	jsonw_name(json_wtr, "prog_ids");
	jsonw_start_array(json_wtr);	/* prog_ids */
	hash_for_each_possible(btf_prog_table->table, obj, hash,
			       info->id) {
		if (obj->btf_id == info->id)
			jsonw_uint(json_wtr, obj->obj_id);
	hashmap__for_each_key_entry(btf_prog_table, entry,
				    u32_as_hash_field(info->id)) {
		jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
	}
	jsonw_end_array(json_wtr);	/* prog_ids */

	jsonw_name(json_wtr, "map_ids");
	jsonw_start_array(json_wtr);	/* map_ids */
	hash_for_each_possible(btf_map_table->table, obj, hash,
			       info->id) {
		if (obj->btf_id == info->id)
			jsonw_uint(json_wtr, obj->obj_id);
	hashmap__for_each_key_entry(btf_map_table, entry,
				    u32_as_hash_field(info->id)) {
		jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
	}
	jsonw_end_array(json_wtr);	/* map_ids */

	emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */
	emit_obj_refs_json(refs_table, info->id, json_wtr); /* pids */

	jsonw_bool_field(json_wtr, "kernel", info->kernel_btf);

@@ -850,8 +843,8 @@ show_btf_json(struct bpf_btf_info *info, int fd,
}

static int
show_btf(int fd, struct btf_attach_table *btf_prog_table,
	 struct btf_attach_table *btf_map_table)
show_btf(int fd, struct hashmap *btf_prog_table,
	 struct hashmap *btf_map_table)
{
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
@@ -888,8 +881,8 @@ show_btf(int fd, struct btf_attach_table *btf_prog_table,

static int do_show(int argc, char **argv)
{
	struct btf_attach_table btf_prog_table;
	struct btf_attach_table btf_map_table;
	struct hashmap *btf_prog_table;
	struct hashmap *btf_map_table;
	int err, fd = -1;
	__u32 id = 0;

@@ -905,9 +898,19 @@ static int do_show(int argc, char **argv)
		return BAD_ARG();
	}

	hash_init(btf_prog_table.table);
	hash_init(btf_map_table.table);
	err = build_btf_tables(&btf_prog_table, &btf_map_table);
	btf_prog_table = hashmap__new(hash_fn_for_key_as_id,
				      equal_fn_for_key_as_id, NULL);
	btf_map_table = hashmap__new(hash_fn_for_key_as_id,
				     equal_fn_for_key_as_id, NULL);
	if (!btf_prog_table || !btf_map_table) {
		hashmap__free(btf_prog_table);
		hashmap__free(btf_map_table);
		if (fd >= 0)
			close(fd);
		p_err("failed to create hashmap for object references");
		return -1;
	}
	err = build_btf_tables(btf_prog_table, btf_map_table);
	if (err) {
		if (fd >= 0)
			close(fd);
@@ -916,7 +919,7 @@ static int do_show(int argc, char **argv)
	build_obj_refs_table(&refs_table, BPF_OBJ_BTF);

	if (fd >= 0) {
		err = show_btf(fd, &btf_prog_table, &btf_map_table);
		err = show_btf(fd, btf_prog_table, btf_map_table);
		close(fd);
		goto exit_free;
	}
@@ -948,7 +951,7 @@ static int do_show(int argc, char **argv)
			break;
		}

		err = show_btf(fd, &btf_prog_table, &btf_map_table);
		err = show_btf(fd, btf_prog_table, btf_map_table);
		close(fd);
		if (err)
			break;
@@ -958,9 +961,9 @@ static int do_show(int argc, char **argv)
		jsonw_end_array(json_wtr);	/* root array */

exit_free:
	delete_btf_table(&btf_prog_table);
	delete_btf_table(&btf_map_table);
	delete_obj_refs_table(&refs_table);
	hashmap__free(btf_prog_table);
	hashmap__free(btf_map_table);
	delete_obj_refs_table(refs_table);

	return err;
}

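Note: this rework swaps bpftool's open-coded kernel-style hashtable (struct btf_attach_table) for libbpf's struct hashmap, keyed by BTF ID with the object ID as value. The u32_as_hash_field()/hash_field_as_u32() helpers used above pack a 32-bit ID into the hashmap's pointer-sized key/value slots; a minimal sketch of how such helpers can be defined (the actual definitions belong in bpftool's main.h, outside this hunk):

	static inline void *u32_as_hash_field(__u32 x)
	{
		return (void *)(uintptr_t)x;	/* widen ID to a pointer-sized key */
	}

	static inline __u32 hash_field_as_u32(void *field)
	{
		return (__u32)(uintptr_t)field;	/* recover the ID from a key/value */
	}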
@@ -32,14 +32,16 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
				    const struct btf_type *func_proto,
				    __u32 prog_id)
{
	struct bpf_prog_info_linear *prog_info = NULL;
	const struct btf_type *func_type;
	int prog_fd = -1, func_sig_len;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	const char *prog_name = NULL;
	struct bpf_func_info *finfo;
	struct btf *prog_btf = NULL;
	struct bpf_prog_info *info;
	int prog_fd, func_sig_len;
	struct bpf_func_info finfo;
	__u32 finfo_rec_size;
	char prog_str[1024];
	int err;

	/* Get the ptr's func_proto */
	func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
@@ -52,25 +54,30 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,

	/* Get the bpf_prog's name.  Obtain from func_info. */
	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd == -1)
	if (prog_fd < 0)
		goto print;

	prog_info = bpf_program__get_prog_info_linear(prog_fd,
						      1UL << BPF_PROG_INFO_FUNC_INFO);
	close(prog_fd);
	if (IS_ERR(prog_info)) {
		prog_info = NULL;
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (err)
		goto print;
	}
	info = &prog_info->info;

	if (!info->btf_id || !info->nr_func_info)
	if (!info.btf_id || !info.nr_func_info)
		goto print;
	prog_btf = btf__load_from_kernel_by_id(info->btf_id);

	finfo_rec_size = info.func_info_rec_size;
	memset(&info, 0, sizeof(info));
	info.nr_func_info = 1;
	info.func_info_rec_size = finfo_rec_size;
	info.func_info = ptr_to_u64(&finfo);

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (err)
		goto print;

	prog_btf = btf__load_from_kernel_by_id(info.btf_id);
	if (libbpf_get_error(prog_btf))
		goto print;
	finfo = u64_to_ptr(info->func_info);
	func_type = btf__type_by_id(prog_btf, finfo->type_id);
	func_type = btf__type_by_id(prog_btf, finfo.type_id);
	if (!func_type || !btf_is_func(func_type))
		goto print;

@@ -92,7 +99,8 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
	prog_str[sizeof(prog_str) - 1] = '\0';
	jsonw_string(d->jw, prog_str);
	btf__free(prog_btf);
	free(prog_info);
	if (prog_fd >= 0)
		close(prog_fd);
	return 0;
}

@@ -22,6 +22,7 @@
#include <sys/vfs.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */

#include "main.h"
@@ -73,6 +74,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
	[BPF_XDP]			= "xdp",
	[BPF_SK_REUSEPORT_SELECT]	= "sk_skb_reuseport_select",
	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE]	= "sk_skb_reuseport_select_or_migrate",
	[BPF_PERF_EVENT]		= "perf_event",
};

void p_err(const char *fmt, ...)
@@ -393,7 +395,7 @@ void print_hex_data_json(uint8_t *data, size_t len)
}

/* extra params for nftw cb */
static struct pinned_obj_table *build_fn_table;
static struct hashmap *build_fn_table;
static enum bpf_obj_type build_fn_type;

static int do_build_table_cb(const char *fpath, const struct stat *sb,
@@ -401,9 +403,9 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
{
	struct bpf_prog_info pinned_info;
	__u32 len = sizeof(pinned_info);
	struct pinned_obj *obj_node;
	enum bpf_obj_type objtype;
	int fd, err = 0;
	char *path;

	if (typeflag != FTW_F)
		goto out_ret;
@@ -420,28 +422,26 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
	if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len))
		goto out_close;

	obj_node = calloc(1, sizeof(*obj_node));
	if (!obj_node) {
	path = strdup(fpath);
	if (!path) {
		err = -1;
		goto out_close;
	}

	obj_node->id = pinned_info.id;
	obj_node->path = strdup(fpath);
	if (!obj_node->path) {
		err = -1;
		free(obj_node);
	err = hashmap__append(build_fn_table, u32_as_hash_field(pinned_info.id), path);
	if (err) {
		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
		      pinned_info.id, path, strerror(errno));
		goto out_close;
	}

	hash_add(build_fn_table->table, &obj_node->hash, obj_node->id);
out_close:
	close(fd);
out_ret:
	return err;
}

int build_pinned_obj_table(struct pinned_obj_table *tab,
int build_pinned_obj_table(struct hashmap *tab,
			   enum bpf_obj_type type)
{
	struct mntent *mntent = NULL;
@@ -470,17 +470,18 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
	return err;
}

void delete_pinned_obj_table(struct pinned_obj_table *tab)
void delete_pinned_obj_table(struct hashmap *map)
{
	struct pinned_obj *obj;
	struct hlist_node *tmp;
	unsigned int bkt;
	struct hashmap_entry *entry;
	size_t bkt;

	hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
		hash_del(&obj->hash);
		free(obj->path);
		free(obj);
	}
	if (!map)
		return;

	hashmap__for_each_entry(map, entry, bkt)
		free(entry->value);

	hashmap__free(map);
}

unsigned int get_page_size(void)
@@ -962,3 +963,13 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)

	return fd;
}

size_t hash_fn_for_key_as_id(const void *key, void *ctx)
{
	return (size_t)key;
}

bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

@@ -467,7 +467,7 @@ static bool probe_bpf_syscall(const char *define_prefix)
{
	bool res;

	bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
	bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
	res = (errno != ENOSYS);

	print_bool_feature("have_bpf_syscall",
@@ -624,6 +624,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
		 */
		switch (id) {
		case BPF_FUNC_trace_printk:
		case BPF_FUNC_trace_vprintk:
		case BPF_FUNC_probe_write_user:
			if (!full_mode)
				continue;
@@ -642,15 +643,111 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
}

static void
probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
probe_misc_feature(struct bpf_insn *insns, size_t len,
		   const char *define_prefix, __u32 ifindex,
		   const char *feat_name, const char *plain_name,
		   const char *define_name)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.prog_ifindex = ifindex,
	);
	bool res;
	int fd;

	res = bpf_probe_large_insn_limit(ifindex);
	print_bool_feature("have_large_insn_limit",
	errno = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
			   insns, len, &opts);
	res = fd >= 0 || !errno;

	if (fd >= 0)
		close(fd);

	print_bool_feature(feat_name, plain_name, define_name, res,
			   define_prefix);
}

/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
static void probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	probe_misc_feature(insns, ARRAY_SIZE(insns),
			   define_prefix, ifindex,
			   "have_large_insn_limit",
			   "Large program size limit",
			   "LARGE_INSN_LIMIT",
			   res, define_prefix);
			   "LARGE_INSN_LIMIT");
}

/*
 * Probe for bounded loop support introduced in commit 2589726d12a1
 * ("bpf: introduce bounded loops").
 */
static void
probe_bounded_loops(const char *define_prefix, __u32 ifindex)
{
	struct bpf_insn insns[4] = {
		BPF_MOV64_IMM(BPF_REG_0, 10),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, -2),
		BPF_EXIT_INSN()
	};

	probe_misc_feature(insns, ARRAY_SIZE(insns),
			   define_prefix, ifindex,
			   "have_bounded_loops",
			   "Bounded loop support",
			   "BOUNDED_LOOPS");
}

/*
 * Probe for the v2 instruction set extension introduced in commit 92b31a9af73b
 * ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions").
 */
static void
probe_v2_isa_extension(const char *define_prefix, __u32 ifindex)
{
	struct bpf_insn insns[4] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN()
	};

	probe_misc_feature(insns, ARRAY_SIZE(insns),
			   define_prefix, ifindex,
			   "have_v2_isa_extension",
			   "ISA extension v2",
			   "V2_ISA_EXTENSION");
}

/*
 * Probe for the v3 instruction set extension introduced in commit 092ed0968bb6
 * ("bpf: verifier support JMP32").
 */
static void
probe_v3_isa_extension(const char *define_prefix, __u32 ifindex)
{
	struct bpf_insn insns[4] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN()
	};

	probe_misc_feature(insns, ARRAY_SIZE(insns),
			   define_prefix, ifindex,
			   "have_v3_isa_extension",
			   "ISA extension v3",
			   "V3_ISA_EXTENSION");
}

static void
@@ -767,6 +864,9 @@ static void section_misc(const char *define_prefix, __u32 ifindex)
			    "/*** eBPF misc features ***/",
			    define_prefix);
	probe_large_insn_limit(define_prefix, ifindex);
	probe_bounded_loops(define_prefix, ifindex);
	probe_v2_isa_extension(define_prefix, ifindex);
	probe_v3_isa_extension(define_prefix, ifindex);
	print_end_section();
}

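Note: each probe added here encodes the feature under test as a minimal instruction sequence and hands it to probe_misc_feature(), which counts the feature as present when bpf_prog_load() either returns a program fd or fails without setting errno (i.e. the verifier did not reject the instructions). The bounded-loop probe, for example, is the BPF equivalent of this C loop, which only a verifier with bounded-loop support will accept, since the back-edge has a provably decreasing counter:

	int r0 = 10;			/* BPF_MOV64_IMM(BPF_REG_0, 10) */
	do {
		r0 -= 1;		/* BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1) */
	} while (r0 != 0);		/* BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, -2): back-edge */
	return r0;			/* BPF_EXIT_INSN() */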
@ -18,7 +18,6 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>
#include <bpf/bpf_gen_internal.h>

#include "json_writer.h"
#include "main.h"
@ -34,6 +33,11 @@ static void sanitize_identifier(char *name)
		name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);
@ -68,23 +72,47 @@ static void get_header_guard(char *guard, const char *obj_name)
		guard[i] = toupper(guard[i]);
}

static const char *get_map_ident(const struct bpf_map *map)
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map))
		return name;
	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	if (str_has_suffix(name, ".data"))
		return "data";
	else if (str_has_suffix(name, ".rodata"))
		return "rodata";
	else if (str_has_suffix(name, ".bss"))
		return "bss";
	else if (str_has_suffix(name, ".kconfig"))
		return "kconfig";
	else
		return NULL;
	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
@ -101,24 +129,14 @@ static int codegen_datasec_def(struct bpf_object *obj,
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	const char *sec_ident;
	char var_ident[256];
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (strcmp(sec_name, ".data") == 0) {
		sec_ident = "data";
		strip_mods = true;
	} else if (strcmp(sec_name, ".bss") == 0) {
		sec_ident = "bss";
		strip_mods = true;
	} else if (strcmp(sec_name, ".rodata") == 0) {
		sec_ident = "rodata";
		strip_mods = true;
	} else if (strcmp(sec_name, ".kconfig") == 0) {
		sec_ident = "kconfig";
	} else {
	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;
	}

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
@ -193,24 +211,64 @@ static int codegen_datasec_def(struct bpf_object *obj,
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	int n = btf__get_nr_types(btf);
	int n = btf__type_cnt(btf);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char sec_ident[256], map_ident[256];
	int i, err = 0;

	d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
	if (IS_ERR(d))
		return PTR_ERR(d);
	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	for (i = 1; i <= n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);

		if (!btf_is_datasec(t))
	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
			continue;

		err = codegen_datasec_def(obj, btf, d, t, obj_name);
		if (err)
			goto out;
		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
			continue;

		sec = NULL;
		for (i = 1; i < n; i++) {
			const struct btf_type *t = btf__type_by_id(btf, i);
			const char *name;

			if (!btf_is_datasec(t))
				continue;

			name = btf__str_by_offset(btf, t->name_off);
			if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
				continue;

			if (strcmp(sec_ident, map_ident) == 0) {
				sec = t;
				break;
			}
		}

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
@ -238,8 +296,8 @@ static void codegen(const char *template, ...)
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s'",
			      src - template - 1, template);
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
@ -386,6 +444,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
@ -406,10 +465,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
	}

	bpf_object__for_each_map(map, obj) {
		const char * ident;

		ident = get_map_ident(map);
		if (!ident)
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
@ -430,21 +486,16 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	struct bpf_object_load_attr load_attr = {};
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	load_attr.obj = obj;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		load_attr.log_level = 1 + 2 + 4;

	err = bpf_object__load_xattr(&load_attr);
	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
@ -478,12 +529,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const char *ident;
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		ident = get_map_ident(map);
		if (!ident)
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
@ -545,15 +594,15 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
			return err; \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *ident, *mmap_flags;
		const char *mmap_flags;

		ident = get_map_ident(map);
		if (!ident)
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
			continue;

		if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
@ -603,7 +652,8 @@ static int do_skeleton(int argc, char **argv)
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *ident;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
@ -663,19 +713,22 @@ static int do_skeleton(int argc, char **argv)
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (IS_ERR(obj)) {
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf));
		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		ident = get_map_ident(map);
		if (!ident) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
@ -728,8 +781,7 @@ static int do_skeleton(int argc, char **argv)
	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			ident = get_map_ident(map);
			if (!ident)
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
@ -803,7 +855,10 @@ static int do_skeleton(int argc, char **argv)
		} \n\
		\n\
		err = %1$s__create_skeleton(obj); \n\
		err = err ?: bpf_object__open_skeleton(obj->skeleton, opts);\n\
		if (err) \n\
			goto err_out; \n\
		\n\
		err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
		if (err) \n\
			goto err_out; \n\
		\n\
@ -862,6 +917,8 @@ static int do_skeleton(int argc, char **argv)
	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz); \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj) \n\
		{ \n\
@ -893,9 +950,7 @@ static int do_skeleton(int argc, char **argv)
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		ident = get_map_ident(map);

		if (!ident)
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
@ -943,10 +998,20 @@ static int do_skeleton(int argc, char **argv)
	codegen("\
		\n\
		\n\
		s->data_sz = %d; \n\
		s->data = (void *)\"\\ \n\
		",
		file_sz);
		s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
		\n\
		return 0; \n\
	err: \n\
		bpf_object__destroy_skeleton(s); \n\
		return -ENOMEM; \n\
	} \n\
	\n\
	static inline const void *%2$s__elf_bytes(size_t *sz) \n\
	{ \n\
		*sz = %1$d; \n\
		return (const void *)\"\\ \n\
	"
	, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);
@ -954,11 +1019,6 @@ static int do_skeleton(int argc, char **argv)
	codegen("\
		\n\
		\"; \n\
		\n\
		return 0; \n\
	err: \n\
		bpf_object__destroy_skeleton(s); \n\
		return -ENOMEM; \n\
	} \n\
	\n\
	#endif /* %s */ \n\
@ -46,7 +46,8 @@ static int do_pin(int argc, char **argv)
	}

	obj = bpf_object__open(objfile);
	if (IS_ERR(obj)) {
	err = libbpf_get_error(obj);
	if (err) {
		p_err("can't open objfile %s", objfile);
		goto close_map_fd;
	}
@ -57,15 +58,15 @@ static int do_pin(int argc, char **argv)
		goto close_obj;
	}

	prog = bpf_program__next(NULL, obj);
	prog = bpf_object__next_program(obj, NULL);
	if (!prog) {
		p_err("can't find bpf program in objfile %s", objfile);
		goto close_obj;
	}

	link = bpf_program__attach_iter(prog, &iter_opts);
	if (IS_ERR(link)) {
		err = PTR_ERR(link);
	err = libbpf_get_error(link);
	if (err) {
		p_err("attach_iter failed for program %s",
		      bpf_program__name(prog));
		goto close_obj;
@ -7,6 +7,7 @@
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
@ -20,6 +21,8 @@ static const char * const link_type_name[] = {
	[BPF_LINK_TYPE_NETNS] = "netns",
};

static struct hashmap *link_table;

static int link_parse_fd(int *argc, char ***argv)
{
	int fd;
@ -156,19 +159,18 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
		break;
	}

	if (!hash_empty(link_table.table)) {
		struct pinned_obj *obj;
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hash_for_each_possible(link_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				jsonw_string(json_wtr, obj->path);
		}
		hashmap__for_each_key_entry(link_table, entry,
					    u32_as_hash_field(info->id))
			jsonw_string(json_wtr, entry->value);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(&refs_table, info->id, json_wtr);
	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);

@ -244,15 +246,14 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
		break;
	}

	if (!hash_empty(link_table.table)) {
		struct pinned_obj *obj;
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hash_for_each_possible(link_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				printf("\n\tpinned %s", obj->path);
		}
		hashmap__for_each_key_entry(link_table, entry,
					    u32_as_hash_field(info->id))
			printf("\n\tpinned %s", (char *)entry->value);
	}
	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");

@ -302,8 +303,15 @@ static int do_show(int argc, char **argv)
	__u32 id = 0;
	int err, fd;

	if (show_pinned)
		build_pinned_obj_table(&link_table, BPF_OBJ_LINK);
	if (show_pinned) {
		link_table = hashmap__new(hash_fn_for_key_as_id,
					  equal_fn_for_key_as_id, NULL);
		if (!link_table) {
			p_err("failed to create hashmap for pinned paths");
			return -1;
		}
		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
	}
	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);

	if (argc == 2) {
@ -344,7 +352,10 @@ static int do_show(int argc, char **argv)
	if (json_output)
		jsonw_end_array(json_wtr);

	delete_obj_refs_table(&refs_table);
	delete_obj_refs_table(refs_table);

	if (show_pinned)
		delete_pinned_obj_table(link_table);

	return errno == ENOENT ? 0 : -1;
}
@ -10,8 +10,9 @@
#include <string.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

#include "main.h"

@ -30,11 +31,9 @@ bool block_mount;
bool verifier_logs;
bool relaxed_maps;
bool use_loader;
bool legacy_libbpf;
struct btf *base_btf;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
struct pinned_obj_table link_table;
struct obj_refs_table refs_table;
struct hashmap *refs_table;

static void __noreturn clean_and_exit(int i)
{
@ -94,6 +93,7 @@ static int do_version(int argc, char **argv)
		jsonw_name(json_wtr, "features");
		jsonw_start_object(json_wtr);	/* features */
		jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
		jsonw_bool_field(json_wtr, "libbpf_strict", !legacy_libbpf);
		jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
		jsonw_end_object(json_wtr);	/* features */

@ -107,6 +107,10 @@ static int do_version(int argc, char **argv)
			printf(" libbfd");
			nb_features++;
		}
		if (!legacy_libbpf) {
			printf("%s libbpf_strict", nb_features++ ? "," : "");
			nb_features++;
		}
		if (has_skeletons)
			printf("%s skeletons", nb_features++ ? "," : "");
		printf("\n");
@ -398,8 +402,10 @@ int main(int argc, char **argv)
		{ "debug",	no_argument,	NULL,	'd' },
		{ "use-loader",	no_argument,	NULL,	'L' },
		{ "base-btf",	required_argument, NULL, 'B' },
		{ "legacy",	no_argument,	NULL,	'l' },
		{ 0 }
	};
	bool version_requested = false;
	int opt, ret;

	setlinebuf(stdout);
@ -411,16 +417,13 @@ int main(int argc, char **argv)
	block_mount = false;
	bin_name = argv[0];

	hash_init(prog_table.table);
	hash_init(map_table.table);
	hash_init(link_table.table);

	opterr = 0;
	while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
	while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
				  options, NULL)) >= 0) {
		switch (opt) {
		case 'V':
			return do_version(argc, argv);
			version_requested = true;
			break;
		case 'h':
			return do_help(argc, argv);
		case 'p':
@ -462,6 +465,9 @@ int main(int argc, char **argv)
		case 'L':
			use_loader = true;
			break;
		case 'l':
			legacy_libbpf = true;
			break;
		default:
			p_err("unrecognized option '%s'", argv[optind - 1]);
			if (json_output)
@ -471,21 +477,25 @@ int main(int argc, char **argv)
		}
	}

	if (!legacy_libbpf) {
		ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
		if (ret)
			p_err("failed to enable libbpf strict mode: %d", ret);
	}

	argc -= optind;
	argv += optind;
	if (argc < 0)
		usage();

	if (version_requested)
		return do_version(argc, argv);

	ret = cmd_select(cmds, argc, argv, do_help);

	if (json_output)
		jsonw_destroy(&json_wtr);

	if (show_pinned) {
		delete_pinned_obj_table(&prog_table);
		delete_pinned_obj_table(&map_table);
		delete_pinned_obj_table(&link_table);
	}
	btf__free(base_btf);

	return ret;
@ -11,9 +11,9 @@
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <tools/libc_compat.h>

#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

#include "json_writer.h"
@ -57,7 +57,7 @@ static inline void *u64_to_ptr(__u64 ptr)
#define HELP_SPEC_PROGRAM \
	"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
#define HELP_SPEC_OPTIONS \
	"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
	"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}"
#define HELP_SPEC_MAP \
	"MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
#define HELP_SPEC_LINK \
@ -90,11 +90,9 @@ extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern bool use_loader;
extern bool legacy_libbpf;
extern struct btf *base_btf;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
extern struct pinned_obj_table link_table;
extern struct obj_refs_table refs_table;
extern struct hashmap *refs_table;

void __printf(1, 2) p_err(const char *fmt, ...);
void __printf(1, 2) p_info(const char *fmt, ...);
@ -108,28 +106,12 @@ void set_max_rlimit(void);

int mount_tracefs(const char *target);

struct pinned_obj_table {
	DECLARE_HASHTABLE(table, 16);
};

struct pinned_obj {
	__u32 id;
	char *path;
	struct hlist_node hash;
};

struct obj_refs_table {
	DECLARE_HASHTABLE(table, 16);
};

struct obj_ref {
	int pid;
	char comm[16];
};

struct obj_refs {
	struct hlist_node node;
	__u32 id;
	int ref_cnt;
	struct obj_ref *refs;
};
@ -137,15 +119,15 @@ struct obj_refs {
struct btf;
struct bpf_line_info;

int build_pinned_obj_table(struct pinned_obj_table *table,
int build_pinned_obj_table(struct hashmap *table,
			   enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
__weak int build_obj_refs_table(struct obj_refs_table *table,
void delete_pinned_obj_table(struct hashmap *table);
__weak int build_obj_refs_table(struct hashmap **table,
				enum bpf_obj_type type);
__weak void delete_obj_refs_table(struct obj_refs_table *table);
__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
__weak void delete_obj_refs_table(struct hashmap *table);
__weak void emit_obj_refs_json(struct hashmap *table, __u32 id,
			       json_writer_t *json_wtr);
__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id,
__weak void emit_obj_refs_plain(struct hashmap *table, __u32 id,
				const char *prefix);
void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
@ -259,4 +241,23 @@ int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,

int print_all_levels(__maybe_unused enum libbpf_print_level level,
		     const char *format, va_list args);

size_t hash_fn_for_key_as_id(const void *key, void *ctx);
bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);

static inline void *u32_as_hash_field(__u32 x)
{
	return (void *)(uintptr_t)x;
}

static inline __u32 hash_field_as_u32(const void *x)
{
	return (__u32)(uintptr_t)x;
}

static inline bool hashmap__empty(struct hashmap *map)
{
	return map ? hashmap__size(map) == 0 : true;
}

#endif
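
Editor's note: the new u32_as_hash_field()/hash_field_as_u32() helpers exist because libbpf's hashmap keys are plain `void *`; bpftool smuggles 32-bit object IDs into the pointer slot instead of allocating key storage. A minimal round-trip sketch of that idea, assuming libbpf's internal tools/lib/bpf/hashmap.h and callbacks equivalent to bpftool's hash_fn_for_key_as_id()/equal_fn_for_key_as_id(); the pinned path string is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "hashmap.h" /* assumption: built inside the libbpf source tree */

static size_t hash_id(const void *key, void *ctx)
{
	return (size_t)(uintptr_t)key; /* the pointer *is* the u32 id */
}

static bool equal_id(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

int main(void)
{
	struct hashmap *map = hashmap__new(hash_id, equal_id, NULL);
	struct hashmap_entry *entry;
	uint32_t id = 42;

	/* stash a value under the id, then walk all entries for that key */
	hashmap__append(map, (void *)(uintptr_t)id, "/sys/fs/bpf/example");
	hashmap__for_each_key_entry(map, entry, (void *)(uintptr_t)id)
		printf("id %u -> %s\n", id, (const char *)entry->value);
	hashmap__free(map);
	return 0;
}
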
@ -17,6 +17,7 @@

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
@ -52,10 +53,13 @@ const char * const map_type_name[] = {
	[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
	[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
	[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
	[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
};

const size_t map_type_name_size = ARRAY_SIZE(map_type_name);

static struct hashmap *map_table;

static bool map_is_per_cpu(__u32 type)
{
	return type == BPF_MAP_TYPE_PERCPU_HASH ||
@ -535,19 +539,18 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
	if (info->btf_id)
		jsonw_int_field(json_wtr, "btf_id", info->btf_id);

	if (!hash_empty(map_table.table)) {
		struct pinned_obj *obj;
	if (!hashmap__empty(map_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hash_for_each_possible(map_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				jsonw_string(json_wtr, obj->path);
		}
		hashmap__for_each_key_entry(map_table, entry,
					    u32_as_hash_field(info->id))
			jsonw_string(json_wtr, entry->value);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(&refs_table, info->id, json_wtr);
	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);

@ -610,13 +613,12 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
	}
	close(fd);

	if (!hash_empty(map_table.table)) {
		struct pinned_obj *obj;
	if (!hashmap__empty(map_table)) {
		struct hashmap_entry *entry;

		hash_for_each_possible(map_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				printf("\n\tpinned %s", obj->path);
		}
		hashmap__for_each_key_entry(map_table, entry,
					    u32_as_hash_field(info->id))
			printf("\n\tpinned %s", (char *)entry->value);
	}
	printf("\n");

@ -636,7 +638,7 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
	if (frozen)
		printf("%sfrozen", info->btf_id ? " " : "");

	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");
	return 0;
@ -694,8 +696,15 @@ static int do_show(int argc, char **argv)
	int err;
	int fd;

	if (show_pinned)
		build_pinned_obj_table(&map_table, BPF_OBJ_MAP);
	if (show_pinned) {
		map_table = hashmap__new(hash_fn_for_key_as_id,
					 equal_fn_for_key_as_id, NULL);
		if (!map_table) {
			p_err("failed to create hashmap for pinned paths");
			return -1;
		}
		build_pinned_obj_table(map_table, BPF_OBJ_MAP);
	}
	build_obj_refs_table(&refs_table, BPF_OBJ_MAP);

	if (argc == 2)
@ -740,7 +749,10 @@ static int do_show(int argc, char **argv)
	if (json_output)
		jsonw_end_array(json_wtr);

	delete_obj_refs_table(&refs_table);
	delete_obj_refs_table(refs_table);

	if (show_pinned)
		delete_pinned_obj_table(map_table);

	return errno == ENOENT ? 0 : -1;
}
@ -800,7 +812,7 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
	if (info->btf_vmlinux_value_type_id) {
		if (!btf_vmlinux) {
			btf_vmlinux = libbpf_find_kernel_btf();
			if (IS_ERR(btf_vmlinux))
			if (libbpf_get_error(btf_vmlinux))
				p_err("failed to get kernel btf");
		}
		return btf_vmlinux;
@ -820,13 +832,13 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info)

static void free_map_kv_btf(struct btf *btf)
{
	if (!IS_ERR(btf) && btf != btf_vmlinux)
	if (!libbpf_get_error(btf) && btf != btf_vmlinux)
		btf__free(btf);
}

static void free_btf_vmlinux(void)
{
	if (!IS_ERR(btf_vmlinux))
	if (!libbpf_get_error(btf_vmlinux))
		btf__free(btf_vmlinux);
}

@ -851,8 +863,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,

	if (wtr) {
		btf = get_map_kv_btf(info);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
		err = libbpf_get_error(btf);
		if (err) {
			goto exit_free;
		}

@ -1249,7 +1261,10 @@ static int do_pin(int argc, char **argv)

static int do_create(int argc, char **argv)
{
	struct bpf_create_map_attr attr = { NULL, };
	LIBBPF_OPTS(bpf_map_create_opts, attr);
	enum bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC;
	__u32 key_size = 0, value_size = 0, max_entries = 0;
	const char *map_name = NULL;
	const char *pinfile;
	int err = -1, fd;

@ -1264,30 +1279,30 @@ static int do_create(int argc, char **argv)
		if (is_prefix(*argv, "type")) {
			NEXT_ARG();

			if (attr.map_type) {
			if (map_type) {
				p_err("map type already specified");
				goto exit;
			}

			attr.map_type = map_type_from_str(*argv);
			if ((int)attr.map_type < 0) {
			map_type = map_type_from_str(*argv);
			if ((int)map_type < 0) {
				p_err("unrecognized map type: %s", *argv);
				goto exit;
			}
			NEXT_ARG();
		} else if (is_prefix(*argv, "name")) {
			NEXT_ARG();
			attr.name = GET_ARG();
			map_name = GET_ARG();
		} else if (is_prefix(*argv, "key")) {
			if (parse_u32_arg(&argc, &argv, &attr.key_size,
			if (parse_u32_arg(&argc, &argv, &key_size,
					  "key size"))
				goto exit;
		} else if (is_prefix(*argv, "value")) {
			if (parse_u32_arg(&argc, &argv, &attr.value_size,
			if (parse_u32_arg(&argc, &argv, &value_size,
					  "value size"))
				goto exit;
		} else if (is_prefix(*argv, "entries")) {
			if (parse_u32_arg(&argc, &argv, &attr.max_entries,
			if (parse_u32_arg(&argc, &argv, &max_entries,
					  "max entries"))
				goto exit;
		} else if (is_prefix(*argv, "flags")) {
@ -1328,14 +1343,14 @@ static int do_create(int argc, char **argv)
		}
	}

	if (!attr.name) {
	if (!map_name) {
		p_err("map name not specified");
		goto exit;
	}

	set_max_rlimit();

	fd = bpf_create_map_xattr(&attr);
	fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
	if (fd < 0) {
		p_err("map create failed: %s", strerror(errno));
		goto exit;
@ -1466,7 +1481,7 @@ static int do_help(int argc, char **argv)
		"          devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
		"          cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
		"          queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
		"          task_storage }\n"
		"          task_storage | bloom_filter }\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"       {-f|--bpffs} | {-n|--nomount} }\n"
		"",
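
Editor's note: do_create() above is the template for the new-style map creation API, where the scalar attributes become explicit parameters and everything else rides in an opts struct. A compilable sketch of the same call shape, assuming libbpf >= 0.7 for bpf_map_create() and LIBBPF_OPTS(); the map name, sizes and flag are illustrative only.

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>

static int create_example_hash_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);

	/* type, name, key size, value size, max entries, extra opts */
	return bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
			      sizeof(__u32), sizeof(__u64), 1024, &opts);
}
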
@ -22,7 +22,6 @@
#include <sys/syscall.h>

#include <bpf/bpf.h>
#include <perf-sys.h>

#include "main.h"

@ -125,7 +124,7 @@ int do_event_pipe(int argc, char **argv)
		.wakeup_events = 1,
	};
	struct bpf_map_info map_info = {};
	struct perf_buffer_raw_opts opts = {};
	LIBBPF_OPTS(perf_buffer_raw_opts, opts);
	struct event_pipe_ctx ctx = {
		.all_cpus = true,
		.cpu = -1,
@ -191,14 +190,11 @@ int do_event_pipe(int argc, char **argv)
		ctx.idx = 0;
	}

	opts.attr = &perf_attr;
	opts.event_cb = print_bpf_output;
	opts.ctx = &ctx;
	opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
	opts.cpus = &ctx.cpu;
	opts.map_keys = &ctx.idx;

	pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts);
	pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
				  print_bpf_output, &ctx, &opts);
	err = libbpf_get_error(pb);
	if (err) {
		p_err("failed to create perf buffer: %s (%d)",
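
Editor's note: the hunk above tracks libbpf's reworked raw perf buffer constructor, which moved the perf_event_attr, callback and context out of the opts struct into explicit parameters. A sketch of the new call shape, assuming libbpf >= 0.7; the callback body and page count are placeholders.

#include <bpf/libbpf.h>
#include <linux/perf_event.h>

static enum bpf_perf_event_ret
on_event(void *ctx, int cpu, struct perf_event_header *event)
{
	return LIBBPF_PERF_EVENT_CONT; /* keep consuming records */
}

static struct perf_buffer *open_raw_buffer(int map_fd)
{
	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.wakeup_events = 1,
	};
	LIBBPF_OPTS(perf_buffer_raw_opts, opts);

	/* attr, callback and ctx are now explicit parameters */
	return perf_buffer__new_raw(map_fd, 8 /* pages, power of 2 */, &attr,
				    on_event, NULL, &opts);
}
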
@ -6,35 +6,37 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "main.h"
#include "skeleton/pid_iter.h"

#ifdef BPFTOOL_WITHOUT_SKELETONS

int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	return -ENOTSUP;
}
void delete_obj_refs_table(struct obj_refs_table *table) {}
void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {}
void delete_obj_refs_table(struct hashmap *map) {}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}

#else /* BPFTOOL_WITHOUT_SKELETONS */

#include "pid_iter.skel.h"

static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
{
	struct hashmap_entry *entry;
	struct obj_refs *refs;
	struct obj_ref *ref;
	int err, i;
	void *tmp;
	int i;

	hash_for_each_possible(table->table, refs, node, e->id) {
		if (refs->id != e->id)
			continue;
	hashmap__for_each_key_entry(map, entry, u32_as_hash_field(e->id)) {
		refs = entry->value;

		for (i = 0; i < refs->ref_cnt; i++) {
			if (refs->refs[i].pid == e->pid)
@ -64,7 +66,6 @@ static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
		return;
	}

	refs->id = e->id;
	refs->refs = malloc(sizeof(*refs->refs));
	if (!refs->refs) {
		free(refs);
@ -76,7 +77,11 @@ static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
	ref->pid = e->pid;
	memcpy(ref->comm, e->comm, sizeof(ref->comm));
	refs->ref_cnt = 1;
	hash_add(table->table, &refs->node, e->id);

	err = hashmap__append(map, u32_as_hash_field(e->id), refs);
	if (err)
		p_err("failed to append entry to hashmap for ID %u: %s",
		      e->id, strerror(errno));
}

static int __printf(2, 0)
@ -87,7 +92,7 @@ libbpf_print_none(__maybe_unused enum libbpf_print_level level,
	return 0;
}

int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	struct pid_iter_entry *e;
	char buf[4096 / sizeof(*e) * sizeof(*e)];
@ -95,7 +100,11 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
	int err, ret, fd = -1, i;
	libbpf_print_fn_t default_print;

	hash_init(table->table);
	*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
	if (!*map) {
		p_err("failed to create hashmap for PID references");
		return -1;
	}
	set_max_rlimit();

	skel = pid_iter_bpf__open();
@ -151,7 +160,7 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)

		e = (void *)buf;
		for (i = 0; i < ret; i++, e++) {
			add_ref(table, e);
			add_ref(*map, e);
		}
	}
	err = 0;
@ -162,39 +171,44 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
	return err;
}

void delete_obj_refs_table(struct obj_refs_table *table)
void delete_obj_refs_table(struct hashmap *map)
{
	struct obj_refs *refs;
	struct hlist_node *tmp;
	unsigned int bkt;
	struct hashmap_entry *entry;
	size_t bkt;

	if (!map)
		return;

	hashmap__for_each_entry(map, entry, bkt) {
		struct obj_refs *refs = entry->value;

	hash_for_each_safe(table->table, bkt, tmp, refs, node) {
		hash_del(&refs->node);
		free(refs->refs);
		free(refs);
	}

	hashmap__free(map);
}

void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
void emit_obj_refs_json(struct hashmap *map, __u32 id,
			json_writer_t *json_writer)
{
	struct obj_refs *refs;
	struct obj_ref *ref;
	int i;
	struct hashmap_entry *entry;

	if (hash_empty(table->table))
	if (hashmap__empty(map))
		return;

	hash_for_each_possible(table->table, refs, node, id) {
		if (refs->id != id)
			continue;
	hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
		struct obj_refs *refs = entry->value;
		int i;

		if (refs->ref_cnt == 0)
			break;

		jsonw_name(json_writer, "pids");
		jsonw_start_array(json_writer);
		for (i = 0; i < refs->ref_cnt; i++) {
			ref = &refs->refs[i];
			struct obj_ref *ref = &refs->refs[i];

			jsonw_start_object(json_writer);
			jsonw_int_field(json_writer, "pid", ref->pid);
			jsonw_string_field(json_writer, "comm", ref->comm);
@ -205,24 +219,24 @@ void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
	}
}

void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix)
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
{
	struct obj_refs *refs;
	struct obj_ref *ref;
	int i;
	struct hashmap_entry *entry;

	if (hash_empty(table->table))
	if (hashmap__empty(map))
		return;

	hash_for_each_possible(table->table, refs, node, id) {
		if (refs->id != id)
			continue;
	hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
		struct obj_refs *refs = entry->value;
		int i;

		if (refs->ref_cnt == 0)
			break;

		printf("%s", prefix);
		for (i = 0; i < refs->ref_cnt; i++) {
			ref = &refs->refs[i];
			struct obj_ref *ref = &refs->refs[i];

			printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
		}
		break;
@ -24,8 +24,8 @@

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include <bpf/bpf_gen_internal.h>
#include <bpf/skel_internal.h>

#include "cfg.h"
@ -85,6 +85,8 @@ static const char * const attach_type_strings[] = {
	[__MAX_BPF_ATTACH_TYPE] = NULL,
};

static struct hashmap *prog_table;

static enum bpf_attach_type parse_attach_type(const char *str)
{
	enum bpf_attach_type type;
@ -98,6 +100,76 @@ static enum bpf_attach_type parse_attach_type(const char *str)
	return __MAX_BPF_ATTACH_TYPE;
}

static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
			  void **info_data, size_t *const info_data_sz)
{
	struct bpf_prog_info holder = {};
	size_t needed = 0;
	void *ptr;

	if (mode == DUMP_JITED) {
		holder.jited_prog_len = info->jited_prog_len;
		needed += info->jited_prog_len;
	} else {
		holder.xlated_prog_len = info->xlated_prog_len;
		needed += info->xlated_prog_len;
	}

	holder.nr_jited_ksyms = info->nr_jited_ksyms;
	needed += info->nr_jited_ksyms * sizeof(__u64);

	holder.nr_jited_func_lens = info->nr_jited_func_lens;
	needed += info->nr_jited_func_lens * sizeof(__u32);

	holder.nr_func_info = info->nr_func_info;
	holder.func_info_rec_size = info->func_info_rec_size;
	needed += info->nr_func_info * info->func_info_rec_size;

	holder.nr_line_info = info->nr_line_info;
	holder.line_info_rec_size = info->line_info_rec_size;
	needed += info->nr_line_info * info->line_info_rec_size;

	holder.nr_jited_line_info = info->nr_jited_line_info;
	holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
	needed += info->nr_jited_line_info * info->jited_line_info_rec_size;

	if (needed > *info_data_sz) {
		ptr = realloc(*info_data, needed);
		if (!ptr)
			return -1;

		*info_data = ptr;
		*info_data_sz = needed;
	}
	ptr = *info_data;

	if (mode == DUMP_JITED) {
		holder.jited_prog_insns = ptr_to_u64(ptr);
		ptr += holder.jited_prog_len;
	} else {
		holder.xlated_prog_insns = ptr_to_u64(ptr);
		ptr += holder.xlated_prog_len;
	}

	holder.jited_ksyms = ptr_to_u64(ptr);
	ptr += holder.nr_jited_ksyms * sizeof(__u64);

	holder.jited_func_lens = ptr_to_u64(ptr);
	ptr += holder.nr_jited_func_lens * sizeof(__u32);

	holder.func_info = ptr_to_u64(ptr);
	ptr += holder.nr_func_info * holder.func_info_rec_size;

	holder.line_info = ptr_to_u64(ptr);
	ptr += holder.nr_line_info * holder.line_info_rec_size;

	holder.jited_line_info = ptr_to_u64(ptr);
	ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;

	*info = holder;
	return 0;
}

static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
	struct timespec real_time_ts, boot_time_ts;
@ -417,19 +489,18 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
	if (info->btf_id)
		jsonw_int_field(json_wtr, "btf_id", info->btf_id);

	if (!hash_empty(prog_table.table)) {
		struct pinned_obj *obj;
	if (!hashmap__empty(prog_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				jsonw_string(json_wtr, obj->path);
		}
		hashmap__for_each_key_entry(prog_table, entry,
					    u32_as_hash_field(info->id))
			jsonw_string(json_wtr, entry->value);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(&refs_table, info->id, json_wtr);
	emit_obj_refs_json(refs_table, info->id, json_wtr);

	show_prog_metadata(fd, info->nr_map_ids);

@ -489,19 +560,18 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
	if (info->nr_map_ids)
		show_prog_maps(fd, info->nr_map_ids);

	if (!hash_empty(prog_table.table)) {
		struct pinned_obj *obj;
	if (!hashmap__empty(prog_table)) {
		struct hashmap_entry *entry;

		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				printf("\n\tpinned %s", obj->path);
		}
		hashmap__for_each_key_entry(prog_table, entry,
					    u32_as_hash_field(info->id))
			printf("\n\tpinned %s", (char *)entry->value);
	}

	if (info->btf_id)
		printf("\n\tbtf_id %d", info->btf_id);

	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");

@ -568,8 +638,15 @@ static int do_show(int argc, char **argv)
	int err;
	int fd;

	if (show_pinned)
		build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
	if (show_pinned) {
		prog_table = hashmap__new(hash_fn_for_key_as_id,
					  equal_fn_for_key_as_id, NULL);
		if (!prog_table) {
			p_err("failed to create hashmap for pinned paths");
			return -1;
		}
		build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
	}
	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);

	if (argc == 2)
@ -612,7 +689,10 @@ static int do_show(int argc, char **argv)
	if (json_output)
		jsonw_end_array(json_wtr);

	delete_obj_refs_table(&refs_table);
	delete_obj_refs_table(refs_table);

	if (show_pinned)
		delete_pinned_obj_table(prog_table);

	return err;
}
@ -796,16 +876,18 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,

static int do_dump(int argc, char **argv)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);
	size_t info_data_sz = 0;
	void *info_data = NULL;
	char *filepath = NULL;
	bool opcodes = false;
	bool visual = false;
	enum dump_mode mode;
	bool linum = false;
	int *fds = NULL;
	int nb_fds, i = 0;
	int *fds = NULL;
	int err = -1;
	__u64 arrays;

	if (is_prefix(*argv, "jited")) {
		if (disasm_init())
@ -865,43 +947,44 @@ static int do_dump(int argc, char **argv)
		goto exit_close;
	}

	if (mode == DUMP_JITED)
		arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
	else
		arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;

	arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	if (json_output && nb_fds > 1)
		jsonw_start_array(json_wtr);	/* root array */
	for (i = 0; i < nb_fds; i++) {
		info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
		if (IS_ERR_OR_NULL(info_linear)) {
		memset(&info, 0, sizeof(info));

		err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
		if (err) {
			p_err("can't get prog info: %s", strerror(errno));
			break;
		}

		err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
		if (err) {
			p_err("can't grow prog info_data");
			break;
		}

		err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
		if (err) {
			p_err("can't get prog info: %s", strerror(errno));
			break;
		}

		if (json_output && nb_fds > 1) {
			jsonw_start_object(json_wtr);	/* prog object */
			print_prog_header_json(&info_linear->info);
			print_prog_header_json(&info);
			jsonw_name(json_wtr, "insns");
		} else if (nb_fds > 1) {
			print_prog_header_plain(&info_linear->info);
			print_prog_header_plain(&info);
		}

		err = prog_dump(&info_linear->info, mode, filepath, opcodes,
				visual, linum);
		err = prog_dump(&info, mode, filepath, opcodes, visual, linum);

		if (json_output && nb_fds > 1)
			jsonw_end_object(json_wtr);	/* prog object */
		else if (i != nb_fds - 1 && nb_fds > 1)
			printf("\n");

		free(info_linear);
		if (err)
			break;
		close(fds[i]);
@ -913,6 +996,7 @@ static int do_dump(int argc, char **argv)
	for (; i < nb_fds; i++)
		close(fds[i]);
exit_free:
	free(info_data);
	free(fds);
	return err;
}
@ -1380,7 +1464,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
		.relaxed_maps = relaxed_maps,
	);
	struct bpf_object_load_attr load_attr = { 0 };
	enum bpf_attach_type expected_attach_type;
	struct map_replace *map_replace = NULL;
	struct bpf_program *prog = NULL, *pos;
@ -1402,8 +1485,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)

	while (argc) {
		if (is_prefix(*argv, "type")) {
			char *type;

			NEXT_ARG();

			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
@ -1413,21 +1494,26 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
			if (!REQ_ARGS(1))
				goto err_free_reuse_maps;

			/* Put a '/' at the end of type to appease libbpf */
			type = malloc(strlen(*argv) + 2);
			if (!type) {
				p_err("mem alloc failed");
				goto err_free_reuse_maps;
			}
			*type = 0;
			strcat(type, *argv);
			strcat(type, "/");
			err = libbpf_prog_type_by_name(*argv, &common_prog_type,
						       &expected_attach_type);
			if (err < 0) {
				/* Put a '/' at the end of type to appease libbpf */
				char *type = malloc(strlen(*argv) + 2);

			err = get_prog_type_by_name(type, &common_prog_type,
						    &expected_attach_type);
			free(type);
			if (err < 0)
				goto err_free_reuse_maps;
				if (!type) {
					p_err("mem alloc failed");
					goto err_free_reuse_maps;
				}
				*type = 0;
				strcat(type, *argv);
				strcat(type, "/");

				err = get_prog_type_by_name(type, &common_prog_type,
							    &expected_attach_type);
				free(type);
				if (err < 0)
					goto err_free_reuse_maps;
			}

			NEXT_ARG();
		} else if (is_prefix(*argv, "map")) {
@ -1511,6 +1597,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)

	set_max_rlimit();

	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		open_opts.kernel_log_level = 1 + 2 + 4;

	obj = bpf_object__open_file(file, &open_opts);
	if (libbpf_get_error(obj)) {
		p_err("failed to open object file");
@ -1565,7 +1655,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
	j = 0;
	idx = 0;
	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
		if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			bpf_map__set_ifindex(map, ifindex);

		if (j < old_map_fds && idx == map_replace[j].idx) {
@ -1590,12 +1680,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
		goto err_close_obj;
	}

	load_attr.obj = obj;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		load_attr.log_level = 1 + 2 + 4;

	err = bpf_object__load_xattr(&load_attr);
	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto err_close_obj;
@ -1606,7 +1691,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
		goto err_close_obj;

	if (first_prog_only) {
		prog = bpf_program__next(NULL, obj);
		prog = bpf_object__next_program(obj, NULL);
		if (!prog) {
			p_err("object file doesn't contain any bpf program");
			goto err_close_obj;
@ -1650,6 +1735,11 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
	else
		bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
	if (!legacy_libbpf) {
		p_info("Warning: bpftool is now running in libbpf strict mode and has more stringent requirements about BPF programs.\n"
		       "If it used to work for this object file but now doesn't, see --legacy option for more details.\n");
	}

	bpf_object__close(obj);
err_free_reuse_maps:
	for (i = 0; i < old_map_fds; i++)
@ -1682,17 +1772,19 @@ static int try_loader(struct gen_loader_opts *gen)
			     sizeof(struct bpf_prog_desc));
	int log_buf_sz = (1u << 24) - 1;
	int err, fds_before, fd_delta;
	char *log_buf;
	char *log_buf = NULL;

	ctx = alloca(ctx_sz);
	memset(ctx, 0, ctx_sz);
	ctx->sz = ctx_sz;
	ctx->log_level = 1;
	ctx->log_size = log_buf_sz;
	log_buf = malloc(log_buf_sz);
	if (!log_buf)
		return -ENOMEM;
	ctx->log_buf = (long) log_buf;
	if (verifier_logs) {
		ctx->log_level = 1 + 2 + 4;
		ctx->log_size = log_buf_sz;
		log_buf = malloc(log_buf_sz);
		if (!log_buf)
			return -ENOMEM;
		ctx->log_buf = (long) log_buf;
	}
	opts.ctx = ctx;
	opts.data = gen->data;
	opts.data_sz = gen->data_sz;
@ -1701,9 +1793,9 @@ static int try_loader(struct gen_loader_opts *gen)
	fds_before = count_open_fds();
	err = bpf_load_and_run(&opts);
	fd_delta = count_open_fds() - fds_before;
	if (err < 0) {
	if (err < 0 || verifier_logs) {
		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
		if (fd_delta)
		if (fd_delta && err < 0)
			fprintf(stderr, "loader prog leaked %d FDs\n",
				fd_delta);
	}
@ -1715,7 +1807,6 @@ static int do_loader(int argc, char **argv)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
	struct bpf_object_load_attr load_attr = {};
	struct bpf_object *obj;
	const char *file;
	int err = 0;
@ -1724,6 +1815,10 @@ static int do_loader(int argc, char **argv)
		return -1;
	file = GET_ARG();

	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		open_opts.kernel_log_level = 1 + 2 + 4;

	obj = bpf_object__open_file(file, &open_opts);
	if (libbpf_get_error(obj)) {
		p_err("failed to open object file");
@ -1734,12 +1829,7 @@ static int do_loader(int argc, char **argv)
	if (err)
		goto err_close_obj;

	load_attr.obj = obj;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		load_attr.log_level = 1 + 2 + 4;

	err = bpf_object__load_xattr(&load_attr);
	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto err_close_obj;
@ -2009,41 +2099,58 @@ static void profile_print_readings(void)

static char *profile_target_name(int tgt_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_func_info *func_info;
	struct bpf_func_info func_info;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	const struct btf_type *t;
	__u32 func_info_rec_size;
	struct btf *btf = NULL;
	char *name = NULL;
	int err;

	info_linear = bpf_program__get_prog_info_linear(
		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		p_err("failed to get info_linear for prog FD %d", tgt_fd);
		return NULL;
	err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
	if (err) {
		p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
		goto out;
	}

	if (info_linear->info.btf_id == 0) {
	if (info.btf_id == 0) {
		p_err("prog FD %d doesn't have valid btf", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
	func_info_rec_size = info.func_info_rec_size;
	if (info.nr_func_info == 0) {
		p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
		goto out;
	}

	memset(&info, 0, sizeof(info));
	info.nr_func_info = 1;
	info.func_info_rec_size = func_info_rec_size;
	info.func_info = ptr_to_u64(&func_info);

	err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
	if (err) {
		p_err("failed to get func_info for prog FD %d", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info.btf_id);
	if (libbpf_get_error(btf)) {
		p_err("failed to load btf for prog FD %d", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	t = btf__type_by_id(btf, func_info.type_id);
	if (!t) {
		p_err("btf %d doesn't have type %d",
		      info_linear->info.btf_id, func_info[0].type_id);
		      info.btf_id, func_info.type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	btf__free(btf);
	free(info_linear);
	return name;
}
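
Editor's note: prep_prog_info() and the reworked profile_target_name() above both lean on the same two-pass bpf_obj_get_info_by_fd() convention: one call with zeroed arrays so the kernel reports sizes, then a second call with caller-owned buffers wired in. A trimmed sketch of that pattern, fetching only the translated instructions; the function name and error handling are illustrative.

#include <bpf/bpf.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *fetch_xlated_insns(int prog_fd, __u32 *len)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	void *buf;

	/* pass 1: kernel fills in counts/lengths only */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return NULL;

	buf = calloc(1, info.xlated_prog_len);
	if (!buf)
		return NULL;
	*len = info.xlated_prog_len;

	/* pass 2: same call, now with a destination buffer wired in */
	memset(&info, 0, sizeof(info));
	info.xlated_prog_len = *len;
	info.xlated_prog_insns = (__u64)(uintptr_t)buf;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
		free(buf);
		return NULL;
	}
	return buf;
}
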
@ -71,8 +71,8 @@ int iter(struct bpf_iter__task_file *ctx)

	e.pid = task->tgid;
	e.id = get_obj_id(file->private_data, obj_type);
	bpf_probe_read_kernel(&e.comm, sizeof(e.comm),
			      task->group_leader->comm);
	bpf_probe_read_kernel_str(&e.comm, sizeof(e.comm),
				  task->group_leader->comm);
	bpf_seq_write(ctx->meta->seq, &e, sizeof(e));

	return 0;
@ -32,7 +32,7 @@ static const struct btf *get_btf_vmlinux(void)
|
||||
return btf_vmlinux;
|
||||
|
||||
btf_vmlinux = libbpf_find_kernel_btf();
|
||||
if (IS_ERR(btf_vmlinux))
|
||||
if (libbpf_get_error(btf_vmlinux))
|
||||
p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
|
||||
|
||||
return btf_vmlinux;
|
||||
@ -45,7 +45,7 @@ static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
|
||||
const char *st_ops_name;
|
||||
|
||||
kern_btf = get_btf_vmlinux();
|
||||
if (IS_ERR(kern_btf))
|
||||
if (libbpf_get_error(kern_btf))
|
||||
return "<btf_vmlinux_not_found>";
|
||||
|
||||
t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
|
||||
@ -63,7 +63,7 @@ static __s32 get_map_info_type_id(void)
|
||||
return map_info_type_id;
|
||||
|
||||
kern_btf = get_btf_vmlinux();
|
||||
if (IS_ERR(kern_btf)) {
|
||||
if (libbpf_get_error(kern_btf)) {
|
||||
map_info_type_id = PTR_ERR(kern_btf);
|
||||
return map_info_type_id;
|
||||
}
|
||||
@ -252,7 +252,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
|
||||
}
|
||||
|
||||
fd = bpf_map_get_fd_by_id(id);
|
||||
if (fd == -1) {
|
||||
if (fd < 0) {
|
||||
p_err("can't get map by id (%lu): %s", id, strerror(errno));
|
||||
res.nr_errs++;
|
||||
return res;
|
||||
@ -415,7 +415,7 @@ static int do_dump(int argc, char **argv)
|
||||
}
|
||||
|
||||
kern_btf = get_btf_vmlinux();
|
||||
if (IS_ERR(kern_btf))
|
||||
if (libbpf_get_error(kern_btf))
|
||||
return -1;
|
||||
|
||||
if (!json_output) {
|
||||
@ -479,7 +479,7 @@ static int do_unregister(int argc, char **argv)
|
||||
|
||||
static int do_register(int argc, char **argv)
|
||||
{
|
||||
struct bpf_object_load_attr load_attr = {};
|
||||
LIBBPF_OPTS(bpf_object_open_opts, open_opts);
|
||||
const struct bpf_map_def *def;
|
||||
struct bpf_map_info info = {};
|
||||
__u32 info_len = sizeof(info);
|
||||
@ -494,18 +494,17 @@ static int do_register(int argc, char **argv)
|
||||
|
||||
file = GET_ARG();
|
||||
|
||||
obj = bpf_object__open(file);
|
||||
if (IS_ERR_OR_NULL(obj))
|
||||
if (verifier_logs)
|
||||
/* log_level1 + log_level2 + stats, but not stable UAPI */
|
||||
open_opts.kernel_log_level = 1 + 2 + 4;
|
||||
|
||||
obj = bpf_object__open_file(file, &open_opts);
|
||||
if (libbpf_get_error(obj))
|
||||
return -1;
|
||||
|
||||
set_max_rlimit();
|
||||
|
||||
load_attr.obj = obj;
|
||||
if (verifier_logs)
|
||||
/* log_level1 + log_level2 + stats, but not stable UAPI */
|
||||
load_attr.log_level = 1 + 2 + 4;
|
||||
|
||||
if (bpf_object__load_xattr(&load_attr)) {
|
||||
if (bpf_object__load(obj)) {
|
||||
bpf_object__close(obj);
|
||||
return -1;
|
||||
}
|
||||
@ -516,7 +515,7 @@ static int do_register(int argc, char **argv)
|
||||
continue;
|
||||
|
||||
link = bpf_map__attach_struct_ops(map);
|
||||
if (IS_ERR(link)) {
|
||||
if (libbpf_get_error(link)) {
|
||||
p_err("can't register struct_ops %s: %s",
|
||||
bpf_map__name(map),
|
||||
strerror(-PTR_ERR(link)));
|
||||
@ -596,7 +595,7 @@ int do_struct_ops(int argc, char **argv)
|
||||
|
||||
err = cmd_select(cmds, argc, argv, do_help);
|
||||
|
||||
if (!IS_ERR(btf_vmlinux))
|
||||
if (!libbpf_get_error(btf_vmlinux))
|
||||
btf__free(btf_vmlinux);
|
||||
|
||||
return err;
|
||||
|
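The struct_ops hunks above all apply the same libbpf 1.0 migration: pointers returned by libbpf are no longer kernel-style error pointers to be tested with IS_ERR(), but are checked with libbpf_get_error(). A minimal sketch of the new pattern (the constructor used here is illustrative, not from this diff):

	struct btf *btf = btf__load_vmlinux_btf();
	long err = libbpf_get_error(btf);	/* 0 on success, -errno otherwise */

	if (err) {
		fprintf(stderr, "failed to load BTF: %s\n", strerror(-err));
		return -1;
	}
	/* ... use btf ... */
	btf__free(btf);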
@ -23,6 +23,7 @@ CC = $(HOSTCC)
LD = $(HOSTLD)
ARCH = $(HOSTARCH)
RM ?= rm
CROSS_COMPILE =

OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/

@ -30,27 +31,33 @@ LIBBPF_SRC := $(srctree)/tools/lib/bpf/
SUBCMD_SRC := $(srctree)/tools/lib/subcmd/

BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a

LIBBPF_DESTDIR := $(LIBBPF_OUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include

BINARY := $(OUTPUT)/resolve_btfids
BINARY_IN := $(BINARY)-in.o

all: $(BINARY)

$(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd:
$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT):
$(call msg,MKDIR,,$@)
$(Q)mkdir -p $(@)

$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)

$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \
DESTDIR=$(LIBBPF_DESTDIR) prefix= \
$(abspath $@) install_headers

CFLAGS := -g \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(LIBBPF_SRC) \
-I$(LIBBPF_INCLUDE) \
-I$(SUBCMD_SRC)

LIBS = -lelf -lz
@ -58,7 +65,7 @@ LIBS = -lelf -lz
export srctree OUTPUT CFLAGS Q
include $(srctree)/tools/build/Makefile.include

$(BINARY_IN): fixdep FORCE | $(OUTPUT)
$(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT)
$(Q)$(MAKE) $(build)=resolve_btfids

$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
@ -68,7 +75,8 @@ $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
clean_objects := $(wildcard $(OUTPUT)/*.o \
$(OUTPUT)/.*.o.cmd \
$(OUTPUT)/.*.o.d \
$(OUTPUT)/libbpf \
$(LIBBPF_OUT) \
$(LIBBPF_DESTDIR) \
$(OUTPUT)/libsubcmd \
$(OUTPUT)/resolve_btfids)

@ -60,8 +60,8 @@
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <btf.h>
#include <libbpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <parse-options.h>

#define BTF_IDS_SECTION ".BTF_ids"
@ -83,12 +83,14 @@ struct btf_id {
int cnt;
};
int addr_cnt;
bool is_set;
Elf64_Addr addr[ADDR_CNT];
};

struct object {
const char *path;
const char *btf;
const char *base_btf_path;

struct {
int fd;
@ -450,8 +452,10 @@ static int symbols_collect(struct object *obj)
* in symbol's size, together with 'cnt' field hence
* that - 1.
*/
if (id)
if (id) {
id->cnt = sym.st_size / sizeof(int) - 1;
id->is_set = true;
}
} else {
pr_err("FAILED unsupported prefix %s\n", prefix);
return -1;
@ -477,25 +481,36 @@ static int symbols_resolve(struct object *obj)
int nr_structs = obj->nr_structs;
int nr_unions = obj->nr_unions;
int nr_funcs = obj->nr_funcs;
struct btf *base_btf = NULL;
int err, type_id;
struct btf *btf;
__u32 nr_types;

btf = btf__parse(obj->btf ?: obj->path, NULL);
if (obj->base_btf_path) {
base_btf = btf__parse(obj->base_btf_path, NULL);
err = libbpf_get_error(base_btf);
if (err) {
pr_err("FAILED: load base BTF from %s: %s\n",
obj->base_btf_path, strerror(-err));
return -1;
}
}

btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
err = libbpf_get_error(btf);
if (err) {
pr_err("FAILED: load BTF from %s: %s\n",
obj->btf ?: obj->path, strerror(-err));
return -1;
goto out;
}

err = -1;
nr_types = btf__get_nr_types(btf);
nr_types = btf__type_cnt(btf);

/*
* Iterate all the BTF types and search for collected symbol IDs.
*/
for (type_id = 1; type_id <= nr_types; type_id++) {
for (type_id = 1; type_id < nr_types; type_id++) {
const struct btf_type *type;
struct rb_root *root;
struct btf_id *id;
@ -545,6 +560,7 @@ static int symbols_resolve(struct object *obj)

err = 0;
out:
btf__free(base_btf);
btf__free(btf);
return err;
}
@ -555,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
int *ptr = data->d_buf;
int i;

if (!id->id) {
if (!id->id && !id->is_set)
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
}

for (i = 0; i < id->addr_cnt; i++) {
unsigned long addr = id->addr[i];
@ -678,7 +693,6 @@ static const char * const resolve_btfids_usage[] = {

int main(int argc, const char **argv)
{
bool no_fail = false;
struct object obj = {
.efile = {
.idlist_shndx = -1,
@ -695,8 +709,8 @@ int main(int argc, const char **argv)
"be more verbose (show errors, etc)"),
OPT_STRING(0, "btf", &obj.btf, "BTF data",
"BTF data"),
OPT_BOOLEAN(0, "no-fail", &no_fail,
"do not fail if " BTF_IDS_SECTION " section is not found"),
OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
"path of file providing base BTF"),
OPT_END()
};
int err = -1;
@ -717,9 +731,7 @@ int main(int argc, const char **argv)
*/
if (obj.efile.idlist_shndx == -1 ||
obj.efile.symbols_shndx == -1) {
if (no_fail)
return 0;
pr_err("FAILED to find needed sections\n");
pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
err = 0;
goto out;
}

@ -9,10 +9,10 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL)
LIBBPF_SRC := $(abspath ../../lib/bpf)
BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
BPF_INCLUDE := $(BPFOBJ_OUTPUT)
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib) \
-I$(abspath ../../include/uapi)
CFLAGS := -g -Wall
BPF_DESTDIR := $(BPFOBJ_OUTPUT)
BPF_INCLUDE := $(BPF_DESTDIR)/include
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)

# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
@ -33,7 +33,7 @@ endif

.DELETE_ON_ERROR:

.PHONY: all clean runqslower
.PHONY: all clean runqslower libbpf_hdrs
all: runqslower

runqslower: $(OUTPUT)/runqslower
@ -46,13 +46,15 @@ clean:
$(Q)$(RM) $(OUTPUT)runqslower
$(Q)$(RM) -r .output

libbpf_hdrs: $(BPFOBJ)

$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
$(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@

$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
$(OUTPUT)/runqslower.bpf.o
$(OUTPUT)/runqslower.bpf.o | libbpf_hdrs

$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h
$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs

$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL)
$(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
@ -81,8 +83,9 @@ else
endif

$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) $@
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \
DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers

$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
CC=$(HOSTCC) LD=$(HOSTLD)
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD)

@ -68,7 +68,7 @@ int handle__sched_switch(u64 *ctx)
*/
struct task_struct *prev = (struct task_struct *)ctx[1];
struct task_struct *next = (struct task_struct *)ctx[2];
struct event event = {};
struct runq_event event = {};
u64 *tsp, delta_us;
long state;
u32 pid;

@ -100,7 +100,7 @@ static int bump_memlock_rlimit(void)

void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
const struct event *e = data;
const struct runq_event *e = data;
struct tm *tm;
char ts[32];
time_t t;
@ -123,7 +123,6 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
struct perf_buffer_opts pb_opts;
struct perf_buffer *pb = NULL;
struct runqslower_bpf *obj;
int err;
@ -165,9 +164,8 @@ int main(int argc, char **argv)
printf("Tracing run queue latency higher than %llu us\n", env.min_us);
printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");

pb_opts.sample_cb = handle_event;
pb_opts.lost_cb = handle_lost_events;
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, &pb_opts);
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
handle_event, handle_lost_events, NULL, NULL);
err = libbpf_get_error(pb);
if (err) {
pb = NULL;
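The runqslower change above tracks libbpf's reworked perf_buffer__new(), which now takes the sample and lost-event callbacks plus a context pointer and an optional opts struct directly, instead of packing the callbacks into struct perf_buffer_opts. A hedged sketch of the new call, reusing the callbacks from this file and assuming a valid map fd:

	struct perf_buffer *pb;

	pb = perf_buffer__new(map_fd, 64 /* pages per CPU buffer */,
			      handle_event, handle_lost_events, NULL, NULL);
	if (libbpf_get_error(pb)) {
		pb = NULL;
		/* handle the error */
	}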
@ -4,7 +4,7 @@

#define TASK_COMM_LEN 16

struct event {
struct runq_event {
char task[TASK_COMM_LEN];
__u64 delta_us;
pid_t pid;

@ -99,7 +99,7 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
###
## HOSTCC C flags

host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(KBUILD_HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))

# output directory for tests below
TMPOUT = .tmp_$$$$

@ -51,6 +51,7 @@ FEATURE_TESTS_BASIC := \
libslang \
libslang-include-subdir \
libtraceevent \
libtracefs \
libcrypto \
libunwind \
pthread-attr-setaffinity-np \

@ -35,6 +35,7 @@ FILES= \
test-libslang.bin \
test-libslang-include-subdir.bin \
test-libtraceevent.bin \
test-libtracefs.bin \
test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
@ -89,7 +90,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
###############################

$(OUTPUT)test-all.bin:
$(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap
$(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap

$(OUTPUT)test-hello.bin:
$(BUILD)
@ -198,6 +199,9 @@ $(OUTPUT)test-libslang-include-subdir.bin:
$(OUTPUT)test-libtraceevent.bin:
$(BUILD) -ltraceevent

$(OUTPUT)test-libtracefs.bin:
$(BUILD) -ltracefs

$(OUTPUT)test-libcrypto.bin:
$(BUILD) -lcrypto

@ -292,7 +296,7 @@ $(OUTPUT)test-jvmti-cmlr.bin:
$(BUILD)

$(OUTPUT)test-llvm.bin:
$(BUILDXX) -std=gnu++11 \
$(BUILDXX) -std=gnu++14 \
-I$(shell $(LLVM_CONFIG) --includedir) \
-L$(shell $(LLVM_CONFIG) --libdir) \
$(shell $(LLVM_CONFIG) --libs Core BPF) \
@ -300,12 +304,12 @@ $(OUTPUT)test-llvm.bin:
> $(@:.bin=.make.output) 2>&1

$(OUTPUT)test-llvm-version.bin:
$(BUILDXX) -std=gnu++11 \
$(BUILDXX) -std=gnu++14 \
-I$(shell $(LLVM_CONFIG) --includedir) \
> $(@:.bin=.make.output) 2>&1

$(OUTPUT)test-clang.bin:
$(BUILDXX) -std=gnu++11 \
$(BUILDXX) -std=gnu++14 \
-I$(shell $(LLVM_CONFIG) --includedir) \
-L$(shell $(LLVM_CONFIG) --libdir) \
-Wl,--start-group -lclangBasic -lclangDriver \

@ -14,6 +14,12 @@
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# elif defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
# define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
# define __NR_bpf 5315
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif

@ -11,7 +11,7 @@ from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn import container_of, FaultError, Object
from drgn import container_of, FaultError, Object, cast


DESC = """
@ -69,15 +69,15 @@ def oo_objects(s):


def count_partial(n, fn):
nr_pages = 0
for page in list_for_each_entry('struct page', n.partial.address_of_(),
'lru'):
nr_pages += fn(page)
return nr_pages
nr_objs = 0
for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
'slab_list'):
nr_objs += fn(slab)
return nr_objs


def count_free(page):
return page.objects - page.inuse
def count_free(slab):
return slab.objects - slab.inuse


def slub_get_slabinfo(s, cfg):
@ -145,14 +145,14 @@ def detect_kernel_config():
return cfg


def for_each_slab_page(prog):
def for_each_slab(prog):
PGSlab = 1 << prog.constant('PG_slab')
PGHead = 1 << prog.constant('PG_head')

for page in for_each_page(prog):
try:
if page.flags.value_() & PGSlab:
yield page
yield cast('struct slab *', page)
except FaultError:
pass

@ -190,13 +190,13 @@ def main():
'list'):
obj_cgroups.add(ptr.value_())

# look over all slab pages, belonging to non-root memcgs
# and look for objects belonging to the given memory cgroup
for page in for_each_slab_page(prog):
objcg_vec_raw = page.memcg_data.value_()
# look over all slab folios and look for objects belonging
# to the given memory cgroup
for slab in for_each_slab(prog):
objcg_vec_raw = slab.memcg_data.value_()
if objcg_vec_raw == 0:
continue
cache = page.slab_cache
cache = slab.slab_cache
if not cache:
continue
addr = cache.value_()

@ -279,6 +279,7 @@ static void print_event(struct iio_event_data *event)
printf(", direction: %s", iio_ev_dir_text[dir]);

printf("\n");
fflush(stdout);
}

/* Enable or disable events in sysfs if the knob is available */
@ -18,7 +18,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>

#ifndef _TOOLS_LINUX_BITOPS_H_
#error only <linux/bitops.h> can be included directly

@ -1,9 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PERF_BITOPS_H
#define _PERF_BITOPS_H
#ifndef _TOOLS_LINUX_BITMAP_H
#define _TOOLS_LINUX_BITMAP_H

#include <string.h>
#include <linux/bitops.h>
#include <linux/find.h>
#include <stdlib.h>
#include <linux/kernel.h>

@ -181,4 +182,4 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}

#endif /* _PERF_BITOPS_H */
#endif /* _TOOLS_LINUX_BITMAP_H */

@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}

#ifndef HAVE_ARCH_HASH_32
#define hash_32 hash_32_generic
#endif
static inline u32 hash_32_generic(u32 val, unsigned int bits)
static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);

@ -7,6 +7,7 @@
#include <assert.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/math.h>
#include <endian.h>
#include <byteswap.h>

@ -14,8 +15,6 @@
#define UINT_MAX (~0U)
#endif

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))

@ -52,15 +51,6 @@
_min1 < _min2 ? _min1 : _min2; })
#endif

#ifndef roundup
#define roundup(x, y) ( \
{ \
const typeof(y) __y = y; \
(((x) + (__y - 1)) / __y) * __y; \
} \
)
#endif

#ifndef BUG_ON
#ifdef NDEBUG
#define BUG_ON(cond) do { if (cond) {} } while (0)
@ -102,17 +92,9 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int scnprintf(char * buf, size_t size, const char * fmt, ...);
int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
* as wide as the result!), and we want to evaluate the macro
* arguments just once each.
*/
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#endif

#define current_gfp_context(k) 0
#define synchronize_rcu()

@ -66,6 +66,17 @@ struct unwind_hint {
static void __used __section(".discard.func_stack_frame_non_standard") \
*__func_stack_frame_non_standard_##func = func

/*
* STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
* for the case where a function is intentionally missing frame pointer setup,
* but otherwise needs objtool/ORC coverage when frame pointers are disabled.
*/
#ifdef CONFIG_FRAME_POINTER
#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
#else
#define STACK_FRAME_NON_STANDARD_FP(func)
#endif
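As the new comment explains, STACK_FRAME_NON_STANDARD_FP() marks a function as non-standard only when CONFIG_FRAME_POINTER is set, and expands to nothing otherwise so objtool/ORC coverage is kept. A hypothetical usage sketch (the function name is illustrative):

	/* trampoline() intentionally sets up no frame pointer */
	static void trampoline(void);
	STACK_FRAME_NON_STANDARD_FP(trampoline);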

#else /* __ASSEMBLY__ */

/*
@ -127,6 +138,7 @@ struct unwind_hint {
#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
"\n\t"
#define STACK_FRAME_NON_STANDARD(func)
#define STACK_FRAME_NON_STANDARD_FP(func)
#else
#define ANNOTATE_INTRA_FUNCTION_CALL
.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0

@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
return true;
}

#include <linux/lockdep.h>

#endif

@ -265,12 +265,17 @@ struct stat {
* - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
* - the system call is performed by calling the syscall instruction
* - syscall return comes in rax
* - rcx and r8..r11 may be clobbered, others are preserved.
* - rcx and r11 are clobbered, others are preserved.
* - the arguments are cast to long and assigned into the target registers
* which are then simply passed as registers to the asm code, so that we
* don't have to experience issues with register constraints.
* - the syscall number is always specified last in order to allow to force
* some registers before (gcc refuses a %-register at the last position).
* - see also x86-64 ABI section A.2 AMD64 Linux Kernel Conventions, A.2.1
* Calling Conventions.
*
* Link x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/x86-64-psABI
*
*/

#define my_syscall0(num) \
@ -280,9 +285,9 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret) \
: "=a"(_ret) \
: "0"(_num) \
: "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
@ -295,10 +300,10 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret) \
: "=a"(_ret) \
: "r"(_arg1), \
"0"(_num) \
: "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
@ -312,10 +317,10 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret) \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), \
"0"(_num) \
: "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
@ -330,10 +335,10 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret) \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
"0"(_num) \
: "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
@ -349,10 +354,10 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret), "=r"(_arg4) \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
"0"(_num) \
: "rcx", "r8", "r9", "r11", "memory", "cc" \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
@ -369,10 +374,10 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
"0"(_num) \
: "rcx", "r9", "r11", "memory", "cc" \
: "rcx", "r11", "memory", "cc" \
); \
_ret; \
})
@ -390,7 +395,7 @@ struct stat {
\
asm volatile ( \
"syscall\n" \
: "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
"r"(_arg6), "0"(_num) \
: "rcx", "r11", "memory", "cc" \
@ -415,7 +420,7 @@ asm(".section .text\n"
"and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call
"call main\n" // main() returns the status code, we'll exit with it.
"mov %eax, %edi\n" // retrieve exit code (32 bit)
"mov $60, %rax\n" // NR_exit == 60
"mov $60, %eax\n" // NR_exit == 60
"syscall\n" // really exit
"hlt\n" // ensure it does not return
"");
@ -1566,6 +1571,12 @@ pid_t sys_getpid(void)
return my_syscall0(__NR_getpid);
}

static __attribute__((unused))
pid_t sys_gettid(void)
{
return my_syscall0(__NR_gettid);
}

static __attribute__((unused))
int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
{
@ -2024,6 +2035,18 @@ pid_t getpid(void)
return ret;
}

static __attribute__((unused))
pid_t gettid(void)
{
pid_t ret = sys_gettid();

if (ret < 0) {
SET_ERRNO(-ret);
ret = -1;
}
return ret;
}

static __attribute__((unused))
int gettimeofday(struct timeval *tv, struct timezone *tz)
{

@ -880,8 +880,14 @@ __SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)

#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)

#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)

#undef __NR_syscalls
#define __NR_syscalls 449
#define __NR_syscalls 451

/*
* 32 bit systems traditionally used different

@ -1096,6 +1096,24 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)

/**
* DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
*
* This queries metadata about a framebuffer. User-space fills
* &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
* struct as the output.
*
* If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
* will be filled with GEM buffer handles. Planes are valid until one has a
* zero handle -- this can be used to compute the number of planes.
*
* Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
* until one has a zero &drm_mode_fb_cmd2.pitches.
*
* If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
* in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
* modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
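A short userspace sketch of the new ioctl, assuming an open DRM file descriptor drm_fd and a known framebuffer id fb_id (error handling elided):

	struct drm_mode_fb_cmd2 cmd = { .fb_id = fb_id };

	if (ioctl(drm_fd, DRM_IOCTL_MODE_GETFB2, &cmd) == 0) {
		/* cmd.width, cmd.height and cmd.pixel_format describe the
		 * framebuffer; cmd.modifier[0] is valid when
		 * DRM_MODE_FB_MODIFIERS is set in cmd.flags.
		 */
	}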

/*
@ -1522,6 +1522,12 @@ struct drm_i915_gem_caching {
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
/*
* Do not add new tiling types here. The I915_TILING_* values are for
* de-tiling fence registers that no longer exist on modern platforms. Although
* the hardware may support new types of tiling in general (e.g., Tile4), we
* do not need to add them to the uapi that is specific to now-defunct ioctls.
*/
#define I915_TILING_LAST I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE 0
@ -1824,6 +1830,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
* i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa

@ -1846,6 +1853,55 @@ struct drm_i915_gem_context_param {
* attempted to use it, never re-use this context param number.
*/
#define I915_CONTEXT_PARAM_RINGSIZE 0xc

/*
* I915_CONTEXT_PARAM_PROTECTED_CONTENT:
*
* Mark that the context makes use of protected content, which will result
* in the context being invalidated when the protected content session is.
* Given that the protected content session is killed on suspend, the device
* is kept awake for the lifetime of a protected context, so the user should
* make sure to dispose of them once done.
* This flag can only be set at context creation time and, when set to true,
* must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
* to false. This flag can't be set to true in conjunction with setting the
* I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
*
* .. code-block:: C
*
* struct drm_i915_gem_context_create_ext_setparam p_protected = {
* .base = {
* .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
* },
* .param = {
* .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
* .value = 1,
* }
* };
* struct drm_i915_gem_context_create_ext_setparam p_norecover = {
* .base = {
* .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
* .next_extension = to_user_pointer(&p_protected),
* },
* .param = {
* .param = I915_CONTEXT_PARAM_RECOVERABLE,
* .value = 0,
* }
* };
* struct drm_i915_gem_context_create_ext create = {
* .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
* .extensions = to_user_pointer(&p_norecover);
* };
*
* ctx_id = gem_context_create_ext(drm_fd, &create);
*
* In addition to the normal failure cases, setting this flag during context
* creation can result in the following errors:
*
* -ENODEV: feature not available
* -EPERM: trying to mark a recoverable or not bannable context as protected
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */

__u64 value;
@ -2049,6 +2105,135 @@ struct i915_context_engines_bond {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__

/**
* struct i915_context_engines_parallel_submit - Configure engine for
* parallel submission.
*
* Setup a slot in the context engine map to allow multiple BBs to be submitted
* in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
* in parallel. Multiple hardware contexts are created internally in the i915 to
* run these BBs. Once a slot is configured for N BBs only N BBs can be
* submitted in each execbuf IOCTL and this is implicit behavior e.g. The user
* doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
* many BBs there are based on the slot's configuration. The N BBs are the last
* N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
*
* The default placement behavior is to create implicit bonds between each
* context if each context maps to more than 1 physical engine (e.g. context is
* a virtual engine). Also we only allow contexts of same engine class and these
* contexts must be in logically contiguous order. Examples of the placement
* behavior are described below. Lastly, the default is to not allow BBs to be
* preempted mid-batch. Rather insert coordinated preemption points on all
* hardware contexts between each set of BBs. Flags could be added in the future
* to change both of these default behaviors.
*
* Returns -EINVAL if hardware context placement configuration is invalid or if
* the placement configuration isn't supported on the platform / submission
* interface.
* Returns -ENODEV if extension isn't supported on the platform / submission
* interface.
*
* .. code-block:: none
*
* Examples syntax:
* CS[X] = generic engine of same class, logical instance X
* INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
*
* Example 1 pseudo code:
* set_engines(INVALID)
* set_parallel(engine_index=0, width=2, num_siblings=1,
* engines=CS[0],CS[1])
*
* Results in the following valid placement:
* CS[0], CS[1]
*
* Example 2 pseudo code:
* set_engines(INVALID)
* set_parallel(engine_index=0, width=2, num_siblings=2,
* engines=CS[0],CS[2],CS[1],CS[3])
*
* Results in the following valid placements:
* CS[0], CS[1]
* CS[2], CS[3]
*
* This can be thought of as two virtual engines, each containing two
* engines thereby making a 2D array. However, there are bonds tying the
* entries together and placing restrictions on how they can be scheduled.
* Specifically, the scheduler can choose only vertical columns from the 2D
* array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
* scheduler wants to submit to CS[0], it must also choose CS[1] and vice
* versa. Same for CS[2] requires also using CS[3].
* VE[0] = CS[0], CS[2]
* VE[1] = CS[1], CS[3]
*
* Example 3 pseudo code:
* set_engines(INVALID)
* set_parallel(engine_index=0, width=2, num_siblings=2,
* engines=CS[0],CS[1],CS[1],CS[3])
*
* Results in the following valid and invalid placements:
* CS[0], CS[1]
* CS[1], CS[3] - Not logically contiguous, return -EINVAL
*/
struct i915_context_engines_parallel_submit {
/**
* @base: base user extension.
*/
struct i915_user_extension base;

/**
* @engine_index: slot for parallel engine
*/
__u16 engine_index;

/**
* @width: number of contexts per parallel engine or in other words the
* number of batches in each submission
*/
__u16 width;

/**
* @num_siblings: number of siblings per context or in other words the
* number of possible placements for each submission
*/
__u16 num_siblings;

/**
* @mbz16: reserved for future use; must be zero
*/
__u16 mbz16;

/**
* @flags: all undefined flags must be zero, currently not defined flags
*/
__u64 flags;

/**
* @mbz64: reserved for future use; must be zero
*/
__u64 mbz64[3];

/**
* @engines: 2-d array of engine instances to configure parallel engine
*
* length = width (i) * num_siblings (j)
* index = j + i * num_siblings
*/
struct i915_engine_class_instance engines[0];

} __packed;

#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
struct i915_user_extension base; \
__u16 engine_index; \
__u16 width; \
__u16 num_siblings; \
__u16 mbz16; \
__u64 flags; \
__u64 mbz64[3]; \
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__

/**
* DOC: Context Engine Map uAPI
*
@ -2108,6 +2293,7 @@ struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));

@ -2726,14 +2912,20 @@ struct drm_i915_engine_info {

/** @flags: Engine flags. */
__u64 flags;
#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)

/** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)

/** @logical_instance: Logical instance of engine */
__u16 logical_instance;

/** @rsvd1: Reserved fields. */
__u64 rsvd1[4];
__u16 rsvd1[3];
/** @rsvd2: Reserved fields. */
__u64 rsvd2[3];
};

/**
@ -2979,8 +3171,12 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
* struct drm_i915_gem_create_ext_memory_regions.
*
* For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
* struct drm_i915_gem_create_ext_protected_content.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
__u64 extensions;
};

@ -3038,6 +3234,50 @@ struct drm_i915_gem_create_ext_memory_regions {
__u64 regions;
};

/**
* struct drm_i915_gem_create_ext_protected_content - The
* I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
*
* If this extension is provided, buffer contents are expected to be protected
* by PXP encryption and require decryption for scan out and processing. This
* is only possible on platforms that have PXP enabled, on all other scenarios
* using this extension will cause the ioctl to fail and return -ENODEV. The
* flags parameter is reserved for future expansion and must currently be set
* to zero.
*
* The buffer contents are considered invalid after a PXP session teardown.
*
* The encryption is guaranteed to be processed correctly only if the object
* is submitted with a context created using the
* I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
* at submission time on the validity of the objects involved.
*
* Below is an example on how to create a protected object:
*
* .. code-block:: C
*
* struct drm_i915_gem_create_ext_protected_content protected_ext = {
* .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
* .flags = 0,
* };
* struct drm_i915_gem_create_ext create_ext = {
* .size = PAGE_SIZE,
* .extensions = (uintptr_t)&protected_ext,
* };
*
* int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
* if (err) ...
*/
struct drm_i915_gem_create_ext_protected_content {
/** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
/** @flags: reserved for future usage, currently MBZ */
__u32 flags;
};

/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf

#if defined(__cplusplus)
}
#endif

|
||||
BPF_MAP_TYPE_RINGBUF,
|
||||
BPF_MAP_TYPE_INODE_STORAGE,
|
||||
BPF_MAP_TYPE_TASK_STORAGE,
|
||||
BPF_MAP_TYPE_BLOOM_FILTER,
|
||||
};
|
||||
|
||||
/* Note that tracing related programs such as
|
||||
@ -1274,6 +1275,13 @@ union bpf_attr {
|
||||
* struct stored as the
|
||||
* map value
|
||||
*/
|
||||
/* Any per-map-type extra fields
|
||||
*
|
||||
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
|
||||
* number of hash functions (if 0, the bloom filter will default
|
||||
* to using 5 hash functions).
|
||||
*/
|
||||
__u64 map_extra;
|
||||
};
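A hedged sketch of creating a bloom filter map through libbpf's bpf_map_create(), putting the number of hash functions (3 here) in the low bits of map_extra; the map name and sizes are illustrative:

	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_extra = 3);
	int fd;

	/* bloom filter maps have no keys, so key_size must be 0 */
	fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, "bloom",
			    0, sizeof(__u64), 10000, &opts);
	if (fd < 0)
		/* handle the error */;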

struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@ -1334,8 +1342,10 @@ union bpf_attr {
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
__u32 :32; /* pad */
__u32 core_relo_cnt; /* number of bpf_core_relo */
__aligned_u64 fd_array; /* array of FDs */
__aligned_u64 core_relos;
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
};

struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -1629,7 +1639,7 @@ union bpf_attr {
* u32 bpf_get_smp_processor_id(void)
* Description
* Get the SMP (symmetric multiprocessing) processor id. Note that
* all programs run with preemption disabled, which means that the
* all programs run with migration disabled, which means that the
* SMP processor id is stable during all the execution of the
* program.
* Return
@ -1736,7 +1746,7 @@ union bpf_attr {
* if the maximum number of tail calls has been reached for this
* chain of programs. This limit is defined in the kernel by the
* macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
* which is currently set to 32.
* which is currently set to 33.
* Return
* 0 on success, or a negative error in case of failure.
*
@ -4046,7 +4056,7 @@ union bpf_attr {
* arguments. The *data* are a **u64** array and corresponding format string
* values are stored in the array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data* array.
* The *data_len* is the size of *data* in bytes.
* The *data_len* is the size of *data* in bytes - must be a multiple of 8.
*
* Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
* Reading kernel memory may fail due to either invalid address or
@ -4751,7 +4761,8 @@ union bpf_attr {
* Each format specifier in **fmt** corresponds to one u64 element
* in the **data** array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data*
* array. The *data_len* is the size of *data* in bytes.
* array. The *data_len* is the size of *data* in bytes - must be
* a multiple of 8.
*
* Formats **%s** and **%p{i,I}{4,6}** require to read kernel
* memory. Reading kernel memory may fail due to either invalid
@ -4877,6 +4888,136 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
*
* long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
* Description
* Get branch trace from hardware engines like Intel LBR. The
* hardware engine is stopped shortly after the helper is
* called. Therefore, the user need to filter branch entries
* based on the actual use case. To capture branch trace
* before the trigger point of the BPF program, the helper
* should be called at the beginning of the BPF program.
*
* The data is stored as struct perf_branch_entry into output
* buffer *entries*. *size* is the size of *entries* in bytes.
* *flags* is reserved for now and must be zero.
*
* Return
* On success, number of bytes written to *buf*. On error, a
* negative value.
*
* **-EINVAL** if *flags* is not zero.
*
* **-ENOENT** if architecture does not support branch records.
*
* long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
* Description
* Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
* to format and can handle more format args as a result.
*
* Arguments are to be used as in **bpf_seq_printf**\ () helper.
* Return
* The number of bytes written to the buffer, or a negative error
* in case of failure.
*
* struct unix_sock *bpf_skc_to_unix_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *unix_sock* pointer.
* Return
* *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
* Description
* Get the address of a kernel symbol, returned in *res*. *res* is
* set to 0 if the symbol is not found.
* Return
* On success, zero. On error, a negative value.
*
* **-EINVAL** if *flags* is not zero.
*
* **-EINVAL** if string *name* is not the same size as *name_sz*.
*
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
*
* long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
* Description
* Find vma of *task* that contains *addr*, call *callback_fn*
* function with *task*, *vma*, and *callback_ctx*.
* The *callback_fn* should be a static function and
* the *callback_ctx* should be a pointer to the stack.
* The *flags* is used to control certain aspects of the helper.
* Currently, the *flags* must be 0.
*
* The expected callback signature is
*
* long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
*
* Return
* 0 on success.
* **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
* **-EBUSY** if failed to try lock mmap_lock.
* **-EINVAL** for invalid **flags**.
*
* long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
* Description
* For **nr_loops**, call **callback_fn** function
* with **callback_ctx** as the context parameter.
* The **callback_fn** should be a static function and
* the **callback_ctx** should be a pointer to the stack.
* The **flags** is used to control certain aspects of the helper.
* Currently, the **flags** must be 0. Currently, nr_loops is
* limited to 1 << 23 (~8 million) loops.
*
* long (\*callback_fn)(u32 index, void \*ctx);
*
* where **index** is the current index in the loop. The index
* is zero-indexed.
*
* If **callback_fn** returns 0, the helper will continue to the next
* loop. If return value is 1, the helper will skip the rest of
* the loops and return. Other return values are not used now,
* and will be rejected by the verifier.
*
* Return
* The number of loops performed, **-EINVAL** for invalid **flags**,
* **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
*
* long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
* Description
* Do strncmp() between **s1** and **s2**. **s1** doesn't need
* to be null-terminated and **s1_sz** is the maximum storage
* size of **s1**. **s2** must be a read-only string.
* Return
* An integer less than, equal to, or greater than zero
* if the first **s1_sz** bytes of **s1** is found to be
* less than, to match, or be greater than **s2**.
*
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
* Description
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
* returned in **value**.
*
* Return
* 0 on success.
* **-EINVAL** if n >= arguments count of traced function.
*
* long bpf_get_func_ret(void *ctx, u64 *value)
* Description
* Get return value of the traced function (for tracing programs)
* in **value**.
*
* Return
* 0 on success.
* **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
*
* long bpf_get_func_arg_cnt(void *ctx)
* Description
* Get number of arguments of the traced function (for tracing programs).
*
* Return
* The number of arguments of the traced function.
*/
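A minimal BPF-side sketch of the new bpf_loop() helper documented above; the section and callback names are illustrative:

	static long sum_cb(u32 index, void *ctx)
	{
		long *sum = ctx;

		*sum += index;
		return 0;	/* 0 continues, 1 stops early */
	}

	SEC("fentry/do_nanosleep")
	int sum_indices(void *ctx)
	{
		long sum = 0;

		bpf_loop(10, sum_cb, &sum, 0);	/* flags must be 0 */
		return 0;
	}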
|
||||
#define __BPF_FUNC_MAPPER(FN) \
|
||||
FN(unspec), \
|
||||
@ -5055,6 +5196,16 @@ union bpf_attr {
|
||||
FN(get_func_ip), \
|
||||
FN(get_attach_cookie), \
|
||||
FN(task_pt_regs), \
|
||||
FN(get_branch_snapshot), \
|
||||
FN(trace_vprintk), \
|
||||
FN(skc_to_unix_sock), \
|
||||
FN(kallsyms_lookup_name), \
|
||||
FN(find_vma), \
|
||||
FN(loop), \
|
||||
FN(strncmp), \
|
||||
FN(get_func_arg), \
|
||||
FN(get_func_ret), \
|
||||
FN(get_func_arg_cnt), \
|
||||
/* */
|
||||
|
||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||
@ -5284,6 +5435,8 @@ struct __sk_buff {
|
||||
__u32 gso_segs;
|
||||
__bpf_md_ptr(struct bpf_sock *, sk);
|
||||
__u32 gso_size;
|
||||
__u32 :32; /* Padding, future use. */
|
||||
__u64 hwtstamp;
|
||||
};
|
||||
|
||||
struct bpf_tunnel_key {
|
||||
@ -5577,6 +5730,7 @@ struct bpf_prog_info {
|
||||
__u64 run_time_ns;
|
||||
__u64 run_cnt;
|
||||
__u64 recursion_misses;
|
||||
__u32 verified_insns;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_map_info {
|
||||
@ -5594,6 +5748,8 @@ struct bpf_map_info {
|
||||
__u32 btf_id;
|
||||
__u32 btf_key_type_id;
|
||||
__u32 btf_value_type_id;
|
||||
__u32 :32; /* alignment pad */
|
||||
__u64 map_extra;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_btf_info {
|
||||
@ -6226,6 +6382,7 @@ struct bpf_sk_lookup {
|
||||
__u32 local_ip4; /* Network byte order */
|
||||
__u32 local_ip6[4]; /* Network byte order */
|
||||
__u32 local_port; /* Host byte order */
|
||||
__u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
|
||||
};
|
||||
|
||||
/*
|
||||
@ -6258,4 +6415,78 @@ enum {
|
||||
BTF_F_ZERO = (1ULL << 3),
|
||||
};
|
||||
|
||||
/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
|
||||
* has to be adjusted by relocations. It is emitted by llvm and passed to
|
||||
* libbpf and later to the kernel.
|
||||
*/
|
||||
enum bpf_core_relo_kind {
|
||||
BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
|
||||
BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
|
||||
BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
|
||||
BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
|
||||
BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
|
||||
BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
|
||||
BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
|
||||
BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
|
||||
BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
|
||||
BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
|
||||
BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
|
||||
BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
|
||||
};
|
||||
|
||||
/*
|
||||
* "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
|
||||
* and from libbpf to the kernel.
|
||||
*
|
||||
* CO-RE relocation captures the following data:
|
||||
* - insn_off - instruction offset (in bytes) within a BPF program that needs
|
||||
* its insn->imm field to be relocated with actual field info;
|
||||
* - type_id - BTF type ID of the "root" (containing) entity of a relocatable
|
||||
* type or field;
|
||||
* - access_str_off - offset into corresponding .BTF string section. String
|
||||
* interpretation depends on specific relocation kind:
|
||||
* - for field-based relocations, string encodes an accessed field using
|
||||
* a sequence of field and array indices, separated by colon (:). It's
|
||||
* conceptually very close to LLVM's getelementptr ([0]) instruction's
|
||||
* arguments for identifying offset to a field.
|
||||
* - for type-based relocations, strings is expected to be just "0";
|
||||
* - for enum value-based relocations, string contains an index of enum
|
||||
* value within its enum type;
|
||||
* - kind - one of enum bpf_core_relo_kind;
|
||||
*
|
||||
* Example:
|
||||
* struct sample {
|
||||
* int a;
|
||||
* struct {
|
||||
* int b[10];
|
||||
* };
|
||||
* };
|
||||
*
|
||||
* struct sample *s = ...;
|
||||
* int *x = &s->a; // encoded as "0:0" (a is field #0)
|
||||
* int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
|
||||
* // b is field #0 inside anon struct, accessing elem #5)
|
||||
* int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
|
||||
*
|
||||
* type_id for all relocs in this example will capture BTF type id of
|
||||
* `struct sample`.
|
||||
*
|
||||
* Such relocation is emitted when using __builtin_preserve_access_index()
|
||||
* Clang built-in, passing expression that captures field address, e.g.:
|
||||
*
|
||||
* bpf_probe_read(&dst, sizeof(dst),
|
||||
* __builtin_preserve_access_index(&src->a.b.c));
|
||||
*
|
||||
* In this case Clang will emit field relocation recording necessary data to
|
||||
* be able to find offset of embedded `a.b.c` field within `src` struct.
|
||||
*
|
||||
* [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
|
||||
*/
|
||||
struct bpf_core_relo {
|
||||
__u32 insn_off;
|
||||
__u32 type_id;
|
||||
__u32 access_str_off;
|
||||
enum bpf_core_relo_kind kind;
|
||||
};
|
||||
|
||||
#endif /* _UAPI__LINUX_BPF_H__ */
|
||||
|
@ -43,7 +43,7 @@ struct btf_type {
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
* FUNC, FUNC_PROTO and VAR.
|
||||
* FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
|
||||
* "type" is a type_id referring to another type.
|
||||
*/
|
||||
union {
|
||||
@ -56,25 +56,30 @@ struct btf_type {
#define BTF_INFO_VLEN(info)     ((info) & 0xffff)
#define BTF_INFO_KFLAG(info)    ((info) >> 31)

#define BTF_KIND_UNKN           0       /* Unknown */
#define BTF_KIND_INT            1       /* Integer */
#define BTF_KIND_PTR            2       /* Pointer */
#define BTF_KIND_ARRAY          3       /* Array */
#define BTF_KIND_STRUCT         4       /* Struct */
#define BTF_KIND_UNION          5       /* Union */
#define BTF_KIND_ENUM           6       /* Enumeration */
#define BTF_KIND_FWD            7       /* Forward */
#define BTF_KIND_TYPEDEF        8       /* Typedef */
#define BTF_KIND_VOLATILE       9       /* Volatile */
#define BTF_KIND_CONST          10      /* Const */
#define BTF_KIND_RESTRICT       11      /* Restrict */
#define BTF_KIND_FUNC           12      /* Function */
#define BTF_KIND_FUNC_PROTO     13      /* Function Proto */
#define BTF_KIND_VAR            14      /* Variable */
#define BTF_KIND_DATASEC        15      /* Section */
#define BTF_KIND_FLOAT          16      /* Floating point */
#define BTF_KIND_MAX            BTF_KIND_FLOAT
#define NR_BTF_KINDS            (BTF_KIND_MAX + 1)
enum {
        BTF_KIND_UNKN           = 0,    /* Unknown */
        BTF_KIND_INT            = 1,    /* Integer */
        BTF_KIND_PTR            = 2,    /* Pointer */
        BTF_KIND_ARRAY          = 3,    /* Array */
        BTF_KIND_STRUCT         = 4,    /* Struct */
        BTF_KIND_UNION          = 5,    /* Union */
        BTF_KIND_ENUM           = 6,    /* Enumeration */
        BTF_KIND_FWD            = 7,    /* Forward */
        BTF_KIND_TYPEDEF        = 8,    /* Typedef */
        BTF_KIND_VOLATILE       = 9,    /* Volatile */
        BTF_KIND_CONST          = 10,   /* Const */
        BTF_KIND_RESTRICT       = 11,   /* Restrict */
        BTF_KIND_FUNC           = 12,   /* Function */
        BTF_KIND_FUNC_PROTO     = 13,   /* Function Proto */
        BTF_KIND_VAR            = 14,   /* Variable */
        BTF_KIND_DATASEC        = 15,   /* Section */
        BTF_KIND_FLOAT          = 16,   /* Floating point */
        BTF_KIND_DECL_TAG       = 17,   /* Decl Tag */
        BTF_KIND_TYPE_TAG       = 18,   /* Type Tag */

        NR_BTF_KINDS,
        BTF_KIND_MAX            = NR_BTF_KINDS - 1,
};

/* For some specific BTF_KIND, "struct btf_type" is immediately
 * followed by extra data.
@ -170,4 +175,15 @@ struct btf_var_secinfo {
        __u32   size;
};

/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
 * additional information related to the tag applied location.
 * If component_idx == -1, the tag is applied to a struct, union,
 * variable or function. Otherwise, it is applied to a struct/union
 * member or a func argument, and component_idx indicates which member
 * or argument (0 ... vlen-1).
 */
struct btf_decl_tag {
        __s32   component_idx;
};

#endif /* _UAPI__LINUX_BTF_H__ */
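
A small illustration of the component_idx convention documented above; this sketch is not part of the commit, and decl_tag_target is a hypothetical helper built only on the uapi accessors shown earlier in this header.

#include <linux/btf.h>

/* Given a tagged struct/union type and the tag's component_idx, return
 * the btf_member the tag applies to, or NULL when the tag applies to the
 * type itself (component_idx == -1) or the index is out of range.
 */
static const struct btf_member *
decl_tag_target(const struct btf_type *t, __s32 component_idx)
{
        __u16 vlen = BTF_INFO_VLEN(t->info);
        const struct btf_member *m = (const struct btf_member *)(t + 1);

        if (component_idx < 0 || component_idx >= vlen)
                return NULL;
        return &m[component_idx];
}
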
@ -7,24 +7,23 @@

/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
        __u32   rx_packets;     /* total packets received */
        __u32   tx_packets;     /* total packets transmitted */
        __u32   rx_bytes;       /* total bytes received */
        __u32   tx_bytes;       /* total bytes transmitted */
        __u32   rx_errors;      /* bad packets received */
        __u32   tx_errors;      /* packet transmit problems */
        __u32   rx_dropped;     /* no space in linux buffers */
        __u32   tx_dropped;     /* no space available in linux */
        __u32   multicast;      /* multicast packets received */
        __u32   rx_packets;
        __u32   tx_packets;
        __u32   rx_bytes;
        __u32   tx_bytes;
        __u32   rx_errors;
        __u32   tx_errors;
        __u32   rx_dropped;
        __u32   tx_dropped;
        __u32   multicast;
        __u32   collisions;

        /* detailed rx_errors: */
        __u32   rx_length_errors;
        __u32   rx_over_errors;         /* receiver ring buff overflow */
        __u32   rx_crc_errors;          /* recved pkt with crc error */
        __u32   rx_frame_errors;        /* recv'd frame alignment error */
        __u32   rx_fifo_errors;         /* recv'r fifo overrun */
        __u32   rx_missed_errors;       /* receiver missed packet */
        __u32   rx_over_errors;
        __u32   rx_crc_errors;
        __u32   rx_frame_errors;
        __u32   rx_fifo_errors;
        __u32   rx_missed_errors;

        /* detailed tx_errors */
        __u32   tx_aborted_errors;
@ -37,29 +36,201 @@ struct rtnl_link_stats {
        __u32   rx_compressed;
        __u32   tx_compressed;

        __u32   rx_nohandler;           /* dropped, no handler found */
        __u32   rx_nohandler;
};
/* The main device statistics structure */
/**
 * struct rtnl_link_stats64 - The main device statistics structure.
 *
 * @rx_packets: Number of good packets received by the interface.
 *   For hardware interfaces counts all good packets received from the device
 *   by the host, including packets which host had to drop at various stages
 *   of processing (even in the driver).
 *
 * @tx_packets: Number of packets successfully transmitted.
 *   For hardware interfaces counts packets which host was able to successfully
 *   hand over to the device, which does not necessarily mean that packets
 *   had been successfully transmitted out of the device, only that device
 *   acknowledged it copied them out of host memory.
 *
 * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
 *
 *   For IEEE 802.3 devices should count the length of Ethernet Frames
 *   excluding the FCS.
 *
 * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
 *
 *   For IEEE 802.3 devices should count the length of Ethernet Frames
 *   excluding the FCS.
 *
 * @rx_errors: Total number of bad packets received on this network device.
 *   This counter must include events counted by @rx_length_errors,
 *   @rx_crc_errors, @rx_frame_errors and other errors not otherwise
 *   counted.
 *
 * @tx_errors: Total number of transmit problems.
 *   This counter must include events counted by @tx_aborted_errors,
 *   @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
 *   @tx_window_errors and other errors not otherwise counted.
 *
 * @rx_dropped: Number of packets received but not processed,
 *   e.g. due to lack of resources or unsupported protocol.
 *   For hardware interfaces this counter may include packets discarded
 *   due to L2 address filtering but should not include packets dropped
 *   by the device due to buffer exhaustion which are counted separately in
 *   @rx_missed_errors (since procfs folds those two counters together).
 *
 * @tx_dropped: Number of packets dropped on their way to transmission,
 *   e.g. due to lack of resources.
 *
 * @multicast: Multicast packets received.
 *   For hardware interfaces this statistic is commonly calculated
 *   at the device level (unlike @rx_packets) and therefore may include
 *   packets which did not reach the host.
 *
 *   For IEEE 802.3 devices this counter may be equivalent to:
 *
 *   - 30.3.1.1.21 aMulticastFramesReceivedOK
 *
 * @collisions: Number of collisions during packet transmissions.
 *
 * @rx_length_errors: Number of packets dropped due to invalid length.
 *   Part of aggregate "frame" errors in `/proc/net/dev`.
 *
 *   For IEEE 802.3 devices this counter should be equivalent to a sum
 *   of the following attributes:
 *
 *   - 30.3.1.1.23 aInRangeLengthErrors
 *   - 30.3.1.1.24 aOutOfRangeLengthField
 *   - 30.3.1.1.25 aFrameTooLongErrors
 *
 * @rx_over_errors: Receiver FIFO overflow event counter.
 *
 *   Historically the count of overflow events. Such events may be
 *   reported in the receive descriptors or via interrupts, and may
 *   not correspond one-to-one with dropped packets.
 *
 *   The recommended interpretation for high speed interfaces is -
 *   number of packets dropped because they did not fit into buffers
 *   provided by the host, e.g. packets larger than MTU or next buffer
 *   in the ring was not available for a scatter transfer.
 *
 *   Part of aggregate "frame" errors in `/proc/net/dev`.
 *
 *   This statistic was historically used interchangeably with
 *   @rx_fifo_errors.
 *
 *   This statistic corresponds to hardware events and is not commonly used
 *   on software devices.
 *
 * @rx_crc_errors: Number of packets received with a CRC error.
 *   Part of aggregate "frame" errors in `/proc/net/dev`.
 *
 *   For IEEE 802.3 devices this counter must be equivalent to:
 *
 *   - 30.3.1.1.6 aFrameCheckSequenceErrors
 *
 * @rx_frame_errors: Receiver frame alignment errors.
 *   Part of aggregate "frame" errors in `/proc/net/dev`.
 *
 *   For IEEE 802.3 devices this counter should be equivalent to:
 *
 *   - 30.3.1.1.7 aAlignmentErrors
 *
 * @rx_fifo_errors: Receiver FIFO error counter.
 *
 *   Historically the count of overflow events. Those events may be
 *   reported in the receive descriptors or via interrupts, and may
 *   not correspond one-to-one with dropped packets.
 *
 *   This statistic was used interchangeably with @rx_over_errors.
 *   Not recommended for use in drivers for high speed interfaces.
 *
 *   This statistic is used on software devices, e.g. to count software
 *   packet queue overflow (CAN) or sequencing errors (GRE).
 *
 * @rx_missed_errors: Count of packets missed by the host.
 *   Folded into the "drop" counter in `/proc/net/dev`.
 *
 *   Counts number of packets dropped by the device due to lack
 *   of buffer space. This usually indicates that the host interface
 *   is slower than the network interface, or host is not keeping up
 *   with the receive packet rate.
 *
 *   This statistic corresponds to hardware events and is not used
 *   on software devices.
 *
 * @tx_aborted_errors:
 *   Part of aggregate "carrier" errors in `/proc/net/dev`.
 *   For IEEE 802.3 devices capable of half-duplex operation this counter
 *   must be equivalent to:
 *
 *   - 30.3.1.1.11 aFramesAbortedDueToXSColls
 *
 *   High speed interfaces may use this counter as a general device
 *   discard counter.
 *
 * @tx_carrier_errors: Number of frame transmission errors due to loss
 *   of carrier during transmission.
 *   Part of aggregate "carrier" errors in `/proc/net/dev`.
 *
 *   For IEEE 802.3 devices this counter must be equivalent to:
 *
 *   - 30.3.1.1.13 aCarrierSenseErrors
 *
 * @tx_fifo_errors: Number of frame transmission errors due to device
 *   FIFO underrun / underflow. This condition occurs when the device
 *   begins transmission of a frame but is unable to deliver the
 *   entire frame to the transmitter in time for transmission.
 *   Part of aggregate "carrier" errors in `/proc/net/dev`.
 *
 * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
 *   old half-duplex Ethernet.
 *   Part of aggregate "carrier" errors in `/proc/net/dev`.
 *
 *   For IEEE 802.3 devices possibly equivalent to:
 *
 *   - 30.3.2.1.4 aSQETestErrors
 *
 * @tx_window_errors: Number of frame transmission errors due
 *   to late collisions (for Ethernet - after the first 64B of transmission).
 *   Part of aggregate "carrier" errors in `/proc/net/dev`.
 *
 *   For IEEE 802.3 devices this counter must be equivalent to:
 *
 *   - 30.3.1.1.10 aLateCollisions
 *
 * @rx_compressed: Number of correctly received compressed packets.
 *   This counter is only meaningful for interfaces which support
 *   packet compression (e.g. CSLIP, PPP).
 *
 * @tx_compressed: Number of transmitted compressed packets.
 *   This counter is only meaningful for interfaces which support
 *   packet compression (e.g. CSLIP, PPP).
 *
 * @rx_nohandler: Number of packets received on the interface
 *   but dropped by the networking stack because the device is
 *   not designated to receive packets (e.g. backup link in a bond).
 */
struct rtnl_link_stats64 {
        __u64   rx_packets;     /* total packets received */
        __u64   tx_packets;     /* total packets transmitted */
        __u64   rx_bytes;       /* total bytes received */
        __u64   tx_bytes;       /* total bytes transmitted */
        __u64   rx_errors;      /* bad packets received */
        __u64   tx_errors;      /* packet transmit problems */
        __u64   rx_dropped;     /* no space in linux buffers */
        __u64   tx_dropped;     /* no space available in linux */
        __u64   multicast;      /* multicast packets received */
        __u64   rx_packets;
        __u64   tx_packets;
        __u64   rx_bytes;
        __u64   tx_bytes;
        __u64   rx_errors;
        __u64   tx_errors;
        __u64   rx_dropped;
        __u64   tx_dropped;
        __u64   multicast;
        __u64   collisions;

        /* detailed rx_errors: */
        __u64   rx_length_errors;
        __u64   rx_over_errors;         /* receiver ring buff overflow */
        __u64   rx_crc_errors;          /* recved pkt with crc error */
        __u64   rx_frame_errors;        /* recv'd frame alignment error */
        __u64   rx_fifo_errors;         /* recv'r fifo overrun */
        __u64   rx_missed_errors;       /* receiver missed packet */
        __u64   rx_over_errors;
        __u64   rx_crc_errors;
        __u64   rx_frame_errors;
        __u64   rx_fifo_errors;
        __u64   rx_missed_errors;

        /* detailed tx_errors */
        __u64   tx_aborted_errors;
@ -71,8 +242,7 @@ struct rtnl_link_stats64 {
        /* for cslip etc */
        __u64   rx_compressed;
        __u64   tx_compressed;

        __u64   rx_nohandler;           /* dropped, no handler found */
        __u64   rx_nohandler;
};
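
Several fields above note that procfs folds them into aggregate columns. As a hedged sketch, not part of this commit and derived only from the "Part of aggregate ..." notes in the kernel-doc above, the aggregates could be computed like this:

#include <linux/if_link.h>

/* procfs-style aggregates, per the kernel-doc notes above: "frame"
 * folds length/over/CRC/frame errors, "carrier" folds aborted/
 * carrier/FIFO/heartbeat/window errors.
 */
static __u64 procfs_frame_errors(const struct rtnl_link_stats64 *s)
{
        return s->rx_length_errors + s->rx_over_errors +
               s->rx_crc_errors + s->rx_frame_errors;
}

static __u64 procfs_carrier_errors(const struct rtnl_link_stats64 *s)
{
        return s->tx_aborted_errors + s->tx_carrier_errors +
               s->tx_fifo_errors + s->tx_heartbeat_errors +
               s->tx_window_errors;
}
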
/* The struct should be in sync with struct ifmap */
@ -170,12 +340,30 @@ enum {
        IFLA_PROP_LIST,
        IFLA_ALT_IFNAME, /* Alternative ifname */
        IFLA_PERM_ADDRESS,
        IFLA_PROTO_DOWN_REASON,

        /* device (sysfs) name as parent, used instead
         * of IFLA_LINK where there's no parent netdev
         */
        IFLA_PARENT_DEV_NAME,
        IFLA_PARENT_DEV_BUS_NAME,
        IFLA_GRO_MAX_SIZE,

        __IFLA_MAX
};


#define IFLA_MAX (__IFLA_MAX - 1)

enum {
        IFLA_PROTO_DOWN_REASON_UNSPEC,
        IFLA_PROTO_DOWN_REASON_MASK,    /* u32, mask for reason bits */
        IFLA_PROTO_DOWN_REASON_VALUE,   /* u32, reason bit value */

        __IFLA_PROTO_DOWN_REASON_CNT,
        IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
};

/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@ -293,6 +481,7 @@ enum {
        IFLA_BR_MCAST_MLD_VERSION,
        IFLA_BR_VLAN_STATS_PER_PORT,
        IFLA_BR_MULTI_BOOLOPT,
        IFLA_BR_MCAST_QUERIER_STATE,
        __IFLA_BR_MAX,
};

@ -346,6 +535,8 @@ enum {
        IFLA_BRPORT_BACKUP_PORT,
        IFLA_BRPORT_MRP_RING_OPEN,
        IFLA_BRPORT_MRP_IN_OPEN,
        IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
        IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
        __IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@ -433,6 +624,7 @@ enum macvlan_macaddr_mode {
};

#define MACVLAN_FLAG_NOPROMISC  1
#define MACVLAN_FLAG_NODST      2 /* skip dst macvlan if matching src macvlan */

/* VRF section */
enum {
@ -597,6 +789,18 @@ enum ifla_geneve_df {
        GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};

/* Bareudp section */
enum {
        IFLA_BAREUDP_UNSPEC,
        IFLA_BAREUDP_PORT,
        IFLA_BAREUDP_ETHERTYPE,
        IFLA_BAREUDP_SRCPORT_MIN,
        IFLA_BAREUDP_MULTIPROTO_MODE,
        __IFLA_BAREUDP_MAX
};

#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)

/* PPP section */
enum {
        IFLA_PPP_UNSPEC,
@ -655,6 +859,7 @@ enum {
        IFLA_BOND_TLB_DYNAMIC_LB,
        IFLA_BOND_PEER_NOTIF_DELAY,
        IFLA_BOND_AD_LACP_ACTIVE,
        IFLA_BOND_MISSED_MAX,
        __IFLA_BOND_MAX,
};

@ -899,7 +1104,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)

/* HSR section */
/* HSR/PRP section, both use the same interface */

/* Different redundancy protocols for hsr device */
enum {
        HSR_PROTOCOL_HSR,
        HSR_PROTOCOL_PRP,
        HSR_PROTOCOL_MAX,
};

enum {
        IFLA_HSR_UNSPEC,
@ -909,6 +1121,9 @@ enum {
        IFLA_HSR_SUPERVISION_ADDR,      /* Supervision frame multicast addr */
        IFLA_HSR_SEQ_NR,
        IFLA_HSR_VERSION,               /* HSR version */
        IFLA_HSR_PROTOCOL,              /* Indicate different protocol than
                                         * HSR. For example PRP.
                                         */
        __IFLA_HSR_MAX,
};

@ -1033,6 +1248,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS        (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4         (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4          (1U << 3)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5         (1U << 4)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5          (1U << 5)

enum {
        IFLA_RMNET_UNSPEC,
@ -1048,4 +1265,14 @@ struct ifla_rmnet_flags {
        __u32   mask;
};

/* MCTP section */

enum {
        IFLA_MCTP_UNSPEC,
        IFLA_MCTP_NET,
        __IFLA_MCTP_MAX,
};

#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)

#endif /* _UAPI_LINUX_IF_LINK_H */
@ -269,6 +269,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_AP_RESET_HOLD    32
#define KVM_EXIT_X86_BUS_LOCK     33
#define KVM_EXIT_XEN              34
#define KVM_EXIT_RISCV_SBI        35

/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@ -397,13 +398,23 @@ struct kvm_run {
                 * "ndata" is correct, that new fields are enumerated in "flags",
                 * and that each flag enumerates fields that are 64-bit aligned
                 * and sized (so that ndata+internal.data[] is valid/accurate).
                 *
                 * Space beyond the defined fields may be used to store arbitrary
                 * debug information relating to the emulation failure. It is
                 * accounted for in "ndata" but the format is unspecified and is
                 * not represented in "flags". Any such information is *not* ABI!
                 */
                struct {
                        __u32 suberror;
                        __u32 ndata;
                        __u64 flags;
                        __u8  insn_size;
                        __u8  insn_bytes[15];
                        union {
                                struct {
                                        __u8  insn_size;
                                        __u8  insn_bytes[15];
                                };
                        };
                        /* Arbitrary debug data may follow. */
                } emulation_failure;
                /* KVM_EXIT_OSI */
                struct {
@ -469,6 +480,13 @@ struct kvm_run {
                } msr;
                /* KVM_EXIT_XEN */
                struct kvm_xen_exit xen;
                /* KVM_EXIT_RISCV_SBI */
                struct {
                        unsigned long extension_id;
                        unsigned long function_id;
                        unsigned long args[6];
                        unsigned long ret[2];
                } riscv_sbi;
                /* Fix the size of the union. */
                char padding[256];
        };
@ -1112,6 +1130,11 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
#define KVM_CAP_VM_GPA_BITS 207
#define KVM_CAP_XSAVE2 208
#define KVM_CAP_SYS_ATTRIBUTES 209
#define KVM_CAP_PPC_AIL_MODE_3 210

#ifdef KVM_CAP_IRQ_ROUTING

@ -1143,11 +1166,20 @@ struct kvm_irq_routing_hv_sint {
        __u32 sint;
};

struct kvm_irq_routing_xen_evtchn {
        __u32 port;
        __u32 vcpu;
        __u32 priority;
};

#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1))

/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
#define KVM_IRQ_ROUTING_XEN_EVTCHN 5

struct kvm_irq_routing_entry {
        __u32 gsi;
@ -1159,6 +1191,7 @@ struct kvm_irq_routing_entry {
                struct kvm_irq_routing_msi msi;
                struct kvm_irq_routing_s390_adapter adapter;
                struct kvm_irq_routing_hv_sint hv_sint;
                struct kvm_irq_routing_xen_evtchn xen_evtchn;
                __u32 pad[8];
        } u;
};
@ -1189,6 +1222,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL      (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO          (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE             (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL        (1 << 4)

struct kvm_xen_hvm_config {
        __u32 flags;
@ -1223,11 +1257,16 @@ struct kvm_irqfd {

/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
#define KVM_CLOCK_TSC_STABLE    2
#define KVM_CLOCK_REALTIME      (1 << 2)
#define KVM_CLOCK_HOST_TSC      (1 << 3)

struct kvm_clock_data {
        __u64 clock;
        __u32 flags;
        __u32 pad[9];
        __u32 pad0;
        __u64 realtime;
        __u64 host_tsc;
        __u32 pad[4];
};

/* For KVM_CAP_SW_TLB */
@ -2007,4 +2046,7 @@ struct kvm_stats_desc {

#define KVM_GET_STATS_FD  _IO(KVMIO,  0xce)

/* Available with KVM_CAP_XSAVE2 */
#define KVM_GET_XSAVE2            _IOR(KVMIO,  0xcf, struct kvm_xsave)

#endif /* __LINUX_KVM_H */
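
The extended kvm_clock_data above can be exercised with the existing KVM_GET_CLOCK ioctl; a minimal sketch, not part of this commit and assuming a kernel new enough to report the new flags:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        struct kvm_clock_data data = {0};
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);

        if (vm < 0 || ioctl(vm, KVM_GET_CLOCK, &data) < 0) {
                perror("KVM_GET_CLOCK");
                return 1;
        }
        printf("clock=%llu\n", (unsigned long long)data.clock);
        if (data.flags & KVM_CLOCK_REALTIME)    /* host wall clock at capture */
                printf("realtime=%llu\n", (unsigned long long)data.realtime);
        if (data.flags & KVM_CLOCK_HOST_TSC)    /* host TSC at capture */
                printf("host_tsc=%llu\n", (unsigned long long)data.host_tsc);
        return 0;
}
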
@ -465,6 +465,8 @@ struct perf_event_attr {
        /*
         * User provided data if sigtrap=1, passed back to user via
         * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
         * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
         * truncated accordingly on 32 bit architectures.
         */
        __u64   sig_data;
};
@ -1141,6 +1143,21 @@ enum perf_event_type {
         */
        PERF_RECORD_TEXT_POKE                   = 20,

        /*
         * Data written to the AUX area by hardware due to aux_output, may need
         * to be matched to the event by an architecture-specific hardware ID.
         * This records the hardware ID, but requires sample_id to provide the
         * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
         * records from multiple events.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u64                             hw_id;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_AUX_OUTPUT_HW_ID            = 21,

        PERF_RECORD_MAX,                        /* non-ABI */
};

@ -1210,14 +1227,16 @@ union perf_mem_data_src {
                        mem_remote:1,   /* remote */
                        mem_snoopx:2,   /* snoop mode, ext */
                        mem_blk:3,      /* access blocked */
                        mem_rsvd:21;
                        mem_hops:3,     /* hop level */
                        mem_rsvd:18;
        };
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_rsvd:21,
                __u64   mem_rsvd:18,
                        mem_hops:3,     /* hop level */
                        mem_blk:3,      /* access blocked */
                        mem_snoopx:2,   /* snoop mode, ext */
                        mem_remote:1,   /* remote */
@ -1241,7 +1260,13 @@ union perf_mem_data_src {
#define PERF_MEM_OP_EXEC        0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT       0

/* memory hierarchy (memory level, hit or miss) */
/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * The namespace is kept in order to not break defined ABIs.
 *
 * memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA         0x01 /* not available */
#define PERF_MEM_LVL_HIT        0x02 /* hit level */
#define PERF_MEM_LVL_MISS       0x04 /* miss level */
@ -1307,6 +1332,14 @@ union perf_mem_data_src {
#define PERF_MEM_BLK_ADDR       0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT      40

/* hop level */
#define PERF_MEM_HOPS_0         0x01 /* remote core, same node */
#define PERF_MEM_HOPS_1         0x02 /* remote node, same socket */
#define PERF_MEM_HOPS_2         0x03 /* remote socket, same board */
#define PERF_MEM_HOPS_3         0x04 /* remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT     43

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
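
For illustration, the new HOPS field composes with the existing PERF_MEM_S() helper like any other namespace when synthesizing a perf_mem_data_src value; a short sketch, not part of this commit, assuming a perf_event.h new enough to carry the HOPS defines:

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
        /* a load that hit in memory one hop away (remote node, same socket) */
        __u64 src = PERF_MEM_S(OP, LOAD) |
                    PERF_MEM_S(LVL, HIT) |
                    PERF_MEM_S(HOPS, 1);

        printf("data_src = 0x%llx\n", (unsigned long long)src);
        return 0;
}
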
@ -235,7 +235,7 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL         56
# define PR_TAGGED_ADDR_ENABLE          (1UL << 0)
/* MTE tag check fault modes */
# define PR_MTE_TCF_NONE                0
# define PR_MTE_TCF_NONE                0UL
# define PR_MTE_TCF_SYNC                (1UL << 1)
# define PR_MTE_TCF_ASYNC               (1UL << 2)
# define PR_MTE_TCF_MASK                (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
@ -268,5 +268,11 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SHARE_TO         2 /* push core_sched cookie to pid */
# define PR_SCHED_CORE_SHARE_FROM       3 /* pull core_sched cookie to pid */
# define PR_SCHED_CORE_MAX              4
# define PR_SCHED_CORE_SCOPE_THREAD             0
# define PR_SCHED_CORE_SCOPE_THREAD_GROUP       1
# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP      2

#define PR_SET_VMA              0x53564d41
# define PR_SET_VMA_ANON_NAME           0

#endif /* _LINUX_PRCTL_H */
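
The PR_SET_VMA / PR_SET_VMA_ANON_NAME pair added above names an anonymous mapping so it shows up in /proc/<pid>/maps; a minimal sketch, not part of this commit, which assumes a kernel built with anon VMA naming support:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        /* tag the mapping; shows as "[anon:demo]" in /proc/self/maps */
        if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                  (unsigned long)p, len, "demo"))
                perror("PR_SET_VMA_ANON_NAME");
        return 0;
}
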
@ -204,6 +204,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
/*
 * For S32/U32 formats, the 'msbits' hardware parameter is often used to deliver
 * the available bit count aligned to the most significant bit. It's for the case
 * of so-called 'left-justified' or 'right-padded' samples which have less width
 * than 32 bits.
 */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
@ -302,7 +307,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME     0x04000000  /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME  0x08000000  /* report synchronized audio/system time */
#define SNDRV_PCM_INFO_EXPLICIT_SYNC    0x10000000      /* needs explicit sync of pointers and data */

#define SNDRV_PCM_INFO_NO_REWINDS       0x20000000      /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER    0x40000000      /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES   0x80000000      /* internal kernel flag - FIFO size is in frames */

@ -1004,7 +1009,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE             (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE         (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE          (1<<2)  /* control value may be changed without a notification */
// (1 << 3) is unused.
/* (1 << 3) is unused. */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ          (1<<4)  /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE         (1<<5)  /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE     (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
libbpf_version.h
libbpf.pc
libbpf.so.*
TAGS
@ -8,7 +8,8 @@ VERSION_SCRIPT := libbpf.map
LIBBPF_VERSION := $(shell \
        grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \
        sort -rV | head -n1 | cut -d'_' -f2)
LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
LIBBPF_MAJOR_VERSION := $(word 1,$(subst ., ,$(LIBBPF_VERSION)))
LIBBPF_MINOR_VERSION := $(word 2,$(subst ., ,$(LIBBPF_VERSION)))

MAKEFLAGS += --no-print-directory

@ -59,7 +60,8 @@ ifndef VERBOSE
  VERBOSE = 0
endif

INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.) \
           -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi

export prefix libdir src obj

@ -82,11 +84,13 @@ else
endif

# Append required CFLAGS
override CFLAGS += -std=gnu89
override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum
override CFLAGS += -Werror -Wall
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
override CFLAGS += $(CLANG_CROSS_FLAGS)

# flags specific for shared library
SHLIB_FLAGS := -DSHARED -fPIC

@ -112,6 +116,7 @@ STATIC_OBJDIR := $(OUTPUT)staticobjs/
BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h
BPF_GENERATED := $(BPF_HELPER_DEFS)

LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))

@ -136,25 +141,19 @@ all: fixdep

all_cmd: $(CMD_TARGETS) check

$(BPF_IN_SHARED): force $(BPF_HELPER_DEFS)
$(BPF_IN_SHARED): force $(BPF_GENERATED)
        @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
        (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
        @(test -f ../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
        (diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf_common.h' differs from latest version at 'include/uapi/linux/bpf_common.h'" >&2 )) || true
        @(test -f ../../include/uapi/linux/netlink.h -a -f ../../../include/uapi/linux/netlink.h && ( \
        (diff -B ../../include/uapi/linux/netlink.h ../../../include/uapi/linux/netlink.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/netlink.h' differs from latest version at 'include/uapi/linux/netlink.h'" >&2 )) || true
        @(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
        (diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
        @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
        (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"

$(BPF_IN_STATIC): force $(BPF_HELPER_DEFS)
$(BPF_IN_STATIC): force $(BPF_GENERATED)
        $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)

$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
@ -164,7 +163,7 @@ $(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)

$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) $(VERSION_SCRIPT)
        $(QUIET_LINK)$(CC) $(LDFLAGS) \
        $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \
                --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
                -Wl,--version-script=$(VERSION_SCRIPT) $< -lelf -lz -o $@
        @ln -sf $(@F) $(OUTPUT)libbpf.so
@ -179,7 +178,7 @@ $(OUTPUT)libbpf.pc:
                -e "s|@VERSION@|$(LIBBPF_VERSION)|" \
                < libbpf.pc.template > $@

check: check_abi
check: check_abi check_version

check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
        @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
@ -205,6 +204,21 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
                exit 1; \
        fi

HDR_MAJ_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
HDR_MIN_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)

check_version: $(VERSION_SCRIPT) libbpf_version.h
        @if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \
                echo "Error: libbpf major version mismatch detected: " \
                     "'$(HDR_MAJ_VERSION)' != '$(LIBBPF_MAJOR_VERSION)'" >&2; \
                exit 1; \
        fi
        @if [ "$(HDR_MIN_VERSION)" != "$(LIBBPF_MINOR_VERSION)" ]; then \
                echo "Error: libbpf minor version mismatch detected: " \
                     "'$(HDR_MIN_VERSION)' != '$(LIBBPF_MINOR_VERSION)'" >&2; \
                exit 1; \
        fi

define do_install_mkdir
        if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
@ -223,14 +237,24 @@ install_lib: all_cmd
        $(call do_install_mkdir,$(libdir_SQ)); \
        cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)

INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
        bpf_helpers.h $(BPF_HELPER_DEFS) bpf_tracing.h \
        bpf_endian.h bpf_core_read.h skel_internal.h
SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
        bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
        skel_internal.h libbpf_version.h
GEN_HDRS := $(BPF_GENERATED)

install_headers: $(BPF_HELPER_DEFS)
        $(call QUIET_INSTALL, headers) \
        $(foreach hdr,$(INSTALL_HEADERS), \
                $(call do_install,$(hdr),$(prefix)/include/bpf,644);)
INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf
INSTALL_SRC_HDRS := $(addprefix $(INSTALL_PFX)/, $(SRC_HDRS))
INSTALL_GEN_HDRS := $(addprefix $(INSTALL_PFX)/, $(notdir $(GEN_HDRS)))

$(INSTALL_SRC_HDRS): $(INSTALL_PFX)/%.h: %.h
        $(call QUIET_INSTALL, $@) \
                $(call do_install,$<,$(prefix)/include/bpf,644)

$(INSTALL_GEN_HDRS): $(INSTALL_PFX)/%.h: $(OUTPUT)%.h
        $(call QUIET_INSTALL, $@) \
                $(call do_install,$<,$(prefix)/include/bpf,644)

install_headers: $(BPF_GENERATED) $(INSTALL_SRC_HDRS) $(INSTALL_GEN_HDRS)

install_pkgconfig: $(PC_FILE)
        $(call QUIET_INSTALL, $(PC_FILE)) \
@ -240,12 +264,12 @@ install: install_lib install_pkgconfig install_headers

clean:
        $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
                *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
                *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \
                $(SHARED_OBJDIR) $(STATIC_OBJDIR) \
                $(addprefix $(OUTPUT), \
                        *.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc)

PHONY += force cscope tags
PHONY += force cscope tags check check_abi check_version
force:

cscope:
@ -28,6 +28,9 @@
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
@ -49,6 +52,12 @@
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
@ -65,133 +74,217 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
        return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
                             unsigned int size)
{
        int fd;

        fd = sys_bpf(cmd, attr, size);
        return ensure_good_fd(fd);
}

#define PROG_LOAD_ATTEMPTS 5

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
        int retries = 5;
        int fd;

        do {
                fd = sys_bpf(BPF_PROG_LOAD, attr, size);
        } while (fd < 0 && errno == EAGAIN && retries-- > 0);
                fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
        } while (fd < 0 && errno == EAGAIN && --attempts > 0);

        return fd;
}

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(void)
{
        const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
        struct bpf_insn insns[] = {
                BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
                BPF_EXIT_INSN(),
        };
        size_t insn_cnt = sizeof(insns) / sizeof(insns[0]);
        union bpf_attr attr;
        int prog_fd;

        /* attempt to load the probe program; if the helper is known,
         * the kernel is new enough to use memcg-based accounting
         */
        memset(&attr, 0, prog_load_attr_sz);
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns = ptr_to_u64(insns);
        attr.insn_cnt = insn_cnt;
        attr.license = ptr_to_u64("GPL");

        prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
        if (prog_fd >= 0) {
                close(prog_fd);
                return 1;
        }
        return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
        if (memlock_bumped)
                return libbpf_err(-EBUSY);

        memlock_rlim = memlock_bytes;
        return 0;
}

int bump_rlimit_memlock(void)
{
        struct rlimit rlim;

        /* this is the default in libbpf 1.0, but for now user has to opt-in explicitly */
        if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK))
                return 0;

        /* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
        if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
                return 0;

        memlock_bumped = true;

        /* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
        if (memlock_rlim == 0)
                return 0;

        rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
        if (setrlimit(RLIMIT_MEMLOCK, &rlim))
                return -errno;

        return 0;
}
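
As a hedged usage sketch, not part of this commit: on pre-5.11 kernels a caller can opt into the auto-bump behaviour implemented above, or cap the limit explicitly, before loading any programs or maps:

#include <bpf/libbpf.h>
#include <bpf/libbpf_legacy.h>

static void setup_memlock(void)
{
        /* opt into the libbpf-1.0 behaviour: bump RLIMIT_MEMLOCK only
         * when the kernel lacks memcg-based accounting
         */
        libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);

        /* optionally cap the bump at 64 MiB instead of RLIM_INFINITY */
        libbpf_set_memlock_rlim(64UL * 1024 * 1024);
}
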

int bpf_map_create(enum bpf_map_type map_type,
                   const char *map_name,
                   __u32 key_size,
                   __u32 value_size,
                   __u32 max_entries,
                   const struct bpf_map_create_opts *opts)
{
        const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
        union bpf_attr attr;
        int fd;

        memset(&attr, '\0', sizeof(attr));
        bump_rlimit_memlock();

        attr.map_type = create_attr->map_type;
        attr.key_size = create_attr->key_size;
        attr.value_size = create_attr->value_size;
        attr.max_entries = create_attr->max_entries;
        attr.map_flags = create_attr->map_flags;
        if (create_attr->name)
                memcpy(attr.map_name, create_attr->name,
                       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
        attr.numa_node = create_attr->numa_node;
        attr.btf_fd = create_attr->btf_fd;
        attr.btf_key_type_id = create_attr->btf_key_type_id;
        attr.btf_value_type_id = create_attr->btf_value_type_id;
        attr.map_ifindex = create_attr->map_ifindex;
        if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
                attr.btf_vmlinux_value_type_id =
                        create_attr->btf_vmlinux_value_type_id;
        else
                attr.inner_map_fd = create_attr->inner_map_fd;
        memset(&attr, 0, attr_sz);

        fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
        if (!OPTS_VALID(opts, bpf_map_create_opts))
                return libbpf_err(-EINVAL);

        attr.map_type = map_type;
        if (map_name)
                libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
        attr.key_size = key_size;
        attr.value_size = value_size;
        attr.max_entries = max_entries;

        attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
        attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
        attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
        attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);

        attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
        attr.map_flags = OPTS_GET(opts, map_flags, 0);
        attr.map_extra = OPTS_GET(opts, map_extra, 0);
        attr.numa_node = OPTS_GET(opts, numa_node, 0);
        attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

        fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
        return libbpf_err_errno(fd);
}
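
For reference, a hedged example of calling the new bpf_map_create() API, not part of this commit, creating a plain hash map with an opts struct:

#include <stdio.h>
#include <bpf/bpf.h>

int create_demo_map(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                .map_flags = BPF_F_NO_PREALLOC,
        );
        /* u32 keys, u64 values, up to 1024 entries */
        int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo_map",
                                sizeof(__u32), sizeof(__u64), 1024, &opts);

        if (fd < 0)
                perror("bpf_map_create");
        return fd;
}
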

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
        LIBBPF_OPTS(bpf_map_create_opts, p);

        p.map_flags = create_attr->map_flags;
        p.numa_node = create_attr->numa_node;
        p.btf_fd = create_attr->btf_fd;
        p.btf_key_type_id = create_attr->btf_key_type_id;
        p.btf_value_type_id = create_attr->btf_value_type_id;
        p.map_ifindex = create_attr->map_ifindex;
        if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS)
                p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id;
        else
                p.inner_map_fd = create_attr->inner_map_fd;

        return bpf_map_create(create_attr->map_type, create_attr->name,
                              create_attr->key_size, create_attr->value_size,
                              create_attr->max_entries, &p);
}

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
                        int key_size, int value_size, int max_entries,
                        __u32 map_flags, int node)
{
        struct bpf_create_map_attr map_attr = {};
        LIBBPF_OPTS(bpf_map_create_opts, opts);

        map_attr.name = name;
        map_attr.map_type = map_type;
        map_attr.map_flags = map_flags;
        map_attr.key_size = key_size;
        map_attr.value_size = value_size;
        map_attr.max_entries = max_entries;
        opts.map_flags = map_flags;
        if (node >= 0) {
                map_attr.numa_node = node;
                map_attr.map_flags |= BPF_F_NUMA_NODE;
                opts.numa_node = node;
                opts.map_flags |= BPF_F_NUMA_NODE;
        }

        return bpf_create_map_xattr(&map_attr);
        return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
                   int value_size, int max_entries, __u32 map_flags)
{
        struct bpf_create_map_attr map_attr = {};
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);

        map_attr.map_type = map_type;
        map_attr.map_flags = map_flags;
        map_attr.key_size = key_size;
        map_attr.value_size = value_size;
        map_attr.max_entries = max_entries;

        return bpf_create_map_xattr(&map_attr);
        return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
                        int key_size, int value_size, int max_entries,
                        __u32 map_flags)
{
        struct bpf_create_map_attr map_attr = {};
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);

        map_attr.name = name;
        map_attr.map_type = map_type;
        map_attr.map_flags = map_flags;
        map_attr.key_size = key_size;
        map_attr.value_size = value_size;
        map_attr.max_entries = max_entries;

        return bpf_create_map_xattr(&map_attr);
        return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
}

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
                               int key_size, int inner_map_fd, int max_entries,
                               __u32 map_flags, int node)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, '\0', sizeof(attr));

        attr.map_type = map_type;
        attr.key_size = key_size;
        attr.value_size = 4;
        attr.inner_map_fd = inner_map_fd;
        attr.max_entries = max_entries;
        attr.map_flags = map_flags;
        if (name)
                memcpy(attr.map_name, name,
                       min(strlen(name), BPF_OBJ_NAME_LEN - 1));
        LIBBPF_OPTS(bpf_map_create_opts, opts);

        opts.inner_map_fd = inner_map_fd;
        opts.map_flags = map_flags;
        if (node >= 0) {
                attr.map_flags |= BPF_F_NUMA_NODE;
                attr.numa_node = node;
                opts.map_flags |= BPF_F_NUMA_NODE;
                opts.numa_node = node;
        }

        fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
        return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
                          int key_size, int inner_map_fd, int max_entries,
                          __u32 map_flags)
{
        return bpf_create_map_in_map_node(map_type, name, key_size,
                                          inner_map_fd, max_entries, map_flags,
                                          -1);
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                .inner_map_fd = inner_map_fd,
                .map_flags = map_flags,
        );

        return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
}

static void *
@ -219,57 +312,95 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
        return info;
}

int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
                         const char *prog_name, const char *license,
                         const struct bpf_insn *insns, size_t insn_cnt,
                         const struct bpf_prog_load_opts *opts)
{
        void *finfo = NULL, *linfo = NULL;
        const char *func_info, *line_info;
        __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
        __u32 func_info_rec_size, line_info_rec_size;
        int fd, attempts;
        union bpf_attr attr;
        int fd;
        char *log_buf;

        if (!load_attr->log_buf != !load_attr->log_buf_sz)
        bump_rlimit_memlock();

        if (!OPTS_VALID(opts, bpf_prog_load_opts))
                return libbpf_err(-EINVAL);

        if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
        attempts = OPTS_GET(opts, attempts, 0);
        if (attempts < 0)
                return libbpf_err(-EINVAL);
        if (attempts == 0)
                attempts = PROG_LOAD_ATTEMPTS;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = load_attr->prog_type;
        attr.expected_attach_type = load_attr->expected_attach_type;

        if (load_attr->attach_prog_fd)
                attr.attach_prog_fd = load_attr->attach_prog_fd;
        attr.prog_type = prog_type;
        attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

        attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
        attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
        attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
        attr.kern_version = OPTS_GET(opts, kern_version, 0);

        if (prog_name)
                libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
        attr.license = ptr_to_u64(license);

        if (insn_cnt > UINT_MAX)
                return libbpf_err(-E2BIG);

        attr.insns = ptr_to_u64(insns);
        attr.insn_cnt = (__u32)insn_cnt;

        attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
        attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

        if (attach_prog_fd && attach_btf_obj_fd)
                return libbpf_err(-EINVAL);

        attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
        if (attach_prog_fd)
                attr.attach_prog_fd = attach_prog_fd;
        else
                attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd;
        attr.attach_btf_id = load_attr->attach_btf_id;
                attr.attach_btf_obj_fd = attach_btf_obj_fd;

        attr.prog_ifindex = load_attr->prog_ifindex;
        attr.kern_version = load_attr->kern_version;
        log_buf = OPTS_GET(opts, log_buf, NULL);
        log_size = OPTS_GET(opts, log_size, 0);
        log_level = OPTS_GET(opts, log_level, 0);

        attr.insn_cnt = (__u32)load_attr->insn_cnt;
        attr.insns = ptr_to_u64(load_attr->insns);
        attr.license = ptr_to_u64(load_attr->license);
        if (!!log_buf != !!log_size)
                return libbpf_err(-EINVAL);
        if (log_level > (4 | 2 | 1))
                return libbpf_err(-EINVAL);
        if (log_level && !log_buf)
                return libbpf_err(-EINVAL);

        attr.log_level = load_attr->log_level;
        if (attr.log_level) {
                attr.log_buf = ptr_to_u64(load_attr->log_buf);
                attr.log_size = load_attr->log_buf_sz;
        func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
        func_info = OPTS_GET(opts, func_info, NULL);
        attr.func_info_rec_size = func_info_rec_size;
        attr.func_info = ptr_to_u64(func_info);
        attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

        line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
        line_info = OPTS_GET(opts, line_info, NULL);
        attr.line_info_rec_size = line_info_rec_size;
        attr.line_info = ptr_to_u64(line_info);
        attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

        attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

        if (log_level) {
                attr.log_buf = ptr_to_u64(log_buf);
                attr.log_size = log_size;
                attr.log_level = log_level;
        }

        attr.prog_btf_fd = load_attr->prog_btf_fd;
        attr.prog_flags = load_attr->prog_flags;

        attr.func_info_rec_size = load_attr->func_info_rec_size;
        attr.func_info_cnt = load_attr->func_info_cnt;
        attr.func_info = ptr_to_u64(load_attr->func_info);

        attr.line_info_rec_size = load_attr->line_info_rec_size;
        attr.line_info_cnt = load_attr->line_info_cnt;
        attr.line_info = ptr_to_u64(load_attr->line_info);

        if (load_attr->name)
                memcpy(attr.prog_name, load_attr->name,
                       min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1));

        fd = sys_bpf_prog_load(&attr, sizeof(attr));
        fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
        if (fd >= 0)
                return fd;

@ -279,11 +410,11 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
         */
        while (errno == E2BIG && (!finfo || !linfo)) {
                if (!finfo && attr.func_info_cnt &&
                    attr.func_info_rec_size < load_attr->func_info_rec_size) {
                    attr.func_info_rec_size < func_info_rec_size) {
                        /* try with corrected func info records */
                        finfo = alloc_zero_tailing_info(load_attr->func_info,
                                                        load_attr->func_info_cnt,
                                                        load_attr->func_info_rec_size,
                        finfo = alloc_zero_tailing_info(func_info,
                                                        attr.func_info_cnt,
                                                        func_info_rec_size,
                                                        attr.func_info_rec_size);
                        if (!finfo) {
                                errno = E2BIG;
@ -291,13 +422,12 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
                        }

                        attr.func_info = ptr_to_u64(finfo);
                        attr.func_info_rec_size = load_attr->func_info_rec_size;
                        attr.func_info_rec_size = func_info_rec_size;
                } else if (!linfo && attr.line_info_cnt &&
                           attr.line_info_rec_size <
                           load_attr->line_info_rec_size) {
                        linfo = alloc_zero_tailing_info(load_attr->line_info,
                                                        load_attr->line_info_cnt,
                                                        load_attr->line_info_rec_size,
                           attr.line_info_rec_size < line_info_rec_size) {
                        linfo = alloc_zero_tailing_info(line_info,
                                                        attr.line_info_cnt,
                                                        line_info_rec_size,
                                                        attr.line_info_rec_size);
                        if (!linfo) {
                                errno = E2BIG;
@ -305,26 +435,27 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
                        }

                        attr.line_info = ptr_to_u64(linfo);
                        attr.line_info_rec_size = load_attr->line_info_rec_size;
                        attr.line_info_rec_size = line_info_rec_size;
                } else {
                        break;
                }

                fd = sys_bpf_prog_load(&attr, sizeof(attr));
                fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
                if (fd >= 0)
                        goto done;
        }

        if (load_attr->log_level || !load_attr->log_buf)
                goto done;
        if (log_level == 0 && log_buf) {
                /* log_level == 0 with non-NULL log_buf requires retrying on error
                 * with log_level == 1 and log_buf/log_buf_size set, to get details of
                 * failure
                 */
                attr.log_buf = ptr_to_u64(log_buf);
                attr.log_size = log_size;
                attr.log_level = 1;

        /* Try again with log */
        attr.log_buf = ptr_to_u64(load_attr->log_buf);
        attr.log_size = load_attr->log_buf_sz;
        attr.log_level = 1;
        load_attr->log_buf[0] = 0;

        fd = sys_bpf_prog_load(&attr, sizeof(attr));
                fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
        }
done:
        /* free() doesn't affect errno, so we don't need to restore it */
        free(finfo);
@ -332,17 +463,20 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
        return libbpf_err_errno(fd);
}
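
And a hedged usage sketch for the new bpf_prog_load() entry point above, not part of this commit, loading a trivial socket filter with a caller-provided verifier log buffer:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int load_demo_prog(void)
{
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 }, /* r0 = 0 */
                { .code = BPF_JMP | BPF_EXIT },                                /* exit */
        };
        static char log[4096];
        LIBBPF_OPTS(bpf_prog_load_opts, opts,
                .log_buf = log,
                .log_size = sizeof(log),
                .log_level = 1,
        );
        int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "demo_prog", "GPL",
                               insns, sizeof(insns) / sizeof(insns[0]), &opts);

        if (fd < 0)
                fprintf(stderr, "load failed: %s\n", log);
        return fd;
}

Leaving log_level at 0 also works: as the code above shows, libbpf then retries a failed load once with log_level 1 so failure details still land in the buffer.
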
|
||||
|
||||
__attribute__((alias("bpf_load_program_xattr2")))
|
||||
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
|
||||
char *log_buf, size_t log_buf_sz)
|
||||
char *log_buf, size_t log_buf_sz);
|
||||
|
||||
static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
|
||||
char *log_buf, size_t log_buf_sz)
|
||||
{
|
||||
struct bpf_prog_load_params p = {};
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, p);
|
||||
|
||||
if (!load_attr || !log_buf != !log_buf_sz)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
p.prog_type = load_attr->prog_type;
|
||||
p.expected_attach_type = load_attr->expected_attach_type;
|
||||
switch (p.prog_type) {
|
||||
switch (load_attr->prog_type) {
|
||||
case BPF_PROG_TYPE_STRUCT_OPS:
|
||||
case BPF_PROG_TYPE_LSM:
|
||||
p.attach_btf_id = load_attr->attach_btf_id;
|
||||
@ -356,12 +490,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
|
||||
p.prog_ifindex = load_attr->prog_ifindex;
|
||||
p.kern_version = load_attr->kern_version;
|
||||
}
|
||||
p.insn_cnt = load_attr->insns_cnt;
|
||||
p.insns = load_attr->insns;
|
||||
p.license = load_attr->license;
|
||||
p.log_level = load_attr->log_level;
|
||||
p.log_buf = log_buf;
|
||||
p.log_buf_sz = log_buf_sz;
|
||||
p.log_size = log_buf_sz;
|
||||
p.prog_btf_fd = load_attr->prog_btf_fd;
|
||||
p.func_info_rec_size = load_attr->func_info_rec_size;
|
||||
p.func_info_cnt = load_attr->func_info_cnt;
|
||||
@ -369,10 +500,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
|
||||
p.line_info_rec_size = load_attr->line_info_rec_size;
|
||||
p.line_info_cnt = load_attr->line_info_cnt;
|
||||
p.line_info = load_attr->line_info;
|
||||
p.name = load_attr->name;
|
||||
p.prog_flags = load_attr->prog_flags;
|
||||
|
||||
return libbpf__bpf_prog_load(&p);
|
||||
return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
|
||||
load_attr->insns, load_attr->insns_cnt, &p);
|
||||
}
|
||||
|
||||
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
@ -391,7 +522,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
load_attr.license = license;
|
||||
load_attr.kern_version = kern_version;
|
||||
|
||||
return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
|
||||
return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
|
||||
}
|
||||
|
||||
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
@ -402,6 +533,8 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
bump_rlimit_memlock();
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.prog_type = type;
|
||||
attr.insn_cnt = (__u32)insns_cnt;
|
||||
@ -414,7 +547,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
attr.kern_version = kern_version;
|
||||
attr.prog_flags = prog_flags;
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, sizeof(attr));
|
||||
fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
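Two things are worth calling out in the hunks above: program loading now retries the BPF_PROG_LOAD syscall on -EAGAIN (PROG_LOAD_ATTEMPTS bounds the default), and log_level == 0 with a non-NULL log_buf means "load quietly, retry once with log_level = 1 on failure". A minimal caller-side sketch of both knobs; the two-instruction program and buffer size are illustrative, not from this patch:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static char log_buf[64 * 1024];

int load_trivial_prog(void)
{
	/* r0 = 0; exit; -- the smallest verifier-accepted program */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.attempts = 10,              /* retry BPF_PROG_LOAD on -EAGAIN up to 10 times */
		.log_buf = log_buf,
		.log_size = sizeof(log_buf), /* log_level stays 0: log only filled on failure */
	);
	int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "trivial", "GPL",
			       insns, sizeof(insns) / sizeof(insns[0]), &opts);

	if (fd < 0)
		fprintf(stderr, "load failed, verifier log:\n%s\n", log_buf);
	return fd;
}
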
@ -558,11 +691,11 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
const struct bpf_map_batch_opts *opts)
{
return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
NULL, keys, NULL, count, opts);
NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
@ -582,11 +715,11 @@ int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
count, opts);
}

int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
const struct bpf_map_batch_opts *opts)
{
return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
keys, values, count, opts);
(void *)keys, (void *)values, count, opts);
}

int bpf_obj_pin(int fd, const char *pathname)
@ -610,7 +743,7 @@ int bpf_obj_get(const char *pathname)
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);

fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -721,7 +854,7 @@ int bpf_link_create(int prog_fd, int target_fd,
break;
}
proceed:
fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -764,7 +897,7 @@ int bpf_iter_create(int link_fd)
memset(&attr, 0, sizeof(attr));
attr.iter_create.link_fd = link_fd;

fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -922,7 +1055,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.prog_id = id;

fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -934,7 +1067,7 @@ int bpf_map_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.map_id = id;

fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -946,7 +1079,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.btf_id = id;

fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -958,7 +1091,7 @@ int bpf_link_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.link_id = id;

fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -989,28 +1122,71 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;

fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

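All of the sys_bpf() -> sys_bpf_fd() conversions above funnel fd-returning commands through one helper whose definition is outside these hunks. Upstream libbpf it is essentially the sketch below (ensure_good_fd() re-dups fds 0-2 to a higher number so BPF object fds never shadow stdio); treat the exact shape as an assumption, since the helper itself is not part of this diff:

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	/* dup fds < 3 out of the stdio range, preserving errno on failure */
	return ensure_good_fd(fd);
}
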
int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
bool do_log)
int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
{
union bpf_attr attr = {};
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
union bpf_attr attr;
char *log_buf;
size_t log_size;
__u32 log_level;
int fd;

attr.btf = ptr_to_u64(btf);
bump_rlimit_memlock();

memset(&attr, 0, attr_sz);

if (!OPTS_VALID(opts, bpf_btf_load_opts))
return libbpf_err(-EINVAL);

log_buf = OPTS_GET(opts, log_buf, NULL);
log_size = OPTS_GET(opts, log_size, 0);
log_level = OPTS_GET(opts, log_level, 0);

if (log_size > UINT_MAX)
return libbpf_err(-EINVAL);
if (log_size && !log_buf)
return libbpf_err(-EINVAL);

attr.btf = ptr_to_u64(btf_data);
attr.btf_size = btf_size;
/* log_level == 0 and log_buf != NULL means "try loading without
* log_buf, but retry with log_buf and log_level=1 on error", which is
* consistent across low-level and high-level BTF and program loading
* APIs within libbpf and provides a sensible behavior in practice
*/
if (log_level) {
attr.btf_log_buf = ptr_to_u64(log_buf);
attr.btf_log_size = (__u32)log_size;
attr.btf_log_level = log_level;
}

fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
if (fd < 0 && log_buf && log_level == 0) {
attr.btf_log_buf = ptr_to_u64(log_buf);
attr.btf_log_size = (__u32)log_size;
attr.btf_log_level = 1;
fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
}
return libbpf_err_errno(fd);
}

int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
int fd;

retry:
if (do_log && log_buf && log_buf_size) {
attr.btf_log_level = 1;
attr.btf_log_size = log_buf_size;
attr.btf_log_buf = ptr_to_u64(log_buf);
opts.log_buf = log_buf;
opts.log_size = log_buf_size;
opts.log_level = 1;
}

fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));

fd = bpf_btf_load(btf, btf_size, &opts);
if (fd < 0 && !do_log && log_buf && log_buf_size) {
do_log = true;
goto retry;
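bpf_btf_load() keeps the same quiet-then-retry log convention described in the comment above. A minimal usage sketch; raw_btf and raw_btf_size stand in for BTF bytes obtained elsewhere (e.g. from btf__raw_data()):

#include <stdio.h>
#include <bpf/bpf.h>

int load_btf_blob(const void *raw_btf, size_t raw_btf_size)
{
	static char log_buf[16 * 1024];
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		.log_buf = log_buf,
		.log_size = sizeof(log_buf),
		/* log_level left at 0: quiet load, log captured on failure */
	);
	int btf_fd = bpf_btf_load(raw_btf, raw_btf_size, &opts);

	if (btf_fd < 0)
		fprintf(stderr, "BTF rejected:\n%s\n", log_buf);
	return btf_fd;
}
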
@ -1051,7 +1227,7 @@ int bpf_enable_stats(enum bpf_stats_type type)
memset(&attr, 0, sizeof(attr));
attr.enable_stats.type = type;

fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}

@ -29,11 +29,38 @@
#include <stdint.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif

int libbpf_set_memlock_rlim(size_t memlock_bytes);

struct bpf_map_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */

__u32 btf_fd;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;

__u32 inner_map_fd;
__u32 map_flags;
__u64 map_extra;

__u32 numa_node;
__u32 map_ifindex;
};
#define bpf_map_create_opts__last_field map_ifindex

LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
__u32 key_size,
__u32 value_size,
__u32 max_entries,
const struct bpf_map_create_opts *opts);

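bpf_map_create() collapses the whole bpf_create_map*() family that follows into a single call plus extensible opts; opts may be NULL when no extras are needed. A usage sketch, where the map name, sizes and flags are arbitrary examples:

#include <stdio.h>
#include <bpf/bpf.h>

int create_counters_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,	/* allocate hash entries lazily */
	);
	int map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "counters",
				    sizeof(__u32), sizeof(__u64),
				    1024, &opts);

	if (map_fd < 0)
		perror("bpf_map_create");	/* libbpf sets errno on failure */
	return map_fd;
}
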
struct bpf_create_map_attr {
const char *name;
enum bpf_map_type map_type;
@ -52,25 +79,95 @@ struct bpf_create_map_attr {
};
};

LIBBPF_API int
bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
int key_size, int value_size,
int max_entries, __u32 map_flags, int node);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
int key_size, int value_size,
int max_entries, __u32 map_flags);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
int value_size, int max_entries, __u32 map_flags);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type,
const char *name, int key_size,
int inner_map_fd, int max_entries,
__u32 map_flags, int node);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
const char *name, int key_size,
int inner_map_fd, int max_entries,
__u32 map_flags);

struct bpf_prog_load_opts {
size_t sz; /* size of this struct for forward/backward compatibility */

/* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns
* -EAGAIN. This field determines how many attempts libbpf has to
* make. If not specified, libbpf will use default value of 5.
*/
int attempts;

enum bpf_attach_type expected_attach_type;
__u32 prog_btf_fd;
__u32 prog_flags;
__u32 prog_ifindex;
__u32 kern_version;

__u32 attach_btf_id;
__u32 attach_prog_fd;
__u32 attach_btf_obj_fd;

const int *fd_array;

/* .BTF.ext func info data */
const void *func_info;
__u32 func_info_cnt;
__u32 func_info_rec_size;

/* .BTF.ext line info data */
const void *line_info;
__u32 line_info_cnt;
__u32 line_info_rec_size;

/* verifier log options */
__u32 log_level;
__u32 log_size;
char *log_buf;
};
#define bpf_prog_load_opts__last_field log_buf

LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts);
/* this "specialization" should go away in libbpf 1.0 */
LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts);

/* This is an elaborate way to not conflict with deprecated bpf_prog_load()
* API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone.
* With this approach, if someone is calling bpf_prog_load() with
* 4 arguments, they will use the deprecated API, which keeps backwards
* compatibility (both source code and binary). If bpf_prog_load() is called
* with 6 arguments, though, it gets redirected to __bpf_prog_load.
* So looking forward to libbpf 1.0 when this hack will be gone and
* __bpf_prog_load() will be called just bpf_prog_load().
*/
#ifndef bpf_prog_load
#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__)
#define ___bpf_prog_load4(file, type, pobj, prog_fd) \
bpf_prog_load_deprecated(file, type, pobj, prog_fd)
#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \
bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts)
#endif /* bpf_prog_load */

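The ___libbpf_overload trick above dispatches on argument count, so both calling conventions keep compiling from the same source. A sketch of how the two shapes resolve; the file path, program names, insns and insn_cnt are placeholders:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int load_both_ways(const struct bpf_insn *insns, size_t insn_cnt)
{
	struct bpf_object *obj;
	int prog_fd, err;

	/* 4 arguments -> ___bpf_prog_load4 -> bpf_prog_load_deprecated() */
	err = bpf_prog_load("prog.o" /* placeholder path */, BPF_PROG_TYPE_XDP,
			    &obj, &prog_fd);
	if (err)
		return err;

	/* 6 arguments -> ___bpf_prog_load6 -> the new low-level loader */
	LIBBPF_OPTS(bpf_prog_load_opts, opts);

	return bpf_prog_load(BPF_PROG_TYPE_XDP, "xdp_pass" /* placeholder */,
			     "GPL", insns, insn_cnt, &opts);
}
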
struct bpf_load_program_attr {
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
@ -100,15 +197,18 @@ struct bpf_load_program_attr {
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT 0x01

/* Recommend log buffer size */
/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
LIBBPF_API int
bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz);

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
const struct bpf_insn *insns, size_t insns_cnt,
const char *license, __u32 kern_version,
char *log_buf, size_t log_buf_sz);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
const struct bpf_insn *insns,
size_t insns_cnt, __u32 prog_flags,
@ -116,6 +216,23 @@ LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
char *log_buf, size_t log_buf_sz,
int log_level);

struct bpf_btf_load_opts {
size_t sz; /* size of this struct for forward/backward compatibility */

/* kernel log options */
char *log_buf;
__u32 log_level;
__u32 log_size;
};
#define bpf_btf_load_opts__last_field log_size

LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
const struct bpf_btf_load_opts *opts);

LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead")
LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
__u32 log_buf_size, bool do_log);

LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);

@ -137,17 +254,128 @@ struct bpf_map_batch_opts {
};
#define bpf_map_batch_opts__last_field flags

LIBBPF_API int bpf_map_delete_batch(int fd, void *keys,

/**
* @brief **bpf_map_delete_batch()** allows for batch deletion of multiple
* elements in a BPF map.
*
* @param fd BPF map file descriptor
* @param keys pointer to an array of *count* keys
* @param count input and output parameter; on input **count** represents the
* number of elements in the map to delete in batch;
* on output if a non-EFAULT error is returned, **count** represents the number of deleted
* elements if the output **count** value is not equal to the input **count** value
* If EFAULT is returned, **count** should not be trusted to be correct.
* @param opts options for configuring the way the batch deletion works
* @return 0, on success; negative error code, otherwise (errno is also set to
* the error code)
*/
LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
__u32 *count,
const struct bpf_map_batch_opts *opts);

/**
* @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
*
* The parameter *in_batch* is the address of the first element in the batch to read.
* *out_batch* is an output parameter that should be passed as *in_batch* to subsequent
* calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate
* that the batched lookup starts from the beginning of the map.
*
* The *keys* and *values* are output parameters which must point to memory large enough to
* hold *count* items based on the key and value size of the map *map_fd*. The *keys*
* buffer must be of *key_size* * *count*. The *values* buffer must be of
* *value_size* * *count*.
*
* @param fd BPF map file descriptor
* @param in_batch address of the first element in batch to read, can pass NULL to
* indicate that the batched lookup starts from the beginning of the map.
* @param out_batch output parameter that should be passed to next call as *in_batch*
* @param keys pointer to an array large enough for *count* keys
* @param values pointer to an array large enough for *count* values
* @param count input and output parameter; on input it's the number of elements
* in the map to read in batch; on output it's the number of elements that were
* successfully read.
* If a non-EFAULT error is returned, count will be set as the number of elements
* that were read before the error occurred.
* If EFAULT is returned, **count** should not be trusted to be correct.
* @param opts options for configuring the way the batch lookup works
* @return 0, on success; negative error code, otherwise (errno is also set to
* the error code)
*/
LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
void *keys, void *values, __u32 *count,
const struct bpf_map_batch_opts *opts);

/**
* @brief **bpf_map_lookup_and_delete_batch()** allows for batch lookup and deletion
* of BPF map elements where each element is deleted after being retrieved.
*
* @param fd BPF map file descriptor
* @param in_batch address of the first element in batch to read, can pass NULL to
* get address of the first element in *out_batch*
* @param out_batch output parameter that should be passed to next call as *in_batch*
* @param keys pointer to an array of *count* keys
* @param values pointer to an array large enough for *count* values
* @param count input and output parameter; on input it's the number of elements
* in the map to read and delete in batch; on output it represents the number of
* elements that were successfully read and deleted
* If a non-**EFAULT** error code is returned and if the output **count** value
* is not equal to the input **count** value, up to **count** elements may
* have been deleted.
* if **EFAULT** is returned up to *count* elements may have been deleted without
* being returned via the *keys* and *values* output parameters.
* @param opts options for configuring the way the batch lookup and delete works
* @return 0, on success; negative error code, otherwise (errno is also set to
* the error code)
*/
LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
void *out_batch, void *keys,
void *values, __u32 *count,
const struct bpf_map_batch_opts *opts);
LIBBPF_API int bpf_map_update_batch(int fd, void *keys, void *values,

/**
* @brief **bpf_map_update_batch()** updates multiple elements in a map
* by specifying keys and their corresponding values.
*
* The *keys* and *values* parameters must point to memory large enough
* to hold *count* items based on the key and value size of the map.
*
* The *opts* parameter can be used to control how *bpf_map_update_batch()*
* should handle keys that either do or do not already exist in the map.
* In particular the *flags* parameter of *bpf_map_batch_opts* can be
* one of the following:
*
* Note that *count* is an input and output parameter, where on output it
* represents how many elements were successfully updated. Also note that if
* **EFAULT** then *count* should not be trusted to be correct.
*
* **BPF_ANY**
* Create new elements or update existing.
*
* **BPF_NOEXIST**
* Create new elements only if they do not exist.
*
* **BPF_EXIST**
* Update existing elements.
*
* **BPF_F_LOCK**
* Update spin_lock-ed map elements. This must be
* specified if the map value contains a spinlock.
*
* @param fd BPF map file descriptor
* @param keys pointer to an array of *count* keys
* @param values pointer to an array of *count* values
* @param count input and output parameter; on input it's the number of elements
* in the map to update in batch; on output if a non-EFAULT error is returned,
* **count** represents the number of updated elements if the output **count**
* value is not equal to the input **count** value.
* If EFAULT is returned, **count** should not be trusted to be correct.
* @param opts options for configuring the way the batch update works
* @return 0, on success; negative error code, otherwise (errno is also set to
* the error code)
*/
LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values,
__u32 *count,
const struct bpf_map_batch_opts *opts);

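A short caller-side sketch for the batch update API documented above; map_fd and the key/value arrays are placeholders, and note how *count* is rewritten on return (elem_flags lives in bpf_map_batch_opts upstream, alongside the flags field shown in this diff):

#include <stdio.h>
#include <bpf/bpf.h>

int fill_map(int map_fd)
{
	__u32 keys[3] = { 1, 2, 3 };	/* placeholder keys */
	__u64 vals[3] = { 10, 20, 30 };	/* placeholder values */
	__u32 count = 3;
	LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = BPF_ANY,	/* create new or update existing */
	);
	int err = bpf_map_update_batch(map_fd, keys, vals, &count, &opts);

	/* count now holds how many elements were actually updated */
	if (err)
		fprintf(stderr, "updated %u of 3 elements, err=%d\n", count, err);
	return err;
}
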
@ -243,8 +471,6 @@ LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
__u32 query_flags, __u32 *attach_flags,
__u32 *prog_ids, __u32 *prog_cnt);
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
__u32 log_buf_size, bool do_log);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
__u64 *probe_offset, __u64 *probe_addr);

@ -3,10 +3,27 @@
#ifndef __BPF_GEN_INTERNAL_H
#define __BPF_GEN_INTERNAL_H

#include "bpf.h"

struct ksym_relo_desc {
const char *name;
int kind;
int insn_idx;
bool is_weak;
bool is_typeless;
};

struct ksym_desc {
const char *name;
int ref;
int kind;
union {
/* used for kfunc */
int off;
/* used for typeless ksym */
bool typeless;
};
int insn;
};

struct bpf_gen {
@ -22,20 +39,34 @@ struct bpf_gen {
int error;
struct ksym_relo_desc *relos;
int relo_cnt;
struct bpf_core_relo *core_relos;
int core_relo_cnt;
char attach_target[128];
int attach_kind;
struct ksym_desc *ksyms;
__u32 nr_ksyms;
int fd_array;
int nr_fd_array;
};

void bpf_gen__init(struct bpf_gen *gen, int log_level);
int bpf_gen__finish(struct bpf_gen *gen);
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
struct bpf_prog_load_params;
void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
void bpf_gen__map_create(struct bpf_gen *gen,
enum bpf_map_type map_type, const char *map_name,
__u32 key_size, __u32 value_size, __u32 max_entries,
struct bpf_map_create_opts *map_attr, int map_idx);
void bpf_gen__prog_load(struct bpf_gen *gen,
enum bpf_prog_type prog_type, const char *prog_name,
const char *license, struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *load_attr, int prog_idx);
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
bool is_typeless, int kind, int insn_idx);
void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo);
void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx);

#endif

@ -14,14 +14,6 @@
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
@ -224,4 +216,47 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})

#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif

#define __bpf_printk(fmt, ...) \
({ \
BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

/*
* __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
* instead of an array of u64.
*/
#define __bpf_vprintk(fmt, args...) \
({ \
static const char ___fmt[] = fmt; \
unsigned long long ___param[___bpf_narg(args)]; \
\
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
___bpf_fill(___param, args); \
_Pragma("GCC diagnostic pop") \
\
bpf_trace_vprintk(___fmt, sizeof(___fmt), \
___param, sizeof(___param)); \
})

/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
* Otherwise use __bpf_vprintk
*/
#define ___bpf_pick_printk(...) \
___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
__bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
__bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
__bpf_printk /*1*/, __bpf_printk /*0*/)

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)

#endif

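With ___bpf_pick_printk, bpf_printk() keeps routing calls with 3 or fewer format arguments to bpf_trace_printk and transparently switches to bpf_trace_vprintk for more, which requires a kernel that provides that helper. A BPF-side sketch (hypothetical tracepoint program):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	/* 1 format arg -> __bpf_printk -> bpf_trace_printk */
	bpf_printk("execve by pid %d", bpf_get_current_pid_tgid() >> 32);
	/* 4 format args -> __bpf_vprintk -> bpf_trace_vprintk */
	bpf_printk("a=%d b=%d c=%d d=%d", 1, 2, 3, 4);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
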
@ -24,6 +24,9 @@
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_riscv)
#define bpf_target_riscv
#define bpf_target_defined
#else

/* Fall back to what the compiler says */
@ -48,6 +51,9 @@
#elif defined(__sparc__)
#define bpf_target_sparc
#define bpf_target_defined
#elif defined(__riscv) && __riscv_xlen == 64
#define bpf_target_riscv
#define bpf_target_defined
#endif /* no compiler target */

#endif
@ -60,251 +66,204 @@

#if defined(__KERNEL__) || defined(__VMLINUX_H__)

#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di)
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si)
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx)
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx)
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp)
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp)
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax)
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip)
#define __PT_PARM1_REG di
#define __PT_PARM2_REG si
#define __PT_PARM3_REG dx
#define __PT_PARM4_REG cx
#define __PT_PARM5_REG r8
#define __PT_RET_REG sp
#define __PT_FP_REG bp
#define __PT_RC_REG ax
#define __PT_SP_REG sp
#define __PT_IP_REG ip

#else

#ifdef __i386__

#define __PT_PARM1_REG eax
#define __PT_PARM2_REG edx
#define __PT_PARM3_REG ecx
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
#define PT_REGS_PARM2(x) ((x)->edx)
#define PT_REGS_PARM3(x) ((x)->ecx)
#define PT_REGS_PARM4(x) 0
#define PT_REGS_PARM5(x) 0
#define PT_REGS_RET(x) ((x)->esp)
#define PT_REGS_FP(x) ((x)->ebp)
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
#define __PT_PARM4_REG __unsupported__
#define __PT_PARM5_REG __unsupported__
#define __PT_RET_REG esp
#define __PT_FP_REG ebp
#define __PT_RC_REG eax
#define __PT_SP_REG esp
#define __PT_IP_REG eip

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax)
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx)
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx)
#define PT_REGS_PARM4_CORE(x) 0
#define PT_REGS_PARM5_CORE(x) 0
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp)
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp)
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax)
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip)
#else /* __i386__ */

#else
#define __PT_PARM1_REG rdi
#define __PT_PARM2_REG rsi
#define __PT_PARM3_REG rdx
#define __PT_PARM4_REG rcx
#define __PT_PARM5_REG r8
#define __PT_RET_REG rsp
#define __PT_FP_REG rbp
#define __PT_RC_REG rax
#define __PT_SP_REG rsp
#define __PT_IP_REG rip

#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
#define PT_REGS_PARM4(x) ((x)->rcx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->rsp)
#define PT_REGS_FP(x) ((x)->rbp)
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
#endif /* __i386__ */

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi)
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi)
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx)
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx)
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp)
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp)
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax)
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip)

#endif
#endif
#endif /* __KERNEL__ || __VMLINUX_H__ */

#elif defined(bpf_target_s390)

/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_S390 const volatile user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3])
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
#define __PT_PARM1_REG gprs[2]
#define __PT_PARM2_REG gprs[3]
#define __PT_PARM3_REG gprs[4]
#define __PT_PARM4_REG gprs[5]
#define __PT_PARM5_REG gprs[6]
#define __PT_RET_REG gprs[14]
#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG gprs[2]
#define __PT_SP_REG gprs[15]
#define __PT_IP_REG psw.addr

#elif defined(bpf_target_arm)

#define PT_REGS_PARM1(x) ((x)->uregs[0])
#define PT_REGS_PARM2(x) ((x)->uregs[1])
#define PT_REGS_PARM3(x) ((x)->uregs[2])
#define PT_REGS_PARM4(x) ((x)->uregs[3])
#define PT_REGS_PARM5(x) ((x)->uregs[4])
#define PT_REGS_RET(x) ((x)->uregs[14])
#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->uregs[0])
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0])
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1])
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14])
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13])
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12])
#define __PT_PARM1_REG uregs[0]
#define __PT_PARM2_REG uregs[1]
#define __PT_PARM3_REG uregs[2]
#define __PT_PARM4_REG uregs[3]
#define __PT_PARM5_REG uregs[4]
#define __PT_RET_REG uregs[14]
#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG uregs[0]
#define __PT_SP_REG uregs[13]
#define __PT_IP_REG uregs[12]

#elif defined(bpf_target_arm64)

/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_ARM64 const volatile struct user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1])
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30])
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc)
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
#define __PT_PARM1_REG regs[0]
#define __PT_PARM2_REG regs[1]
#define __PT_PARM3_REG regs[2]
#define __PT_PARM4_REG regs[3]
#define __PT_PARM5_REG regs[4]
#define __PT_RET_REG regs[30]
#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[0]
#define __PT_SP_REG sp
#define __PT_IP_REG pc

#elif defined(bpf_target_mips)

#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[2])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4])
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5])
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31])
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[2])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29])
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc)
#define __PT_PARM1_REG regs[4]
#define __PT_PARM2_REG regs[5]
#define __PT_PARM3_REG regs[6]
#define __PT_PARM4_REG regs[7]
#define __PT_PARM5_REG regs[8]
#define __PT_RET_REG regs[31]
#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[2]
#define __PT_SP_REG regs[29]
#define __PT_IP_REG cp0_epc

#elif defined(bpf_target_powerpc)

#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3])
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4])
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip)
#define __PT_PARM1_REG gpr[3]
#define __PT_PARM2_REG gpr[4]
#define __PT_PARM3_REG gpr[5]
#define __PT_PARM4_REG gpr[6]
#define __PT_PARM5_REG gpr[7]
#define __PT_RET_REG regs[31]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG gpr[3]
#define __PT_SP_REG sp
#define __PT_IP_REG nip

#elif defined(bpf_target_sparc)

#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1])
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP])

#define __PT_PARM1_REG u_regs[UREG_I0]
#define __PT_PARM2_REG u_regs[UREG_I1]
#define __PT_PARM3_REG u_regs[UREG_I2]
#define __PT_PARM4_REG u_regs[UREG_I3]
#define __PT_PARM5_REG u_regs[UREG_I4]
#define __PT_RET_REG u_regs[UREG_I7]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG u_regs[UREG_I0]
#define __PT_SP_REG u_regs[UREG_FP]
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
#define __PT_IP_REG tpc
#else
#define PT_REGS_IP(x) ((x)->pc)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
#define __PT_IP_REG pc
#endif

#elif defined(bpf_target_riscv)

#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
#define __PT_PARM2_REG a1
#define __PT_PARM3_REG a2
#define __PT_PARM4_REG a3
#define __PT_PARM5_REG a4
#define __PT_RET_REG ra
#define __PT_FP_REG fp
#define __PT_RC_REG a5
#define __PT_SP_REG sp
#define __PT_IP_REG epc

#endif

#if defined(bpf_target_defined)

struct pt_regs;

/* allow some architectures to override `struct pt_regs` */
#ifndef __PT_REGS_CAST
#define __PT_REGS_CAST(x) (x)
#endif

#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)

#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)

#if defined(bpf_target_powerpc)

#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP

#elif defined(bpf_target_sparc)

#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_defined)

#else

#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read_kernel(&(ip), sizeof(ip), \
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })

#endif

#if !defined(bpf_target_defined)
#else /* defined(bpf_target_defined) */

#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
@ -331,7 +290,7 @@ struct pt_regs;
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })

#endif /* !defined(bpf_target_defined) */
#endif /* defined(bpf_target_defined) */

#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
@ -343,25 +302,23 @@ struct pt_regs;
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) \
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) \
___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)

/*
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
@ -394,19 +351,13 @@ ____##name(unsigned long long *ctx, ##args)

struct pt_regs;

#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) \
___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) \
___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) \
___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) \
___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) \
___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args(args...) \
___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)

/*
* BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
@ -432,11 +383,9 @@ typeof(name(0)) name(struct pt_regs *ctx) \
static __attribute__((always_inline)) typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)

#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_args1(x) \
___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args(args...) \
___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)

/*
* BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional

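The refactor above reduces each architecture to a handful of __PT_*_REG definitions while the generic PT_REGS_*() accessors and the BPF_KPROBE machinery stay shared. For orientation, a typical kprobe consumer looks like this sketch (it assumes a vmlinux.h generated from the running kernel, and do_unlinkat's upstream signature):

#include "vmlinux.h"		/* assumed pre-generated kernel type dump */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(handle_unlink, int dfd, struct filename *name)
{
	/* dfd and name are fished out of pt_regs via the PT_REGS_PARMn() macros */
	bpf_printk("do_unlinkat dfd=%d", dfd);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
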
@ -57,7 +57,7 @@ struct btf {
* representation is broken up into three independently allocated
* memory regions to be able to modify them independently.
* raw_data is nulled out at that point, but can be later allocated
* and cached again if user calls btf__get_raw_data(), at which point
* and cached again if user calls btf__raw_data(), at which point
* raw_data will contain a contiguous copy of header, types, and
* strings:
*
@ -189,12 +189,17 @@ int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_
return 0;
}

static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
{
return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
}

static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
__u32 *p;

p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
btf->nr_types, BTF_MAX_NR_TYPES, 1);
p = btf_add_type_offs_mem(btf, 1);
if (!p)
return -ENOMEM;

@ -294,6 +299,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
@ -310,6 +316,8 @@ static int btf_type_size(const struct btf_type *t)
return base_size + sizeof(struct btf_var);
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
case BTF_KIND_DECL_TAG:
return base_size + sizeof(struct btf_decl_tag);
default:
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
return -EINVAL;
@ -342,6 +350,7 @@ static int btf_bswap_type_rest(struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
return 0;
case BTF_KIND_INT:
*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
@ -382,6 +391,9 @@ static int btf_bswap_type_rest(struct btf_type *t)
v->size = bswap_32(v->size);
}
return 0;
case BTF_KIND_DECL_TAG:
btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
return 0;
default:
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
return -EINVAL;
@ -431,13 +443,18 @@ __u32 btf__get_nr_types(const struct btf *btf)
return btf->start_id + btf->nr_types - 1;
}

__u32 btf__type_cnt(const struct btf *btf)
{
return btf->start_id + btf->nr_types;
}

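btf__type_cnt() returns start_id + nr_types, i.e. one past the last valid type id, which is why the iteration bounds in the hunks below flip from '<=' to '<'. An iteration sketch against the new API (btf is assumed to come from btf__parse() or similar):

#include <stdio.h>
#include <bpf/btf.h>

void dump_struct_names(const struct btf *btf)
{
	__u32 id, n = btf__type_cnt(btf);

	/* valid type ids are [1, n): note '<', not '<=' */
	for (id = 1; id < n; id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		if (btf_is_struct(t))
			printf("struct %s\n", btf__name_by_offset(btf, t->name_off));
	}
}
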
const struct btf *btf__base_btf(const struct btf *btf)
|
||||
{
|
||||
return btf->base_btf;
|
||||
}

/* internal helper returning non-const pointer to a type */
struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id == 0)
		return &btf_void;
@ -462,8 +479,8 @@ static int determine_ptr_size(const struct btf *btf)
	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
		return btf->base_btf->ptr_sz;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(btf, i);
		if (!btf_is_int(t))
			continue;
@ -523,9 +540,9 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)

static bool is_host_big_endian(void)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#else
# error "Unrecognized __BYTE_ORDER__"
@ -592,6 +609,8 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		type_id = t->type;
		break;
	case BTF_KIND_ARRAY:
@ -633,6 +652,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
@ -679,12 +699,12 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)

__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i, nr_types = btf__get_nr_types(btf);
	__u32 i, nr_types = btf__type_cnt(btf);

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= nr_types; i++) {
	for (i = 1; i < nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name = btf__name_by_offset(btf, t->name_off);

@ -695,15 +715,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
	return libbpf_err(-ENOENT);
}

__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
				   const char *type_name, __u32 kind)
{
	__u32 i, nr_types = btf__get_nr_types(btf);
	__u32 i, nr_types = btf__type_cnt(btf);

	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= nr_types; i++) {
	for (i = start_id; i < nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

@ -717,6 +737,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
	return libbpf_err(-ENOENT);
}

__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}

__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
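/* Illustrative usage, not part of this commit: btf__find_by_name_kind()
 * searches the full ID space starting at 1, while the _own() variant added
 * above skips base BTF and matches only types owned by a split BTF.
 * "task_struct" is just an example name; assumes <bpf/btf.h> and <stdio.h>.
 */
static __s32 find_struct_id(const struct btf *btf)
{
	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);

	if (id < 0)
		fprintf(stderr, "lookup failed: %d\n", id);
	return id;
}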

static bool btf_is_modifiable(const struct btf *btf)
{
	return (void *)btf->hdr != btf->raw_data;
@ -764,7 +796,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__get_nr_types(base_btf) + 1;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

@ -814,7 +846,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__get_nr_types(base_btf) + 1;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

@ -869,7 +901,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY);
	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, strerror(errno));
@ -1090,149 +1122,88 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
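/* Illustrative sketch, not part of this commit: parsing split BTF on top of a
 * base BTF object. Paths and the helper name are examples; the base BTF must
 * stay alive for as long as the split BTF is in use.
 */
static struct btf *load_split_btf(void)
{
	struct btf *base, *split;

	base = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (libbpf_get_error(base))
		return NULL;

	split = btf__parse_split("/sys/kernel/btf/some_module", base);
	if (libbpf_get_error(split)) {
		btf__free(base);
		return NULL;
	}
	return split;
}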

static int compare_vsi_off(const void *_a, const void *_b)
{
	const struct btf_var_secinfo *a = _a;
	const struct btf_var_secinfo *b = _b;

	return a->offset - b->offset;
}

static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
			     struct btf_type *t)
{
	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
	const char *name = btf__name_by_offset(btf, t->name_off);
	const struct btf_type *t_var;
	struct btf_var_secinfo *vsi;
	const struct btf_var *var;
	int ret;

	if (!name) {
		pr_debug("No name found in string section for DATASEC kind.\n");
		return -ENOENT;
	}

	/* .extern datasec size and var offsets were set correctly during
	 * extern collection step, so just skip straight to sorting variables
	 */
	if (t->size)
		goto sort_vars;

	ret = bpf_object__section_size(obj, name, &size);
	if (ret || !size || (t->size && t->size != size)) {
		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
		return -ENOENT;
	}

	t->size = size;

	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
		t_var = btf__type_by_id(btf, vsi->type);
		var = btf_var(t_var);

		if (!btf_is_var(t_var)) {
			pr_debug("Non-VAR type seen in section %s\n", name);
			return -EINVAL;
		}

		if (var->linkage == BTF_VAR_STATIC)
			continue;

		name = btf__name_by_offset(btf, t_var->name_off);
		if (!name) {
			pr_debug("No name found in string section for VAR kind\n");
			return -ENOENT;
		}

		ret = bpf_object__variable_offset(obj, name, &off);
		if (ret) {
			pr_debug("No offset found in symbol table for VAR %s\n",
				 name);
			return -ENOENT;
		}

		vsi->offset = off;
	}

sort_vars:
	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
	return 0;
}

int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
{
	int err = 0;
	__u32 i;

	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = btf_type_by_id(btf, i);

		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
			err = btf_fixup_datasec(obj, btf, t);
			if (err)
				break;
		}
	}

	return libbpf_err(err);
}

static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);

int btf__load_into_kernel(struct btf *btf)
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
{
	__u32 log_buf_size = 0, raw_size;
	char *log_buf = NULL;
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

retry_load:
	if (log_buf_size) {
		log_buf = malloc(log_buf_size);
		if (!log_buf)
			return libbpf_err(-ENOMEM);

		*log_buf = 0;
	}

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	/* cache native raw data representation */
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

	btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
	if (btf->fd < 0) {
		if (!log_buf || errno == ENOSPC) {
			log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
					   log_buf_size << 1);
			free(log_buf);
			goto retry_load;
retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		err = -errno;
		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
		if (*log_buf)
			pr_warn("%s\n", log_buf);
		goto done;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(log_buf);
	free(buf);
	return libbpf_err(err);
}

int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0);
}

int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
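/* Illustrative usage, not part of this commit: the public entry point keeps
 * its old signature and delegates to the internal helper above, which bumps
 * log_level to 1 and retries automatically if the first load fails.
 */
static int load_btf_or_report(struct btf *btf)
{
	int err = btf__load_into_kernel(btf);

	if (err)
		fprintf(stderr, "BTF load failed: %d\n", err);
	return err;
}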

int btf__fd(const struct btf *btf)
@ -1300,7 +1271,7 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi
	return NULL;
}

const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
@ -1308,7 +1279,7 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = -ENOMEM, NULL;
		return errno = ENOMEM, NULL;

	btf->raw_size = data_sz;
	if (btf->swapped_endian)
@ -1319,6 +1290,9 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
	return data;
}

__attribute__((alias("btf__raw_data")))
const void *btf__get_raw_data(const struct btf *btf, __u32 *size);

const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->start_str_off)
@ -1691,6 +1665,111 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
	return btf_commit_type(btf, sz);
}

static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (!*type_id) /* nothing to do for VOID references */
		return 0;

	/* we haven't updated btf's type count yet, so
	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
	 * add to all newly added BTF types
	 */
	*type_id += btf->start_id + btf->nr_types - 1;
	return 0;
}

int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
		if (err)
			goto err_out;

		/* remap all type IDs referenced from this BTF type */
		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
		if (err)
			goto err_out;

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying. But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and make them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above */
	btf->hdr->str_len = old_strs_len;

	return libbpf_err(err);
}
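/* Illustrative sketch, not part of this commit: appending one BTF object to
 * another and remapping a type ID from the source. Helper name is made up.
 */
static int merge_and_remap(struct btf *dst, const struct btf *src,
			   __u32 src_id, __u32 *dst_id)
{
	int first_id = btf__add_btf(dst, src);

	if (first_id < 0)
		return first_id; /* dst is untouched on error (transactional) */

	/* all non-void IDs from src shift by the same constant offset */
	*dst_id = src_id ? src_id + first_id - 1 : 0;
	return 0;
}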

/*
 * Append new BTF_KIND_INT type with:
 *   - *name* - non-empty, non-NULL type name;
@ -1939,7 +2018,7 @@ int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)

static struct btf_type *btf_last_type(struct btf *btf)
{
	return btf_type_by_id(btf, btf__get_nr_types(btf));
	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
}

/*
@ -2192,6 +2271,22 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
}

/*
 * Append new BTF_KIND_TYPE_TAG type with:
 *   - *value*, non-empty/non-NULL tag value;
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
{
	if (!value || !value[0])
		return libbpf_err(-EINVAL);

	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
}

/*
 * Append new BTF_KIND_FUNC type with:
 *   - *name*, non-empty/non-NULL name;
@ -2446,6 +2541,48 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
	return 0;
}

/*
 * Append new BTF_KIND_DECL_TAG type with:
 *   - *value* - non-empty/non-NULL string;
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
 *     member or function argument index;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
		      int component_idx)
{
	struct btf_type *t;
	int sz, value_off;

	if (!value || !value[0] || component_idx < -1)
		return libbpf_err(-EINVAL);

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	value_off = btf__add_str(btf, value);
	if (value_off < 0)
		return value_off;

	t->name_off = value_off;
	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
	t->type = ref_type_id;
	btf_decl_tag(t)->component_idx = component_idx;

	return btf_commit_type(btf, sz);
}
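/* Illustrative sketch, not part of this commit: encoding
 *   int __attribute__((btf_type_tag("user"))) *p;
 * and attaching a declaration tag to the variable. Tag strings are examples;
 * intermediate error checks are elided for brevity.
 */
static int add_tagged_var(struct btf *btf)
{
	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	int tag_id = btf__add_type_tag(btf, "user", int_id);
	int ptr_id = btf__add_ptr(btf, tag_id);
	int var_id = btf__add_var(btf, "p", BTF_VAR_GLOBAL_ALLOCATED, ptr_id);

	if (var_id < 0)
		return var_id;
	/* component_idx == -1 tags the declaration itself, not a member */
	return btf__add_decl_tag(btf, "my_tag", var_id, -1);
}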

struct btf_ext_sec_setup_param {
	__u32 off;
	__u32 len;
@ -2761,8 +2898,7 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)

struct btf_dedup;

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts);
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
static int btf_dedup_prep(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
@ -2909,12 +3045,17 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
 * deduplicating structs/unions is described in greater details in comments for
 * `btf_dedup_is_equiv` function.
 */
int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
	       const struct btf_dedup_opts *opts)

DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
	struct btf_dedup *d;
	int err;

	if (!OPTS_VALID(opts, btf_dedup_opts))
		return libbpf_err(-EINVAL);

	d = btf_dedup_new(btf, opts);
	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
		return libbpf_err(-EINVAL);
@ -2966,6 +3107,19 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
	return libbpf_err(err);
}

COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
{
	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);

	if (unused_opts) {
		pr_warn("please use new version of btf__dedup() that supports options\n");
		return libbpf_err(-ENOTSUP);
	}

	return btf__dedup(btf, &opts);
}
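/* Illustrative usage, not part of this commit: calling the OPTS-based
 * btf__dedup(). NULL opts is valid; .btf_ext is only needed when .BTF.ext
 * func/line info must be remapped along with the types.
 */
static int dedup_btf(struct btf *btf, struct btf_ext *ext)
{
	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);

	return btf__dedup(btf, &opts);
}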

#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)

@ -3078,8 +3232,7 @@ static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
	return k1 == k2;
}

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts)
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
@ -3088,13 +3241,11 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
	if (!d)
		return ERR_PTR(-ENOMEM);

	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
	/* dedup_table_size is now used only to force collisions in tests */
	if (opts && opts->dedup_table_size == 1)
	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = btf_ext;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
@ -3103,7 +3254,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
		goto done;
	}

	type_cnt = btf__get_nr_types(btf) + 1;
	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
@ -3264,8 +3415,8 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
	       t1->size == t2->size;
}

/* Calculate type signature hash of INT. */
static long btf_hash_int(struct btf_type *t)
/* Calculate type signature hash of INT or TAG. */
static long btf_hash_int_decl_tag(struct btf_type *t)
{
	__u32 info = *(__u32 *)(t + 1);
	long h;
@ -3275,8 +3426,8 @@ static long btf_hash_int(struct btf_type *t)
	return h;
}

/* Check structural equality of two INTs. */
static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
/* Check structural equality of two INTs or TAGs. */
static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
{
	__u32 info1, info2;

@ -3540,10 +3691,12 @@ static int btf_dedup_prep(struct btf_dedup *d)
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
			h = btf_hash_int(t);
		case BTF_KIND_DECL_TAG:
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
			h = btf_hash_enum(t);
@ -3598,14 +3751,16 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int(t);
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int(t, cand)) {
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
@ -3621,8 +3776,6 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
				new_id = cand_id;
				break;
			}
			if (d->opts.dont_resolve_fwds)
				continue;
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
@ -3902,8 +4055,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if (!d->opts.dont_resolve_fwds
	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;
@ -3926,13 +4078,10 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int(cand_type, canon_type);
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
		if (d->opts.dont_resolve_fwds)
			return btf_equal_enum(cand_type, canon_type);
		else
			return btf_compat_enum(cand_type, canon_type);
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
@ -3944,6 +4093,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		if (cand_type->info != canon_type->info)
			return 0;
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
@ -4239,6 +4389,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
@ -4255,6 +4406,23 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

@ -4527,6 +4695,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return visit(&t->type, ctx);

	case BTF_KIND_ARRAY: {

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2018 Facebook */
/*! \file */

#ifndef __LIBBPF_BTF_H
#define __LIBBPF_BTF_H
@ -30,11 +31,80 @@ enum btf_endianness {
	BTF_BIG_ENDIAN = 1,
};

/**
 * @brief **btf__free()** frees all data of a BTF object
 * @param btf BTF object to free
 */
LIBBPF_API void btf__free(struct btf *btf);

/**
 * @brief **btf__new()** creates a new instance of a BTF object from the raw
 * bytes of an ELF's BTF section
 * @param data raw bytes
 * @param size number of bytes passed in `data`
 * @return new BTF object instance which has to be eventually freed with
 * **btf__free()**
 *
 * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
 * error code from such a pointer `libbpf_get_error()` should be used. If
 * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
 * returned on error instead. In both cases thread-local `errno` variable is
 * always set to error code as well.
 */
LIBBPF_API struct btf *btf__new(const void *data, __u32 size);

/**
 * @brief **btf__new_split()** create a new instance of a BTF object from the
 * provided raw data bytes. It takes another BTF instance, **base_btf**, which
 * serves as a base BTF, which is extended by types in a newly created BTF
 * instance
 * @param data raw bytes
 * @param size length of raw bytes
 * @param base_btf the base BTF object
 * @return new BTF object instance which has to be eventually freed with
 * **btf__free()**
 *
 * If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and
 * creates non-split BTF.
 *
 * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
 * error code from such a pointer `libbpf_get_error()` should be used. If
 * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
 * returned on error instead. In both cases thread-local `errno` variable is
 * always set to error code as well.
 */
LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf);

/**
 * @brief **btf__new_empty()** creates an empty BTF object. Use
 * `btf__add_*()` to populate such BTF object.
 * @return new BTF object instance which has to be eventually freed with
 * **btf__free()**
 *
 * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
 * error code from such a pointer `libbpf_get_error()` should be used. If
 * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
 * returned on error instead. In both cases thread-local `errno` variable is
 * always set to error code as well.
 */
LIBBPF_API struct btf *btf__new_empty(void);

/**
 * @brief **btf__new_empty_split()** creates an unpopulated BTF object from an
 * ELF BTF section except with a base BTF on top of which split BTF should be
 * based
 * @return new BTF object instance which has to be eventually freed with
 * **btf__free()**
 *
 * If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to
 * `btf__new_empty()` and creates non-split BTF.
 *
 * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
 * error code from such a pointer `libbpf_get_error()` should be used. If
 * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
 * returned on error instead. In both cases thread-local `errno` variable is
 * always set to error code as well.
 */
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);

LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
@ -50,16 +120,21 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void);

LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);

LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only")
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API int btf__load_into_kernel(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
				   const char *type_name);
LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
					const char *type_name, __u32 kind);
LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1")
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
						  __u32 id);
@ -72,7 +147,9 @@ LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__raw_data() instead")
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
@ -101,6 +178,28 @@ LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf,
			     const struct btf_type *src_type);
/**
 * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf*
 * @param btf BTF object which all the BTF types and strings are added to
 * @param src_btf BTF object which all BTF types and referenced strings are copied from
 * @return BTF type ID of the first appended BTF type, or negative error code
 *
 * **btf__add_btf()** can be used to simply and efficiently append the entire
 * contents of one BTF object to another one. All the BTF type data is copied
 * over, all referenced type IDs are adjusted by adding a necessary ID offset.
 * Only strings referenced from BTF types are copied over and deduplicated, so
 * if there were some unused strings in *src_btf*, those won't be copied over,
 * which is consistent with the general string deduplication semantics of BTF
 * writing APIs.
 *
 * If any error is encountered during this process, the contents of *btf* is
 * left intact, which means that **btf__add_btf()** follows the transactional
 * semantics and the operation as a whole is all-or-nothing.
 *
 * *src_btf* has to be non-split BTF, as of now copying types from split BTF
 * is not supported and will result in -ENOTSUP error code returned.
 */
LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf);

LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz);
@ -128,6 +227,7 @@ LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_
LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);

/* func and func_proto construction APIs */
LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
@ -141,26 +241,91 @@ LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz
LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
					 __u32 offset, __u32 byte_sz);

struct btf_dedup_opts {
	unsigned int dedup_table_size;
	bool dont_resolve_fwds;
};
/* tag construction API */
LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
				 int component_idx);

LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
			  const struct btf_dedup_opts *opts);
struct btf_dedup_opts {
	size_t sz;
	/* optional .BTF.ext info to dedup along the main BTF info */
	struct btf_ext *btf_ext;
	/* force hash collisions (used for testing) */
	bool force_collisions;
	size_t :0;
};
#define btf_dedup_opts__last_field force_collisions

LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);

LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts);

LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead")
LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts);
#define btf__dedup(...) ___libbpf_overload(___btf_dedup, __VA_ARGS__)
#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts)
#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts)

struct btf_dump;

struct btf_dump_opts {
	void *ctx;
	union {
		size_t sz;
		void *ctx; /* DEPRECATED: will be gone in v1.0 */
	};
};

typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);

LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
					  const struct btf_ext *btf_ext,
					  const struct btf_dump_opts *opts,
					  btf_dump_printf_fn_t printf_fn);
					  btf_dump_printf_fn_t printf_fn,
					  void *ctx,
					  const struct btf_dump_opts *opts);

LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
						 btf_dump_printf_fn_t printf_fn,
						 void *ctx,
						 const struct btf_dump_opts *opts);

LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
						     const struct btf_ext *btf_ext,
						     const struct btf_dump_opts *opts,
						     btf_dump_printf_fn_t printf_fn);

/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the
 * type of 4th argument. If it's btf_dump's print callback, use deprecated
 * API; otherwise, choose the new btf_dump__new(). ___libbpf_override()
 * doesn't work here because both variants have 4 input arguments.
 *
 * (void *) casts are necessary to avoid compilation warnings about type
 * mismatches, because even though __builtin_choose_expr() only ever evaluates
 * one side the other side still has to satisfy type constraints (this is
 * compiler implementation limitation which might be lifted eventually,
 * according to the documentation). So passing struct btf_ext in place of
 * btf_dump_printf_fn_t would be generating compilation warning. Casting to
 * void * avoids this issue.
 *
 * Also, two type compatibility checks for a function and function pointer are
 * required because passing function reference into btf_dump__new() as
 * btf_dump__new(..., my_callback, ...) and as btf_dump__new(...,
 * &my_callback, ...) (note explicit ampersand in the latter case) actually
 * differs as far as __builtin_types_compatible_p() is concerned. Thus two
 * checks are combined to detect callback argument.
 *
 * The rest works just like in case of ___libbpf_override() usage with symbol
 * versioning.
 *
 * C++ compilers don't support __builtin_types_compatible_p(), so at least
 * don't screw up compilation for them and let C++ users pick btf_dump__new
 * vs btf_dump__new_deprecated explicitly.
 */
#ifndef __cplusplus
#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr(				\
	__builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) ||		\
	__builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)),	\
	btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4),	\
	btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
#endif
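/* Illustrative sketch, not part of this commit: creating a dumper with the
 * new v0.6.0 argument order (btf, printf_fn, ctx, opts). Passing NULL as the
 * 4th argument makes the overload macro above pick the new API.
 */
#include <stdarg.h>
#include <stdio.h>
#include <bpf/btf.h>

static void print_cb(void *ctx, const char *fmt, va_list args)
{
	vfprintf((FILE *)ctx, fmt, args);
}

static struct btf_dump *make_dumper(const struct btf *btf)
{
	return btf_dump__new(btf, print_cb, stderr, NULL);
}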

LIBBPF_API void btf_dump__free(struct btf_dump *d);

LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
@ -300,7 +465,8 @@ static inline bool btf_is_mod(const struct btf_type *t)

	return kind == BTF_KIND_VOLATILE ||
	       kind == BTF_KIND_CONST ||
	       kind == BTF_KIND_RESTRICT;
	       kind == BTF_KIND_RESTRICT ||
	       kind == BTF_KIND_TYPE_TAG;
}

static inline bool btf_is_func(const struct btf_type *t)
@ -328,6 +494,16 @@ static inline bool btf_is_float(const struct btf_type *t)
	return btf_kind(t) == BTF_KIND_FLOAT;
}

static inline bool btf_is_decl_tag(const struct btf_type *t)
{
	return btf_kind(t) == BTF_KIND_DECL_TAG;
}

static inline bool btf_is_type_tag(const struct btf_type *t)
{
	return btf_kind(t) == BTF_KIND_TYPE_TAG;
}

static inline __u8 btf_int_encoding(const struct btf_type *t)
{
	return BTF_INT_ENCODING(*(__u32 *)(t + 1));
@ -396,6 +572,12 @@ btf_var_secinfos(const struct btf_type *t)
	return (struct btf_var_secinfo *)(t + 1);
}

struct btf_decl_tag;
static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
{
	return (struct btf_decl_tag *)(t + 1);
}

#ifdef __cplusplus
} /* extern "C" */
#endif

@ -77,9 +77,8 @@ struct btf_dump_data {

struct btf_dump {
	const struct btf *btf;
	const struct btf_ext *btf_ext;
	btf_dump_printf_fn_t printf_fn;
	struct btf_dump_opts opts;
	void *cb_ctx;
	int ptr_sz;
	bool strip_mods;
	bool skip_anon_defs;
@ -138,29 +137,32 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
	va_list args;

	va_start(args, fmt);
	d->printf_fn(d->opts.ctx, fmt, args);
	d->printf_fn(d->cb_ctx, fmt, args);
	va_end(args);
}

static int btf_dump_mark_referenced(struct btf_dump *d);
static int btf_dump_resize(struct btf_dump *d);

struct btf_dump *btf_dump__new(const struct btf *btf,
			       const struct btf_ext *btf_ext,
			       const struct btf_dump_opts *opts,
			       btf_dump_printf_fn_t printf_fn)
DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0)
struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
				      btf_dump_printf_fn_t printf_fn,
				      void *ctx,
				      const struct btf_dump_opts *opts)
{
	struct btf_dump *d;
	int err;

	if (!printf_fn)
		return libbpf_err_ptr(-EINVAL);

	d = calloc(1, sizeof(struct btf_dump));
	if (!d)
		return libbpf_err_ptr(-ENOMEM);

	d->btf = btf;
	d->btf_ext = btf_ext;
	d->printf_fn = printf_fn;
	d->opts.ctx = opts ? opts->ctx : NULL;
	d->cb_ctx = ctx;
	d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);

	d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
@ -186,9 +188,20 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
	return libbpf_err_ptr(err);
}

COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4)
struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
					  const struct btf_ext *btf_ext,
					  const struct btf_dump_opts *opts,
					  btf_dump_printf_fn_t printf_fn)
{
	if (!printf_fn)
		return libbpf_err_ptr(-EINVAL);
	return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts);
}

static int btf_dump_resize(struct btf_dump *d)
{
	int err, last_id = btf__get_nr_types(d->btf);
	int err, last_id = btf__type_cnt(d->btf) - 1;

	if (last_id <= d->last_id)
		return 0;
@ -262,7 +275,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
{
	int err, i;

	if (id > btf__get_nr_types(d->btf))
	if (id >= btf__type_cnt(d->btf))
		return libbpf_err(-EINVAL);

	err = btf_dump_resize(d);
@ -294,11 +307,11 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
 */
static int btf_dump_mark_referenced(struct btf_dump *d)
{
	int i, j, n = btf__get_nr_types(d->btf);
	int i, j, n = btf__type_cnt(d->btf);
	const struct btf_type *t;
	__u16 vlen;

	for (i = d->last_id + 1; i <= n; i++) {
	for (i = d->last_id + 1; i < n; i++) {
		t = btf__type_by_id(d->btf, i);
		vlen = btf_vlen(t);

@ -316,6 +329,8 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			d->type_states[t->type].referenced = 1;
			break;

@ -559,6 +574,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return btf_dump_order_type(d, t->type, through_ptr);

	case BTF_KIND_FUNC_PROTO: {
@ -583,6 +599,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
	case BTF_KIND_DECL_TAG:
		d->type_states[id].order_state = ORDERED;
		return 0;

@ -732,6 +749,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		btf_dump_emit_type(d, t->type, cont_id);
		break;
	case BTF_KIND_ARRAY:
@ -1152,6 +1170,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_FUNC_PROTO:
		case BTF_KIND_TYPE_TAG:
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
@ -1320,6 +1339,11 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
		case BTF_KIND_RESTRICT:
			btf_dump_printf(d, " restrict");
			break;
		case BTF_KIND_TYPE_TAG:
			btf_dump_emit_mods(d, decls);
			name = btf_name_of(d, t->name_off);
			btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
			break;
		case BTF_KIND_ARRAY: {
			const struct btf_array *a = btf_array(t);
			const struct btf_type *next_t;
@ -1560,29 +1584,28 @@ static int btf_dump_get_bitfield_value(struct btf_dump *d,
				       __u64 *value)
{
	__u16 left_shift_bits, right_shift_bits;
	__u8 nr_copy_bits, nr_copy_bytes;
	const __u8 *bytes = data;
	int sz = t->size;
	__u8 nr_copy_bits;
	__u64 num = 0;
	int i;

	/* Maximum supported bitfield size is 64 bits */
	if (sz > 8) {
		pr_warn("unexpected bitfield size %d\n", sz);
	if (t->size > 8) {
		pr_warn("unexpected bitfield size %d\n", t->size);
		return -EINVAL;
	}

	/* Bitfield value retrieval is done in two steps; first relevant bytes are
	 * stored in num, then we left/right shift num to eliminate irrelevant bits.
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	for (i = t->size - 1; i >= 0; i--)
		num = num * 256 + bytes[i];
	nr_copy_bits = bit_sz + bits_offset;
	nr_copy_bytes = t->size;
#if __BYTE_ORDER == __LITTLE_ENDIAN
	for (i = nr_copy_bytes - 1; i >= 0; i--)
		num = num * 256 + bytes[i];
#elif __BYTE_ORDER == __BIG_ENDIAN
	for (i = 0; i < nr_copy_bytes; i++)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	for (i = 0; i < t->size; i++)
		num = num * 256 + bytes[i];
	nr_copy_bits = t->size * 8 - bits_offset;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
@ -1656,9 +1679,15 @@ static int btf_dump_base_type_check_zero(struct btf_dump *d,
	return 0;
}

static bool ptr_is_aligned(const void *data, int data_sz)
static bool ptr_is_aligned(const struct btf *btf, __u32 type_id,
			   const void *data)
{
	return ((uintptr_t)data) % data_sz == 0;
	int alignment = btf__align_of(btf, type_id);

	if (alignment == 0)
		return false;

	return ((uintptr_t)data) % alignment == 0;
}

static int btf_dump_int_data(struct btf_dump *d,
@ -1669,9 +1698,10 @@ static int btf_dump_int_data(struct btf_dump *d,
{
	__u8 encoding = btf_int_encoding(t);
	bool sign = encoding & BTF_INT_SIGNED;
	char buf[16] __attribute__((aligned(16)));
	int sz = t->size;

	if (sz == 0) {
	if (sz == 0 || sz > sizeof(buf)) {
		pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
		return -EINVAL;
	}
@ -1679,8 +1709,10 @@ static int btf_dump_int_data(struct btf_dump *d,
	/* handle packed int data - accesses of integers not aligned on
	 * int boundaries can cause problems on some platforms.
	 */
	if (!ptr_is_aligned(data, sz))
		return btf_dump_bitfield_data(d, t, data, 0, 0);
	if (!ptr_is_aligned(d->btf, type_id, data)) {
		memcpy(buf, data, sz);
		data = buf;
	}

	switch (sz) {
	case 16: {
@ -1690,10 +1722,10 @@ static int btf_dump_int_data(struct btf_dump *d,
		/* avoid use of __int128 as some 32-bit platforms do not
		 * support it.
		 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		lsi = ints[0];
		msi = ints[1];
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		lsi = ints[1];
		msi = ints[0];
#else
@ -1766,7 +1798,7 @@ static int btf_dump_float_data(struct btf_dump *d,
	int sz = t->size;

	/* handle unaligned data; copy to local union */
	if (!ptr_is_aligned(data, sz)) {
	if (!ptr_is_aligned(d->btf, type_id, data)) {
		memcpy(&fl, data, sz);
		flp = &fl;
	}
@ -1929,7 +1961,7 @@ static int btf_dump_ptr_data(struct btf_dump *d,
			     __u32 id,
			     const void *data)
{
	if (ptr_is_aligned(data, d->ptr_sz) && d->ptr_sz == sizeof(void *)) {
	if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) {
		btf_dump_type_values(d, "%p", *(void **)data);
	} else {
		union ptr_data pt;
@ -1949,10 +1981,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
				   __u32 id,
				   __s64 *value)
{
	int sz = t->size;

	/* handle unaligned enum value */
	if (!ptr_is_aligned(data, sz)) {
	if (!ptr_is_aligned(d->btf, id, data)) {
		__u64 val;
		int err;

@ -2215,6 +2245,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
	case BTF_KIND_FWD:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_DECL_TAG:
		err = btf_dump_unsupported_data(d, t, id);
		break;
	case BTF_KIND_INT:
@ -2290,8 +2321,8 @@ int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
	if (!opts->indent_str)
		d->typed_dump->indent_str[0] = '\t';
	else
		strncat(d->typed_dump->indent_str, opts->indent_str,
			sizeof(d->typed_dump->indent_str) - 1);
		libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str,
			       sizeof(d->typed_dump->indent_str));

	d->typed_dump->compact = OPTS_GET(opts, compact, false);
	d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
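/* Illustrative usage, not part of this commit: dumping typed data with the
 * opts handled above; indent_str is now copied with libbpf_strlcpy().
 */
static int dump_value(struct btf_dump *d, __u32 type_id,
		      const void *data, size_t data_sz)
{
	LIBBPF_OPTS(btf_dump_type_data_opts, opts,
		.indent_str = "  ",	/* two spaces instead of the default '\t' */
		.compact = true,
	);

	return btf_dump__dump_type_data(d, type_id, data, data_sz, &opts);
}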

@ -5,6 +5,7 @@
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
@ -12,9 +13,12 @@
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
@ -29,9 +33,8 @@
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 map_fd[MAX_USED_MAPS];
	__u32 prog_fd[MAX_USED_PROGS];
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
@ -39,6 +42,11 @@ struct loader_stack {

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
@ -99,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
	emit(gen, insn2);
}

void bpf_gen__init(struct bpf_gen *gen, int log_level)
static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);

void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack);
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
@ -115,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below */
			      (stack_sz / 4) * 3 + 2));
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < stack_sz; i += 4) {
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
@ -135,16 +155,47 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)

static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size))
	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	memcpy(gen->data_cur, data, size);
	gen->data_cur += size;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}

/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to start of fd_array. Caller can decide if it is usable or not.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}
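/* Worked example, not part of this commit: with the layout set up above,
 * fd_array has MAX_USED_MAPS (64) reserved int slots for map FDs followed by
 * MAX_KFUNC_DESCS (256) slots for kfunc module BTF FDs; anything past that
 * spills into freshly added blob data. All slots are addressed relative to
 * gen->fd_array:
 *
 *   blob_fd_array_off(gen, idx) == gen->fd_array + idx * sizeof(int)
 *
 * so map #3 lives at byte offset gen->fd_array + 12, the first kfunc slot is
 * index 64, and a spilled slot's index is (cur - gen->fd_array) / sizeof(int).
 */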
|
||||
|
||||
static int insn_bytes_to_bpf_size(__u32 sz)
|
||||
{
|
||||
switch (sz) {
|
||||
@ -166,14 +217,22 @@ static void emit_rel_store(struct bpf_gen *gen, int off, int data)
|
||||
emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
|
||||
}
|
||||
|
||||
/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
|
||||
static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
|
||||
static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
|
||||
{
|
||||
emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
|
||||
emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
|
||||
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
|
||||
0, 0, 0, blob_off));
|
||||
emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
|
||||
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
|
||||
0, 0, 0, off));
|
||||
emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
|
||||
emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
|
||||
}
|
||||
|
||||
static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
|
||||
{
|
||||
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
|
||||
0, 0, 0, blob_off));
|
||||
emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
|
||||
emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
|
||||
}
|
||||
|
||||
static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
@ -308,10 +367,16 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
	__emit_sys_close(gen);
}

int bpf_gen__finish(struct bpf_gen *gen)
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
@ -321,11 +386,11 @@ int bpf_gen__finish(struct bpf_gen *gen)
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * i +
			       offsetof(struct bpf_map_desc, map_fd), 4,
			       stack_off(map_fd[i]));
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
@ -381,46 +446,32 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 struct bpf_create_map_attr *map_attr, int map_idx)
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_attr->map_type;
	attr.key_size = map_attr->key_size;
	attr.value_size = map_attr->value_size;
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.map_flags = map_attr->map_flags;
	memcpy(attr.map_name, map_attr->name,
	       min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.map_extra = map_attr->map_extra;
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = map_attr->max_entries;
	switch (attr.map_type) {
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_RINGBUF:
		break;
	default:
		attr.btf_key_type_id = map_attr->btf_key_type_id;
		attr.btf_value_type_id = map_attr->btf_value_type_id;
	}
	attr.max_entries = max_entries;
	attr.btf_key_type_id = map_attr->btf_key_type_id;
	attr.btf_value_type_id = map_attr->btf_value_type_id;

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);
		 attr.map_name, map_idx, map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
@ -447,7 +498,7 @@ void bpf_gen__map_create(struct bpf_gen *gen,
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_attr->map_type, attr.value_size,
		  attr.map_name, map_idx, map_type, value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
@ -462,9 +513,11 @@ void bpf_gen__map_create(struct bpf_gen *gen,
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(map_fd[map_idx])));
		gen->nr_maps++;
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
@ -506,8 +559,8 @@ static void emit_find_attach_target(struct bpf_gen *gen)
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
			    int insn_idx)
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

@ -519,38 +572,313 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name, insn, len = strlen(relo->name) + 1;
	struct ksym_desc *kdesc;
	int i;

	pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
	name = add_data(gen, relo->name, len);
	for (i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;
			return &gen->ksyms[i];
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
	emit_check_err(gen);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

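A note on the helper's return encoding (an assumption drawn from the RSH-by-32 sequences below, not stated in this patch): bpf_btf_find_by_name_kind() packs the BTF type ID into the low 32 bits and the module BTF fd (0 for vmlinux BTF) into the high 32 bits. A tiny sketch of that split:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ret = ((uint64_t)7 << 32) | 42; /* hypothetical: fd 7, btf_id 42 */

	printf("btf_id=%u btf_obj_fd=%u\n",
	       (uint32_t)ret, (uint32_t)(ret >> 32));
	return 0;
}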
/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
		offsetof(struct bpf_insn, imm);
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* jump to fd_array store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, insn));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
	if (relo->kind == BTF_KIND_VAR) {
		/* store btf_obj_fd into insn[insn_idx + 1].imm */
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
				      sizeof(struct bpf_insn)));
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}

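To illustrate what src_reg_mask() is for: the generated code ANDs the regs byte of a ldimm64 instruction with this mask so that only dst_reg survives. A minimal sketch, assuming the little-endian bitfield layout (dst_reg in the low nibble):

#include <stdio.h>

int main(void)
{
	unsigned char regs = 0x31; /* src_reg=3 (high nibble), dst_reg=1 (low nibble) */

	regs &= 0x0f;              /* clear src_reg, keep dst_reg */
	printf("0x%02x\n", regs);  /* prints 0x01 */
	return 0;
}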
/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	switch (relo->kind) {
	case BTF_KIND_VAR:
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
		break;
	case BTF_KIND_FUNC:
		emit_relo_kfunc_btf(gen, relo, insn);
		break;
	default:
		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
		gen->error = -EDOM;
		return;
	}
}

@ -562,48 +890,68 @@ static void emit_relos(struct bpf_gen *gen, int insns)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->relo_cnt; i++) {
		if (gen->relos[i].kind != BTF_KIND_VAR)
			continue;
		/* close fd recorded in insn[insn_idx + 1].imm */
		insn = insns +
		       sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
		       offsetof(struct bpf_insn, imm);
		emit_sys_close_blob(gen, insn);
	for (i = 0; i < gen->nr_ksyms; i++) {
		/* only close fds for typed ksyms and kfuncs */
		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = gen->ksyms[i].insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

void bpf_gen__prog_load(struct bpf_gen *gen,
			struct bpf_prog_load_params *load_attr, int prog_idx)
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int attr_size = offsetofend(union bpf_attr, fd_array);
	int prog_load_attr, license, insns, func_info, line_info;
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
		 load_attr->prog_type, load_attr->insn_cnt);
	pr_debug("gen: prog_load: type %d insns_cnt %zd prog_idx %d\n",
		 prog_type, insn_cnt, prog_idx);
	/* add license string to blob of bytes */
	license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns = add_data(gen, load_attr->insns,
			 load_attr->insn_cnt * sizeof(struct bpf_insn));
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = load_attr->prog_type;
	attr.prog_type = prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)load_attr->insn_cnt;
	attr.insn_cnt = (__u32)insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
@ -616,15 +964,19 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	memcpy(attr.prog_name, load_attr->name,
	       min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
	attr.core_relo_cnt = gen->core_relo_cnt;
	core_relos = add_data(gen, gen->core_relos,
			      attr.core_relo_cnt * attr.core_relo_rec_size);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license);
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
@ -632,9 +984,11 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
	emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
			  stack_off(map_fd[0]));
	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
@ -657,12 +1011,12 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns);
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns);
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
@ -703,8 +1057,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
@ -713,6 +1067,33 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
	emit_check_err(gen);
}

void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
		 outer_map_idx, slot, inner_map_idx);

	key = add_data(gen, &slot, sizeof(slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
@ -722,8 +1103,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
File diff suppressed because it is too large
@ -24,6 +24,10 @@
extern "C" {
#endif

LIBBPF_API __u32 libbpf_major_version(void);
LIBBPF_API __u32 libbpf_minor_version(void);
LIBBPF_API const char *libbpf_version_string(void);

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

@ -83,12 +87,15 @@ struct bpf_object_open_opts {
	 * Non-relocatable instructions are replaced with invalid ones to
	 * prevent accidental errors.
	 * */
	LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect")
	bool relaxed_core_relocs;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;

	LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program")
	__u32 attach_prog_fd;
	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
@ -101,12 +108,73 @@ struct bpf_object_open_opts {
	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
	 */
	const char *btf_custom_path;
	/* Pointer to a buffer for storing kernel logs for applicable BPF
	 * commands. Valid kernel_log_size has to be specified as well and is
	 * passed through to bpf() syscall. Keep in mind that kernel might
	 * fail operation with -ENOSPC error if provided buffer is too small
	 * to contain entire log output.
	 * See the comment below for kernel_log_level for interaction between
	 * log_buf and log_level settings.
	 *
	 * If specified, this log buffer will be passed for:
	 *   - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
	 *     with bpf_program__set_log() on per-program level, to get
	 *     BPF verifier log output.
	 *   - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
	 *     BTF sanity checking log.
	 *
	 * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite
	 * previous contents, so if you need more fine-grained control, set
	 * per-program buffer with bpf_program__set_log_buf() to preserve each
	 * individual program's verification log. Keep using kernel_log_buf
	 * for BTF verification log, if necessary.
	 */
	char *kernel_log_buf;
	size_t kernel_log_size;
	/*
	 * Log level can be set independently from log buffer. Log_level=0
	 * means that libbpf will attempt loading BTF or program without any
	 * logging requested, but will retry with either its own or custom log
	 * buffer, if provided, and log_level=1 on any error.
	 * And vice versa, setting log_level>0 will request BTF or prog
	 * loading with verbose log from the first attempt (and as such also
	 * for successfully loaded BTF or program), and the actual log buffer
	 * could be either libbpf's own auto-allocated log buffer, if
	 * kernel_log_buffer is NULL, or user-provided custom kernel_log_buf.
	 * If user didn't provide custom log buffer, libbpf will emit captured
	 * logs through its print callback.
	 */
	__u32 kernel_log_level;

	size_t :0;
};
#define bpf_object_open_opts__last_field btf_custom_path
#define bpf_object_open_opts__last_field kernel_log_level

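Usage sketch for the new kernel log knobs (the object path and buffer size are illustrative; assumes a libbpf version providing LIBBPF_OPTS and these fields):

#include <stdio.h>
#include <bpf/libbpf.h>

static char log_buf[64 * 1024];

int main(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = log_buf,
		.kernel_log_size = sizeof(log_buf),
		.kernel_log_level = 1,
	);
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts); /* hypothetical object */

	if (!obj)
		return 1;
	if (bpf_object__load(obj)) {
		/* log is filled during BPF_BTF_LOAD/BPF_PROG_LOAD at load time */
		fprintf(stderr, "load failed, kernel log:\n%s\n", log_buf);
		bpf_object__close(obj);
		return 1;
	}
	bpf_object__close(obj);
	return 0;
}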
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);

/**
 * @brief **bpf_object__open_file()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path
 * @param opts options for how to load the bpf object, this parameter is
 * optional and can be set to NULL
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__open_mem()** creates a bpf_object by reading
 * the BPF object's raw bytes from a memory buffer containing a valid
 * BPF ELF object file.
 * @param obj_buf pointer to the buffer containing ELF file bytes
 * @param obj_buf_sz number of bytes in the buffer
 * @param opts options for how to load the bpf object
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);
@ -146,7 +214,9 @@ struct bpf_object_load_attr {

/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead")
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
@ -157,6 +227,7 @@ struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__find_program_by_name() instead")
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title);
@ -164,7 +235,8 @@ LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead")
struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
	for ((pos) = bpf_object__next(NULL), \
	     (tmp) = bpf_object__next(pos); \
@ -186,16 +258,22 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,

/* Accessors of bpf_program */
struct bpf_program;
LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
						 const struct bpf_object *obj);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead")
struct bpf_program *bpf_program__next(struct bpf_program *prog,
				      const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

#define bpf_object__for_each_program(pos, obj) \
	for ((pos) = bpf_program__next(NULL, (obj)); \
	     (pos) != NULL; \
	     (pos) = bpf_program__next((pos), (obj)))
#define bpf_object__for_each_program(pos, obj) \
	for ((pos) = bpf_object__next_program((obj), NULL); \
	     (pos) != NULL; \
	     (pos) = bpf_object__next_program((obj), (pos)))

LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
						 const struct bpf_object *obj);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead")
struct bpf_program *bpf_program__prev(struct bpf_program *prog,
				      const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);

@ -214,18 +292,79 @@ LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);

/* returns program size in bytes */
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead")
LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);

LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
				 __u32 kern_version);
struct bpf_insn;

/**
 * @brief **bpf_program__insns()** gives read-only access to BPF program's
 * underlying BPF instructions.
 * @param prog BPF program for which to return instructions
 * @return a pointer to an array of BPF instructions that belong to the
 * specified BPF program
 *
 * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
 * pointed to can be fetched using **bpf_program__insn_cnt()** API.
 *
 * Keep in mind, libbpf can modify and append/delete BPF program's
 * instructions as it processes BPF object file and prepares everything for
 * uploading into the kernel. So depending on the point in BPF object
 * lifetime, **bpf_program__insns()** can return different sets of
 * instructions. As an example, during BPF object load phase BPF program
 * instructions will be CO-RE-relocated, BPF subprograms instructions will be
 * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
 * returned before **bpf_object__load()** and after it might be quite
 * different.
 */
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);
/**
 * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
 * that form specified BPF program.
 * @param prog BPF program for which to return number of BPF instructions
 *
 * See **bpf_program__insns()** documentation for notes on how libbpf can
 * change instructions and their count during different phases of
 * **bpf_object** lifetime.
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);

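A small usage sketch of the two instruction accessors declared above (prog is any program taken from an opened object):

#include <stdio.h>
#include <bpf/libbpf.h>

static void dump_insns(const struct bpf_program *prog)
{
	const struct bpf_insn *insn = bpf_program__insns(prog);
	size_t i, cnt = bpf_program__insn_cnt(prog);

	for (i = 0; i < cnt; i++)
		printf("insn %zu: code 0x%02x imm %d\n",
		       i, insn[i].code, insn[i].imm);
}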
LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead")
LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
					 const char *path,
					 int instance);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
					   const char *path,
					   int instance);

/**
 * @brief **bpf_program__pin()** pins the BPF program to a file
 * in the BPF FS specified by a path. This increments the program's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param prog BPF program to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);

/**
 * @brief **bpf_program__unpin()** unpins the BPF program from a file
 * in the BPFFS specified by a path. This decrements the program's
 * reference count.
 *
 * The file pinning the BPF program can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param prog BPF program to unpin
 * @param path file path to the pin in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);

@ -243,7 +382,7 @@ LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);

LIBBPF_API struct bpf_link *
bpf_program__attach(struct bpf_program *prog);
bpf_program__attach(const struct bpf_program *prog);

struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
@ -254,10 +393,10 @@ struct bpf_perf_event_opts {
#define bpf_perf_event_opts__last_field bpf_cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(struct bpf_program *prog, int pfd);
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd,
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
				    const struct bpf_perf_event_opts *opts);

struct bpf_kprobe_opts {
@ -266,7 +405,7 @@ struct bpf_kprobe_opts {
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* function's offset to install kprobe to */
	unsigned long offset;
	size_t offset;
	/* kprobe is return probe */
	bool retprobe;
	size_t :0;
@ -274,10 +413,10 @@ struct bpf_kprobe_opts {
#define bpf_kprobe_opts__last_field retprobe

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(struct bpf_program *prog,
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);

@ -296,12 +435,43 @@ struct bpf_uprobe_opts {
};
#define bpf_uprobe_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_uprobe()** attaches a BPF program
 * to the userspace function which is found by binary path and
 * offset. You can optionally specify a particular process to attach
 * to. You can also optionally attach the program to the function
 * exit instead of entry.
 *
 * @param prog BPF program to attach
 * @param retprobe Attach to function exit
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);

/**
 * @brief **bpf_program__attach_uprobe_opts()** is just like
 * bpf_program__attach_uprobe() except with an options struct
 * for various configurations.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid,
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts);

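Usage sketch for the uprobe attach API above; the binary path and function offset are hypothetical placeholders (a real offset would be resolved with a tool such as nm or readelf):

#include <bpf/libbpf.h>

static struct bpf_link *attach_entry_probe(const struct bpf_program *prog)
{
	/* retprobe=false: fire on function entry; pid=-1: all processes */
	return bpf_program__attach_uprobe(prog, false, -1,
					  "/usr/lib/libc.so.6", /* hypothetical path */
					  0x12345 /* hypothetical symbol offset */);
}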
@ -314,35 +484,35 @@ struct bpf_tracepoint_opts {
#define bpf_tracepoint_opts__last_field bpf_cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint(struct bpf_program *prog,
bpf_program__attach_tracepoint(const struct bpf_program *prog,
			       const char *tp_category,
			       const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint_opts(struct bpf_program *prog,
bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
				    const char *tp_category,
				    const char *tp_name,
				    const struct bpf_tracepoint_opts *opts);

LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
				   const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(struct bpf_program *prog);
bpf_program__attach_trace(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_lsm(struct bpf_program *prog);
bpf_program__attach_lsm(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd);
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd);
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(struct bpf_program *prog, int ifindex);
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
LIBBPF_API struct bpf_link *
bpf_program__attach_freplace(struct bpf_program *prog,
bpf_program__attach_freplace(const struct bpf_program *prog,
			     int target_fd, const char *attach_func_name);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
@ -352,11 +522,9 @@ struct bpf_iter_attach_opts {
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(struct bpf_program *prog,
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

struct bpf_insn;

/*
 * Libbpf allows callers to adjust BPF programs before being loaded
 * into kernel. One program in an object file can be transformed into
@ -385,7 +553,6 @@ struct bpf_insn;
 * one instance. In this case bpf_program__fd(prog) is equal to
 * bpf_program__nth_fd(prog, 0).
 */

struct bpf_prog_prep_result {
	/*
	 * If not NULL, load new instruction array.
@ -414,9 +581,11 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
				  struct bpf_insn *insns, int insns_cnt,
				  struct bpf_prog_prep_result *res);

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
				     bpf_program_prep_t prep);

LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);

/*
@ -446,6 +615,18 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);

LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);

/* Per-program log level and log buffer getters/setters.
 * See bpf_object_open_opts comments regarding log_level and log_buf
 * interactions.
 */
LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);

LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);
@ -478,9 +659,13 @@ struct bpf_map_def {
	unsigned int map_flags;
};

/*
 * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel,
 * so no need to worry about a name clash.
/**
 * @brief **bpf_object__find_map_by_name()** returns BPF map of
 * the given name, if it exists within the passed BPF object
 * @param obj BPF object
 * @param name name of the BPF map
 * @return BPF map instance, if such map exists within the BPF object;
 * or NULL otherwise.
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
@ -492,21 +677,32 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
 * Get bpf_map through the offset of corresponding struct bpf_map_def
 * in the BPF object file.
 */
LIBBPF_API struct bpf_map *
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__find_map_by_name() instead")
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead")
struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

#define bpf_object__for_each_map(pos, obj) \
	for ((pos) = bpf_map__next(NULL, (obj)); \
	for ((pos) = bpf_object__next_map((obj), NULL); \
	     (pos) != NULL; \
	     (pos) = bpf_map__next((pos), (obj)))
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead")
struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);

/* get/set map FD */
/**
 * @brief **bpf_map__fd()** gets the file descriptor of the passed
 * BPF map
 * @param map the BPF map instance
 * @return the file descriptor; or -EINVAL in case of an error
 */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map definition */
@ -538,6 +734,9 @@ LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map if_index */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
/* get/set map map_extra flags */
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);

typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
@ -546,7 +745,16 @@ LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
					  const void *data, size_t size);
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__type() instead")
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);

/**
 * @brief **bpf_map__is_internal()** tells the caller whether or not the
 * passed map is a special map created by libbpf automatically for things like
 * global variables, __ksym externs, Kconfig values, etc
 * @param map the bpf_map
 * @return true, if the map is an internal map; false, otherwise
 */
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
@ -558,6 +766,38 @@ LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

/**
 * @brief **libbpf_get_error()** extracts the error code from the passed
 * pointer
 * @param ptr pointer returned from libbpf API function
 * @return error code; or 0 if no error occurred
 *
 * Many libbpf API functions which return pointers have logic to encode error
 * codes as pointers, and do not return NULL. Meaning **libbpf_get_error()**
 * should be used on the return value from these functions immediately after
 * calling the API function, with no intervening calls that could clobber the
 * `errno` variable. Consult the individual function's documentation to verify
 * if this logic applies.
 *
 * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)`
 * is enabled, NULL is returned on error instead.
 *
 * If ptr is NULL, then errno should be already set by the failing
 * API, because libbpf never returns NULL on success and it now always
 * sets errno on error.
 *
 * Example usage:
 *
 *   struct perf_buffer *pb;
 *
 *   pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
 *   err = libbpf_get_error(pb);
 *   if (err) {
 *	  pb = NULL;
 *	  fprintf(stderr, "failed to open perf buffer: %d\n", err);
 *	  goto cleanup;
 *   }
 */
LIBBPF_API long libbpf_get_error(const void *ptr);

struct bpf_prog_load_attr {
|
||||
@ -569,10 +809,12 @@ struct bpf_prog_load_attr {
|
||||
int prog_flags;
|
||||
};
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead")
|
||||
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
|
||||
struct bpf_object **pobj, int *prog_fd);
|
||||
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
|
||||
struct bpf_object **pobj, int *prog_fd);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead")
|
||||
LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
|
||||
struct bpf_object **pobj, int *prog_fd);
|
||||
|
||||
/* XDP related API */
|
||||
struct xdp_link_info {
|
||||
@ -670,18 +912,52 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
|
||||
|
||||
/* common use perf buffer options */
|
||||
struct perf_buffer_opts {
|
||||
/* if specified, sample_cb is called for each sample */
|
||||
perf_buffer_sample_fn sample_cb;
|
||||
/* if specified, lost_cb is called for each batch of lost samples */
|
||||
perf_buffer_lost_fn lost_cb;
|
||||
/* ctx is provided to sample_cb and lost_cb */
|
||||
void *ctx;
|
||||
union {
|
||||
size_t sz;
|
||||
struct { /* DEPRECATED: will be removed in v1.0 */
|
||||
/* if specified, sample_cb is called for each sample */
|
||||
perf_buffer_sample_fn sample_cb;
|
||||
/* if specified, lost_cb is called for each batch of lost samples */
|
||||
perf_buffer_lost_fn lost_cb;
|
||||
/* ctx is provided to sample_cb and lost_cb */
|
||||
void *ctx;
|
||||
};
|
||||
};
|
||||
};
|
||||
#define perf_buffer_opts__last_field sz
|
||||
|
||||
/**
|
||||
* @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
|
||||
* BPF_PERF_EVENT_ARRAY map
|
||||
* @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
|
||||
* code to send data over to user-space
|
||||
* @param page_cnt number of memory pages allocated for each per-CPU buffer
|
||||
* @param sample_cb function called on each received data record
|
||||
* @param lost_cb function called when record loss has occurred
|
||||
* @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
|
||||
* @return a new instance of struct perf_buffer on success, NULL on error with
|
||||
* *errno* containing an error code
|
||||
*/
|
||||
LIBBPF_API struct perf_buffer *
|
||||
perf_buffer__new(int map_fd, size_t page_cnt,
|
||||
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
|
||||
const struct perf_buffer_opts *opts);
|
||||
|
||||
LIBBPF_API struct perf_buffer *
|
||||
perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
|
||||
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
|
||||
const struct perf_buffer_opts *opts);
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead")
|
||||
struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
|
||||
const struct perf_buffer_opts *opts);
|
||||
|
||||
#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__)
|
||||
#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \
|
||||
perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts)
|
||||
#define ___perf_buffer_new3(map_fd, page_cnt, opts) \
|
||||
perf_buffer__new_deprecated(map_fd, page_cnt, opts)
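
A rough usage sketch of the new six-argument variant (the map fd and callback names below are hypothetical, not part of this diff; a three-argument call is routed to the deprecated API by the same overload macros):

	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
	{
		/* consume one record produced by the BPF side */
	}

	struct perf_buffer *pb;
	int err;

	/* 64 pages per per-CPU ring; no lost_cb, no user ctx, no opts */
	pb = perf_buffer__new(map_fd, 64, on_sample, NULL, NULL, NULL);
	err = libbpf_get_error(pb);
	if (err)
		return err;
	while (perf_buffer__poll(pb, 100 /* timeout, ms */) >= 0)
		; /* on_sample() is invoked once per record */
	perf_buffer__free(pb);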

enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE = 0,
	LIBBPF_PERF_EVENT_ERROR = -1,
@ -695,12 +971,21 @@ typedef enum bpf_perf_event_ret

/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
	/* perf event attrs passed directly into perf_event_open() */
	struct perf_event_attr *attr;
	/* raw event callback */
	perf_buffer_event_fn event_cb;
	/* ctx is provided to event_cb */
	void *ctx;
	union {
		struct {
			size_t sz;
			long :0;
			long :0;
		};
		struct { /* DEPRECATED: will be removed in v1.0 */
			/* perf event attrs passed directly into perf_event_open() */
			struct perf_event_attr *attr;
			/* raw event callback */
			perf_buffer_event_fn event_cb;
			/* ctx is provided to event_cb */
			void *ctx;
		};
	};
	/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
	 * max_entries of given PERF_EVENT_ARRAY map)
	 */
@ -710,11 +995,28 @@ struct perf_buffer_raw_opts {
	/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
	int *map_keys;
};
#define perf_buffer_raw_opts__last_field map_keys

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
		     perf_buffer_event_fn event_cb, void *ctx,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
			    perf_buffer_event_fn event_cb, void *ctx,
			    const struct perf_buffer_raw_opts *opts);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead")
struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
						    const struct perf_buffer_raw_opts *opts);

#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__)
#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \
	perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts)
#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \
	perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts)

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
@ -726,6 +1028,7 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
typedef enum bpf_perf_event_ret
	(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
				  void *private_data);
LIBBPF_DEPRECATED_SINCE(0, 8, "use perf_buffer__poll() or perf_buffer__consume() instead")
LIBBPF_API enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
@ -752,13 +1055,57 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
				    __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_prog_type() instead")
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_map_type() instead")
LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
				 enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_helper() instead")
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "implement your own or use bpftool for feature detection")
LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
 * BPF programs of a given type.
 * @param prog_type BPF program type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given program type is supported; 0, if given program type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);
/**
 * @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports
 * BPF maps of a given type.
 * @param map_type BPF map type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given map type is supported; 0, if given map type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);
/**
 * @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the
 * use of a given BPF helper from specified BPF program type.
 * @param prog_type BPF program type used to check the support of BPF helper
 * @param helper_id BPF helper ID (enum bpf_func_id) to check support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given combination of program type and helper is supported; 0,
 * if the combination is not supported; negative error code if feature
 * detection for provided input arguments failed or can't be performed
 *
 * Make sure the process has required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
				       enum bpf_func_id helper_id, const void *opts);
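
All three probe functions share the 1/0/negative return convention documented above, so callers should compare against 1 explicitly (a negative error code is truthy in C). A minimal feature-gating sketch under that assumption; the probed program type, map type, and helper below are illustrative choices, not taken from this diff:

	static bool kernel_has_needed_features(void)
	{
		return libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_TRACEPOINT, NULL) == 1 &&
		       libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1 &&
		       libbpf_probe_bpf_helper(BPF_PROG_TYPE_TRACEPOINT,
					       BPF_FUNC_get_current_comm, NULL) == 1;
	}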

/*
 * Get bpf_prog_info in continuous memory
 *
@ -813,18 +1160,22 @@ struct bpf_prog_info_linear {
	__u8 data[];
};

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);

/*
 * A helper function to get the number of possible CPUs before looking up
 * per-CPU maps. Negative errno is returned on failure.
/**
 * @brief **libbpf_num_possible_cpus()** is a helper function to get the
 * number of possible CPUs that the host kernel supports and expects.
 * @return number of possible CPUs; or error code on failure
 *
 * Example usage:
 *
@ -834,7 +1185,6 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
 *     }
 *     long values[ncpus];
 *     bpf_map_lookup_elem(per_cpu_map_fd, key, values);
 *
 */
LIBBPF_API int libbpf_num_possible_cpus(void);

@ -854,17 +1204,17 @@ struct bpf_object_skeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const char *name;
	void *data;
	const void *data;
	size_t data_sz;

	struct bpf_object **obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_skeleton_map) */
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_skeleton_prog) */
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;
};

|
@ -386,3 +386,49 @@ LIBBPF_0.5.0 {
	btf_dump__dump_type_data;
	libbpf_set_strict_mode;
} LIBBPF_0.4.0;

LIBBPF_0.6.0 {
	global:
		bpf_map__map_extra;
		bpf_map__set_map_extra;
		bpf_map_create;
		bpf_object__next_map;
		bpf_object__next_program;
		bpf_object__prev_map;
		bpf_object__prev_program;
		bpf_prog_load_deprecated;
		bpf_prog_load;
		bpf_program__flags;
		bpf_program__insn_cnt;
		bpf_program__insns;
		bpf_program__set_flags;
		btf__add_btf;
		btf__add_decl_tag;
		btf__add_type_tag;
		btf__dedup;
		btf__dedup_deprecated;
		btf__raw_data;
		btf__type_cnt;
		btf_dump__new;
		btf_dump__new_deprecated;
		libbpf_major_version;
		libbpf_minor_version;
		libbpf_version_string;
		perf_buffer__new;
		perf_buffer__new_deprecated;
		perf_buffer__new_raw;
		perf_buffer__new_raw_deprecated;
} LIBBPF_0.5.0;

LIBBPF_0.7.0 {
	global:
		bpf_btf_load;
		bpf_program__log_buf;
		bpf_program__log_level;
		bpf_program__set_log_buf;
		bpf_program__set_log_level;
		libbpf_probe_bpf_helper;
		libbpf_probe_bpf_map_type;
		libbpf_probe_bpf_prog_type;
		libbpf_set_memlock_rlim_max;
};

|
@ -10,6 +10,7 @@
#define __LIBBPF_LIBBPF_COMMON_H

#include <string.h>
#include "libbpf_version.h"

#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
@ -17,6 +18,46 @@

#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))

/* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */
#define LIBBPF_DEPRECATED_SINCE(major, minor, msg)			\
	__LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor		\
		(LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg))

#define __LIBBPF_CURRENT_VERSION_GEQ(major, minor)			\
	(LIBBPF_MAJOR_VERSION > (major) ||				\
	 (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor)))

/* Add checks for other versions below when planning deprecation of API symbols
 * with the LIBBPF_DEPRECATED_SINCE macro.
 */
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6)
#define __LIBBPF_MARK_DEPRECATED_0_6(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_6(X)
#endif
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7)
#define __LIBBPF_MARK_DEPRECATED_0_7(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_7(X)
#endif
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8)
#define __LIBBPF_MARK_DEPRECATED_0_8(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_8(X)
#endif

/* This set of internal macros allows doing "function overloading" based on the
 * number of arguments provided by the user, in a backwards-compatible way
 * during the transition to libbpf 1.0.
 * It's an ugly but necessary evil that will be cleaned up when we get to 1.0.
 * See bpf_prog_load() overload for example.
 */
#define ___libbpf_cat(A, B) A ## B
#define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM)
#define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N
#define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1)
#define ___libbpf_overload(NAME, ...) ___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__)
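
For readers following the counting trick, here is a hand expansion of a three-argument call (an illustration, not part of the patch):

	/*
	 * perf_buffer__new(fd, 64, &opts)
	 * => ___libbpf_overload(___perf_buffer_new, fd, 64, &opts)
	 * => ___libbpf_select(___perf_buffer_new,
	 *                     ___libbpf_cnt(fd, 64, &opts))(fd, 64, &opts)
	 *    where ___libbpf_cnt(fd, 64, &opts)
	 *    => ___libbpf_nth(fd, 64, &opts, 6, 5, 4, 3, 2, 1)  // 7th arg: 3
	 * => ___libbpf_cat(___perf_buffer_new, 3)(fd, 64, &opts)
	 * => ___perf_buffer_new3(fd, 64, &opts)
	 * => perf_buffer__new_deprecated(fd, 64, &opts)
	 */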

/* Helper macro to declare and initialize libbpf options struct
 *
 * This dance with uninitialized declaration, followed by memset to zero,
@ -30,7 +71,7 @@
 * including any extra padding, it with memset() and then assigns initial
 * values provided by users in struct initializer-syntax as varargs.
 */
#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
#define LIBBPF_OPTS(TYPE, NAME, ...) \
	struct TYPE NAME = ({ \
		memset(&NAME, 0, sizeof(struct TYPE)); \
		(struct TYPE) { \

|
@ -13,6 +13,8 @@
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
#include "libbpf_legacy.h"
#include "relo_core.h"

@ -52,8 +54,8 @@
#endif

/* Older libelf all end up in this expression, for both 32 and 64 bit */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#endif

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@ -69,6 +71,10 @@
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
#define BTF_TYPE_TYPE_TAG_ENC(value, type) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)

#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@ -87,20 +93,40 @@
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif

/* Check whether a string `str` has prefix `pfx`, regardless of whether `pfx`
 * is a string literal known at compilation time or a char * pointer known
 * only at runtime.
 */
#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
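
An illustration of the two branches (the names below are hypothetical, not from this diff): for a string literal the prefix length folds to a compile-time constant via sizeof, otherwise strlen() runs at runtime:

	str_has_pfx(sec_name, "xdp");	/* strncmp(sec_name, "xdp", 3) == 0 */

	const char *pfx = get_prefix();	/* runtime-only prefix (hypothetical) */
	str_has_pfx(sec_name, pfx);	/* strncmp(sec_name, pfx, strlen(pfx)) == 0 */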

/* Symbol versioning is different between static and shared library.
 * Properly versioned symbols are needed for shared library, but
 * only the symbol of the new version is needed for static library.
 * Starting with GNU C 10, use symver attribute instead of .symver assembler
 * directive, which works better with GCC LTO builds.
 */
#ifdef SHARED
# define COMPAT_VERSION(internal_name, api_name, version) \
#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10

#define DEFAULT_VERSION(internal_name, api_name, version) \
	__attribute__((symver(#api_name "@@" #version)))
#define COMPAT_VERSION(internal_name, api_name, version) \
	__attribute__((symver(#api_name "@" #version)))

#elif defined(SHARED)

#define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
# define DEFAULT_VERSION(internal_name, api_name, version) \
#define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);
#else
# define COMPAT_VERSION(internal_name, api_name, version)
# define DEFAULT_VERSION(internal_name, api_name, version) \

#else /* !SHARED */

#define COMPAT_VERSION(internal_name, api_name, version)
#define DEFAULT_VERSION(internal_name, api_name, version) \
	extern typeof(internal_name) api_name \
	__attribute__((alias(#internal_name)));

#endif

extern void libbpf_print(enum libbpf_print_level level,
@ -143,10 +169,31 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
	return realloc(ptr, total);
}

/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
 * is a zero-terminated string no matter what (unless sz == 0, in which case
 * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
 * in what is returned. Given this is an internal helper, it's trivial to
 * extend this, when necessary. Use this instead of strncpy inside libbpf
 * source code.
 */
static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
{
	size_t i;

	if (sz == 0)
		return;

	sz--;
	for (i = 0; i < sz && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
}
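
A usage sketch (hypothetical buffer and input, not from this diff); unlike strncpy(), the destination always ends up NUL-terminated and is not zero-padded beyond the copied bytes:

	char name[16];

	libbpf_strlcpy(name, user_supplied_name, sizeof(name));
	/* name is now a valid C string, truncated to 15 chars if needed */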

__u32 get_kernel_version(void);

struct btf;
struct btf_type;

struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

@ -171,8 +218,9 @@ enum map_def_parts {
	MAP_DEF_NUMA_NODE = 0x080,
	MAP_DEF_PINNING = 0x100,
	MAP_DEF_INNER_MAP = 0x200,
	MAP_DEF_MAP_EXTRA = 0x400,

	MAP_DEF_ALL = 0x3ff, /* combination of all above */
	MAP_DEF_ALL = 0x7ff, /* combination of all above */
};

struct btf_map_def {
@ -186,6 +234,7 @@ struct btf_map_def {
	__u32 map_flags;
	__u32 numa_node;
	__u32 pinning;
	__u64 map_extra;
};

int parse_btf_map_def(const char *map_name, struct btf *btf,
@ -244,46 +293,52 @@ static inline bool libbpf_validate_opts(const char *opts,
			(opts)->sz - __off); \
})

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* Kernel support for module BTFs */
	FEAT_MODULE_BTF,
	/* BTF_KIND_FLOAT support */
	FEAT_BTF_FLOAT,
	/* BPF perf link support */
	FEAT_PERF_LINK,
	/* BTF_KIND_DECL_TAG support */
	FEAT_BTF_DECL_TAG,
	/* BTF_KIND_TYPE_TAG support */
	FEAT_BTF_TYPE_TAG,
	/* memcg-based accounting for BPF maps and progs */
	FEAT_MEMCG_ACCOUNT,
	__FEAT_CNT,
};

int probe_memcg_account(void);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
int bump_rlimit_memlock(void);

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len);
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);

struct bpf_prog_load_params {
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	const char *name;
	const struct bpf_insn *insns;
	size_t insn_cnt;
	const char *license;
	__u32 kern_version;
	__u32 attach_prog_fd;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 prog_ifindex;
	__u32 prog_btf_fd;
	__u32 prog_flags;

	__u32 func_info_rec_size;
	const void *func_info;
	__u32 func_info_cnt;

	__u32 line_info_rec_size;
	const void *line_info;
	__u32 line_info_cnt;

	__u32 log_level;
	char *log_buf;
	size_t log_buf_sz;
};

int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind);
@ -386,6 +441,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind);

extern enum libbpf_strict_mode libbpf_mode;

@ -447,4 +504,26 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

/* If fd is stdin, stdout, or stderr, dup it to an fd greater than 2.
 * Takes ownership of the fd passed in: when it is below 3, the original fd
 * is closed after being duplicated with fcntl(fd, F_DUPFD_CLOEXEC, 3).
 */
static inline int ensure_good_fd(int fd)
{
	int old_fd = fd, saved_errno;

	if (fd < 0)
		return fd;
	if (fd < 3) {
		fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
		saved_errno = errno;
		close(old_fd);
		if (fd < 0) {
			pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
			errno = saved_errno;
		}
	}
	return fd;
}

#endif /* __LIBBPF_LIBBPF_INTERNAL_H */

|
@ -45,12 +45,41 @@ enum libbpf_strict_mode {
	 * (positive) error code.
	 */
	LIBBPF_STRICT_DIRECT_ERRS = 0x02,
	/*
	 * Enforce strict BPF program section (SEC()) names.
	 * E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were
	 * allowed, with LIBBPF_STRICT_SEC_PREFIX this will become
	 * unrecognized by libbpf and would have to be just SEC("xdp") and
	 * SEC("perf_event").
	 *
	 * Note, in this mode the program pin path will be based on the
	 * function name instead of section name.
	 */
	LIBBPF_STRICT_SEC_NAME = 0x04,
	/*
	 * Disable the global 'bpf_objects_list'. Maintaining this list adds
	 * a race condition to bpf_object__open() and bpf_object__close().
	 * Clients can maintain it on their own if it is valuable for them.
	 */
	LIBBPF_STRICT_NO_OBJECT_LIST = 0x08,
	/*
	 * Automatically bump RLIMIT_MEMLOCK using setrlimit() before the
	 * first BPF program or map creation operation. This is done only if
	 * kernel is too old to support memcg-based memory accounting for BPF
	 * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
	 * but it can be overridden with libbpf_set_memlock_rlim_max() API.
	 * Note that libbpf_set_memlock_rlim_max() needs to be called before
	 * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
	 * operation.
	 */
	LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK = 0x10,

	__LIBBPF_STRICT_LAST,
};

LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
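
Usage sketch (not part of this diff): the mode values are bit flags and can be OR-ed together; upstream also defines LIBBPF_STRICT_ALL (not shown in this hunk) to opt into everything at once:

	err = libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS |
				     LIBBPF_STRICT_DIRECT_ERRS);
	if (err)
		/* requested mode not supported by this libbpf version */;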

#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS

#ifdef __cplusplus
} /* extern "C" */

|
@ -33,7 +33,7 @@ static int get_vendor_id(int ifindex)

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

@ -48,41 +48,65 @@ static int get_vendor_id(int ifindex)
	return strtol(buf, NULL, 0);
}

static int get_kernel_version(void)
static int probe_prog_load(enum bpf_prog_type prog_type,
			   const struct bpf_insn *insns, size_t insns_cnt,
			   char *log_buf, size_t log_buf_sz,
			   __u32 ifindex)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}

static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_sz,
		.log_level = log_buf ? 1 : 0,
		.prog_ifindex = ifindex,
	);
	int fd, err, exp_err = 0;
	const char *exp_msg = NULL;
	char buf[4096];

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
		opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		xattr.expected_attach_type = BPF_SK_LOOKUP;
		opts.expected_attach_type = BPF_SK_LOOKUP;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		opts.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		opts.expected_attach_type = BPF_LIRC_MODE2;
		break;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		if (prog_type == BPF_PROG_TYPE_TRACING)
			opts.expected_attach_type = BPF_TRACE_FENTRY;
		else
			opts.expected_attach_type = BPF_MODIFY_RETURN;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "attach_btf_id 1 is not a function";
		break;
	case BPF_PROG_TYPE_EXT:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "Cannot replace kernel functions";
		break;
	case BPF_PROG_TYPE_SYSCALL:
		opts.prog_flags = BPF_F_SLEEPABLE;
		break;
	case BPF_PROG_TYPE_STRUCT_OPS:
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
@ -103,27 +127,42 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	default:
		break;
	default:
		return -EOPNOTSUPP;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
	err = -errno;
	if (fd >= 0)
		close(fd);
	if (exp_err) {
		if (fd >= 0 || err != exp_err)
			return 0;
		if (exp_msg && !strstr(buf, exp_msg))
			return 0;
		return 1;
	}
	return fd >= 0 ? 1 : 0;
}

int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0);
	return libbpf_err(ret);
}

bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
@ -133,12 +172,16 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
		BPF_EXIT_INSN()
	};

	/* prefer libbpf_probe_bpf_prog_type() unless offload is requested */
	if (ifindex == 0)
		return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1;

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
	probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
@ -166,7 +209,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
	btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);

	free(raw_btf);
	return btf_fd;
@ -199,17 +242,18 @@ static int load_local_storage_btf(void)
				     strs, sizeof(strs));
}

bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int key_size, value_size, max_entries;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;
	int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;

	opts.map_ifindex = ifindex;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;
	map_flags = 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
@ -218,7 +262,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		map_flags = BPF_F_NO_PREALLOC;
		opts.map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
@ -237,17 +281,25 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		opts.map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_local_storage_btf();
		if (btf_fd < 0)
			return false;
			return btf_fd;
		break;
	case BPF_MAP_TYPE_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = 4096;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_STRUCT_OPS:
		/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
		opts.btf_vmlinux_value_type_id = 1;
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		key_size = 0;
		max_entries = 1;
		break;
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
@ -266,9 +318,10 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STRUCT_OPS:
	default:
		break;
	case BPF_MAP_TYPE_UNSPEC:
	default:
		return -EOPNOTSUPP;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
@ -277,37 +330,102 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;
			goto cleanup;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					  sizeof(__u32), sizeof(__u32), 1, NULL);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}
			goto cleanup;

		fd = bpf_create_map_xattr(&attr);
		opts.inner_map_fd = fd_inner;
	}

	if (btf_fd >= 0) {
		opts.btf_fd = btf_fd;
		opts.btf_key_type_id = btf_key_type_id;
		opts.btf_value_type_id = btf_value_type_id;
	}

	fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
	err = -errno;

cleanup:
	if (fd >= 0)
		close(fd);
	if (fd_inner >= 0)
		close(fd_inner);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
	if (exp_err)
		return fd < 0 && err == exp_err ? 1 : 0;
	else
		return fd >= 0 ? 1 : 0;
}

int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
{
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_map_create(map_type, 0);
	return libbpf_err(ret);
}

bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	return probe_map_create(map_type, ifindex) == 1;
}

int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
			    const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL((__u32)helper_id),
		BPF_EXIT_INSN(),
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	char buf[4096];
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	/* we can't successfully load all prog types to check for BPF helper
	 * support, so bail out with -EOPNOTSUPP error
	 */
	switch (prog_type) {
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS:
		return -EOPNOTSUPP;
	default:
		break;
	}

	buf[0] = '\0';
	ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0);
	if (ret < 0)
		return libbpf_err(ret);

	/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
	 * at all, it will emit something like "invalid func unknown#181".
	 * If BPF verifier recognizes BPF helper but it's not supported for
	 * given BPF program type, it will emit "unknown func bpf_sys_bpf#166".
	 * In both cases, provided combination of BPF program type and BPF
	 * helper is not supported by the kernel.
	 * In all other cases, probe_prog_load() above will either succeed (e.g.,
	 * because BPF helper happens to accept no input arguments or it
	 * accepts one input argument and initial PTR_TO_CTX is fine for
	 * that), or we'll get some more specific BPF verifier error about
	 * some unsatisfied conditions.
	 */
	if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
		return 0;
	return 1; /* assume supported */
}

bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
@ -320,8 +438,7 @@ bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
@ -353,8 +470,8 @@ bool bpf_probe_large_insn_limit(__u32 ifindex)
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
		   ifindex);
	probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
			ifindex);

	return errno != E2BIG && errno != EINVAL;
}
|
@ -15,7 +15,6 @@
#include <linux/btf.h>
#include <elf.h>
#include <libelf.h>
#include <gelf.h>
#include <fcntl.h>
#include "libbpf.h"
#include "btf.h"
@ -303,7 +302,7 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
	if (!linker->filename)
		return -ENOMEM;

	linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
	if (linker->fd < 0) {
		err = -errno;
		pr_warn("failed to create '%s': %d\n", file, err);
@ -325,12 +324,12 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)

	linker->elf_hdr->e_machine = EM_BPF;
	linker->elf_hdr->e_type = ET_REL;
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER"
#error "Unknown __BYTE_ORDER__"
#endif

	/* STRTAB */
@ -540,12 +539,12 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
				const struct bpf_linker_file_opts *opts,
				struct src_obj *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	const int host_endianness = ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	const int host_endianness = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER"
#error "Unknown __BYTE_ORDER__"
#endif
	int err = 0;
	Elf_Scn *scn;
@ -558,7 +557,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,

	obj->filename = filename;

	obj->fd = open(filename, O_RDONLY);
	obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
	if (obj->fd < 0) {
		err = -errno;
		pr_warn("failed to open file '%s': %d\n", filename, err);
@ -922,7 +921,7 @@ static int check_btf_type_id(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (*type_id > btf__get_nr_types(btf))
	if (*type_id >= btf__type_cnt(btf))
		return -EINVAL;

	return 0;
@ -949,8 +948,8 @@ static int linker_sanity_check_btf(struct src_obj *obj)
	if (!obj->btf)
		return 0;

	n = btf__get_nr_types(obj->btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		t = btf_type_by_id(obj->btf, i);

		err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
@ -1660,8 +1659,8 @@ static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sy
		return -EINVAL;
	}

	n = btf__get_nr_types(obj->btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(obj->btf, i);

		/* some global and extern FUNCs and VARs might not be associated with any
@ -2135,8 +2134,8 @@ static int linker_fixup_btf(struct src_obj *obj)
	if (!obj->btf)
		return 0;

	n = btf__get_nr_types(obj->btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		struct btf_var_secinfo *vi;
		struct btf_type *t;

@ -2239,14 +2238,14 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
	if (!obj->btf)
		return 0;

	start_id = btf__get_nr_types(linker->btf) + 1;
	n = btf__get_nr_types(obj->btf);
	start_id = btf__type_cnt(linker->btf);
	n = btf__type_cnt(obj->btf);

	obj->btf_type_map = calloc(n + 1, sizeof(int));
	if (!obj->btf_type_map)
		return -ENOMEM;

	for (i = 1; i <= n; i++) {
	for (i = 1; i < n; i++) {
		struct glob_sym *glob_sym = NULL;

		t = btf__type_by_id(obj->btf, i);
@ -2301,8 +2300,8 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
	}

	/* remap all the types except DATASECs */
	n = btf__get_nr_types(linker->btf);
	for (i = start_id; i <= n; i++) {
	n = btf__type_cnt(linker->btf);
	for (i = start_id; i < n; i++) {
		struct btf_type *dst_t = btf_type_by_id(linker->btf, i);

		if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
@ -2655,13 +2654,14 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,

static int finalize_btf(struct bpf_linker *linker)
{
	LIBBPF_OPTS(btf_dedup_opts, opts);
	struct btf *btf = linker->btf;
	const void *raw_data;
	int i, j, id, err;
	__u32 raw_sz;

	/* bail out if no BTF data was produced */
	if (btf__get_nr_types(linker->btf) == 0)
	if (btf__type_cnt(linker->btf) == 1)
		return 0;

	for (i = 1; i < linker->sec_cnt; i++) {
@ -2691,14 +2691,15 @@ static int finalize_btf(struct bpf_linker *linker)
		return err;
	}

	err = btf__dedup(linker->btf, linker->btf_ext, NULL);
	opts.btf_ext = linker->btf_ext;
	err = btf__dedup(linker->btf, &opts);
	if (err) {
		pr_warn("BTF dedup failed: %d\n", err);
		return err;
	}

	/* Emit .BTF section */
	raw_data = btf__get_raw_data(linker->btf, &raw_sz);
	raw_data = btf__raw_data(linker->btf, &raw_sz);
	if (!raw_data)
		return -ENOMEM;

|
@ -1,6 +1,60 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Facebook */

#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#include "relo_core.h"

static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}

static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}

static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}

static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

#undef pr_warn
#undef pr_info
#undef pr_debug
#define pr_warn(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...)	bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
#else
#include <stdio.h>
#include <string.h>
#include <errno.h>
@ -12,33 +66,7 @@
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"

#define BPF_CORE_SPEC_MAX_LEN 64

/* represents BPF CO-RE field or array element accessor */
struct bpf_core_accessor {
	__u32 type_id;		/* struct/union type or array element type */
	__u32 idx;		/* field index or array index */
	const char *name;	/* field name or NULL for array accessor */
};

struct bpf_core_spec {
	const struct btf *btf;
	/* high-level spec: named fields and array indices only */
	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
	/* original unresolved (no skip_mods_or_typedefs) root type ID */
	__u32 root_type_id;
	/* CO-RE relocation kind */
	enum bpf_core_relo_kind relo_kind;
	/* high-level spec length */
	int len;
	/* raw, low-level spec: 1-to-1 with accessor spec string */
	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
	/* raw spec length */
	int raw_len;
	/* field bit offset represented by spec */
	__u32 bit_offset;
};
#endif

static bool is_flex_arr(const struct btf *btf,
			const struct bpf_core_accessor *acc,
@ -51,25 +79,25 @@ static bool is_flex_arr(const struct btf *btf,
		return false;

	/* has to be the last member of enclosing struct */
	t = btf__type_by_id(btf, acc->type_id);
	t = btf_type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
}

static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_FIELD_EXISTS: return "field_exists";
	case BPF_FIELD_SIGNED: return "signed";
	case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_TYPE_ID_TARGET: return "target_type_id";
	case BPF_TYPE_EXISTS: return "type_exists";
	case BPF_TYPE_SIZE: return "type_size";
	case BPF_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_ENUMVAL_VALUE: return "enumval_value";
	case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_CORE_FIELD_EXISTS: return "field_exists";
	case BPF_CORE_FIELD_SIGNED: return "signed";
	case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
	case BPF_CORE_TYPE_EXISTS: return "type_exists";
	case BPF_CORE_TYPE_SIZE: return "type_size";
	case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
	default: return "unknown";
	}
}
@ -77,12 +105,12 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_FIELD_BYTE_OFFSET:
	case BPF_FIELD_BYTE_SIZE:
	case BPF_FIELD_EXISTS:
	case BPF_FIELD_SIGNED:
	case BPF_FIELD_LSHIFT_U64:
	case BPF_FIELD_RSHIFT_U64:
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return true;
	default:
		return false;
@ -92,10 +120,10 @@ static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_TYPE_ID_LOCAL:
	case BPF_TYPE_ID_TARGET:
	case BPF_TYPE_EXISTS:
	case BPF_TYPE_SIZE:
	case BPF_CORE_TYPE_ID_LOCAL:
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return true;
	default:
		return false;
@ -105,8 +133,8 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_ENUMVAL_EXISTS:
	case BPF_ENUMVAL_VALUE:
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return true;
	default:
		return false;
@ -150,7 +178,7 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
 * string to specify enumerator's value index that need to be relocated.
 */
static int bpf_core_parse_spec(const struct btf *btf,
static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       enum bpf_core_relo_kind relo_kind,
@ -272,8 +300,8 @@ static int bpf_core_parse_spec(const struct btf *btf,
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				type_id, spec_str, i, id, btf_kind_str(t));
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}
@ -346,8 +374,6 @@ static int bpf_core_fields_are_compat(const struct btf *local_btf,
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
			btf_kind(local_type), local_id, targ_id);
		return 0;
	}
}
@ -388,7 +414,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
		return 0;

	local_id = local_acc->type_id;
	local_type = btf__type_by_id(local_btf, local_id);
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

@ -571,7 +597,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,

	*field_sz = 0;

	if (relo->kind == BPF_FIELD_EXISTS) {
	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}
@ -580,11 +606,11 @@ static int bpf_core_calc_field_relo(const char *prog_name,
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf__type_by_id(spec->btf, acc->type_id);
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size */
			sz = btf__resolve_size(spec->btf, acc->type_id);
@ -592,7 +618,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
@ -644,36 +670,36 @@ static int bpf_core_calc_field_relo(const char *prog_name,
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_FIELD_SIGNED:
	case BPF_CORE_FIELD_SIGNED:
		/* enums will be assumed unsigned */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_FIELD_LSHIFT_U64:
#if __BYTE_ORDER == __LITTLE_ENDIAN
	case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_FIELD_RSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_FIELD_EXISTS:
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}
@ -683,10 +709,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,

static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u32 *val)
				   __u32 *val, bool *validate)
{
	__s64 sz;

	/* by default, always check expected value in bpf_insn */
	if (validate)
		*validate = true;

	/* type-based relos return zero when target type is not found */
	if (!spec) {
		*val = 0;
@ -694,20 +724,25 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
	}

	switch (relo->kind) {
	case BPF_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID, embedded in bpf_insn, might change during linking,
		 * so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_TYPE_EXISTS:
	case BPF_CORE_TYPE_EXISTS:
		*val = 1;
		break;
	case BPF_TYPE_SIZE:
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_TYPE_ID_LOCAL:
	/* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	case BPF_CORE_TYPE_ID_LOCAL:
	/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	default:
		return -EOPNOTSUPP;
	}
@ -723,13 +758,13 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
	const struct btf_enum *e;

	switch (relo->kind) {
	case BPF_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_EXISTS:
		*val = spec ? 1 : 0;
		break;
	case BPF_ENUMVAL_VALUE:
	case BPF_CORE_ENUMVAL_VALUE:
		if (!spec)
			return -EUCLEAN; /* request instruction poisoning */
		t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
		t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
		e = btf_enum(t) + spec->spec[0].idx;
		*val = e->val;
		break;
@ -805,8 +840,8 @@ static int bpf_core_calc_relo(const char *prog_name,
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
@ -835,8 +870,8 @@ static int bpf_core_calc_relo(const char *prog_name,
				res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
@ -1045,7 +1080,7 @@ static int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 */
static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
}
|
||||
}
|
||||
@ -77,12 +105,12 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
|
||||
static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
|
||||
{
|
||||
switch (kind) {
|
||||
case BPF_FIELD_BYTE_OFFSET:
|
||||
case BPF_FIELD_BYTE_SIZE:
|
||||
case BPF_FIELD_EXISTS:
|
||||
case BPF_FIELD_SIGNED:
|
||||
case BPF_FIELD_LSHIFT_U64:
|
||||
case BPF_FIELD_RSHIFT_U64:
|
||||
case BPF_CORE_FIELD_BYTE_OFFSET:
|
||||
case BPF_CORE_FIELD_BYTE_SIZE:
|
||||
case BPF_CORE_FIELD_EXISTS:
|
||||
case BPF_CORE_FIELD_SIGNED:
|
||||
case BPF_CORE_FIELD_LSHIFT_U64:
|
||||
case BPF_CORE_FIELD_RSHIFT_U64:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -92,10 +120,10 @@ static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
|
||||
static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
|
||||
{
|
||||
switch (kind) {
|
||||
case BPF_TYPE_ID_LOCAL:
|
||||
case BPF_TYPE_ID_TARGET:
|
||||
case BPF_TYPE_EXISTS:
|
||||
case BPF_TYPE_SIZE:
|
||||
case BPF_CORE_TYPE_ID_LOCAL:
|
||||
case BPF_CORE_TYPE_ID_TARGET:
|
||||
case BPF_CORE_TYPE_EXISTS:
|
||||
case BPF_CORE_TYPE_SIZE:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -105,8 +133,8 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
|
||||
static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
|
||||
{
|
||||
switch (kind) {
|
||||
case BPF_ENUMVAL_EXISTS:
|
||||
case BPF_ENUMVAL_VALUE:
|
||||
case BPF_CORE_ENUMVAL_EXISTS:
|
||||
case BPF_CORE_ENUMVAL_VALUE:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -150,7 +178,7 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
|
||||
* Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
|
||||
* string to specify enumerator's value index that need to be relocated.
|
||||
*/
|
||||
static int bpf_core_parse_spec(const struct btf *btf,
|
||||
static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
|
||||
__u32 type_id,
|
||||
const char *spec_str,
|
||||
enum bpf_core_relo_kind relo_kind,
|
||||
@ -272,8 +300,8 @@ static int bpf_core_parse_spec(const struct btf *btf,
|
||||
return sz;
|
||||
spec->bit_offset += access_idx * sz * 8;
|
||||
} else {
|
||||
pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
|
||||
type_id, spec_str, i, id, btf_kind_str(t));
|
||||
pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
|
||||
prog_name, type_id, spec_str, i, id, btf_kind_str(t));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -346,8 +374,6 @@ static int bpf_core_fields_are_compat(const struct btf *local_btf,
|
targ_id = btf_array(targ_type)->type;
goto recur;
default:
pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
btf_kind(local_type), local_id, targ_id);
return 0;
}
}
@ -388,7 +414,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
return 0;

local_id = local_acc->type_id;
local_type = btf__type_by_id(local_btf, local_id);
local_type = btf_type_by_id(local_btf, local_id);
local_member = btf_members(local_type) + local_acc->idx;
local_name = btf__name_by_offset(local_btf, local_member->name_off);

@ -571,7 +597,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,

*field_sz = 0;

if (relo->kind == BPF_FIELD_EXISTS) {
if (relo->kind == BPF_CORE_FIELD_EXISTS) {
*val = spec ? 1 : 0;
return 0;
}
@ -580,11 +606,11 @@ static int bpf_core_calc_field_relo(const char *prog_name,
return -EUCLEAN; /* request instruction poisoning */

acc = &spec->spec[spec->len - 1];
t = btf__type_by_id(spec->btf, acc->type_id);
t = btf_type_by_id(spec->btf, acc->type_id);

/* a[n] accessor needs special handling */
if (!acc->name) {
if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
*val = spec->bit_offset / 8;
/* remember field size for load/store mem size */
sz = btf__resolve_size(spec->btf, acc->type_id);
@ -592,7 +618,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
return -EINVAL;
*field_sz = sz;
*type_id = acc->type_id;
} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
sz = btf__resolve_size(spec->btf, acc->type_id);
if (sz < 0)
return -EINVAL;
@ -644,36 +670,36 @@ static int bpf_core_calc_field_relo(const char *prog_name,
*validate = !bitfield;

switch (relo->kind) {
case BPF_FIELD_BYTE_OFFSET:
case BPF_CORE_FIELD_BYTE_OFFSET:
*val = byte_off;
if (!bitfield) {
*field_sz = byte_sz;
*type_id = field_type_id;
}
break;
case BPF_FIELD_BYTE_SIZE:
case BPF_CORE_FIELD_BYTE_SIZE:
*val = byte_sz;
break;
case BPF_FIELD_SIGNED:
case BPF_CORE_FIELD_SIGNED:
/* enums will be assumed unsigned */
*val = btf_is_enum(mt) ||
(btf_int_encoding(mt) & BTF_INT_SIGNED);
if (validate)
*validate = true; /* signedness is never ambiguous */
break;
case BPF_FIELD_LSHIFT_U64:
#if __BYTE_ORDER == __LITTLE_ENDIAN
case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
break;
case BPF_FIELD_RSHIFT_U64:
case BPF_CORE_FIELD_RSHIFT_U64:
*val = 64 - bit_sz;
if (validate)
*validate = true; /* right shift is never ambiguous */
break;
case BPF_FIELD_EXISTS:
case BPF_CORE_FIELD_EXISTS:
default:
return -EOPNOTSUPP;
}
@ -683,10 +709,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,

static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
const struct bpf_core_spec *spec,
__u32 *val)
__u32 *val, bool *validate)
{
__s64 sz;

/* by default, always check expected value in bpf_insn */
if (validate)
*validate = true;

/* type-based relos return zero when target type is not found */
if (!spec) {
*val = 0;
@ -694,20 +724,25 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
}

switch (relo->kind) {
case BPF_TYPE_ID_TARGET:
case BPF_CORE_TYPE_ID_TARGET:
*val = spec->root_type_id;
/* type ID, embedded in bpf_insn, might change during linking,
* so enforcing it is pointless
*/
if (validate)
*validate = false;
break;
case BPF_TYPE_EXISTS:
case BPF_CORE_TYPE_EXISTS:
*val = 1;
break;
case BPF_TYPE_SIZE:
case BPF_CORE_TYPE_SIZE:
sz = btf__resolve_size(spec->btf, spec->root_type_id);
if (sz < 0)
return -EINVAL;
*val = sz;
break;
case BPF_TYPE_ID_LOCAL:
/* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
case BPF_CORE_TYPE_ID_LOCAL:
/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
default:
return -EOPNOTSUPP;
}
@ -723,13 +758,13 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
const struct btf_enum *e;

switch (relo->kind) {
case BPF_ENUMVAL_EXISTS:
case BPF_CORE_ENUMVAL_EXISTS:
*val = spec ? 1 : 0;
break;
case BPF_ENUMVAL_VALUE:
case BPF_CORE_ENUMVAL_VALUE:
if (!spec)
return -EUCLEAN; /* request instruction poisoning */
t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
e = btf_enum(t) + spec->spec[0].idx;
*val = e->val;
break;
@ -805,8 +840,8 @@ static int bpf_core_calc_relo(const char *prog_name,
if (res->orig_sz != res->new_sz) {
const struct btf_type *orig_t, *new_t;

orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

/* There are two use cases in which it's safe to
* adjust load/store's mem size:
@ -835,8 +870,8 @@ static int bpf_core_calc_relo(const char *prog_name,
res->fail_memsz_adjust = true;
}
} else if (core_relo_is_type_based(relo->kind)) {
err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
} else if (core_relo_is_enumval_based(relo->kind)) {
err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
@ -1045,7 +1080,7 @@ static int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
* [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
* where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
*/
static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
{
const struct btf_type *t;
const struct btf_enum *e;
@ -1054,7 +1089,7 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
int i;

type_id = spec->root_type_id;
t = btf__type_by_id(spec->btf, type_id);
t = btf_type_by_id(spec->btf, type_id);
s = btf__name_by_offset(spec->btf, t->name_off);

libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
@ -1147,9 +1182,12 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
const struct bpf_core_relo *relo,
int relo_idx,
const struct btf *local_btf,
struct bpf_core_cand_list *cands)
struct bpf_core_cand_list *cands,
struct bpf_core_spec *specs_scratch)
{
struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
struct bpf_core_spec *local_spec = &specs_scratch[0];
struct bpf_core_spec *cand_spec = &specs_scratch[1];
struct bpf_core_spec *targ_spec = &specs_scratch[2];
struct bpf_core_relo_res cand_res, targ_res;
const struct btf_type *local_type;
const char *local_name;
@ -1158,10 +1196,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
int i, j, err;

local_id = relo->type_id;
local_type = btf__type_by_id(local_btf, local_id);
if (!local_type)
return -EINVAL;

local_type = btf_type_by_id(local_btf, local_id);
local_name = btf__name_by_offset(local_btf, local_type->name_off);
if (!local_name)
return -EINVAL;
@ -1170,7 +1205,8 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
if (str_is_empty(spec_str))
return -EINVAL;

err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
relo->kind, local_spec);
if (err) {
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
prog_name, relo_idx, local_id, btf_kind_str(local_type),
@ -1181,15 +1217,17 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,

pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
relo_idx, core_relo_kind_str(relo->kind), relo->kind);
bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
libbpf_print(LIBBPF_DEBUG, "\n");

/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
if (relo->kind == BPF_TYPE_ID_LOCAL) {
targ_res.validate = true;
if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
/* bpf_insn's imm value could get out of sync during linking */
memset(&targ_res, 0, sizeof(targ_res));
targ_res.validate = false;
targ_res.poison = false;
targ_res.orig_val = local_spec.root_type_id;
targ_res.new_val = local_spec.root_type_id;
targ_res.orig_val = local_spec->root_type_id;
targ_res.new_val = local_spec->root_type_id;
goto patch_insn;
}

@ -1200,40 +1238,39 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
return -EOPNOTSUPP;
}

for (i = 0, j = 0; i < cands->len; i++) {
err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
cands->cands[i].id, &cand_spec);
err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
cands->cands[i].id, cand_spec);
if (err < 0) {
pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
prog_name, relo_idx, i);
bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
libbpf_print(LIBBPF_WARN, ": %d\n", err);
return err;
}

pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
relo_idx, err == 0 ? "non-matching" : "matching", i);
bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
libbpf_print(LIBBPF_DEBUG, "\n");

if (err == 0)
continue;

err = bpf_core_calc_relo(prog_name, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
if (err)
return err;

if (j == 0) {
targ_res = cand_res;
targ_spec = cand_spec;
} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
*targ_spec = *cand_spec;
} else if (cand_spec->bit_offset != targ_spec->bit_offset) {
/* if there are many field relo candidates, they
* should all resolve to the same bit offset
*/
pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
prog_name, relo_idx, cand_spec.bit_offset,
targ_spec.bit_offset);
prog_name, relo_idx, cand_spec->bit_offset,
targ_spec->bit_offset);
return -EINVAL;
} else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
/* all candidates should result in the same relocation
@ -1251,7 +1288,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
}

/*
* For BPF_FIELD_EXISTS relo or when used BPF program has field
* For BPF_CORE_FIELD_EXISTS relo or when used BPF program has field
* existence checks or kernel version/config checks, it's expected
* that we might not find any candidates. In this case, if field
* wasn't found in any candidate, the list of candidates shouldn't
@ -1277,7 +1314,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
prog_name, relo_idx);

/* calculate single target relo result explicitly */
err = bpf_core_calc_relo(prog_name, relo, relo_idx, &local_spec, NULL, &targ_res);
err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, &targ_res);
if (err)
return err;
}
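The LSHIFT_U64/RSHIFT_U64 arithmetic above is easiest to check on a concrete bitfield. A minimal little-endian sketch (standalone C; the struct, field sizes and offsets are made up for illustration, not taken from the patch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sample {
	unsigned int a:3;	/* bits 0-2 */
	unsigned int b:5;	/* bits 3-7: bit_off = 3, bit_sz = 5 */
};

int main(void)
{
	struct sample s = { .a = 5, .b = 19 };
	uint64_t v = 0, byte_off = 0, bit_off = 3, bit_sz = 5;
	unsigned int lshift, rshift;

	/* the 8-byte load that the relocated instruction performs */
	memcpy(&v, &s, sizeof(s));
	lshift = 64 - (bit_off + bit_sz - byte_off * 8);	/* 56 on little-endian */
	rshift = 64 - bit_sz;					/* 59 */
	printf("b = %llu\n", (unsigned long long)((v << lshift) >> rshift));	/* prints 19 */
	return 0;
}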
@ -4,81 +4,10 @
#ifndef __RELO_CORE_H
#define __RELO_CORE_H

/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
* has to be adjusted by relocations.
*/
enum bpf_core_relo_kind {
BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */
BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
BPF_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */
BPF_TYPE_EXISTS = 8, /* type existence in target kernel */
BPF_TYPE_SIZE = 9, /* type size in bytes */
BPF_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
BPF_ENUMVAL_VALUE = 11, /* enum value integer value */
};

/* The minimum bpf_core_relo checked by the loader
*
* CO-RE relocation captures the following data:
* - insn_off - instruction offset (in bytes) within a BPF program that needs
* its insn->imm field to be relocated with actual field info;
* - type_id - BTF type ID of the "root" (containing) entity of a relocatable
* type or field;
* - access_str_off - offset into corresponding .BTF string section. String
* interpretation depends on specific relocation kind:
* - for field-based relocations, string encodes an accessed field using
* a sequence of field and array indices, separated by colon (:). It's
* conceptually very close to LLVM's getelementptr ([0]) instruction's
* arguments for identifying offset to a field.
* - for type-based relocations, strings is expected to be just "0";
* - for enum value-based relocations, string contains an index of enum
* value within its enum type;
*
* Example to provide a better feel.
*
* struct sample {
* int a;
* struct {
* int b[10];
* };
* };
*
* struct sample *s = ...;
* int x = &s->a; // encoded as "0:0" (a is field #0)
* int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
* // b is field #0 inside anon struct, accessing elem #5)
* int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
*
* type_id for all relocs in this example will capture BTF type id of
* `struct sample`.
*
* Such relocation is emitted when using __builtin_preserve_access_index()
* Clang built-in, passing expression that captures field address, e.g.:
*
* bpf_probe_read(&dst, sizeof(dst),
* __builtin_preserve_access_index(&src->a.b.c));
*
* In this case Clang will emit field relocation recording necessary data to
* be able to find offset of embedded `a.b.c` field within `src` struct.
*
* [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
*/
struct bpf_core_relo {
__u32 insn_off;
__u32 type_id;
__u32 access_str_off;
enum bpf_core_relo_kind kind;
};
#include <linux/bpf.h>

struct bpf_core_cand {
const struct btf *btf;
const struct btf_type *t;
const char *name;
__u32 id;
};

@ -88,11 +17,39 @@ struct bpf_core_cand_list {
int len;
};

#define BPF_CORE_SPEC_MAX_LEN 64

/* represents BPF CO-RE field or array element accessor */
struct bpf_core_accessor {
__u32 type_id; /* struct/union type or array element type */
__u32 idx; /* field index or array index */
const char *name; /* field name or NULL for array accessor */
};

struct bpf_core_spec {
const struct btf *btf;
/* high-level spec: named fields and array indices only */
struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
/* original unresolved (no skip_mods_or_typedefs) root type ID */
__u32 root_type_id;
/* CO-RE relocation kind */
enum bpf_core_relo_kind relo_kind;
/* high-level spec length */
int len;
/* raw, low-level spec: 1-to-1 with accessor spec string */
int raw_spec[BPF_CORE_SPEC_MAX_LEN];
/* raw spec length */
int raw_len;
/* field bit offset represented by spec */
__u32 bit_offset;
};

int bpf_core_apply_relo_insn(const char *prog_name,
struct bpf_insn *insn, int insn_idx,
const struct bpf_core_relo *relo, int relo_idx,
const struct btf *local_btf,
struct bpf_core_cand_list *cands);
struct bpf_core_cand_list *cands,
struct bpf_core_spec *specs_scratch);
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
const struct btf *targ_btf, __u32 targ_id);
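The colon-separated access string documented in the removed comment ("0:1:0:5" and friends) maps straight onto the raw_spec[] array of struct bpf_core_spec. A hedged sketch of that split, much simplified next to libbpf's bpf_core_parse_spec(), which additionally resolves every index against BTF:

#include <stdio.h>
#include <stdlib.h>

#define SPEC_MAX_LEN 64	/* mirrors BPF_CORE_SPEC_MAX_LEN above */

static int parse_access_str(const char *s, int *raw_spec)
{
	int raw_len = 0;
	char *end;

	while (*s) {
		if (*s == ':')
			s++;
		if (raw_len == SPEC_MAX_LEN)
			return -1;
		raw_spec[raw_len] = (int)strtol(s, &end, 10);
		if (end == s)
			return -1;	/* not a number */
		raw_len++;
		s = end;
	}
	return raw_len;
}

int main(void)
{
	int raw_spec[SPEC_MAX_LEN];
	int i, n = parse_access_str("0:1:0:5", raw_spec);

	for (i = 0; i < n; i++)
		printf("%d ", raw_spec[i]);	/* prints: 0 1 0 5 */
	printf("\n");
	return 0;
}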
@ -7,6 +7,16 @
#include <sys/syscall.h>
#include <sys/mman.h>

#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
# define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
# define __NR_bpf 5315
# endif
#endif

/* This file is a base header for auto-generated *.lskel.h files.
* Its contents will change and may become part of auto-generation in the future.
*
@ -65,8 +75,7 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr;

map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4,
opts->data_sz, 1, 0);
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, NULL);
if (map_fd < 0) {
opts->errstr = "failed to create loader map";
err = -errno;
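For readers tracking the API swap in this hunk: bpf_create_map_name() packed the flags into its last argument, while bpf_map_create() takes an opts pointer (NULL for defaults). A minimal sketch of the new-style call, mirroring the map created above:

#include <bpf/bpf.h>

/* key size 4, value size data_sz, one entry, default options */
int create_loader_map(unsigned int data_sz)
{
	return bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map",
			      4, data_sz, 1, NULL);
}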
@ -35,6 +35,11 @
#include "libbpf_internal.h"
#include "xsk.h"

/* entire xsk.h and xsk.c is going away in libbpf 1.0, so ignore all internal
* uses of deprecated APIs
*/
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif
@ -281,6 +286,7 @@ static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
return err;
}

DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
__u64 size, struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
@ -299,7 +305,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
if (!umem)
return -ENOMEM;

umem->fd = socket(AF_XDP, SOCK_RAW, 0);
umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (umem->fd < 0) {
err = -errno;
goto out_umem_alloc;
@ -345,6 +351,7 @@ struct xsk_umem_config_v1 {
__u32 frame_headroom;
};

COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
__u64 size, struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
@ -358,14 +365,10 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
&config);
}
COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)

static enum xsk_prog get_xsk_prog(void)
{
enum xsk_prog detected = XSK_PROG_FALLBACK;
struct bpf_load_program_attr prog_attr;
struct bpf_create_map_attr map_attr;
__u32 size_out, retval, duration;
char data_in = 0, data_out;
struct bpf_insn insns[] = {
@ -375,27 +378,15 @@ static enum xsk_prog get_xsk_prog(void)
BPF_EMIT_CALL(BPF_FUNC_redirect_map),
BPF_EXIT_INSN(),
};
int prog_fd, map_fd, ret;
int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);

memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
map_attr.key_size = sizeof(int);
map_attr.value_size = sizeof(int);
map_attr.max_entries = 1;

map_fd = bpf_create_map_xattr(&map_attr);
map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
if (map_fd < 0)
return detected;

insns[0].imm = map_fd;

memset(&prog_attr, 0, sizeof(prog_attr));
prog_attr.prog_type = BPF_PROG_TYPE_XDP;
prog_attr.insns = insns;
prog_attr.insns_cnt = ARRAY_SIZE(insns);
prog_attr.license = "GPL";

prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
if (prog_fd < 0) {
close(map_fd);
return detected;
@ -495,10 +486,13 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
};
struct bpf_insn *progs[] = {prog, prog_redirect_flags};
enum xsk_prog option = get_xsk_prog();
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_buf = log_buf,
.log_size = log_buf_size,
);

prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
"LGPL-2.1 or BSD-2-Clause", 0, log_buf,
log_buf_size);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause",
progs[option], insns_cnt[option], &opts);
if (prog_fd < 0) {
pr_warn("BPF log buffer:\n%s", log_buf);
return prog_fd;
@ -549,13 +543,12 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
struct ifreq ifr = {};
int fd, err, ret;

fd = socket(AF_LOCAL, SOCK_DGRAM, 0);
fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (fd < 0)
return -errno;

ifr.ifr_data = (void *)&channels;
memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
ifr.ifr_name[IFNAMSIZ - 1] = '\0';
libbpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err && errno != EOPNOTSUPP) {
ret = -errno;
@ -590,8 +583,8 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
if (max_queues < 0)
return max_queues;

fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
sizeof(int), sizeof(int), max_queues, 0);
fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
sizeof(int), sizeof(int), max_queues, NULL);
if (fd < 0)
return fd;

@ -725,14 +718,12 @@ static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)

static bool xsk_probe_bpf_link(void)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
.flags = XDP_FLAGS_SKB_MODE);
struct bpf_load_program_attr prog_attr;
LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE);
struct bpf_insn insns[2] = {
BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
BPF_EXIT_INSN()
};
int prog_fd, link_fd = -1;
int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns);
int ifindex_lo = 1;
bool ret = false;
int err;
@ -744,13 +735,7 @@ static bool xsk_probe_bpf_link(void)
if (link_fd >= 0)
return true;

memset(&prog_attr, 0, sizeof(prog_attr));
prog_attr.prog_type = BPF_PROG_TYPE_XDP;
prog_attr.insns = insns;
prog_attr.insns_cnt = ARRAY_SIZE(insns);
prog_attr.license = "GPL";

prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
if (prog_fd < 0)
return ret;

@ -782,8 +767,7 @@ static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
}

ctx->ifindex = ifindex;
memcpy(ctx->ifname, ifname, IFNAMSIZ -1);
ctx->ifname[IFNAMSIZ - 1] = 0;
libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);

xsk->ctx = ctx;
xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
@ -965,8 +949,7 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
ctx->refcount = 1;
ctx->umem = umem;
ctx->queue_id = queue_id;
memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
ctx->ifname[IFNAMSIZ - 1] = '\0';
libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);

ctx->fill = fill;
ctx->comp = comp;
@ -1046,7 +1029,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}

if (umem->refcount++ > 0) {
xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (xsk->fd < 0) {
err = -errno;
goto out_xsk_alloc;
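The recurring pattern in this file is the bpf_load_program()/bpf_load_program_xattr() to bpf_prog_load() migration: type, name, license and instructions become positional arguments, and everything else moves into bpf_prog_load_opts (or NULL). A self-contained sketch of the same "return XDP_PASS" probe program, spelled with raw uapi instruction encodings so no internal macros are needed:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int load_xdp_pass_prog(void)
{
	struct bpf_insn insns[] = {
		/* r0 = XDP_PASS */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = XDP_PASS },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};

	return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), NULL);
}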
@ -23,6 +23,12 @
extern "C" {
#endif

/* This whole API has been deprecated and moved to libxdp that can be found at
* https://github.com/xdp-project/xdp-tools. The APIs are exactly the same so
* it should just be linking with libxdp instead of libbpf for this set of
* functionality. If not, please submit a bug report on the aforementioned page.
*/

/* Load-Acquire Store-Release barriers used by the XDP socket
* library. The following macros should *NOT* be considered part of
* the xsk.h API, and is subject to change anytime.
@ -245,8 +251,10 @@ static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}

LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
@ -263,10 +271,10 @@ struct xsk_umem_config {
__u32 flags;
};

LIBBPF_API int xsk_setup_xdp_prog(int ifindex,
int *xsks_map_fd);
LIBBPF_API int xsk_socket__update_xskmap(struct xsk_socket *xsk,
int xsks_map_fd);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
@ -280,40 +288,46 @@ struct xsk_socket_config {
};

/* Set config to NULL to get the default configuration. */
LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
const char *ifname, __u32 queue_id,
struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
const struct xsk_socket_config *config);
LIBBPF_API int
xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
const char *ifname,
__u32 queue_id, struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_socket_config *config);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__create(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__create(struct xsk_socket **xsk,
const char *ifname, __u32 queue_id,
struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
const struct xsk_socket_config *config);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
const char *ifname,
__u32 queue_id, struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_socket_config *config);

/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
void xsk_socket__delete(struct xsk_socket *xsk);

#ifdef __cplusplus
} /* extern "C" */
@ -96,6 +96,26 @@ unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
}
#endif

#ifndef find_first_and_bit
/*
* Find the first set bit in two memory regions.
*/
unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
unsigned long idx, val;

for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
val = addr1[idx] & addr2[idx];
if (val)
return min(idx * BITS_PER_LONG + __ffs(val), size);
}

return size;
}
#endif

#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
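A quick sanity check of the semantics added here: the result is the first bit position set in both masks, or size when the intersection is empty. A userspace toy with __ffs() open-coded via a compiler builtin (assumes GCC/Clang):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long first_and_bit(const unsigned long *a,
				   const unsigned long *b, unsigned long size)
{
	unsigned long idx, val;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		val = a[idx] & b[idx];
		if (val) {
			unsigned long bit = idx * BITS_PER_LONG + __builtin_ctzl(val);
			return bit < size ? bit : size;	/* clamp, like min() */
		}
	}
	return size;
}

int main(void)
{
	unsigned long a[1] = { 0x16 };	/* bits 1, 2, 4 */
	unsigned long b[1] = { 0x18 };	/* bits 3, 4 */

	printf("%lu\n", first_and_bit(a, b, BITS_PER_LONG));	/* prints 4 */
	return 0;
}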
@ -48,6 +48,7 @@ SYNOPSIS
int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
bool perf_cpu_map__empty(const struct perf_cpu_map *map);
int perf_cpu_map__max(struct perf_cpu_map *map);
bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);

#define perf_cpu_map__for_each_cpu(cpu, idx, cpus)
--
@ -135,16 +136,16 @@ SYNOPSIS
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel);
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
void perf_evsel__munmap(struct perf_evsel *evsel);
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread);
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count);
int perf_evsel__enable(struct perf_evsel *evsel);
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
int perf_evsel__disable(struct perf_evsel *evsel);
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
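The renamed cpu_map_idx parameters read naturally in a counting loop. A hedged usage sketch (error handling trimmed; exact option plumbing may differ between libperf versions):

#include <perf/evsel.h>
#include <perf/threadmap.h>
#include <linux/perf_event.h>
#include <stdio.h>

int count_task_clock(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_TASK_CLOCK,
	};
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	struct perf_evsel *evsel = perf_evsel__new(&attr);
	struct perf_counts_values counts = {};

	if (!threads || !evsel)
		return -1;
	perf_thread_map__set_pid(threads, 0, 0);	/* 0 = current process */
	if (perf_evsel__open(evsel, NULL, threads))	/* NULL cpus -> dummy map */
		return -1;
	/* arguments are (evsel, cpu_map_idx, thread, counts) after this change */
	perf_evsel__read(evsel, 0, 0, &counts);
	printf("task-clock: %llu\n", (unsigned long long)counts.val);
	perf_evsel__close(evsel);
	return 0;
}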
@ -10,15 +10,24 @
#include <ctype.h>
#include <limits.h>

struct perf_cpu_map *perf_cpu_map__dummy_new(void)
static struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);

if (cpus != NULL) {
cpus->nr = 1;
cpus->map[0] = -1;
cpus->nr = nr_cpus;
refcount_set(&cpus->refcnt, 1);

}
return cpus;
}

struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

if (cpus)
cpus->map[0].cpu = -1;

return cpus;
}
@ -54,15 +63,12 @@ static struct perf_cpu_map *cpu_map__default_new(void)
if (nr_cpus < 0)
return NULL;

cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
int i;

for (i = 0; i < nr_cpus; ++i)
cpus->map[i] = i;

cpus->nr = nr_cpus;
refcount_set(&cpus->refcnt, 1);
cpus->map[i].cpu = i;
}

return cpus;
@ -73,31 +79,32 @@ struct perf_cpu_map *perf_cpu_map__default_new(void)
return cpu_map__default_new();
}

static int cmp_int(const void *a, const void *b)

static int cmp_cpu(const void *a, const void *b)
{
return *(const int *)a - *(const int*)b;
const struct perf_cpu *cpu_a = a, *cpu_b = b;

return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(int);
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
int i, j;

if (cpus != NULL) {
memcpy(cpus->map, tmp_cpus, payload_size);
qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
qsort(cpus->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
/* Remove dups */
j = 0;
for (i = 0; i < nr_cpus; i++) {
if (i == 0 || cpus->map[i] != cpus->map[i - 1])
cpus->map[j++] = cpus->map[i];
if (i == 0 || cpus->map[i].cpu != cpus->map[i - 1].cpu)
cpus->map[j++].cpu = cpus->map[i].cpu;
}
cpus->nr = j;
assert(j <= nr_cpus);
refcount_set(&cpus->refcnt, 1);
}

return cpus;
}

@ -105,7 +112,7 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
struct perf_cpu_map *cpus = NULL;
int nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;
int n, cpu, prev;
char sep;
@ -124,24 +131,24 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)

if (new_max >= max_entries) {
max_entries = new_max + MAX_NR_CPUS / 2;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}

while (++prev < cpu)
tmp_cpus[nr_cpus++] = prev;
tmp_cpus[nr_cpus++].cpu = prev;
}
if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}

tmp_cpus[nr_cpus++] = cpu;
tmp_cpus[nr_cpus++].cpu = cpu;
if (n == 2 && sep == '-')
prev = cpu;
else
@ -179,7 +186,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;

if (!cpu_list)
@ -220,17 +227,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
for (i = 0; i < nr_cpus; i++)
if (tmp_cpus[i] == (int)start_cpu)
if (tmp_cpus[i].cpu == (int)start_cpu)
goto invalid;

if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto invalid;
tmp_cpus = tmp;
}
tmp_cpus[nr_cpus++] = (int)start_cpu;
tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
}
if (*p)
++p;
@ -250,12 +257,16 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
return cpus;
}

int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
struct perf_cpu result = {
.cpu = -1
};

if (cpus && idx < cpus->nr)
return cpus->map[idx];

return -1;
return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
@ -265,25 +276,47 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)

bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
return map ? map->map[0] == -1 : true;
return map ? map->map[0].cpu == -1 : true;
}

int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
int i;
int low, high;

for (i = 0; i < cpus->nr; ++i) {
if (cpus->map[i] == cpu)
return i;
if (!cpus)
return -1;

low = 0;
high = cpus->nr;
while (low < high) {
int idx = (low + high) / 2;
struct perf_cpu cpu_at_idx = cpus->map[idx];

if (cpu_at_idx.cpu == cpu.cpu)
return idx;

if (cpu_at_idx.cpu > cpu.cpu)
high = idx;
else
low = idx + 1;
}

return -1;
}

int perf_cpu_map__max(struct perf_cpu_map *map)
bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
return perf_cpu_map__idx(cpus, cpu) != -1;
}

struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
{
struct perf_cpu result = {
.cpu = -1
};

// cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
return map->nr > 0 ? map->map[map->nr - 1] : -1;
return map->nr > 0 ? map->map[map->nr - 1] : result;
}

/*
@ -297,7 +330,7 @@ int perf_cpu_map__max(struct perf_cpu_map *map)
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
int *tmp_cpus;
struct perf_cpu *tmp_cpus;
int tmp_len;
int i, j, k;
struct perf_cpu_map *merged;
@ -311,19 +344,19 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
if (!other)
return orig;
if (orig->nr == other->nr &&
!memcmp(orig->map, other->map, orig->nr * sizeof(int)))
!memcmp(orig->map, other->map, orig->nr * sizeof(struct perf_cpu)))
return orig;

tmp_len = orig->nr + other->nr;
tmp_cpus = malloc(tmp_len * sizeof(int));
tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
return NULL;

/* Standard merge algorithm from wikipedia */
i = j = k = 0;
while (i < orig->nr && j < other->nr) {
if (orig->map[i] <= other->map[j]) {
if (orig->map[i] == other->map[j])
if (orig->map[i].cpu <= other->map[j].cpu) {
if (orig->map[i].cpu == other->map[j].cpu)
j++;
tmp_cpus[k++] = orig->map[i++];
} else
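The linear scan in perf_cpu_map__idx() becomes a binary search, which is valid because cpu_map__trim_new() and cpu_map__default_new() both keep the map sorted and deduplicated. The same lookup over a plain sorted array, as a standalone check:

#include <stdio.h>

static int idx_of(const int *map, int nr, int cpu)
{
	int low = 0, high = nr;

	while (low < high) {
		int idx = (low + high) / 2;

		if (map[idx] == cpu)
			return idx;
		if (map[idx] > cpu)
			high = idx;
		else
			low = idx + 1;
	}
	return -1;	/* cpu not in map */
}

int main(void)
{
	int map[] = { 0, 1, 2, 3, 6 };	/* sorted, no duplicates */

	printf("%d %d\n", idx_of(map, 5, 6), idx_of(map, 5, 4));	/* prints: 4 -1 */
	return 0;
}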
@ -407,7 +407,7 @@ perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
int output, int cpu)
int output, struct perf_cpu cpu)
{
return perf_mmap__mmap(map, mp, output, cpu);
}
@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
int thread, int *_output, int *_output_overwrite)
{
int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;

@ -643,14 +643,14 @@ perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

void __perf_evlist__set_leader(struct list_head *list)
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
struct perf_evsel *evsel, *leader;
struct perf_evsel *first, *last, *evsel;

leader = list_entry(list->next, struct perf_evsel, node);
evsel = list_entry(list->prev, struct perf_evsel, node);
first = list_first_entry(list, struct perf_evsel, node);
last = list_last_entry(list, struct perf_evsel, node);

leader->nr_members = evsel->idx - leader->idx + 1;
leader->nr_members = last->idx - first->idx + 1;

__perf_evlist__for_each_entry(list, evsel)
evsel->leader = leader;
@ -659,7 +659,10 @@ void __perf_evlist__set_leader(struct list_head *list)
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
if (evlist->nr_entries) {
struct perf_evsel *first = list_entry(evlist->entries.next,
struct perf_evsel, node);

evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
__perf_evlist__set_leader(&evlist->entries);
__perf_evlist__set_leader(&evlist->entries, first);
}
}
@ -43,18 +43,22 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}

#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
#define FD(_evsel, _cpu_map_idx, _thread) \
((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
#define MMAP(_evsel, _cpu_map_idx, _thread) \
(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
: NULL)

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

if (evsel->fd) {
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
int idx, thread;

for (idx = 0; idx < ncpus; idx++) {
for (thread = 0; thread < nthreads; thread++) {
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, idx, thread);

if (fd)
*fd = -1;
@ -74,13 +78,13 @@ static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthre

static int
sys_perf_event_open(struct perf_event_attr *attr,
pid_t pid, int cpu, int group_fd,
pid_t pid, struct perf_cpu cpu, int group_fd,
unsigned long flags)
{
return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
{
struct perf_evsel *leader = evsel->leader;
int *fd;
@ -97,7 +101,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
if (!leader->fd)
return -ENOTCONN;

fd = FD(leader, cpu, thread);
fd = FD(leader, cpu_map_idx, thread);
if (fd == NULL || *fd == -1)
return -EBADF;

@ -109,7 +113,8 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
int cpu, thread, err = 0;
struct perf_cpu cpu;
int idx, thread, err = 0;

if (cpus == NULL) {
static struct perf_cpu_map *empty_cpu_map;
@ -136,24 +141,24 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
}

if (evsel->fd == NULL &&
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
return -ENOMEM;

for (cpu = 0; cpu < cpus->nr; cpu++) {
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
for (thread = 0; thread < threads->nr; thread++) {
int fd, group_fd, *evsel_fd;

evsel_fd = FD(evsel, cpu, thread);
evsel_fd = FD(evsel, idx, thread);
if (evsel_fd == NULL)
return -EINVAL;

err = get_group_fd(evsel, cpu, thread, &group_fd);
err = get_group_fd(evsel, idx, thread, &group_fd);
if (err < 0)
return err;

fd = sys_perf_event_open(&evsel->attr,
threads->map[thread].pid,
cpus->map[cpu], group_fd, 0);
cpu, group_fd, 0);

if (fd < 0)
return -errno;
@ -165,12 +170,12 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
return err;
}

static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
int thread;

for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, cpu_map_idx, thread);

if (fd && *fd >= 0) {
close(*fd);
@ -181,10 +186,8 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
int cpu;

for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
perf_evsel__close_fd_cpu(evsel, cpu);
for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
perf_evsel__close_fd_cpu(evsel, idx);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
@ -202,29 +205,29 @@ void perf_evsel__close(struct perf_evsel *evsel)
perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
if (evsel->fd == NULL)
return;

perf_evsel__close_fd_cpu(evsel, cpu);
perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
}

void perf_evsel__munmap(struct perf_evsel *evsel)
{
int cpu, thread;
int idx, thread;

if (evsel->fd == NULL || evsel->mmap == NULL)
return;

for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, idx, thread);

if (fd == NULL || *fd < 0)
continue;

perf_mmap__munmap(MMAP(evsel, cpu, thread));
perf_mmap__munmap(MMAP(evsel, idx, thread));
}
}

@ -234,7 +237,7 @@ void perf_evsel__munmap(struct perf_evsel *evsel)

int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
int ret, cpu, thread;
int ret, idx, thread;
struct perf_mmap_param mp = {
.prot = PROT_READ | PROT_WRITE,
.mask = (pages * page_size) - 1,
@ -246,15 +249,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
return -ENOMEM;

for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, idx, thread);
struct perf_mmap *map;
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

if (fd == NULL || *fd < 0)
continue;

map = MMAP(evsel, cpu, thread);
map = MMAP(evsel, idx, thread);
perf_mmap__init(map, NULL, false, NULL);

ret = perf_mmap__mmap(map, &mp, *fd, cpu);
@ -268,14 +272,14 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
return 0;
}

void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
{
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, cpu_map_idx, thread);

if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
return NULL;

return MMAP(evsel, cpu, thread)->base;
return MMAP(evsel, cpu_map_idx, thread)->base;
}

int perf_evsel__read_size(struct perf_evsel *evsel)
@ -303,19 +307,19 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
return size;
}

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, cpu_map_idx, thread);

memset(count, 0, sizeof(*count));

if (fd == NULL || *fd < 0)
return -EINVAL;

if (MMAP(evsel, cpu, thread) &&
!perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
if (MMAP(evsel, cpu_map_idx, thread) &&
!perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
return 0;

if (readn(*fd, count->values, size) <= 0)
@ -326,13 +330,13 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,

static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int ioc, void *arg,
int cpu)
int cpu_map_idx)
{
int thread;

for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int err;
int *fd = FD(evsel, cpu, thread);
int *fd = FD(evsel, cpu_map_idx, thread);

if (fd == NULL || *fd < 0)
return -1;
@ -346,9 +350,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
return 0;
}

int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}

int perf_evsel__enable(struct perf_evsel *evsel)
@ -361,9 +365,9 @@ int perf_evsel__enable(struct perf_evsel *evsel)
return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}

int perf_evsel__disable(struct perf_evsel *evsel)
@ -380,7 +384,7 @@ int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
int err = 0, i;

for (i = 0; i < evsel->cpus->nr && !err; i++)
for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
err = perf_evsel__run_ioctl(evsel,
PERF_EVENT_IOC_SET_FILTER,
(void *)filter, i);
@ -431,3 +435,22 @@ void perf_evsel__free_id(struct perf_evsel *evsel)
zfree(&evsel->id);
evsel->ids = 0;
}

void perf_counts_values__scale(struct perf_counts_values *count,
bool scale, __s8 *pscaled)
{
s8 scaled = 0;

if (scale) {
if (count->run == 0) {
scaled = -1;
count->val = 0;
} else if (count->run < count->ena) {
scaled = 1;
count->val = (u64)((double)count->val * count->ena / count->run);
}
}

if (pscaled)
*pscaled = scaled;
}
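perf_counts_values__scale() extrapolates a multiplexed counter: if the event was scheduled in for only part of the time it was enabled, val is scaled up by ena/run. The arithmetic with toy numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* counted 1000 while running 50 of the 100 units it was enabled */
	uint64_t val = 1000, ena = 100, run = 50;

	if (run == 0)
		val = 0;	/* never ran: report 0 (scaled = -1 above) */
	else if (run < ena)
		val = (uint64_t)((double)val * ena / run);	/* scaled = 1 */
	printf("%llu\n", (unsigned long long)val);	/* prints 2000 */
	return 0;
}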
@ -3,17 +3,27 @
#define __LIBPERF_INTERNAL_CPUMAP_H

#include <linux/refcount.h>
#include <perf/cpumap.h>

/**
* A sized, reference counted, sorted array of integers representing CPU
* numbers. This is commonly used to capture which CPUs a PMU is associated
* with. The indices into the cpumap are frequently used as they avoid having
* gaps if CPU numbers were used. For events associated with a pid, rather than
* a CPU, a single dummy map with an entry of -1 is used.
*/
struct perf_cpu_map {
refcount_t refcnt;
/** Length of the map array. */
int nr;
int map[];
/** The CPU values. */
struct perf_cpu map[];
};

#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 2048
#endif

int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu);
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);

#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
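perf_cpu_map ends in a flexible array member, and the new perf_cpu_map__alloc() centralizes the header-plus-payload allocation this implies. An illustrative standalone sketch (simplified stand-in types, not libperf's own):

#include <stdlib.h>

struct cpu { int cpu; };

struct cpu_map {
	int refcnt;		/* stand-in for refcount_t */
	int nr;			/* length of map[] */
	struct cpu map[];	/* flexible array member */
};

static struct cpu_map *cpu_map_alloc(int nr)
{
	/* one allocation covers the header and all nr trailing entries */
	struct cpu_map *m = malloc(sizeof(*m) + sizeof(struct cpu) * nr);

	if (m) {
		m->refcnt = 1;
		m->nr = nr;
	}
	return m;
}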
@ -4,6 +4,7 @

#include <linux/list.h>
#include <api/fd/array.h>
#include <internal/cpumap.h>
#include <internal/evsel.h>

#define PERF_EVLIST__HLIST_BITS 8
@ -36,7 +37,7 @@ typedef void
typedef struct perf_mmap*
(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
typedef int
(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, struct perf_cpu);

struct perf_evlist_mmap_ops {
perf_evlist_mmap__cb_idx_t idx;
@ -127,5 +128,5 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,

void perf_evlist__reset_id_hash(struct perf_evlist *evlist);

void __perf_evlist__set_leader(struct list_head *list);
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader);
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
@ -6,8 +6,8 @
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>
#include <internal/cpumap.h>

struct perf_cpu_map;
struct perf_thread_map;
struct xyarray;

@ -27,7 +27,7 @@ struct perf_sample_id {
* queue number.
*/
int idx;
int cpu;
struct perf_cpu cpu;
pid_t tid;

/* Holds total ID period value for PERF_SAMPLE_READ processing. */
@ -6,6 +6,7 @
#include <linux/refcount.h>
#include <linux/types.h>
#include <stdbool.h>
#include <internal/cpumap.h>

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
@ -24,7 +25,7 @@ struct perf_mmap {
void *base;
int mask;
int fd;
int cpu;
struct perf_cpu cpu;
refcount_t refcnt;
u64 prev;
u64 start;
@ -46,7 +47,7 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map);
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb);
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
int fd, int cpu);
int fd, struct perf_cpu cpu);
void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
@ -3,10 +3,14 @
#define __LIBPERF_CPUMAP_H

#include <perf/core.h>
#include <perf/cpumap.h>
#include <stdio.h>
#include <stdbool.h>

struct perf_cpu_map;
/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
struct perf_cpu {
int cpu;
};

LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
@ -16,10 +20,11 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map);
LIBPERF_API struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map);
LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);

#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
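A short usage sketch of the reworked API, iterating the wrapper type with the macro declared above (assumes a libperf that carries these declarations):

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3,6");
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return 1;
	/* each iteration yields the index and the wrapped CPU number */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);
	perf_cpu_map__put(cpus);
	return 0;
}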
@ -289,6 +289,11 @@ struct perf_record_itrace_start {
__u32 tid;
};

struct perf_record_aux_output_hw_id {
struct perf_event_header header;
__u64 hw_id;
};

struct perf_record_thread_map_entry {
__u64 pid;
char comm[16];
@ -414,6 +419,7 @@ union perf_event {
struct perf_record_auxtrace_error auxtrace_error;
struct perf_record_aux aux;
struct perf_record_itrace_start itrace_start;
struct perf_record_aux_output_hw_id aux_output_hw_id;
struct perf_record_switch context_switch;
struct perf_record_thread_map thread_map;
struct perf_record_cpu_map cpu_map;