
VFS new thread (#11187)

Split the process thread into two different threads.
This commit is contained in:
thiagoftsm
2021-05-28 18:57:59 +00:00
committed by GitHub
parent 4561d6a5a9
commit 0b09b61997
18 changed files with 1245 additions and 394 deletions

View File

@@ -498,6 +498,8 @@ set(EBPF_PROCESS_PLUGIN_FILES
collectors/ebpf.plugin/ebpf_sync.h
collectors/ebpf.plugin/ebpf_swap.c
collectors/ebpf.plugin/ebpf_swap.h
collectors/ebpf.plugin/ebpf_vfs.c
collectors/ebpf.plugin/ebpf_vfs.h
collectors/ebpf.plugin/ebpf_apps.c
collectors/ebpf.plugin/ebpf_apps.h
)

View File

@@ -297,6 +297,8 @@ EBPF_PLUGIN_FILES = \
collectors/ebpf.plugin/ebpf_sync.h \
collectors/ebpf.plugin/ebpf_swap.c \
collectors/ebpf.plugin/ebpf_swap.h \
collectors/ebpf.plugin/ebpf_vfs.c \
collectors/ebpf.plugin/ebpf_vfs.h \
collectors/ebpf.plugin/ebpf.h \
collectors/ebpf.plugin/ebpf_apps.c \
collectors/ebpf.plugin/ebpf_apps.h \

View File

@@ -138,6 +138,17 @@
#define NETDATA_CHART_PRIO_MDSTAT_FINISH 2105
#define NETDATA_CHART_PRIO_MDSTAT_SPEED 2106
// Filesystem
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN 2150
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT 2151
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES 2152
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES 2153
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC 2154
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC 2155
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN 2156
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN 2157
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE 2158
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE 2159
// NFS (server)

View File

@@ -38,4 +38,5 @@ dist_ebpfconfig_DATA = \
ebpf.d/process.conf \
ebpf.d/sync.conf \
ebpf.d/swap.conf \
ebpf.d/vfs.conf \
$(NULL)

View File

@@ -82,6 +82,19 @@ The Agent displays the number of bytes written as negative because they are movi
The Agent counts and shows the number of instances where a running program experiences a read or write error.
#### Create
This chart shows the number of calls for `vfs_create`. This function is responsible for creating files.
#### Synchronization
This chart shows the number of calls for `vfs_fsync`. This function is responsible for performing an fsync or
fdatasync on a file.
#### Open
This chart shows the number of calls for `vfs_open`. This function is responsible for opening files.
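The counters behind these charts come from small kernel programs attached with `kprobes`. A minimal sketch (not the plugin's actual program) of such a counter for `vfs_open`, using a per-CPU array that user space later reads back with `bpf_map_lookup_elem()`:

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

// One per-CPU slot holding the number of vfs_open() calls seen on that CPU.
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, __u64);
} tbl_vfs_open SEC(".maps");

SEC("kprobe/vfs_open")
int netdata_sketch_vfs_open(struct pt_regs *ctx)
{
    __u32 key = 0;
    __u64 *val = bpf_map_lookup_elem(&tbl_vfs_open, &key);
    if (val)
        (*val)++; // per-CPU slot, so no atomic operation is required
    return 0;
}

char _license[] SEC("license") = "GPL";
```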
### Process
For this group, the eBPF collector monitors process/thread creation and process end, and then displays any errors in the
@@ -203,11 +216,12 @@ The eBPF collector enables and runs the following eBPF programs by default:
- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
`kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and
files are not found.
- `process`: This eBPF program creates charts that show information about process creation, VFS IO, and files removed.
- `process`: This eBPF program creates charts that show information about process creation and calls to open files.
When in `return` mode, it also creates charts showing errors when these operations are executed.
- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
bandwidth consumed by each.
- `sync`: Monitor calls for the syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions; a sketch of how its `return` mode counts failures follows this list.
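As a rough illustration of `return` mode (a sketch, not the program the plugin ships), a `kretprobe` sees the return value of the traced function, so failures can be counted separately from calls:

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

// Hypothetical error counter, for illustration only.
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, __u64);
} tbl_vfs_create_err SEC(".maps");

SEC("kretprobe/vfs_create")
int netdata_sketch_ret_vfs_create(struct pt_regs *ctx)
{
    int ret = (int)PT_REGS_RC(ctx); // return value of vfs_create()
    if (ret < 0) {                  // a negative errno means the call failed
        __u32 key = 0;
        __u64 *val = bpf_map_lookup_elem(&tbl_vfs_create_err, &key);
        if (val)
            (*val)++;
    }
    return 0;
}

char _license[] SEC("license") = "GPL";
```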
## Thread configuration
@@ -229,6 +243,7 @@ The following configuration files are available:
- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and
also lets you specify which network the eBPF collector monitors.
- `sync.conf`: Configuration for the `sync` thread.
- `vfs.conf`: Configuration for the `vfs` thread.
### Network configuration

View File

@@ -104,6 +104,11 @@ ebpf_module_t ebpf_modules[] = {
.update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL },
{ .thread_name = "vfs", .config_name = "swap", .enabled = 0, .start_routine = ebpf_vfs_thread,
.update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
.config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE },
{ .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
.global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
@@ -172,6 +177,12 @@ static void ebpf_exit(int sig)
freez(swap_pid);
}
if (ebpf_modules[EBPF_MODULE_VFS_IDX].enabled) {
ebpf_modules[EBPF_MODULE_VFS_IDX].enabled = 0;
clean_vfs_pid_structures();
freez(vfs_pid);
}
/*
int ret = fork();
if (ret < 0) // error
@@ -624,6 +635,8 @@ void ebpf_print_help()
"\n"
" --swap or -w Enable chart related to swap run time.\n"
"\n"
" --vfs or -f Enable chart related to vfs run time.\n"
"\n"
VERSION,
(year >= 116) ? year + 1900 : 2020);
}
@@ -920,6 +933,13 @@ static void read_collector_values(int *disable_apps)
started++;
}
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs",
CONFIG_BOOLEAN_NO);
if (enabled) {
ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_apps);
started++;
}
if (!started){
ebpf_enable_all_charts(*disable_apps);
// Read network viewer section
@@ -1009,6 +1029,7 @@ static void parse_args(int argc, char **argv)
{"return", no_argument, 0, 'r' },
{"sync", no_argument, 0, 's' },
{"swap", no_argument, 0, 'w' },
{"vfs", no_argument, 0, 'f' },
{0, 0, 0, 0}
};
@@ -1107,6 +1128,14 @@ static void parse_args(int argc, char **argv)
ebpf_enable_chart(EBPF_MODULE_SWAP_IDX, disable_apps);
#ifdef NETDATA_INTERNAL_CHECKS
info("EBPF enabling \"swap\" chart, because it was started with the option \"--swap\" or \"-w\".");
#endif
break;
}
case 'f': {
enabled = 1;
ebpf_enable_chart(EBPF_MODULE_VFS_IDX, disable_apps);
#ifdef NETDATA_INTERNAL_CHECKS
info("EBPF enabling \"vfs\" chart, because it was started with the option \"--vfs\" or \"-f\".");
#endif
break;
}
@@ -1254,6 +1283,8 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_DCSTAT_IDX].start_routine},
{"EBPF SWAP" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine},
{"EBPF VFS" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};

View File

@@ -26,12 +26,13 @@
# The eBPF collector enables and runs the following eBPF programs by default:
#
# `cachestat`: Make charts for kernel functions related to page cache.
# `process` : This eBPF program creates charts that show information about process creation, VFS IO, and
# files removed.
# `process` : This eBPF program creates charts that show information about process creation and file manipulation.
# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
# bandwidth consumed by each.
# `sync` : Monitor calls for the syscall sync(2).
# `swap` : Monitor calls for internal swap functions.
# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
# files removed.
[ebpf programs]
cachestat = no
dcstat = no
@@ -39,5 +40,5 @@
socket = yes
sync = yes
swap = no
vfs = yes
network connections = no

View File

@@ -0,0 +1,14 @@
# The `ebpf load mode` option accepts the following values:
# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
# 'no'.
#
[global]
ebpf load mode = entry
apps = yes
update every = 1
pid table size = 32768
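For example, switching this thread to `return` mode, so the error charts described above are also created, is a one-line change (a sketch of the relevant section, using the same option names as this file):

```
[global]
    ebpf load mode = return
```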

View File

@@ -79,7 +79,8 @@ enum ebpf_module_indexes {
EBPF_MODULE_CACHESTAT_IDX,
EBPF_MODULE_SYNC_IDX,
EBPF_MODULE_DCSTAT_IDX,
EBPF_MODULE_SWAP_IDX
EBPF_MODULE_SWAP_IDX,
EBPF_MODULE_VFS_IDX
};
// Copied from musl header

View File

@@ -939,6 +939,12 @@ void cleanup_variables_from_other_threads(uint32_t pid)
freez(swap_pid[pid]);
swap_pid[pid] = NULL;
}
// Clean vfs structure
if (vfs_pid) {
freez(vfs_pid[pid]);
vfs_pid[pid] = NULL;
}
}
/**

View File

@@ -23,6 +23,7 @@
#include "ebpf_cachestat.h"
#include "ebpf_sync.h"
#include "ebpf_swap.h"
#include "ebpf_vfs.h"
#define MAX_COMPARE_NAME 100
#define MAX_NAME 100
@@ -115,6 +116,7 @@ struct target {
netdata_publish_cachestat_t cachestat;
netdata_publish_dcstat_t dcstat;
netdata_publish_swap_t swap;
netdata_publish_vfs_t vfs;
/* These variables are not necessary for eBPF collector
kernel_uint_t minflt;
@@ -344,30 +346,14 @@ typedef struct ebpf_process_stat {
//Counter
uint32_t open_call;
uint32_t write_call;
uint32_t writev_call;
uint32_t read_call;
uint32_t readv_call;
uint32_t unlink_call;
uint32_t exit_call;
uint32_t release_call;
uint32_t fork_call;
uint32_t clone_call;
uint32_t close_call;
//Accumulator
uint64_t write_bytes;
uint64_t writev_bytes;
uint64_t readv_bytes;
uint64_t read_bytes;
//Counter
uint32_t open_err;
uint32_t write_err;
uint32_t writev_err;
uint32_t read_err;
uint32_t readv_err;
uint32_t unlink_err;
uint32_t fork_err;
uint32_t clone_err;
uint32_t close_err;

View File

@@ -11,11 +11,10 @@
*
*****************************************************************/
static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "delete", "read", "write",
"process", "task", "process", "thread" };
static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "vfs_unlink",
"vfs_read", "vfs_write", "do_exit",
"release_task", "_do_fork", "sys_clone" };
static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "process",
"task", "process", "thread" };
static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "do_exit",
"release_task", "_do_fork", "sys_clone" };
static char *status[] = { "process", "zombie" };
static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
@@ -56,16 +55,16 @@ struct config process_config = { .first_section = NULL,
* @param pvc the second output structure with correlated dimensions
* @param input the structure with the input data.
*/
static void ebpf_update_global_publish(
netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, netdata_syscall_stat_t *input)
static void ebpf_update_global_publish(netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc,
netdata_syscall_stat_t *input)
{
netdata_publish_syscall_t *move = publish;
int selector = NETDATA_KEY_PUBLISH_PROCESS_OPEN;
while (move) {
// Until NETDATA_KEY_PUBLISH_PROCESS_READ we are creating accumulators, so it is possible
// Until NETDATA_KEY_PUBLISH_PROCESS_EXIT we are creating accumulators, so it is possible
// to use incremental charts, but after this we will do some math with the values, so we are storing
// absolute values
if (selector < NETDATA_KEY_PUBLISH_PROCESS_READ) {
if (selector < NETDATA_KEY_PUBLISH_PROCESS_EXIT) {
move->ncall = input->call;
move->nbyte = input->bytes;
move->nerr = input->ecall;
@@ -84,12 +83,11 @@ static void ebpf_update_global_publish(
selector++;
}
pvc->write = -((long)publish[NETDATA_KEY_PUBLISH_PROCESS_WRITE].nbyte);
pvc->read = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_READ].nbyte;
pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall - (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall -
(long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall = -publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall +
(long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
}
/**
@@ -118,33 +116,20 @@ static void ebpf_process_send_data(ebpf_module_t *em)
netdata_publish_vfs_common_t pvc;
ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data);
write_count_chart(
NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
write_count_chart(
NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_DEL_START], 1);
write_count_chart(
NETDATA_VFS_FILE_IO_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_IN_START_BYTE], 2);
write_count_chart(
NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_EXIT_START], 2);
write_count_chart(
NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
write_count_chart(NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY,
&process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], 2);
write_count_chart(NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY,
&process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
write_status_chart(NETDATA_EBPF_FAMILY, &pvc);
if (em->mode < MODE_ENTRY) {
write_err_chart(
NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
write_err_chart(
NETDATA_VFS_FILE_ERR_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[2], NETDATA_VFS_ERRORS);
write_err_chart(
NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY,
process_publish_aggregated, 2);
write_err_chart(NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY,
&process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
}
write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY,
process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], (long long) pvc.write,
process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ], (long long)pvc.read);
}
/**
@@ -220,8 +205,8 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_sys_open));
value = ebpf_process_sum_values_for_pids(w->root_pid,
offsetof(ebpf_process_publish_apps_t, ecall_sys_open));
write_chart_dimension(w->name, value);
}
}
@@ -231,8 +216,7 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value =
ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd));
value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd));
write_chart_dimension(w->name, value);
}
}
@@ -242,93 +226,18 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_close_fd));
value = ebpf_process_sum_values_for_pids(w->root_pid,
offsetof(ebpf_process_publish_apps_t, ecall_close_fd));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value =
ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_vfs_unlink));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, call_write));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_write));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value =
ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_read));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_read));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_written));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value = ebpf_process_sum_values_for_pids(
w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_read));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value =
ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork));
value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork));
write_chart_dimension(w->name, value);
}
}
@@ -337,8 +246,7 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
value =
ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone));
value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone));
write_chart_dimension(w->name, value);
}
}
@@ -369,10 +277,10 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
static void read_hash_global_tables()
{
uint64_t idx;
netdata_idx_t res[NETDATA_GLOBAL_VECTOR];
netdata_idx_t res[NETDATA_KEY_END_VECTOR];
netdata_idx_t *val = process_hash_values;
for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) {
for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) {
if (!bpf_map_lookup_elem(map_fd[1], &idx, val)) {
uint64_t total = 0;
int i;
@@ -388,9 +296,6 @@ static void read_hash_global_tables()
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].call = res[NETDATA_KEY_CALLS_CLOSE_FD];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].call = res[NETDATA_KEY_CALLS_VFS_UNLINK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].call = res[NETDATA_KEY_CALLS_DO_FORK];
@@ -398,16 +303,8 @@ static void read_hash_global_tables()
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
(uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
(uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
}
/**
@@ -433,9 +330,6 @@ static void ebpf_process_update_apps_data()
//Read data
cad->call_sys_open = ps->open_call;
cad->call_close_fd = ps->close_call;
cad->call_vfs_unlink = ps->unlink_call;
cad->call_read = ps->read_call + ps->readv_call;
cad->call_write = ps->write_call + ps->writev_call;
cad->call_do_exit = ps->exit_call;
cad->call_release_task = ps->release_call;
cad->call_do_fork = ps->fork_call;
@@ -443,15 +337,9 @@ static void ebpf_process_update_apps_data()
cad->ecall_sys_open = ps->open_err;
cad->ecall_close_fd = ps->close_err;
cad->ecall_vfs_unlink = ps->unlink_err;
cad->ecall_read = ps->read_err + ps->readv_err;
cad->ecall_write = ps->write_err + ps->writev_err;
cad->ecall_do_fork = ps->fork_err;
cad->ecall_sys_clone = ps->clone_err;
cad->bytes_written = (uint64_t)ps->write_bytes + (uint64_t)ps->write_bytes;
cad->bytes_read = (uint64_t)ps->read_bytes + (uint64_t)ps->readv_bytes;
pids = pids->next;
}
}
@@ -462,36 +350,6 @@ static void ebpf_process_update_apps_data()
*
*****************************************************************/
/**
* Create IO chart
*
* @param family the chart family
* @param name the chart name
* @param axis the axis label
* @param web the group name used to attach the chart on dashboard
* @param order the order number of the specified chart
* @param algorithm the algorithm used to make the charts.
*/
static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order, int algorithm)
{
printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n",
family,
name,
axis,
web,
order,
update_every);
printf("DIMENSION %s %s %s 1 1\n",
process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
ebpf_algorithms[algorithm]);
printf("DIMENSION %s %s %s 1 1\n",
process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
ebpf_algorithms[algorithm]);
}
/**
* Create process status chart
*
@@ -551,50 +409,6 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
2);
}
ebpf_create_chart(NETDATA_EBPF_FAMILY,
NETDATA_VFS_FILE_CLEAN_COUNT,
"Remove files",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21002,
ebpf_create_global_dimension,
&process_publish_aggregated[NETDATA_DEL_START],
1);
ebpf_create_chart(NETDATA_EBPF_FAMILY,
NETDATA_VFS_FILE_IO_COUNT,
"Calls to IO",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21003,
ebpf_create_global_dimension,
&process_publish_aggregated[NETDATA_IN_START_BYTE],
2);
ebpf_create_io_chart(NETDATA_EBPF_FAMILY,
NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
NETDATA_VFS_GROUP,
21004,
NETDATA_EBPF_ABSOLUTE_IDX);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_EBPF_FAMILY,
NETDATA_VFS_FILE_ERR_COUNT,
"Fails to write or read",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21005,
ebpf_create_global_dimension,
&process_publish_aggregated[2],
NETDATA_VFS_ERRORS);
}
ebpf_create_chart(NETDATA_EBPF_FAMILY,
NETDATA_PROCESS_SYSCALL,
"Start process",
@@ -602,9 +416,9 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21006,
21002,
ebpf_create_global_dimension,
&process_publish_aggregated[NETDATA_PROCESS_START],
&process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
2);
ebpf_create_chart(NETDATA_EBPF_FAMILY,
@@ -614,9 +428,9 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21007,
21003,
ebpf_create_global_dimension,
&process_publish_aggregated[NETDATA_EXIT_START],
&process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
2);
ebpf_process_status_chart(NETDATA_EBPF_FAMILY,
@@ -624,7 +438,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
EBPF_COMMON_DIMENSION_DIFFERENCE,
NETDATA_PROCESS_GROUP,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
21008);
21004);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_EBPF_FAMILY,
@@ -634,9 +448,9 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21009,
21005,
ebpf_create_global_dimension,
&process_publish_aggregated[NETDATA_PROCESS_START],
&process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
2);
}
}
@@ -692,77 +506,12 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
"Files deleted",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20065,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
"Write to disk",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20066,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
apps_groups_root_target);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
"Fails to write",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20067,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
"Read from disk",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20068,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
"Fails to read",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20069,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
"Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20070,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
"Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20071,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS,
"Process started",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20072,
20065,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
root);
@@ -771,7 +520,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20073,
20066,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
root);
@@ -780,7 +529,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20074,
20067,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
root);
}
@@ -971,7 +720,7 @@ static void ebpf_process_allocate_global_vectors(size_t length)
static void change_syscalls()
{
static char *lfork = { "do_fork" };
process_id_names[7] = lfork;
process_id_names[NETDATA_KEY_PUBLISH_PROCESS_FORK] = lfork;
}
/**
@@ -1056,8 +805,7 @@ void *ebpf_process_thread(void *ptr)
}
int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = {
NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,NETDATA_EBPF_INCREMENTAL_IDX, //open, close, unlink
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
};

View File

@@ -5,41 +5,20 @@
// Groups used on Dashboard
#define NETDATA_FILE_GROUP "File"
#define NETDATA_VFS_GROUP "VFS"
#define NETDATA_PROCESS_GROUP "Process"
// Internal constants
#define NETDATA_GLOBAL_VECTOR 24
#define NETDATA_VFS_ERRORS 3
// Map index
#define NETDATA_DEL_START 2
#define NETDATA_IN_START_BYTE 3
#define NETDATA_EXIT_START 5
#define NETDATA_PROCESS_START 7
// Global chart name
#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
#define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
#define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects"
#define NETDATA_VFS_FILE_IO_COUNT "io"
#define NETDATA_VFS_FILE_ERR_COUNT "io_error"
#define NETDATA_EXIT_SYSCALL "exit"
#define NETDATA_PROCESS_SYSCALL "process_thread"
#define NETDATA_PROCESS_ERROR_NAME "task_error"
#define NETDATA_PROCESS_STATUS_NAME "process_status"
#define NETDATA_VFS_IO_FILE_BYTES "io_bytes"
// Charts created on Apps submenu
#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open"
#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed"
#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
#define NETDATA_SYSCALL_APPS_TASK_PROCESS "process_create"
#define NETDATA_SYSCALL_APPS_TASK_THREAD "thread_create"
#define NETDATA_SYSCALL_APPS_TASK_CLOSE "task_close"
@@ -48,8 +27,6 @@
#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error"
#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error"
#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
// Process configuration name
#define NETDATA_PROCESS_CONFIG_FILE "process.conf"
@@ -59,17 +36,6 @@ typedef enum ebpf_process_index {
NETDATA_KEY_CALLS_DO_SYS_OPEN,
NETDATA_KEY_ERROR_DO_SYS_OPEN,
NETDATA_KEY_CALLS_VFS_WRITE,
NETDATA_KEY_ERROR_VFS_WRITE,
NETDATA_KEY_BYTES_VFS_WRITE,
NETDATA_KEY_CALLS_VFS_READ,
NETDATA_KEY_ERROR_VFS_READ,
NETDATA_KEY_BYTES_VFS_READ,
NETDATA_KEY_CALLS_VFS_UNLINK,
NETDATA_KEY_ERROR_VFS_UNLINK,
NETDATA_KEY_CALLS_DO_EXIT,
NETDATA_KEY_CALLS_RELEASE_TASK,
@@ -83,14 +49,7 @@ typedef enum ebpf_process_index {
NETDATA_KEY_CALLS_SYS_CLONE,
NETDATA_KEY_ERROR_SYS_CLONE,
NETDATA_KEY_CALLS_VFS_WRITEV,
NETDATA_KEY_ERROR_VFS_WRITEV,
NETDATA_KEY_BYTES_VFS_WRITEV,
NETDATA_KEY_CALLS_VFS_READV,
NETDATA_KEY_ERROR_VFS_READV,
NETDATA_KEY_BYTES_VFS_READV
NETDATA_KEY_END_VECTOR
} ebpf_process_index_t;
// This enum acts as an index for publish vector.
@@ -101,9 +60,6 @@ typedef enum ebpf_process_index {
typedef enum netdata_publish_process {
NETDATA_KEY_PUBLISH_PROCESS_OPEN,
NETDATA_KEY_PUBLISH_PROCESS_CLOSE,
NETDATA_KEY_PUBLISH_PROCESS_UNLINK,
NETDATA_KEY_PUBLISH_PROCESS_READ,
NETDATA_KEY_PUBLISH_PROCESS_WRITE,
NETDATA_KEY_PUBLISH_PROCESS_EXIT,
NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK,
NETDATA_KEY_PUBLISH_PROCESS_FORK,
@@ -116,9 +72,6 @@ typedef struct ebpf_process_publish_apps {
// Number of calls during the last read
uint64_t call_sys_open;
uint64_t call_close_fd;
uint64_t call_vfs_unlink;
uint64_t call_read;
uint64_t call_write;
uint64_t call_do_exit;
uint64_t call_release_task;
uint64_t call_do_fork;
@@ -127,15 +80,8 @@ typedef struct ebpf_process_publish_apps {
// Number of errors during the last read
uint64_t ecall_sys_open;
uint64_t ecall_close_fd;
uint64_t ecall_vfs_unlink;
uint64_t ecall_read;
uint64_t ecall_write;
uint64_t ecall_do_fork;
uint64_t ecall_sys_clone;
// Number of bytes during the last read
uint64_t bytes_written;
uint64_t bytes_read;
} ebpf_process_publish_apps_t;
extern struct config process_config;

View File

@@ -0,0 +1,930 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include <sys/resource.h>
#include "ebpf.h"
#include "ebpf_vfs.h"
static char *vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_END] = { "delete", "read", "write",
"fsync", "open", "create" };
static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_read", "vfs_write",
"vfs_fsync", "vfs_open", "vfs_create"};
static netdata_idx_t *vfs_hash_values = NULL;
static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END];
static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END];
netdata_publish_vfs_t **vfs_pid = NULL;
netdata_publish_vfs_t *vfs_vector = NULL;
static ebpf_data_t vfs_data;
static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0},
{.name = NULL, .internal_input = 0, .user_input = 0}};
struct config vfs_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
static struct bpf_object *objects = NULL;
static struct bpf_link **probe_links = NULL;
struct netdata_static_thread vfs_threads = {"VFS KERNEL",
NULL, NULL, 1, NULL,
NULL, NULL};
static int *map_fd = NULL;
static int read_thread_closed = 1;
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
*
*****************************************************************/
/**
* Clean PID structures
*
* Clean the allocated structures.
*/
void clean_vfs_pid_structures() {
struct pid_stat *pids = root_of_pids;
while (pids) {
freez(vfs_pid[pids->pid]);
pids = pids->next;
}
}
/**
* Clean up the main thread.
*
* @param ptr thread data.
**/
static void ebpf_vfs_cleanup(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
if (!em->enabled)
return;
heartbeat_t hb;
heartbeat_init(&hb);
uint32_t tick = 50 * USEC_PER_MS;
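// Wait in 50 ms heartbeats until the read thread sets read_thread_closed,
// so the structures freed below are not in use by that thread anymore.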
while (!read_thread_closed) {
usec_t dt = heartbeat_next(&hb, tick);
UNUSED(dt);
}
freez(vfs_data.map_fd);
freez(vfs_hash_values);
freez(vfs_vector);
if (probe_links) {
struct bpf_program *prog;
size_t i = 0;
bpf_object__for_each_program(prog, objects) {
bpf_link__destroy(probe_links[i]);
i++;
}
bpf_object__close(objects);
}
}
/*****************************************************************
*
* FUNCTIONS WITH THE MAIN LOOP
*
*****************************************************************/
/**
* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/
static void ebpf_vfs_send_data(ebpf_module_t *em)
{
netdata_publish_vfs_common_t pvc;
pvc.write = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes;
pvc.read = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes;
write_count_chart(NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK], 1);
write_count_chart(NETDATA_VFS_FILE_IO_COUNT, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2);
if (em->mode < MODE_ENTRY) {
write_err_chart(NETDATA_VFS_FILE_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2);
}
write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_FILESYSTEM_FAMILY, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
(long long)pvc.write, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ], (long long)pvc.read);
write_count_chart(NETDATA_VFS_FSYNC, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1);
if (em->mode < MODE_ENTRY) {
write_err_chart(NETDATA_VFS_FSYNC_ERR, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1);
}
write_count_chart(NETDATA_VFS_OPEN, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1);
if (em->mode < MODE_ENTRY) {
write_err_chart(NETDATA_VFS_OPEN_ERR, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1);
}
write_count_chart(NETDATA_VFS_CREATE, NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], 1);
if (em->mode < MODE_ENTRY) {
write_err_chart(
NETDATA_VFS_CREATE_ERR,
NETDATA_FILESYSTEM_FAMILY,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
1);
}
}
/**
* Read the hash table and store data to allocated vectors.
*/
static void read_global_table()
{
uint64_t idx;
netdata_idx_t res[NETDATA_VFS_COUNTER];
netdata_idx_t *val = vfs_hash_values;
int fd = map_fd[NETDATA_VFS_ALL];
for (idx = 0; idx < NETDATA_VFS_COUNTER; idx++) {
uint64_t total = 0;
if (!bpf_map_lookup_elem(fd, &idx, val)) {
int i;
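// On kernels >= 4.15 the table is per-CPU: the lookup returns one value per
// processor and they are summed here; older kernels store a single value.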
int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
for (i = 0; i < end; i++)
total += val[i];
}
res[idx] = total;
}
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].ncall = res[NETDATA_KEY_CALLS_VFS_READ] +
res[NETDATA_KEY_CALLS_VFS_READV];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].ncall = res[NETDATA_KEY_CALLS_VFS_WRITE] +
res[NETDATA_KEY_CALLS_VFS_WRITEV];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].ncall = res[NETDATA_KEY_CALLS_VFS_FSYNC];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].ncall = res[NETDATA_KEY_CALLS_VFS_OPEN];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].ncall = res[NETDATA_KEY_CALLS_VFS_CREATE];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].nerr = res[NETDATA_KEY_ERROR_VFS_UNLINK];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].nerr = res[NETDATA_KEY_ERROR_VFS_READ] +
res[NETDATA_KEY_ERROR_VFS_READV];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].nerr = res[NETDATA_KEY_ERROR_VFS_WRITE] +
res[NETDATA_KEY_ERROR_VFS_WRITEV];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].nerr = res[NETDATA_KEY_ERROR_VFS_FSYNC];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].nerr = res[NETDATA_KEY_ERROR_VFS_OPEN];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].nerr = res[NETDATA_KEY_ERROR_VFS_CREATE];
vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
(uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
(uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
}
/**
* Sum PIDs
*
* Sum values for all targets.
*
* @param vfs output structure
* @param root linked list with the structures to be used
*/
static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target *root)
{
netdata_publish_vfs_t accumulator;
memset(&accumulator, 0, sizeof(accumulator));
while (root) {
int32_t pid = root->pid;
netdata_publish_vfs_t *w = vfs_pid[pid];
if (w) {
accumulator.write_call += w->write_call;
accumulator.writev_call += w->writev_call;
accumulator.read_call += w->read_call;
accumulator.readv_call += w->readv_call;
accumulator.unlink_call += w->unlink_call;
accumulator.fsync_call += w->fsync_call;
accumulator.open_call += w->open_call;
accumulator.create_call += w->create_call;
accumulator.write_bytes += w->write_bytes;
accumulator.writev_bytes += w->writev_bytes;
accumulator.read_bytes += w->read_bytes;
accumulator.readv_bytes += w->readv_bytes;
accumulator.write_err += w->write_err;
accumulator.writev_err += w->writev_err;
accumulator.read_err += w->read_err;
accumulator.readv_err += w->readv_err;
accumulator.unlink_err += w->unlink_err;
accumulator.fsync_err += w->fsync_err;
accumulator.open_err += w->open_err;
accumulator.create_err += w->create_err;
}
root = root->next;
}
// These conditions keep the values monotonic: the charts use the incremental
// algorithm, so the sums must never decrease when a PID exits.
vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call;
vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call;
vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call;
vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call;
vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call;
vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call;
vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call;
vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call;
vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes;
vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes;
vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes;
vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes;
vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err;
vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err;
vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err;
vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err;
vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err;
vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err;
vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err;
vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err;
}
/**
* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
* @param root the target list.
*/
void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct target *root)
{
struct target *w;
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
ebpf_vfs_sum_pids(&w->vfs, w->root_pid);
}
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.unlink_call);
}
}
write_end_chart();
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.write_call + w->vfs.writev_call);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.write_err + w->vfs.writev_err);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.read_call + w->vfs.readv_call);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.read_err + w->vfs.readv_err);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.write_bytes + w->vfs.writev_bytes);
}
}
write_end_chart();
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.read_bytes + w->vfs.readv_bytes);
}
}
write_end_chart();
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.fsync_call);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.fsync_err);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.open_call);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.open_err);
}
}
write_end_chart();
}
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.create_call);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
write_chart_dimension(w->name, w->vfs.create_err);
}
}
write_end_chart();
}
}
/**
* Apps Accumulator
*
* Sum all values read from the kernel and store them in the first address.
*
* @param out the vector with read values.
*/
static void vfs_apps_accumulator(netdata_publish_vfs_t *out)
{
int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
netdata_publish_vfs_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_publish_vfs_t *w = &out[i];
total->write_call += w->write_call;
total->writev_call += w->writev_call;
total->read_call += w->read_call;
total->readv_call += w->readv_call;
total->unlink_call += w->unlink_call;
total->fsync_call += w->fsync_call;
total->open_call += w->open_call;
total->create_call += w->create_call;
total->write_bytes += w->write_bytes;
total->writev_bytes += w->writev_bytes;
total->read_bytes += w->read_bytes;
total->readv_bytes += w->readv_bytes;
total->write_err += w->write_err;
total->writev_err += w->writev_err;
total->read_err += w->read_err;
total->readv_err += w->readv_err;
total->unlink_err += w->unlink_err;
total->fsync_err += w->fsync_err;
total->open_err += w->open_err;
total->create_err += w->create_err;
}
}
/**
* Fill PID
*
* Fill PID structures
*
* @param current_pid the PID for which we are collecting data
* @param publish values read from the hash tables.
*/
static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
{
netdata_publish_vfs_t *curr = vfs_pid[current_pid];
if (!curr) {
curr = callocz(1, sizeof(netdata_publish_vfs_t));
vfs_pid[current_pid] = curr;
}
memcpy(curr, &publish[0], sizeof(netdata_publish_vfs_t));
}
/**
* Read the hash table and store data to allocated vectors.
*/
static void ebpf_vfs_read_apps()
{
struct pid_stat *pids = root_of_pids;
netdata_publish_vfs_t *vv = vfs_vector;
int fd = map_fd[NETDATA_VFS_PID];
size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
while (pids) {
uint32_t key = pids->pid;
if (bpf_map_lookup_elem(fd, &key, vv)) {
pids = pids->next;
continue;
}
vfs_apps_accumulator(vv);
vfs_fill_pid(key, vv);
// Zero the vector to avoid passing data read from one process to another.
memset(vv, 0, length);
pids = pids->next;
}
}
/**
* VFS read hash
*
* This is the thread callback.
* This thread is necessary because we cannot block the whole plugin while reading the data.
*
* @param ptr It is a NULL value for this thread.
*
* @return It always returns NULL.
*/
void *ebpf_vfs_read_hash(void *ptr)
{
read_thread_closed = 0;
heartbeat_t hb;
heartbeat_init(&hb);
ebpf_module_t *em = (ebpf_module_t *)ptr;
usec_t step = NETDATA_LATENCY_VFS_SLEEP_MS * em->update_time;
while (!close_ebpf_plugin) {
usec_t dt = heartbeat_next(&hb, step);
(void)dt;
read_global_table();
}
read_thread_closed = 1;
return NULL;
}
/**
* Main loop for this collector.
*
* @param em the structure with thread information
*/
static void vfs_collector(ebpf_module_t *em)
{
vfs_threads.thread = mallocz(sizeof(netdata_thread_t));
vfs_threads.start_routine = ebpf_vfs_read_hash;
map_fd = vfs_data.map_fd;
netdata_thread_create(vfs_threads.thread, vfs_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
ebpf_vfs_read_hash, em);
int apps = em->apps_charts;
while (!close_ebpf_plugin) {
pthread_mutex_lock(&collect_data_mutex);
pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
if (apps)
ebpf_vfs_read_apps();
pthread_mutex_lock(&lock);
ebpf_vfs_send_data(em);
fflush(stdout);
if (apps)
ebpf_vfs_send_apps_data(em, apps_groups_root_target);
pthread_mutex_unlock(&lock);
pthread_mutex_unlock(&collect_data_mutex);
}
}
/*****************************************************************
*
* FUNCTIONS TO CREATE CHARTS
*
*****************************************************************/
/**
* Create IO chart
*
* @param family the chart family
* @param name the chart name
* @param axis the axis label
* @param web the group name used to attach the chart on dashboard
* @param order the order number of the specified chart
* @param algorithm the algorithm used to make the charts.
*/
static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order, int algorithm)
{
printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n",
family,
name,
axis,
web,
order,
update_every);
printf("DIMENSION %s %s %s 1 1\n",
vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ],
vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_READ],
ebpf_algorithms[algorithm]);
printf("DIMENSION %s %s %s -1 1\n",
vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
ebpf_algorithms[algorithm]);
}
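// For reference, a sketch of the plugin-protocol lines this function emits,
// assuming NETDATA_FILESYSTEM_FAMILY expands to "filesystem", the bytes axis
// label is "bytes/s", and update_every is 1:
//
//   CHART filesystem.vfs_io_bytes '' 'Bytes written and read' 'bytes/s' 'VFS' '' line 2152 1
//   DIMENSION vfs_read read incremental 1 1
//   DIMENSION vfs_write write incremental -1 1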
/**
* Create global charts
*
* Call ebpf_create_chart to create the charts for the collector.
*
* @param em a pointer to the structure with the default values.
*/
static void ebpf_create_global_charts(ebpf_module_t *em)
{
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_FILE_CLEAN_COUNT,
"Remove files",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
1);
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_FILE_IO_COUNT,
"Calls to IO",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
2);
ebpf_create_io_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
NETDATA_VFS_GROUP,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES,
NETDATA_EBPF_INCREMENTAL_IDX);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_FILE_ERR_COUNT,
"Fails to write or read",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
2);
}
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_FSYNC,
"Calls to vfs_fsync",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
1);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_FSYNC_ERR,
"Fails to synchronize",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
1);
}
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_OPEN,
"Calls to vfs_open",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
1);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_OPEN_ERR,
"Fails to open a file",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
1);
}
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_CREATE,
"Calls to vfs_create",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
1);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
NETDATA_VFS_CREATE_ERR,
"Fails to create a file.",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_VFS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE,
ebpf_create_global_dimension,
&vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
1);
}
}
/**
* Create process apps charts
*
* Call ebpf_create_chart to create the charts on apps submenu.
*
* @param em a pointer to the structure with the default values.
* @param ptr a pointer for the targets.
**/
void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
{
struct target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
"Files deleted",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20065,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
"Write to disk",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20066,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
apps_groups_root_target);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
"Fails to write",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20067,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
"Read from disk",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20068,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
"Fails to read",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20069,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
"Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20070,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
"Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20071,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC,
"Calls for <code>vfs_fsync</code>", EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20072,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR,
"Sync error",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20073,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN,
"Calls for <code>vfs_open</code>", EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20074,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR,
"Open error",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20075,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE,
"Calls for <code>vfs_create</code>", EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20076,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR,
"Create error",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20077,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
root);
}
}
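/*
 * NOTE: the apps charts above claim menu priorities 20065-20077 sequentially;
 * new per-application VFS charts should be added after 20077 to keep the
 * dashboard ordering stable.
 */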
/*****************************************************************
*
* FUNCTIONS TO START THREAD
*
*****************************************************************/
/**
 * Allocate vectors used with this thread.
 *
 * We do not check the return values, because callocz aborts the software
 * when it cannot allocate memory.
 */
static void ebpf_vfs_allocate_global_vectors()
{
memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
vfs_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
}
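/*
 * The vectors allocated above are expected to be released by ebpf_vfs_cleanup(),
 * the handler registered with netdata_thread_cleanup_push() in
 * ebpf_vfs_thread() below.
 */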
/*****************************************************************
*
 * EBPF VFS THREAD
*
*****************************************************************/
/**
 * VFS thread
 *
 * Thread used to generate VFS charts.
 *
 * @param ptr a pointer to `struct ebpf_module`
 *
 * @return It always returns NULL
 */
void *ebpf_vfs_thread(void *ptr)
{
netdata_thread_cleanup_push(ebpf_vfs_cleanup, ptr);
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = vfs_maps;
fill_ebpf_data(&vfs_data);
ebpf_update_pid_table(&vfs_maps[0], em);
ebpf_vfs_allocate_global_vectors();
if (!em->enabled)
goto endvfs;
if (ebpf_update_kernel(&vfs_data)) {
goto endvfs;
}
probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, vfs_data.map_fd);
if (!probe_links) {
goto endvfs;
}
int algorithms[NETDATA_KEY_PUBLISH_VFS_END] = {
NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX
};
ebpf_global_labels(vfs_aggregated_data, vfs_publish_aggregated, vfs_dimension_names,
vfs_id_names, algorithms, NETDATA_KEY_PUBLISH_VFS_END);
pthread_mutex_lock(&lock);
ebpf_create_global_charts(em);
pthread_mutex_unlock(&lock);
vfs_collector(em);
endvfs:
netdata_thread_cleanup_pop(1);
return NULL;
}
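/*
 * Usage sketch (assumption, not part of this diff): the plugin main loop is
 * expected to start this routine through the generic thread helper, e.g.:
 *
 *   netdata_thread_t thread;
 *   netdata_thread_create(&thread, "EBPF VFS", NETDATA_THREAD_OPTION_JOINABLE,
 *                         ebpf_vfs_thread, (void *)em);
 */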

View File

@@ -0,0 +1,130 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_EBPF_VFS_H
#define NETDATA_EBPF_VFS_H 1
#define NETDATA_DIRECTORY_VFS_CONFIG_FILE "vfs.conf"
#define NETDATA_LATENCY_VFS_SLEEP_MS 750000ULL
// Global chart names
#define NETDATA_VFS_FILE_CLEAN_COUNT "vfs_deleted_objects"
#define NETDATA_VFS_FILE_IO_COUNT "vfs_io"
#define NETDATA_VFS_FILE_ERR_COUNT "vfs_io_error"
#define NETDATA_VFS_IO_FILE_BYTES "vfs_io_bytes"
#define NETDATA_VFS_FSYNC "vfs_fsync"
#define NETDATA_VFS_FSYNC_ERR "vfs_fsync_error"
#define NETDATA_VFS_OPEN "vfs_open"
#define NETDATA_VFS_OPEN_ERR "vfs_open_error"
#define NETDATA_VFS_CREATE "vfs_create"
#define NETDATA_VFS_CREATE_ERR "vfs_create_error"
// Charts created on Apps submenu
#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
#define NETDATA_SYSCALL_APPS_VFS_FSYNC "vfs_fsync"
#define NETDATA_SYSCALL_APPS_VFS_OPEN "vfs_open"
#define NETDATA_SYSCALL_APPS_VFS_CREATE "vfs_create"
#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
#define NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR "vfs_fsync_error"
#define NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR "vfs_open_error"
#define NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR "vfs_create_error"
// Group used on Dashboard
#define NETDATA_VFS_GROUP "VFS (eBPF)"
typedef struct netdata_publish_vfs {
uint64_t pid_tgid;
uint32_t pid;
uint32_t pad;
// Call counters
uint32_t write_call;
uint32_t writev_call;
uint32_t read_call;
uint32_t readv_call;
uint32_t unlink_call;
uint32_t fsync_call;
uint32_t open_call;
uint32_t create_call;
// Byte accumulators
uint64_t write_bytes;
uint64_t writev_bytes;
uint64_t readv_bytes;
uint64_t read_bytes;
// Error counters
uint32_t write_err;
uint32_t writev_err;
uint32_t read_err;
uint32_t readv_err;
uint32_t unlink_err;
uint32_t fsync_err;
uint32_t open_err;
uint32_t create_err;
} netdata_publish_vfs_t;
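/*
 * Hypothetical helper (illustration only, not defined in this patch): the
 * byte accumulators above make per-PID totals straightforward, e.g.:
 *
 *   static inline uint64_t vfs_total_bytes(const netdata_publish_vfs_t *v)
 *   {
 *       return v->write_bytes + v->writev_bytes +
 *              v->read_bytes  + v->readv_bytes;
 *   }
 */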
enum netdata_publish_vfs_list {
NETDATA_KEY_PUBLISH_VFS_UNLINK,
NETDATA_KEY_PUBLISH_VFS_READ,
NETDATA_KEY_PUBLISH_VFS_WRITE,
NETDATA_KEY_PUBLISH_VFS_FSYNC,
NETDATA_KEY_PUBLISH_VFS_OPEN,
NETDATA_KEY_PUBLISH_VFS_CREATE,
NETDATA_KEY_PUBLISH_VFS_END
};
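/*
 * NOTE: these values index vfs_aggregated_data and vfs_publish_aggregated in
 * ebpf_vfs.c; NETDATA_KEY_PUBLISH_VFS_END doubles as the element count passed
 * to ebpf_global_labels().
 */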
enum vfs_counters {
NETDATA_KEY_CALLS_VFS_WRITE,
NETDATA_KEY_ERROR_VFS_WRITE,
NETDATA_KEY_BYTES_VFS_WRITE,
NETDATA_KEY_CALLS_VFS_WRITEV,
NETDATA_KEY_ERROR_VFS_WRITEV,
NETDATA_KEY_BYTES_VFS_WRITEV,
NETDATA_KEY_CALLS_VFS_READ,
NETDATA_KEY_ERROR_VFS_READ,
NETDATA_KEY_BYTES_VFS_READ,
NETDATA_KEY_CALLS_VFS_READV,
NETDATA_KEY_ERROR_VFS_READV,
NETDATA_KEY_BYTES_VFS_READV,
NETDATA_KEY_CALLS_VFS_UNLINK,
NETDATA_KEY_ERROR_VFS_UNLINK,
NETDATA_KEY_CALLS_VFS_FSYNC,
NETDATA_KEY_ERROR_VFS_FSYNC,
NETDATA_KEY_CALLS_VFS_OPEN,
NETDATA_KEY_ERROR_VFS_OPEN,
NETDATA_KEY_CALLS_VFS_CREATE,
NETDATA_KEY_ERROR_VFS_CREATE,
// Keep this as last and don't skip numbers as it is used as element counter
NETDATA_VFS_COUNTER
};
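/*
 * Reading sketch (assumption): each key above addresses a per-CPU slot in the
 * kernel-side table, so user space sums across CPUs when collecting a counter:
 *
 *   uint64_t total = 0;
 *   if (!bpf_map_lookup_elem(fd, &key, vfs_hash_values))
 *       for (int i = 0; i < ebpf_nprocs; i++)
 *           total += vfs_hash_values[i];
 */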
enum netdata_vfs_tables {
NETDATA_VFS_PID,
NETDATA_VFS_ALL
};
extern netdata_publish_vfs_t **vfs_pid;
extern void *ebpf_vfs_thread(void *ptr);
extern void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr);
extern void clean_vfs_pid_structures();
extern struct config vfs_config;
#endif /* NETDATA_EBPF_VFS_H */

View File

@@ -1,3 +1,3 @@
6102337e8d38c4902c02371b44e962b65d9d7f4e793fc0e093ec6352e3cf8b14 netdata-kernel-collector-glibc-v0.6.5.tar.xz
12e95abfe9173566d20467b5946e5850c830533bc0ab46c0f95470c95e7ccc19 netdata-kernel-collector-musl-v0.6.5.tar.xz
d26a976d684cc4635a530d17cf0caaa70223a80d795339923f8cc6ba55413741 netdata-kernel-collector-static-v0.6.5.tar.xz
49c50bdca3389a7a5df3c2753d9ed790983fae1350e1ee95bdaa5bc36e8ed398 netdata-kernel-collector-glibc-v0.6.6.tar.xz
3cfe2ab7ebec5e508a4e769d79e97a8bdfe03e1d0f2147320643dcae2a3b525a netdata-kernel-collector-musl-v0.6.6.tar.xz
66c04aa186540dc27317265bad5ee9ce4eec962c176d3cb04f4d56032171e566 netdata-kernel-collector-static-v0.6.6.tar.xz

View File

@@ -1 +1 @@
v0.6.5
v0.6.6

View File

@@ -3413,6 +3413,53 @@ netdataDashboard.context = {
info: 'Size of metric samples written to disk.'
},
// ------------------------------------------------------------------------
// Filesystem
'filesystem.vfs_deleted_objects': {
title : 'VFS remove',
info: 'Calls to the function <code>vfs_unlink</code>. This chart may not show all events that remove files from the file system, because file systems can implement their own functions to remove files.'
},
'filesystem.vfs_io': {
title : 'VFS IO',
info: 'Successful or failed calls to the functions <code>vfs_read</code> and <code>vfs_write</code>. This chart may not show all file system events, because some file systems use other functions to store data on disk.'
},
'filesystem.vfs_io_bytes': {
title : 'VFS IO bytes',
info: 'Total number of bytes successfully read or written using the functions <code>vfs_read</code> and <code>vfs_write</code>.'
},
'filesystem.vfs_io_error': {
title : 'VFS IO error',
info: 'Failed calls to the functions <code>vfs_read</code> and <code>vfs_write</code>.'
},
'filesystem.vfs_fsync': {
info: 'Successful or failed calls to the function <code>vfs_fsync</code>.'
},
'filesystem.vfs_fsync_error': {
info: 'Failed calls to the function <code>vfs_fsync</code>.'
},
'filesystem.vfs_open': {
info: 'Successful or failed calls to the function <code>vfs_open</code>.'
},
'filesystem.vfs_open_error': {
info: 'Failed calls to the function <code>vfs_open</code>.'
},
'filesystem.vfs_create': {
info: 'Successful or failed calls to the function <code>vfs_create</code>.'
},
'filesystem.vfs_create_error': {
info: 'Failed calls to the function <code>vfs_create</code>.'
},
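// NOTE: the chart ids above must match the NETDATA_VFS_* names defined in
// collectors/ebpf.plugin/ebpf_vfs.h (e.g. NETDATA_VFS_FILE_CLEAN_COUNT is
// "vfs_deleted_objects").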
// ------------------------------------------------------------------------
// eBPF
@@ -3464,26 +3511,6 @@ netdataDashboard.context = {
' <a href="https://www.man7.org/linux/man-pages/man2/close.2.html" target="_blank">close(2)</a>. '
},
'ebpf.deleted_objects': {
title : 'VFS remove',
info: 'This chart does not show all events that remove files from the file system, because file systems can create their own functions to remove files, it shows calls for the function <a href="https://www.kernel.org/doc/htmldocs/filesystems/API-vfs-unlink.html" target="_blank">vfs_unlink</a>. '
},
'ebpf.io': {
title : 'VFS IO',
info: 'Successful or failed calls to functions <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a> and <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>. This chart may not show all file system events if it uses other functions to store data on disk.'
},
'ebpf.io_bytes': {
title : 'VFS bytes written',
info: 'Total of bytes read or written with success using the functions <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a> and <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>.'
},
'ebpf.io_error': {
title : 'VFS IO error',
info: 'Failed calls to functions <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a> and <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>.'
},
'ebpf.process_thread': {
title : 'Task creation',
info: 'Number of times that either <a href="https://www.ece.uic.edu/~yshi1/linux/lkse/node4.html#SECTION00421000000000000000" target="_blank">do_fork</a>, or <code>kernel_clone</code> if you are running kernel newer than 5.9.16, is called to create a new task, which is the common name used to define process and tasks inside the kernel. Netdata identifies the threads by counting the number of calls for <a href="https://linux.die.net/man/2/clone" target="_blank">sys_clone</a> that has the flag <code>CLONE_THREAD</code> set.'