/* user-space loader for tcpconnlat; tcpconnlat.h defines struct event shared
 * with the BPF program, tcpconnlat.skel.h is the bpftool-generated skeleton,
 * and trace_helpers.h (from libbpf-tools) provides fentry_can_attach() */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <argp.h>
#include <arpa/inet.h>
#include <bpf/libbpf.h>
#include "tcpconnlat.h"
#include "tcpconnlat.skel.h"
#include "trace_helpers.h"

#define PERF_BUFFER_PAGES 16
#define PERF_POLL_TIMEOUT_MS 100

/* set by the SIGINT handler so the poll loop can exit cleanly */
static volatile sig_atomic_t exiting = 0;

/* command-line options, filled in by parse_arg() */
static struct env {
	__u64 min_us;
	pid_t pid;
	bool timestamp;
	bool lport;
	bool verbose;
} env;
const char *argp_program_version = "tcpconnlat 0.1";
const char *argp_program_bug_address =
	"https://github.com/iovisor/bcc/tree/master/libbpf-tools";
const char argp_program_doc[] =
"\nTrace TCP connects and show connection latency.\n"
"\n"
"USAGE: tcpconnlat [--help] [-t] [-p PID] [-L] [MIN_MS]\n"
"\n"
"EXAMPLES:\n"
"    tcpconnlat           # trace all TCP connects and show latency\n"
"    tcpconnlat 1         # trace connection latency slower than 1 ms\n"
"    tcpconnlat 0.1       # trace connection latency slower than 100 us\n"
"    tcpconnlat -t        # include timestamps on output\n"
"    tcpconnlat -p 185    # trace PID 185 only\n"
"    tcpconnlat -L        # include LPORT while printing outputs\n";

static const struct argp_option opts[] = {
{ "timestamp", 't', NULL, 0, "Include timestamp on output" },
{ "pid", 'p', "PID", 0, "Trace this PID only" },
{ "lport", 'L', NULL, 0, "Include LPORT on output" },
{ "verbose", 'v', NULL, 0, "Verbose debug output" },
{ NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" },
{},
};
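
/* fill struct env from the command-line options and the optional minimum
 * latency argument (body elided in this listing) */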
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	...
}
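
/* route libbpf's log messages to stderr; debug-level output is normally
 * printed only when -v/--verbose is set (body elided in this listing) */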
static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
{
	...
}
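
/* SIGINT handler: ask the main poll loop to stop */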
static void sig_int(int signo)
{
	exiting = 1;
}
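
/* perf-buffer sample callback: print one connection-latency event */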
void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
	const struct event *e = data;
	char src[INET6_ADDRSTRLEN];
	char dst[INET6_ADDRSTRLEN];
	union {
		struct in_addr x4;
		struct in6_addr x6;
	} s, d;
	static __u64 start_ts;

	if (env.timestamp) {
		if (start_ts == 0)
			start_ts = e->ts_us;
		printf("%-9.3f ", (e->ts_us - start_ts) / 1000000.0);
	}

	/* the event carries either an IPv4 or an IPv6 address pair */
	if (e->af == AF_INET) {
		s.x4.s_addr = e->saddr_v4;
		d.x4.s_addr = e->daddr_v4;
	} else if (e->af == AF_INET6) {
		memcpy(&s.x6.s6_addr, e->saddr_v6, sizeof(s.x6.s6_addr));
		memcpy(&d.x6.s6_addr, e->daddr_v6, sizeof(d.x6.s6_addr));
	} else {
		fprintf(stderr, "broken event: event->af=%d\n", e->af);
		return;
	}

	if (env.lport) {
		printf("%-6d %-12.12s %-2d %-16s %-6d %-16s %-5d %.2f\n", e->tgid, e->comm,
		       e->af == AF_INET ? 4 : 6, inet_ntop(e->af, &s, src, sizeof(src)), e->lport,
		       inet_ntop(e->af, &d, dst, sizeof(dst)), ntohs(e->dport),
		       e->delta_us / 1000.0);
	} else {
		printf("%-6d %-12.12s %-2d %-16s %-16s %-5d %.2f\n", e->tgid, e->comm,
		       e->af == AF_INET ? 4 : 6, inet_ntop(e->af, &s, src, sizeof(src)),
		       inet_ntop(e->af, &d, dst, sizeof(dst)), ntohs(e->dport),
		       e->delta_us / 1000.0);
	}
}
}
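
/* perf-buffer lost callback: the kernel overwrote records before user space
 * could read them */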
void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
{
fprintf(stderr, "lost %llu events on CPU #%d\n", lost_cnt, cpu);
}
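
/* open the skeleton, pick fentry or kprobe attach, then poll for events */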
int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct perf_buffer *pb = NULL;
	struct tcpconnlat_bpf *obj;
	int err;

	err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
	if (err)
		return err;

	libbpf_set_print(libbpf_print_fn);

	obj = tcpconnlat_bpf__open();
	if (!obj) {
		fprintf(stderr, "failed to open BPF object\n");
		return 1;
	}

	/* initialize global data (filtering options) */
	obj->rodata->targ_min_us = env.min_us;
	obj->rodata->targ_tgid = env.pid;
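
	/* fentry/fexit attach is faster but needs kernel support; probe once and
	 * disable whichever variant (fentry or kprobe) will not be used, so only
	 * one set of programs is loaded */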
	if (fentry_can_attach("tcp_v4_connect", NULL)) {
		bpf_program__set_attach_target(obj->progs.fentry_tcp_v4_connect, 0, "tcp_v4_connect");
		bpf_program__set_attach_target(obj->progs.fentry_tcp_v6_connect, 0, "tcp_v6_connect");
		bpf_program__set_attach_target(obj->progs.fentry_tcp_rcv_state_process, 0, "tcp_rcv_state_process");
		bpf_program__set_autoload(obj->progs.tcp_v4_connect, false);
		bpf_program__set_autoload(obj->progs.tcp_v6_connect, false);
		bpf_program__set_autoload(obj->progs.tcp_rcv_state_process, false);
	} else {
		bpf_program__set_autoload(obj->progs.fentry_tcp_v4_connect, false);
		bpf_program__set_autoload(obj->progs.fentry_tcp_v6_connect, false);
		bpf_program__set_autoload(obj->progs.fentry_tcp_rcv_state_process, false);
	}
	/* load and verify everything that is still marked for autoload */
	err = tcpconnlat_bpf__load(obj);
	if (err) {
		fprintf(stderr, "failed to load BPF object: %d\n", err);
		goto cleanup;
	}

	err = tcpconnlat_bpf__attach(obj);
	if (err) {
		fprintf(stderr, "failed to attach BPF programs: %s\n", strerror(-err));
		goto cleanup;
	}

	/* stream events from the BPF programs through a perf buffer */
	pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES,
			      handle_event, handle_lost_events, NULL, NULL);
	if (!pb) {
		err = -errno;	/* record the failure so main() returns non-zero */
		fprintf(stderr, "failed to open perf buffer: %d\n", errno);
		goto cleanup;
	}
	/* print header */
	if (env.timestamp)
		printf("%-9s ", "TIME(s)");
	if (env.lport) {
		printf("%-6s %-12s %-2s %-16s %-6s %-16s %-5s %s\n",
		       "PID", "COMM", "IP", "SADDR", "LPORT", "DADDR", "DPORT", "LAT(ms)");
	} else {
		printf("%-6s %-12s %-2s %-16s %-16s %-5s %s\n",
		       "PID", "COMM", "IP", "SADDR", "DADDR", "DPORT", "LAT(ms)");
	}

	/* stop cleanly on Ctrl-C instead of dying mid-line */
	if (signal(SIGINT, sig_int) == SIG_ERR) {
		fprintf(stderr, "can't set signal handler: %s\n", strerror(errno));
		err = 1;
		goto cleanup;
	}
	/* main: poll */
	while (!exiting) {
		err = perf_buffer__poll(pb, PERF_POLL_TIMEOUT_MS);
		if (err < 0 && err != -EINTR) {
			fprintf(stderr, "error polling perf buffer: %s\n", strerror(-err));
			goto cleanup;
		}
		/* reset err to return 0 if exiting */
		err = 0;
	}

cleanup:
	perf_buffer__free(pb);
	tcpconnlat_bpf__destroy(obj);

	return err != 0;
}