4
#include <bpf/bpf_helpers.h>
5
#include <bpf/bpf_core_read.h>
6
#include <bpf/bpf_tracing.h>
7
#include <bpf/bpf_endian.h>
9
#include <gadget/mntns_filter.h>
11
#define MAX_ENTRIES 10240
14
const volatile pid_t target_pid = 0;
15
const volatile bool ignore_errors = true;
16
const volatile bool filter_by_port = false;
19
const struct bind_event *unusedbindevent __attribute__((unused));
22
__uint(type, BPF_MAP_TYPE_HASH);
23
__uint(max_entries, MAX_ENTRIES);
25
__type(value, struct socket *);
26
} sockets SEC(".maps");
29
__uint(type, BPF_MAP_TYPE_HASH);
30
__uint(max_entries, MAX_PORTS);
36
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
37
__uint(key_size, sizeof(__u32));
38
__uint(value_size, sizeof(__u32));
41
static int probe_entry(struct pt_regs *ctx, struct socket *socket)
43
__u64 pid_tgid = bpf_get_current_pid_tgid();
44
__u32 pid = pid_tgid >> 32;
45
__u32 tid = (__u32)pid_tgid;
47
if (target_pid && target_pid != pid)
50
bpf_map_update_elem(&sockets, &tid, &socket, BPF_ANY);
54
static int probe_exit(struct pt_regs *ctx, short ver)
56
__u64 pid_tgid = bpf_get_current_pid_tgid();
57
__u32 pid = pid_tgid >> 32;
58
__u32 tid = (__u32)pid_tgid;
59
__u64 uid_gid = bpf_get_current_uid_gid();
61
struct socket **socketp, *socket;
62
struct inet_sock *inet_sock;
64
union bind_options opts;
65
struct bind_event event = {};
66
__u16 sport = 0, *port;
69
socketp = bpf_map_lookup_elem(&sockets, &tid);
73
mntns_id = gadget_get_mntns_id();
75
if (gadget_should_discard_mntns_id(mntns_id))
78
ret = PT_REGS_RC(ctx);
79
if (ignore_errors && ret != 0)
83
sock = BPF_CORE_READ(socket, sk);
84
inet_sock = (struct inet_sock *)sock;
86
sport = bpf_ntohs(BPF_CORE_READ(inet_sock, inet_sport));
87
port = bpf_map_lookup_elem(&ports, &sport);
88
if (filter_by_port && !port)
91
opts.fields.freebind =
92
BPF_CORE_READ_BITFIELD_PROBED(inet_sock, freebind);
93
opts.fields.transparent =
94
BPF_CORE_READ_BITFIELD_PROBED(inet_sock, transparent);
95
opts.fields.bind_address_no_port =
96
BPF_CORE_READ_BITFIELD_PROBED(inet_sock, bind_address_no_port);
97
opts.fields.reuseaddress =
98
BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuse);
99
opts.fields.reuseport =
100
BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuseport);
101
event.opts = opts.data;
102
event.ts_us = bpf_ktime_get_ns() / 1000;
105
event.bound_dev_if = BPF_CORE_READ(sock, __sk_common.skc_bound_dev_if);
107
event.proto = BPF_CORE_READ_BITFIELD_PROBED(sock, sk_protocol);
108
event.mount_ns_id = mntns_id;
109
event.timestamp = bpf_ktime_get_boot_ns();
110
event.uid = (u32)uid_gid;
111
event.gid = (u32)(uid_gid >> 32);
112
bpf_get_current_comm(&event.task, sizeof(event.task));
115
bpf_probe_read_kernel(&event.addr, sizeof(event.addr),
116
&inet_sock->inet_saddr);
119
bpf_probe_read_kernel(
120
&event.addr, sizeof(event.addr),
121
sock->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
123
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event,
127
bpf_map_delete_elem(&sockets, &tid);
131
/* bind() entry for IPv4 sockets: record the socket for the return probe. */
SEC("kprobe/inet_bind")
int BPF_KPROBE(ig_bind_ipv4_e, struct socket *socket)
{
	return probe_entry(ctx, socket);
}
/* bind() return for IPv4 sockets: emit the event (ver = 4). */
SEC("kretprobe/inet_bind")
int BPF_KRETPROBE(ig_bind_ipv4_x)
{
	return probe_exit(ctx, 4);
}
/* bind() entry for IPv6 sockets: record the socket for the return probe. */
SEC("kprobe/inet6_bind")
int BPF_KPROBE(ig_bind_ipv6_e, struct socket *socket)
{
	return probe_entry(ctx, socket);
}
/* bind() return for IPv6 sockets: emit the event (ver = 6). */
SEC("kretprobe/inet6_bind")
int BPF_KRETPROBE(ig_bind_ipv6_x)
{
	return probe_exit(ctx, 6);
}
/* License declaration required by the kernel to grant access to GPL-only BPF helpers. */
char LICENSE[] SEC("license") = "Dual BSD/GPL";