diff --git a/charts/common/templates/role.yaml b/charts/common/templates/role.yaml index 92b09b4..01dda2d 100644 --- a/charts/common/templates/role.yaml +++ b/charts/common/templates/role.yaml @@ -15,5 +15,5 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "patch"] - resourceNames: ["{{ include "common.resourceNamePrefix" . }}-discovery-state-tracker"] + resourceNames: ["{{ include "common.resourceNamePrefix" . }}-discovery-state-tracker","{{ include "common.resourceNamePrefix" . }}-pod-mapping-tracker"] {{- end }} \ No newline at end of file diff --git a/charts/logan/templates/discovery-cronjob.yaml b/charts/logan/templates/discovery-cronjob.yaml index 85969fc..de1188f 100644 --- a/charts/logan/templates/discovery-cronjob.yaml +++ b/charts/logan/templates/discovery-cronjob.yaml @@ -4,150 +4,148 @@ {{- $kubernetesClusterId := (include "logan.kubernetesClusterId" .) }} {{- $kubernetesNamespace := (include "logan.namespace" .) }} {{- $serviceAccount := (include "logan.serviceAccount" .) 
}} -apiVersion: batch/v1 -kind: CronJob +apiVersion: apps/v1 +kind: Deployment metadata: name: {{ .Values.global.resourceNamePrefix }}-discovery namespace: {{ $kubernetesNamespace }} spec: - schedule: {{ .Values.k8sDiscovery.objects.cronSchedule | quote }} - startingDeadlineSeconds: 120 - concurrencyPolicy: Replace - successfulJobsHistoryLimit: {{ .Values.k8sDiscovery.objects.successfulJobsHistoryLimit }} - failedJobsHistoryLimit: {{ .Values.k8sDiscovery.objects.failedJobsHistoryLimit }} - jobTemplate: + replicas: 1 + selector: + matchLabels: + app: {{ .Values.global.resourceNamePrefix }}-discovery + template: + metadata: + labels: + app: {{ .Values.global.resourceNamePrefix }}-discovery spec: - backoffLimit: {{ .Values.k8sDiscovery.objects.backoffLimit }} - template: - spec: - restartPolicy: {{ .Values.k8sDiscovery.objects.restartPolicy }} - serviceAccountName: {{ $serviceAccount }} - {{- if .Values.image.imagePullSecrets }} - imagePullSecrets: - - name: {{ .Values.image.imagePullSecrets }} + serviceAccountName: {{ $serviceAccount }} + {{- if .Values.image.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.image.imagePullSecrets }} + {{- end }} + containers: + - name: k8-discovery-job + image: {{ .Values.image.url }} + {{- if eq $authtype "config" }} + volumeMounts: + - name: ociconfigdir + mountPath: {{ .Values.oci.path }} + readOnly: true + {{- end }} + command: + {{- /* object discovery script */}} + - bundle + - exec + - oci-loganalytics-kubernetes-discovery + {{- /* mandatory inputs */}} + - --kubernetes_cluster_id + - {{ $kubernetesClusterId }} + - --kubernetes_cluster_name + - {{ $kubernetesClusterName }} + - --kubernetes_cluster_namespace + - {{ $kubernetesNamespace }} + - --oci_la_namespace + {{- if .Values.ociLANamespace }} + - {{ .Values.ociLANamespace }} + {{- else }} + {{- required "ociLANamespace is required" .Values.ociLANamespace }} {{- end }} - containers: - - name: k8-discovery-job - image: {{ .Values.image.url }} - {{- if eq $authtype 
"config" }} - volumeMounts: - - name: ociconfigdir - mountPath: {{ .Values.oci.path }} - readOnly: true - {{- end }} - command: - {{- /* object discovery script */}} - - bundle - - exec - - oci-loganalytics-kubernetes-discovery - {{- /* mandatory inputs */}} - - --kubernetes_cluster_id - - {{ $kubernetesClusterId }} - - --kubernetes_cluster_name - - {{ $kubernetesClusterName }} - - --kubernetes_cluster_namespace - - {{ $kubernetesNamespace }} - - --oci_la_namespace - {{- if .Values.ociLANamespace }} - - {{ .Values.ociLANamespace }} - {{- else }} - {{- required "ociLANamespace is required" .Values.ociLANamespace }} - {{- end }} - - --oci_la_log_group_id - {{- if .Values.k8sDiscovery.objects.ociLALogGroupID }} - - {{ .Values.k8sDiscovery.objects.ociLALogGroupID }} - {{- else if .Values.ociLALogGroupID }} - - {{ .Values.ociLALogGroupID }} - {{- else }} - {{- required "ociLALogGroupID is required" .Values.ociLALogGroupID }} - {{- end }} - {{- /* mandatory inputs when authtype is set as config */}} - {{- if eq .Values.authtype "config" }} - {{- if and .Values.oci.path .Values.oci.file }} - - --config_file_location - - {{ .Values.oci.path -}}/{{ .Values.oci.file }} - {{- else }} - {{- required "{{ .Values.oci.path -}}/{{ .Values.oci.file }} is required" .Values.oci.path }} - {{- end }} - {{- end }} - {{- /* optional discovery job configuration */}} - {{- if .Values.ociLAClusterEntityID }} - - --oci_la_cluster_entity_id - - {{ .Values.ociLAClusterEntityID }} - {{- end }} - {{- if $resourceNamePrefix }} - - --kubernetes_resourcename_prefix - - {{ $resourceNamePrefix }} - {{- end }} - {{- if .Values.ociDomain }} - - --oci_domain - - {{ .Values.ociDomain }} - {{- end }} - {{- if .Values.k8sDiscovery.objects.discoveryMode }} - - --discovery - - {{ .Values.k8sDiscovery.objects.discoveryMode }} - {{- end }} - {{- if .Values.k8sDiscovery.objects.log_format }} - - --log_format - - {{ .Values.k8sDiscovery.objects.log_format }} - {{- end }} - {{- if 
.Values.k8sDiscovery.objects.log_level }} - - --log_level - - {{ .Values.k8sDiscovery.objects.log_level }} - {{- end }} - {{- if and .Values.k8sDiscovery.objects.enable_threading .Values.k8sDiscovery.objects.thread_count }} - - --enable_threading - - --thread_count - - {{ .Values.k8sDiscovery.objects.thread_count | quote }} - {{- end }} - {{- if .Values.k8sDiscovery.objects.chunk_limit }} - - --chunk_limit - - {{ .Values.k8sDiscovery.objects.chunk_limit | quote }} - {{- end }} - {{- if .Values.k8sDiscovery.objects.collect_warning_events_only }} - - --collect_warning_events_only - {{- end }} - {{- /* optional kubernetes cluster configuration */}} - {{- if .Values.k8sDiscovery.kubeClientOptions.kubernetes_url }} - - --kubernetes_url - - {{ .Values.k8sDiscovery.kubeClientOptions.kubernetes_url }} - {{- end }} - {{- if .Values.k8sDiscovery.kubeClientOptions.client_cert }} - - --client_cert - - {{ .Values.k8sDiscovery.kubeClientOptions.client_cert }} - {{- end }} - {{- if .Values.k8sDiscovery.kubeClientOptions.client_key }} - - --client_key - - {{ .Values.k8sDiscovery.kubeClientOptions.client_key }} - {{- end }} - {{- if .Values.k8sDiscovery.kubeClientOptions.ca_file }} - - --ca_file - - {{ .Values.k8sDiscovery.kubeClientOptions.ca_file }} - {{- end }} - {{- if eq .Values.k8sDiscovery.kubeClientOptions.verify_ssl true }} - - --verify_ssl - {{- end }} - {{- if .Values.k8sDiscovery.kubeClientOptions.bearer_token_file }} - - --bearer_token_file - - {{ .Values.k8sDiscovery.kubeClientOptions.bearer_token_file }} - {{- end }} - {{- if .Values.k8sDiscovery.kubeClientOptions.secret_dir }} - - --secret_dir - - {{ .Values.k8sDiscovery.kubeClientOptions.secret_dir }} - {{- end }} - {{- /* developer options - optional */}} - {{- if .Values.k8sDiscovery.oci_la_endpoint }} - - --endpoint - - {{ .Values.k8sDiscovery.oci_la_endpoint }} - {{- else if .Values.ociLAEndpoint }} - - --endpoint - - {{ .Values.ociLAEndpoint }} - {{- end }} - {{- if eq $authtype "config" }} - volumes: - - 
name: ociconfigdir - projected: - sources: - - secret: - name: {{ $resourceNamePrefix }}-oci-config + - --oci_la_log_group_id + {{- if .Values.k8sDiscovery.objects.ociLALogGroupID }} + - {{ .Values.k8sDiscovery.objects.ociLALogGroupID }} + {{- else if .Values.ociLALogGroupID }} + - {{ .Values.ociLALogGroupID }} + {{- else }} + {{- required "ociLALogGroupID is required" .Values.ociLALogGroupID }} {{- end }} + {{- /* mandatory inputs when authtype is set as config */}} + {{- if eq .Values.authtype "config" }} + {{- if and .Values.oci.path .Values.oci.file }} + - --config_file_location + - {{ .Values.oci.path -}}/{{ .Values.oci.file }} + {{- else }} + {{- required "{{ .Values.oci.path -}}/{{ .Values.oci.file }} is required" .Values.oci.path }} + {{- end }} + {{- end }} + {{- /* optional discovery job configuration */}} + {{- if .Values.ociLAClusterEntityID }} + - --oci_la_cluster_entity_id + - {{ .Values.ociLAClusterEntityID }} + {{- end }} + {{- if $resourceNamePrefix }} + - --kubernetes_resourcename_prefix + - {{ $resourceNamePrefix }} + {{- end }} + {{- if .Values.ociDomain }} + - --oci_domain + - {{ .Values.ociDomain }} + {{- end }} + {{- if .Values.k8sDiscovery.objects.discoveryMode }} + - --discovery + - {{ .Values.k8sDiscovery.objects.discoveryMode }} + {{- end }} + {{- if .Values.k8sDiscovery.objects.log_format }} + - --log_format + - {{ .Values.k8sDiscovery.objects.log_format }} + {{- end }} + {{- if .Values.k8sDiscovery.objects.log_level }} + - --log_level + - {{ .Values.k8sDiscovery.objects.log_level }} + {{- end }} + {{- if and .Values.k8sDiscovery.objects.enable_threading .Values.k8sDiscovery.objects.thread_count }} + - --enable_threading + - --thread_count + - {{ .Values.k8sDiscovery.objects.thread_count | quote }} + {{- end }} + {{- if .Values.k8sDiscovery.objects.chunk_limit }} + - --chunk_limit + - {{ .Values.k8sDiscovery.objects.chunk_limit | quote }} + {{- end }} + {{- if .Values.k8sDiscovery.objects.collect_warning_events_only }} + - 
--collect_warning_events_only + {{- end }} + {{- /* optional kubernetes cluster configuration */}} + {{- if .Values.k8sDiscovery.kubeClientOptions.kubernetes_url }} + - --kubernetes_url + - {{ .Values.k8sDiscovery.kubeClientOptions.kubernetes_url }} + {{- end }} + {{- if .Values.k8sDiscovery.kubeClientOptions.client_cert }} + - --client_cert + - {{ .Values.k8sDiscovery.kubeClientOptions.client_cert }} + {{- end }} + {{- if .Values.k8sDiscovery.kubeClientOptions.client_key }} + - --client_key + - {{ .Values.k8sDiscovery.kubeClientOptions.client_key }} + {{- end }} + {{- if .Values.k8sDiscovery.kubeClientOptions.ca_file }} + - --ca_file + - {{ .Values.k8sDiscovery.kubeClientOptions.ca_file }} + {{- end }} + {{- if eq .Values.k8sDiscovery.kubeClientOptions.verify_ssl true }} + - --verify_ssl + {{- end }} + {{- if .Values.k8sDiscovery.kubeClientOptions.bearer_token_file }} + - --bearer_token_file + - {{ .Values.k8sDiscovery.kubeClientOptions.bearer_token_file }} + {{- end }} + {{- if .Values.k8sDiscovery.kubeClientOptions.secret_dir }} + - --secret_dir + - {{ .Values.k8sDiscovery.kubeClientOptions.secret_dir }} + {{- end }} + {{- /* developer options - optional */}} + {{- if .Values.k8sDiscovery.oci_la_endpoint }} + - --endpoint + - {{ .Values.k8sDiscovery.oci_la_endpoint }} + {{- else if .Values.ociLAEndpoint }} + - --endpoint + - {{ .Values.ociLAEndpoint }} + {{- end }} + {{- if eq $authtype "config" }} + volumes: + - name: ociconfigdir + projected: + sources: + - secret: + name: {{ $resourceNamePrefix }}-oci-config + {{- end }} \ No newline at end of file diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile b/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile index b8a6ce4..0b0fc67 100644 --- a/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile @@ -18,6 +18,8 @@ ENV GEM_HOME /fluentd/vendor/bundle/ruby/3.3 ENV FLUENTD_DISABLE_BUNDLER_INJECTION 1 COPY Gemfile* /fluentd/ +COPY 
tcpconnect.* /fluentd/ +COPY tcp-monitor /usr/local/bin/ # Install ruby, ruby-libs along with rubygems and bundler. RUN microdnf -y module enable ruby:3.3 \ @@ -27,7 +29,7 @@ RUN microdnf -y module enable ruby:3.3 \ && microdnf -y install --setopt=install_weak_deps=0 --nodocs rubygems \ && gem install bundler -v 2.5.16 \ # Install development dependent packages for gems native installation - && microdnf --enablerepo ol8_codeready_builder -y install --nodocs gcc make redhat-rpm-config openssl ruby-devel gcc-c++ libtool libffi-devel bzip2 git libyaml-devel \ + && microdnf --enablerepo ol8_codeready_builder -y install --nodocs gcc make redhat-rpm-config openssl ruby-devel gcc-c++ libtool libffi-devel bzip2 git libyaml-devel zlib elfutils-libelf-devel which clang llvm \ # Install Fluentd, it's dependencies along with other run time dependencies for OCI Logging Analytics Solution && bundle config silence_root_warning true \ && bundle config --local path /fluentd/vendor/bundle \ @@ -38,9 +40,16 @@ RUN microdnf -y module enable ruby:3.3 \ && microdnf -y install --nodocs tini-0.19.0 \ # Install jemalloc (custom make with no docs) && cd /tmp && ls /tmp \ - && git clone -b 5.3.0 https://github.com/jemalloc/jemalloc.git && cd jemalloc/ \ + && git clone --depth 1 -b 5.3.0 https://github.com/jemalloc/jemalloc.git && cd jemalloc/ \ && ./autogen.sh && make && make install_bin install_include install_lib \ - && mv lib/libjemalloc.so.2 /usr/lib + && mv lib/libjemalloc.so.2 /usr/lib \ + # Install libbpf-tools from bcc + && cd /tmp && ls /tmp \ + && git clone -b v0.29.1 --depth 1 https://github.com/iovisor/bcc.git && cd bcc/ && git submodule update --init --recursive \ + && cd libbpf-tools/ && cp /fluentd/tcpconnect.* ./ && make \ + && mv ./tcpconnect /usr/bin/ \ + && rm /fluentd/tcpconnect.* \ + && cd /fluentd/ ## To build the final docker image @@ -62,17 +71,22 @@ RUN microdnf -y module enable ruby:3.3 \ && microdnf -y install --setopt=install_weak_deps=0 --nodocs ruby ruby-libs \ 
# Install rubygems (it's dependencies rubygem-openssl rubygem-psych), disabling week dependencies && microdnf -y install --setopt=install_weak_deps=0 --nodocs rubygems \ +# Install development packages needed for tcp_monitor +# && microdnf --enablerepo ol8_codeready_builder -y install --nodocs gcc make redhat-rpm-config openssl ruby-devel gcc-c++ libtool libffi-devel bzip2 libyaml-devel zlib elfutils-libelf-devel which clang llvm \ && gem install bundler -v 2.5.16 \ && bundle config --local path /fluentd/vendor/bundle \ # clear caches && microdnf clean all \ - && rm -rf /var/cache/dnf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem + && rm -rf /var/cache/dnf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem # Copy binaries (tini & jemallco) and rubygems bundler environment from build stage COPY --from=builder /fluentd /fluentd COPY --from=builder /usr/bin/tini /usr/bin/tini COPY --from=builder /usr/lib/libjemalloc.so.2 /usr/lib/libjemalloc.so.2 +COPY --from=builder /usr/bin/tcpconnect /usr/bin/tcpconnect +COPY --from=builder /usr/local/bin /usr/local/bin +RUN chmod +x /usr/local/bin/tcp-monitor RUN mkdir -p /fluentd/etc /fluentd/plugins \ && touch /fluentd/etc/disable.conf diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile b/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile index c7f5542..0ac30c0 100644 --- a/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile @@ -11,8 +11,9 @@ gem "fluent-plugin-concat", "~> 2.5.0" gem "fluent-plugin-rewrite-tag-filter", "~> 2.4.0" gem "fluent-plugin-parser-cri", "~> 0.1.1" gem "fluent-plugin-kubernetes_metadata_filter", "3.5.0" -gem "oci-logging-analytics-kubernetes-discovery", "1.0.2" +gem "oci-logging-analytics-kubernetes-discovery", "1.2.0.5", source: "https://artifactory.oci.oraclecorp.com:443/logan-dev-gems-local/" gem "fluent-plugin-record-modifier", "2.2.0" gem "fluent-plugin-cloudwatch-logs", "0.14.3" gem "fluent-plugin-s3", "1.7.2" gem "rexml", 
"3.4.0" +gem "tcp_monitor", "0.3.5", source: "https://artifactory.oci.oraclecorp.com:443/logan-dev-gems-local/" diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile.lock b/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile.lock index aae5d4a..635ee45 100644 --- a/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile.lock +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/Gemfile.lock @@ -1,3 +1,14 @@ +GEM + remote: https://artifactory.oci.oraclecorp.com/logan-dev-gems-local/ + specs: + oci-logging-analytics-kubernetes-discovery (1.2.0.5) + concurrent-ruby (~> 1.2, >= 1.2.2) + kubeclient (~> 4.9, >= 4.9.3) + oci (~> 2.21) + rubyzip (~> 2.3.2) + rufus-scheduler (~> 3.7, >= 3.9.2) + yajl-ruby (~> 1.0) + GEM remote: https://rubygems.org/ specs: @@ -32,6 +43,8 @@ GEM csv (3.3.0) domain_name (0.6.20240107) drb (2.2.1) + et-orbi (1.2.11) + tzinfo event_stream_parser (1.0.0) ffi (1.15.5) ffi-compiler (1.0.1) @@ -81,6 +94,9 @@ GEM tzinfo-data (~> 1.0) webrick (~> 1.4) yajl-ruby (~> 1.0) + fugit (1.11.1) + et-orbi (~> 1, >= 1.2.11) + raabro (~> 1.4) http (5.2.0) addressable (~> 2.8) base64 (~> 0.1) @@ -121,12 +137,6 @@ GEM json (>= 1.4.6, < 3.0.0) jwt (~> 2.1) psych (~> 5.0, >= 5.0.1) - oci-logging-analytics-kubernetes-discovery (1.0.2) - concurrent-ruby (~> 1.2, >= 1.2.2) - kubeclient (~> 4.9, >= 4.9.3) - oci (~> 2.20) - rubyzip (~> 2.3.2) - yajl-ruby (~> 1.0) oj (3.16.4) bigdecimal (>= 3.0) prometheus-client (4.2.3) @@ -134,6 +144,7 @@ GEM psych (5.1.2) stringio public_suffix (6.0.1) + raabro (1.4.0) rake (13.2.1) recursive-open-struct (1.2.2) rest-client (2.1.0) @@ -143,6 +154,8 @@ GEM netrc (~> 0.8) rexml (3.4.0) rubyzip (2.3.2) + rufus-scheduler (3.9.2) + fugit (~> 1.1, >= 1.11.1) serverengine (2.3.2) sigdump (~> 0.2.2) sigdump (0.2.5) @@ -156,6 +169,7 @@ GEM yajl-ruby (1.4.3) PLATFORMS + arm64-darwin-24 x86_64-linux DEPENDENCIES @@ -169,7 +183,7 @@ DEPENDENCIES fluent-plugin-s3 (= 1.7.2) fluentd (= 1.17.1) json (= 2.7.2) - 
oci-logging-analytics-kubernetes-discovery (= 1.0.2) + oci-logging-analytics-kubernetes-discovery (= 1.2.0.5)! oj (= 3.16.4) rexml (= 3.4.0) diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/tcp-monitor b/logan/docker-images/v1.0/oraclelinux/8-slim/tcp-monitor new file mode 100644 index 0000000..9b466ca --- /dev/null +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/tcp-monitor @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +ENV["BUNDLE_GEMFILE"] = "/fluentd/Gemfile" +require "bundler/setup" +load "/fluentd/vendor/bundle/ruby/3.3.0/gems/tcp_monitor-0.3.5/bin/tcp-monitor" \ No newline at end of file diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/tcpconnect.bpf.c b/logan/docker-images/v1.0/oraclelinux/8-slim/tcpconnect.bpf.c new file mode 100644 index 0000000..2fbc3c8 --- /dev/null +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/tcpconnect.bpf.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Anton Protopopov +// +// Based on tcpconnect(8) from BCC by Brendan Gregg +#include + +#include +#include +#include + +#include "maps.bpf.h" +#include "tcpconnect.h" + +const volatile int filter_ports[MAX_PORTS]; +const volatile int filter_ports_len = 0; +const volatile uid_t filter_uid = -1; +const volatile pid_t filter_pid = 0; +const volatile bool do_count = 0; +const volatile bool do_ec = 0; +const volatile bool source_port = 0; + +/* Define here, because there are conflicts with include files */ +#define AF_INET 2 +#define AF_INET6 10 + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, u32); + __type(value, struct sock *); +} sockets SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, struct ipv4_flow_key); + __type(value, u64); +} ipv4_count SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, struct ipv6_flow_key); + __type(value, u64); +} ipv6_count SEC(".maps"); + +struct 
{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u32)); +} events SEC(".maps"); + +static __always_inline bool filter_port(__u16 port) +{ + int i; + + if (filter_ports_len == 0) + return false; + + for (i = 0; i < filter_ports_len && i < MAX_PORTS; i++) { + if (port == filter_ports[i]) + return false; + } + return true; +} + +static __always_inline int +enter_tcp_connect(struct pt_regs *ctx, struct sock *sk) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + __u32 tid = pid_tgid; + __u32 uid; + + if (filter_pid && pid != filter_pid) + return 0; + + uid = bpf_get_current_uid_gid(); + if (filter_uid != (uid_t) -1 && uid != filter_uid) + return 0; + + bpf_map_update_elem(&sockets, &tid, &sk, 0); + return 0; +} + +static __always_inline void count_v4(struct sock *sk, __u16 sport, __u16 dport) +{ + struct ipv4_flow_key key = {}; + static __u64 zero; + __u64 *val; + + BPF_CORE_READ_INTO(&key.saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&key.daddr, sk, __sk_common.skc_daddr); + key.sport = sport; + key.dport = dport; + val = bpf_map_lookup_or_try_init(&ipv4_count, &key, &zero); + if (val) + __atomic_add_fetch(val, 1, __ATOMIC_RELAXED); +} + +static __always_inline void extended_count_v4(struct sock *sk, __u16 sport, __u16 dport) +{ + struct ipv4_flow_key key = {}; + static __u64 zero; + __u64 *val; + + BPF_CORE_READ_INTO(&key.saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&key.daddr, sk, __sk_common.skc_daddr); + //key.sport = sport; + key.dport = dport; + bpf_get_current_comm(&key.task, sizeof(key.task)); + val = bpf_map_lookup_or_try_init(&ipv4_count, &key, &zero); + if (val) + __atomic_add_fetch(val, 1, __ATOMIC_RELAXED); +} + +static __always_inline void count_v6(struct sock *sk, __u16 sport, __u16 dport) +{ + struct ipv6_flow_key key = {}; + static const __u64 zero; + __u64 *val; + + BPF_CORE_READ_INTO(&key.saddr, sk, + 
__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32); + BPF_CORE_READ_INTO(&key.daddr, sk, + __sk_common.skc_v6_daddr.in6_u.u6_addr32); + key.sport = sport; + key.dport = dport; + + val = bpf_map_lookup_or_try_init(&ipv6_count, &key, &zero); + if (val) + __atomic_add_fetch(val, 1, __ATOMIC_RELAXED); +} + +static __always_inline void extended_count_v6(struct sock *sk, __u16 sport, __u16 dport) +{ + struct ipv6_flow_key key = {}; + static const __u64 zero; + __u64 *val; + + BPF_CORE_READ_INTO(&key.saddr, sk, + __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32); + BPF_CORE_READ_INTO(&key.daddr, sk, + __sk_common.skc_v6_daddr.in6_u.u6_addr32); + //key.sport = sport; + key.dport = dport; + bpf_get_current_comm(&key.task, sizeof(key.task)); + + val = bpf_map_lookup_or_try_init(&ipv6_count, &key, &zero); + if (val) + __atomic_add_fetch(val, 1, __ATOMIC_RELAXED); +} + +static __always_inline void +trace_v4(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 sport, __u16 dport) +{ + struct event event = {}; + + event.af = AF_INET; + event.pid = pid; + event.uid = bpf_get_current_uid_gid(); + event.ts_us = bpf_ktime_get_ns() / 1000; + BPF_CORE_READ_INTO(&event.saddr_v4, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&event.daddr_v4, sk, __sk_common.skc_daddr); + event.sport = sport; + event.dport = dport; + bpf_get_current_comm(event.task, sizeof(event.task)); + + bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, + &event, sizeof(event)); +} + +static __always_inline void +trace_v6(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 sport, __u16 dport) +{ + struct event event = {}; + + event.af = AF_INET6; + event.pid = pid; + event.uid = bpf_get_current_uid_gid(); + event.ts_us = bpf_ktime_get_ns() / 1000; + BPF_CORE_READ_INTO(&event.saddr_v6, sk, + __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32); + BPF_CORE_READ_INTO(&event.daddr_v6, sk, + __sk_common.skc_v6_daddr.in6_u.u6_addr32); + event.sport = sport; + event.dport = dport; + bpf_get_current_comm(event.task, 
sizeof(event.task)); + + bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, + &event, sizeof(event)); +} + +static __always_inline int +exit_tcp_connect(struct pt_regs *ctx, int ret, int ip_ver) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + __u32 tid = pid_tgid; + struct sock **skpp; + struct sock *sk; + __u16 sport = 0; + __u16 dport; + + skpp = bpf_map_lookup_elem(&sockets, &tid); + if (!skpp) + return 0; + + if (ret) + goto end; + + sk = *skpp; + + if (source_port) + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + + if (filter_port(dport)) + goto end; + + if (do_ec) { + if (ip_ver == 4) + extended_count_v4(sk, sport, dport); + else + extended_count_v6(sk, sport, dport); + } else if (do_count) { + if (ip_ver == 4) + count_v4(sk, sport, dport); + else + count_v6(sk, sport, dport); + } else { + if (ip_ver == 4) + trace_v4(ctx, pid, sk, sport, dport); + else + trace_v6(ctx, pid, sk, sport, dport); + } + +end: + bpf_map_delete_elem(&sockets, &tid); + return 0; +} + +SEC("kprobe/tcp_v4_connect") +int BPF_KPROBE(tcp_v4_connect, struct sock *sk) +{ + return enter_tcp_connect(ctx, sk); +} + +SEC("kretprobe/tcp_v4_connect") +int BPF_KRETPROBE(tcp_v4_connect_ret, int ret) +{ + return exit_tcp_connect(ctx, ret, 4); +} + +SEC("kprobe/tcp_v6_connect") +int BPF_KPROBE(tcp_v6_connect, struct sock *sk) +{ + return enter_tcp_connect(ctx, sk); +} + +SEC("kretprobe/tcp_v6_connect") +int BPF_KRETPROBE(tcp_v6_connect_ret, int ret) +{ + return exit_tcp_connect(ctx, ret, 6); +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/tcpconnect.c b/logan/docker-images/v1.0/oraclelinux/8-slim/tcpconnect.c new file mode 100644 index 0000000..9c110f2 --- /dev/null +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/tcpconnect.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Anton Protopopov +// +// Based on 
tcpconnect(8) from BCC by Brendan Gregg +#include +#include +#include +#include +#include +#include +#include +#include +#include "tcpconnect.h" +#include "tcpconnect.skel.h" +#include "btf_helpers.h" +#include "trace_helpers.h" +#include "map_helpers.h" + +#define warn(...) fprintf(stderr, __VA_ARGS__) + +static volatile sig_atomic_t exiting = 0; + +const char *argp_program_version = "tcpconnect 0.1"; +const char *argp_program_bug_address = + "https://github.com/iovisor/bcc/tree/master/libbpf-tools"; +static const char argp_program_doc[] = + "\ntcpconnect: Count/Trace active tcp connections\n" + "\n" + "EXAMPLES:\n" + " tcpconnect # trace all TCP connect()s\n" + " tcpconnect -t # include timestamps\n" + " tcpconnect -p 181 # only trace PID 181\n" + " tcpconnect -P 80 # only trace port 80\n" + " tcpconnect -P 80,81 # only trace port 80 and 81\n" + " tcpconnect -U # include UID\n" + " tcpconnect -u 1000 # only trace UID 1000\n" + " tcpconnect -c # count connects per src, dest, port\n" + " tcpconnect -e # count the connects per command, src ip, dst ip/port and prints the output (space separated, without headers) to STDOUT periodically (default 60s)\n" + " tcpconnect -e -i 30 # count the connects per command, src ip, dst ip/port and prints the output (space separated, without headers) to STDOUT once in every 30s\n" + " tcpconnect --C mappath # only trace cgroups in the map\n" + " tcpconnect --M mappath # only trace mount namespaces in the map\n" + ; + +static int get_int(const char *arg, int *ret, int min, int max) +{ + char *end; + long val; + + errno = 0; + val = strtol(arg, &end, 10); + if (errno) { + warn("strtol: %s: %s\n", arg, strerror(errno)); + return -1; + } else if (end == arg || val < min || val > max) { + return -1; + } + if (ret) + *ret = val; + return 0; +} + +static int get_ints(const char *arg, int *size, int *ret, int min, int max) +{ + const char *argp = arg; + int max_size = *size; + int sz = 0; + char *end; + long val; + + while (sz < max_size) { 
+ errno = 0; + val = strtol(argp, &end, 10); + if (errno) { + warn("strtol: %s: %s\n", arg, strerror(errno)); + return -1; + } else if (end == arg || val < min || val > max) { + return -1; + } + ret[sz++] = val; + if (*end == 0) + break; + argp = end + 1; + } + + *size = sz; + return 0; +} + +static int get_uint(const char *arg, unsigned int *ret, + unsigned int min, unsigned int max) +{ + char *end; + long val; + + errno = 0; + val = strtoul(arg, &end, 10); + if (errno) { + warn("strtoul: %s: %s\n", arg, strerror(errno)); + return -1; + } else if (end == arg || val < min || val > max) { + return -1; + } + if (ret) + *ret = val; + return 0; +} + +static const struct argp_option opts[] = { + { "verbose", 'v', NULL, 0, "Verbose debug output" }, + { "timestamp", 't', NULL, 0, "Include timestamp on output" }, + { "count", 'c', NULL, 0, "Count connects per src ip and dst ip/port" }, + { "extended-count", 'e', NULL, 0, "Count the connects per command, src ip, dst ip/port and prints the output (space separated, without headers) to STDOUT periodically (default 60s)" }, + { "interval", 'i', "INTERVAL", 0, "Interval for extended-count in seconds, defaults to 60" }, + { "print-uid", 'U', NULL, 0, "Include UID on output" }, + { "pid", 'p', "PID", 0, "Process PID to trace" }, + { "uid", 'u', "UID", 0, "Process UID to trace" }, + { "source-port", 's', NULL, 0, "Consider source port when counting" }, + { "port", 'P', "PORTS", 0, + "Comma-separated list of destination ports to trace" }, + { "cgroupmap", 'C', "PATH", 0, "trace cgroups in this map" }, + { "mntnsmap", 'M', "PATH", 0, "trace mount namespaces in this map" }, + { NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" }, + {}, +}; + +static struct env { + bool verbose; + bool count; + bool ec; + int interval; + bool print_timestamp; + bool print_uid; + pid_t pid; + uid_t uid; + int nports; + int ports[MAX_PORTS]; + bool source_port; +} env = { + .uid = (uid_t) -1, + .interval = (int) 60 +}; + +static error_t parse_arg(int 
key, char *arg, struct argp_state *state) +{ + int err; + int nports; + + switch (key) { + case 'h': + argp_state_help(state, stderr, ARGP_HELP_STD_HELP); + break; + case 'v': + env.verbose = true; + break; + case 'c': + env.count = true; + break; + case 'e': + env.ec = true; + break; + case 'i': + err = get_int(arg, &env.interval, 1, INT_MAX); + if (err) { + warn("invalid Interval: %s\n", arg); + argp_usage(state); + } + break; + case 's': + env.source_port = true; + break; + case 't': + env.print_timestamp = true; + break; + case 'U': + env.print_uid = true; + break; + case 'p': + err = get_int(arg, &env.pid, 1, INT_MAX); + if (err) { + warn("invalid PID: %s\n", arg); + argp_usage(state); + } + break; + case 'u': + err = get_uint(arg, &env.uid, 0, (uid_t) -2); + if (err) { + warn("invalid UID: %s\n", arg); + argp_usage(state); + } + break; + case 'P': + nports = MAX_PORTS; + err = get_ints(arg, &nports, env.ports, 1, 65535); + if (err) { + warn("invalid PORT_LIST: %s\n", arg); + argp_usage(state); + } + env.nports = nports; + break; + case 'C': + warn("not implemented: --cgroupmap"); + break; + case 'M': + warn("not implemented: --mntnsmap"); + break; + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !env.verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static void sig_int(int signo) +{ + exiting = 1; +} + +static void print_count_ipv4(int map_fd, time_t start) +{ + static struct ipv4_flow_key keys[MAX_ENTRIES]; + __u32 value_size = sizeof(__u64); + __u32 key_size = sizeof(keys[0]); + static struct ipv4_flow_key zero; + static __u64 counts[MAX_ENTRIES]; + char s[INET_ADDRSTRLEN]; + char d[INET_ADDRSTRLEN]; + __u32 i, n = MAX_ENTRIES; + //__u64 init = 0; + struct in_addr src; + struct in_addr dst; + + if (dump_hash(map_fd, keys, key_size, counts, value_size, &n, &zero)) { + warn("dump_hash: %s", 
strerror(errno)); + return; + } + + for (i = 0; i < n; i++) { + src.s_addr = keys[i].saddr; + dst.s_addr = keys[i].daddr; + + if (env.ec) { + printf("%lu %lu %s %s %d %llu %s", + (unsigned long)start, + (unsigned long)time(NULL), + inet_ntop(AF_INET, &src, s, sizeof(s)), + inet_ntop(AF_INET, &dst, d, sizeof(d)), + ntohs(keys[i].dport), + counts[i], + keys[i].task); + printf("\n"); + bpf_map_delete_elem(map_fd, &keys[i]); + } else { + printf("%-25s %-25s", + inet_ntop(AF_INET, &src, s, sizeof(s)), + inet_ntop(AF_INET, &dst, d, sizeof(d))); + if (env.source_port) + printf(" %-20d", keys[i].sport); + printf(" %-20d", ntohs(keys[i].dport)); + printf(" %-10llu", counts[i]); + printf("\n"); + } + } +} + +static void print_count_ipv6(int map_fd, time_t start) +{ + static struct ipv6_flow_key keys[MAX_ENTRIES]; + __u32 value_size = sizeof(__u64); + __u32 key_size = sizeof(keys[0]); + static struct ipv6_flow_key zero; + static __u64 counts[MAX_ENTRIES]; + char s[INET6_ADDRSTRLEN]; + char d[INET6_ADDRSTRLEN]; + __u32 i, n = MAX_ENTRIES; + struct in6_addr src; + struct in6_addr dst; + + if (dump_hash(map_fd, keys, key_size, counts, value_size, &n, &zero)) { + warn("dump_hash: %s", strerror(errno)); + return; + } + + for (i = 0; i < n; i++) { + memcpy(src.s6_addr, keys[i].saddr, sizeof(src.s6_addr)); + memcpy(dst.s6_addr, keys[i].daddr, sizeof(src.s6_addr)); + + if (env.ec) { + printf("%lu %lu %s %s %d %llu %s", + (unsigned long)start, + (unsigned long)time(NULL), + inet_ntop(AF_INET6, &src, s, sizeof(s)), + inet_ntop(AF_INET6, &dst, d, sizeof(d)), + ntohs(keys[i].dport), + counts[i], + keys[i].task); + printf("\n"); + bpf_map_delete_elem(map_fd, &keys[i]); + } else { + printf("%-25s %-25s", + inet_ntop(AF_INET6, &src, s, sizeof(s)), + inet_ntop(AF_INET6, &dst, d, sizeof(d))); + if (env.source_port) + printf(" %-20d", keys[i].sport); + printf(" %-20d", ntohs(keys[i].dport)); + printf(" %-10llu", counts[i]); + printf("\n"); + } + } +} + +static void print_count_header() +{ + 
/* Print the column header for -c (count) table output. */
static void print_count_header()
{
	printf("\n%-25s %-25s", "LADDR", "RADDR");
	if (env.source_port)
		printf(" %-20s", "LPORT");
	printf(" %-20s", "RPORT");
	printf(" %-10s", "CONNECTS");
	printf("\n");
}

/*
 * -c mode: block until SIGINT (pause() returns when the handler runs),
 * then dump both count maps once as a table.
 */
static void print_count(int map_fd_ipv4, int map_fd_ipv6)
{
	while (!exiting)
		pause();

	print_count_header();
	print_count_ipv4(map_fd_ipv4, 0);
	print_count_ipv6(map_fd_ipv6, 0);
}

/*
 * -e mode: every env.interval seconds dump (and, inside the printers,
 * reset) both count maps; after SIGINT, flush one final time so the
 * partial interval is not lost.  The inner 1-second sleep loop is what
 * makes SIGINT responsive mid-interval.
 */
static void print_extended_count(int map_fd_ipv4, int map_fd_ipv6)
{
	time_t end;
	time_t start = 0;
	while (!exiting) {
		start = time(NULL);
		end = time(NULL) + env.interval;
		while (time(NULL) <= end && !exiting) {
			sleep(1);
		}
		print_count_ipv4(map_fd_ipv4, start);
		print_count_ipv6(map_fd_ipv6, start);
	}

	print_count_ipv4(map_fd_ipv4, start);
	print_count_ipv6(map_fd_ipv6, start);
}

/* Print the column header for per-event (trace) output. */
static void print_events_header()
{
	if (env.print_timestamp)
		printf("%-9s", "TIME(s)");
	if (env.print_uid)
		printf("%-6s", "UID");
	printf("%-6s %-12s %-2s %-16s %-16s",
	       "PID", "COMM", "IP", "SADDR", "DADDR");
	if (env.source_port)
		printf(" %-5s", "SPORT");
	printf(" %-5s\n", "DPORT");
}

/*
 * Perf-buffer callback: format and print one connection event.
 * Timestamps are shown relative to the first event seen (start_ts).
 */
static void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
	struct event event;
	char src[INET6_ADDRSTRLEN];
	char dst[INET6_ADDRSTRLEN];
	union {
		struct in_addr x4;
		struct in6_addr x6;
	} s, d;
	static __u64 start_ts;

	/* guard against truncated records */
	if (data_sz < sizeof(event)) {
		printf("Error: packet too small\n");
		return;
	}
	/* Copy data as alignment in the perf buffer isn't guaranteed. */
	memcpy(&event, data, sizeof(event));

	if (event.af == AF_INET) {
		s.x4.s_addr = event.saddr_v4;
		d.x4.s_addr = event.daddr_v4;
	} else if (event.af == AF_INET6) {
		memcpy(&s.x6.s6_addr, event.saddr_v6, sizeof(s.x6.s6_addr));
		memcpy(&d.x6.s6_addr, event.daddr_v6, sizeof(d.x6.s6_addr));
	} else {
		warn("broken event: event.af=%d", event.af);
		return;
	}

	if (env.print_timestamp) {
		if (start_ts == 0)
			start_ts = event.ts_us;
		/* ts_us is in microseconds; print seconds with ms precision */
		printf("%-9.3f", (event.ts_us - start_ts) / 1000000.0);
	}

	if (env.print_uid)
		printf("%-6d", event.uid);

	printf("%-6d %-12.12s %-2d %-16s %-16s",
	       event.pid, event.task,
	       event.af == AF_INET ? 4 : 6,
	       inet_ntop(event.af, &s, src, sizeof(src)),
	       inet_ntop(event.af, &d, dst, sizeof(dst)));

	if (env.source_port)
		printf(" %-5d", event.sport);

	printf(" %-5d", ntohs(event.dport));

	printf("\n");
}

/* Perf-buffer callback invoked when the kernel dropped events. */
static void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
{
	warn("Lost %llu events on CPU #%d!\n", lost_cnt, cpu);
}

/*
 * Default (trace) mode: attach a perf buffer to the events map and poll
 * it at 100ms intervals until SIGINT.  -EINTR from poll is expected on
 * signal delivery and is not treated as an error.
 */
static void print_events(int perf_map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perf_map_fd, 128,
			      handle_event, handle_lost_events, NULL, NULL);
	if (!pb) {
		err = -errno;
		warn("failed to open perf buffer: %d\n", err);
		goto cleanup;
	}

	print_events_header();
	while (!exiting) {
		err = perf_buffer__poll(pb, 100);
		if (err < 0 && err != -EINTR) {
			warn("error polling perf buffer: %s\n", strerror(-err));
			goto cleanup;
		}
		/* reset err to return 0 if exiting */
		err = 0;
	}

cleanup:
	perf_buffer__free(pb);
}

/*
 * Entry point: parse arguments, open the BPF skeleton, push the runtime
 * filters into rodata (must happen before load), load + attach the
 * programs, then dispatch to one of three output modes.
 * Returns 0 on success, 1 on any failure.
 */
int main(int argc, char **argv)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts);
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
		.args_doc = NULL,
	};
	struct tcpconnect_bpf *obj;
	int i, err;

	err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
	if (err)
		return err;

	libbpf_set_print(libbpf_print_fn);

	err = ensure_core_btf(&open_opts);
	if (err) {
		fprintf(stderr, "failed to fetch necessary BTF for CO-RE: %s\n", strerror(-err));
		return 1;
	}

	obj = tcpconnect_bpf__open_opts(&open_opts);
	if (!obj) {
		warn("failed to open BPF object\n");
		return 1;
	}

	/* rodata is frozen at load time, so all filters are set here. */
	// ec takes precedence over count
	if (env.ec)
		obj->rodata->do_ec = true;
	if (env.count)
		obj->rodata->do_count = true;
	if (env.pid)
		obj->rodata->filter_pid = env.pid;
	if (env.uid != (uid_t) -1)
		obj->rodata->filter_uid = env.uid;
	if (env.nports > 0) {
		obj->rodata->filter_ports_len = env.nports;
		for (i = 0; i < env.nports; i++) {
			/* BPF side compares network byte order */
			obj->rodata->filter_ports[i] = htons(env.ports[i]);
		}
	}
	// count per unique source_port not applicable for extended count
	if (env.source_port && !env.ec)
		obj->rodata->source_port = true;

	err = tcpconnect_bpf__load(obj);
	if (err) {
		warn("failed to load BPF object: %d\n", err);
		goto cleanup;
	}

	err = tcpconnect_bpf__attach(obj);
	if (err) {
		warn("failed to attach BPF programs: %s\n", strerror(-err));
		goto cleanup;
	}

	if (signal(SIGINT, sig_int) == SIG_ERR) {
		warn("can't set signal handler: %s\n", strerror(errno));
		err = 1;
		goto cleanup;
	}

	// ec takes precedence over count
	if (env.ec) {
		print_extended_count(bpf_map__fd(obj->maps.ipv4_count),
				     bpf_map__fd(obj->maps.ipv6_count));
	} else if (env.count) {
		print_count(bpf_map__fd(obj->maps.ipv4_count),
			    bpf_map__fd(obj->maps.ipv6_count));
	} else {
		print_events(bpf_map__fd(obj->maps.events));
	}

cleanup:
	tcpconnect_bpf__destroy(obj);
	cleanup_core_btf(&open_opts);

	/* collapse any negative/positive error code to exit status 1 */
	return err != 0;
}
/* The maximum number of items in maps */
#define MAX_ENTRIES 8192

/* The maximum number of ports to filter */
#define MAX_PORTS 64

/* Size of the task/comm field below.
 * NOTE(review): presumably mirrors the kernel's TASK_COMM_LEN (16) — confirm. */
#define TASK_COMM_LEN 16

/* Hash-map key identifying one IPv4 flow for connection counting.
 * Shared ABI between the BPF object and the loader — do not reorder fields. */
struct ipv4_flow_key {
	__u32 saddr;		/* source address, network byte order */
	__u32 daddr;		/* destination address, network byte order */
	__u16 sport;		/* source port (printed without ntohs — presumably host order; verify against BPF side) */
	__u16 dport;		/* destination port, network byte order (printers apply ntohs) */
	char task[TASK_COMM_LEN];	/* command name of the connecting task */
};

/* Hash-map key identifying one IPv6 flow for connection counting.
 * Same field semantics as ipv4_flow_key, with 16-byte addresses. */
struct ipv6_flow_key {
	__u8 saddr[16];		/* source address, network byte order */
	__u8 daddr[16];		/* destination address, network byte order */
	__u16 sport;		/* source port (printed without ntohs — presumably host order; verify against BPF side) */
	__u16 dport;		/* destination port, network byte order (printers apply ntohs) */
	char task[TASK_COMM_LEN];	/* command name of the connecting task */
};

/* Per-connection event record sent through the perf buffer.
 * The address unions are selected by `af`. */
struct event {
	union {
		__u32 saddr_v4;		/* valid when af == AF_INET */
		__u8 saddr_v6[16];	/* valid when af == AF_INET6 */
	};
	union {
		__u32 daddr_v4;		/* valid when af == AF_INET */
		__u8 daddr_v6[16];	/* valid when af == AF_INET6 */
	};
	char task[TASK_COMM_LEN];	/* command name of the connecting task */
	__u64 ts_us;		/* event timestamp in microseconds */
	__u32 af;		// AF_INET or AF_INET6
	__u32 pid;		/* process id of the connecting task */
	__u32 uid;		/* user id of the connecting task */
	__u16 sport;		/* source port (printed without ntohs) */
	__u16 dport;		/* destination port, network byte order */
};

#endif /* __TCPCONNECT_H */