Spring application monitoring in AWS EKS with Prometheus and Grafana

Daniel Lungu
7 min read · Dec 11, 2023

--

In this post, I will go through the minimum steps to configure Prometheus and Grafana in an AWS EKS cluster to monitor a Spring application.

Requirements

Prepare Spring application

Before we allow Prometheus to extract data from the application, we need to provide an API with information about the JVM’s memory, garbage collector, service statistics, and processes.

A good tool for exposing information about a Spring application is Micrometer. Micrometer is easy to integrate: you only need to import the dependency in Gradle or Maven, and it can expose custom values to Prometheus via an API.

implementation "io.micrometer:micrometer-registry-prometheus:1.9.0"

Custom metrics.

@WebEndpoint(id = "prometheus")
public class MetricsEndpoint {

    private final MeterRegistry meterRegistry;

    public MetricsEndpoint(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
    }

    /**
     * Aggregates the JVM memory, garbage-collector and process metrics
     * currently registered in the {@link MeterRegistry}.
     *
     * @return map with three top-level keys: {@code jvm}, {@code garbageCollector}
     *         and {@code processMetrics}
     */
    @ReadOperation
    public Map<String, Map<?, ?>> allMetrics() {
        Map<String, Map<?, ?>> results = new HashMap<>();
        // JVM memory stats per memory pool
        results.put("jvm", jvmMemoryMetrics());
        // Garbage collector pauses, counters and class-loading stats
        results.put("garbageCollector", garbageCollectorMetrics());
        // CPU / system / process gauges
        results.put("processMetrics", processMetrics());
        return results;
    }

    /**
     * Collects all gauges whose names mention cpu/system/process, plus the
     * process time gauges (reported in milliseconds).
     */
    private Map<String, Number> processMetrics() {
        Map<String, Number> resultsProcess = new HashMap<>();

        Collection<Gauge> gauges = Search.in(meterRegistry)
                .name(s -> s.contains("cpu") || s.contains("system") || s.contains("process"))
                .gauges();
        gauges.forEach(gauge -> resultsProcess.put(gauge.getId().getName(), gauge.value()));

        Collection<TimeGauge> timeGauges = Search.in(meterRegistry)
                .name(s -> s.contains("process")).timeGauges();
        timeGauges.forEach(gauge -> resultsProcess.put(gauge.getId().getName(),
                gauge.value(TimeUnit.MILLISECONDS)));

        return resultsProcess;
    }

    /**
     * Collects GC pause timers (count/max/total/mean plus configured
     * percentiles, all in milliseconds), other jvm.gc gauges/counters, and
     * class-loading totals.
     */
    private Map<String, Object> garbageCollectorMetrics() {
        Map<String, Object> resultsGarbageCollector = new HashMap<>();

        Collection<Timer> timers = Search.in(meterRegistry).name(s -> s.contains("jvm.gc.pause"))
                .timers();
        timers.forEach(timer -> {
            Map<String, Number> gcPauseResults = new HashMap<>();
            gcPauseResults.put("count", timer.count());
            gcPauseResults.put("max", timer.max(TimeUnit.MILLISECONDS));
            gcPauseResults.put("totalTime", timer.totalTime(TimeUnit.MILLISECONDS));
            gcPauseResults.put("mean", timer.mean(TimeUnit.MILLISECONDS));

            // Percentiles are only present if the timer was configured with them.
            for (ValueAtPercentile percentile : timer.takeSnapshot().percentileValues()) {
                gcPauseResults.put(String.valueOf(percentile.percentile()),
                        percentile.value(TimeUnit.MILLISECONDS));
            }

            // putIfAbsent: keep the first timer seen for a given meter name.
            resultsGarbageCollector.putIfAbsent(timer.getId().getName(), gcPauseResults);
        });

        Collection<Gauge> gauges = Search.in(meterRegistry).name(s -> s.contains("jvm.gc") &&
                !s.contains("jvm.gc.pause")).gauges();
        gauges.forEach(gauge -> resultsGarbageCollector.put(gauge.getId().getName(), gauge.value()));

        Collection<Counter> counters = Search.in(meterRegistry)
                .name(s -> s.contains("jvm.gc") && !s.contains("jvm.gc.pause"))
                .counters();
        counters.forEach(
                counter -> resultsGarbageCollector.put(counter.getId().getName(), counter.count())
        );

        // Sum over all tags of the meter (normally a single gauge).
        double classesLoaded = Search.in(meterRegistry)
                .name(s -> s.contains("jvm.classes.loaded")).gauges()
                .stream().mapToDouble(Gauge::value).sum();
        resultsGarbageCollector.put("classesLoaded", classesLoaded);

        double classesUnloaded = Search.in(meterRegistry)
                .name(s -> s.contains("jvm.classes.unloaded")).functionCounters()
                .stream().mapToDouble(FunctionCounter::count).sum();
        resultsGarbageCollector.put("classesUnloaded", classesUnloaded);

        return resultsGarbageCollector;
    }

    /**
     * Groups jvm.memory.used/max/committed gauges by their {@code id} tag
     * (the memory-pool name), e.g. {"G1 Eden Space": {"used": .., "max": ..}}.
     */
    private Map<String, Map<String, Number>> jvmMemoryMetrics() {
        Map<String, Map<String, Number>> resultsJvm = new HashMap<>();
        collectMemoryGauges(resultsJvm, "jvm.memory.used", "used");
        collectMemoryGauges(resultsJvm, "jvm.memory.max", "max");
        collectMemoryGauges(resultsJvm, "jvm.memory.committed", "committed");
        return resultsJvm;
    }

    /**
     * Adds every gauge matching {@code meterName} to {@code results} under its
     * memory-pool id, storing the value at {@code statKey}.
     *
     * Bug fix vs. the original: the max/committed passes used
     * {@code results.get(key).put(...)}, which throws a NullPointerException
     * when a pool id shows up for jvm.memory.max/committed but not for
     * jvm.memory.used (or when the "id" tag is missing). computeIfAbsent plus
     * a null guard makes each pass independent.
     */
    private void collectMemoryGauges(Map<String, Map<String, Number>> results,
                                     String meterName, String statKey) {
        Search.in(meterRegistry).name(s -> s.contains(meterName)).gauges().forEach(gauge -> {
            String poolId = gauge.getId().getTag("id");
            if (poolId != null) {
                results.computeIfAbsent(poolId, k -> new HashMap<>()).put(statKey, gauge.value());
            }
        });
    }

}

Register custom metrics.

@Configuration
@ConditionalOnClass(Timed.class)
@AutoConfigureAfter(MetricsEndpointAutoConfiguration.class)
public class MetricsEndpointConfiguration {

    /**
     * Registers the custom {@code prometheus} actuator endpoint.
     *
     * Only created when a {@link MeterRegistry} bean exists, no other
     * {@code MetricsEndpoint} bean is already defined, and the endpoint is
     * enabled/exposed via actuator configuration.
     *
     * @param meterRegistry the registry the endpoint reads its meters from
     * @return the endpoint bean serving /management/prometheus
     */
    @Bean
    @ConditionalOnBean({MeterRegistry.class})
    @ConditionalOnMissingBean
    @ConditionalOnAvailableEndpoint
    public MetricsEndpoint metricsEndpoint(MeterRegistry meterRegistry) {
        MetricsEndpoint endpoint = new MetricsEndpoint(meterRegistry);
        return endpoint;
    }
}

All metrics will be available by requesting the API from the URL

http://<application-host>/management/prometheus

Create Docker container

To deploy the application to the AWS EKS cluster, we need to put the Spring application into the Docker container and add it to a container registry such as AWS ECR.

To add a Docker image in AWS ECR:

  1. Retrieve an authentication token and authenticate your Docker client to your registry.
aws ecr get-login-password --region <AWS-REGION> | docker login --username AWS --password-stdin <AWS-ACCOUNT>.dkr.ecr.<AWS-REGION>.amazonaws.com

2. Build your Docker image

docker build -t spring-application .

3. After the build is completed, tag your image so you can push the image to this repository:

docker tag spring-application:latest <AWS-ACCOUNT>.dkr.ecr.<AWS-REGION>.amazonaws.com/spring-application:latest

4. Run the following command to push this image to your newly created AWS repository:

docker push <AWS-ACCOUNT>.dkr.ecr.<AWS-REGION>.amazonaws.com/spring-application:latest

Deploy Spring application in AWS EKS cluster

After kubectl is connected to the AWS EKS cluster, we can start deploying the Spring application to the AWS EKS cluster.

First we will need to connect AWS EKS to AWS ECR so that AWS EKS can get the Docker image from AWS ECR. This can be done by running the following command in the terminal.

kubectl create secret docker-registry secret-registry \
--docker-server=https://<AWS-ACCOUNT>.dkr.ecr.<AWS-REGION>.amazonaws.com \
--docker-username=AWS \
--docker-password=$(aws ecr get-login-password --region <AWS-REGION>)

Now create a file called spring-application.yml and copy paste the following content to deploy Spring application in AWS EKS.

# spring-application.yml — Deployment + Service + ALB Ingress for the Spring app.
# (Indentation restored: YAML is whitespace-significant and the pasted version was flat.)
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: spring-application
  name: spring-application
spec:
  replicas: 1
  selector:
    matchLabels:
      app: spring-application
  template:
    metadata:
      labels:
        app: spring-application
    spec:
      affinity:
        # Prefer spreading replicas across nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - spring-application
                topologyKey: kubernetes.io/hostname
              weight: 100
      containers:
        - image: <AWS-ACCOUNT>.dkr.ecr.<AWS-REGION>.amazonaws.com/spring-application:latest
          name: spring-application
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
          resources:
            requests:
              memory: "512Mi"
              cpu: "200m"
            limits:
              memory: "1Gi"
              cpu: "400m"
          readinessProbe:
            httpGet:
              path: /management/health/readiness
              port: http
            initialDelaySeconds: 20
            periodSeconds: 15
            failureThreshold: 6
          livenessProbe:
            httpGet:
              path: /management/health/liveness
              port: http
            initialDelaySeconds: 120
      imagePullSecrets:
        # Secret created earlier via `kubectl create secret docker-registry`.
        - name: secret-registry
---
apiVersion: v1
kind: Service
metadata:
  name: spring-application
  labels:
    app: spring-application
spec:
  selector:
    app: spring-application
  type: NodePort
  ports:
    - name: http
      port: 8080
      targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spring-application-ingress
  labels:
    app: spring-application
  annotations:
    kubernetes.io/ingress.class: alb
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80}]'
spec:
  rules:
    - host: <APPLICATION-HOST>
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: spring-application
                port:
                  number: 8080

You can now run the following command to create the AWS EKS for Spring application deployment, service and ingress:

 kubectl apply -f spring-application.yml

Deploy Prometheus in AWS EKS cluster

First we will need to add the Prometheus operator to AWS EKS. This can be done by running the command:

kubectl create -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/master/bundle.yaml

Now create the spring-application-prometheus-sm.yml file with the following content to get the Spring application information with Prometheus.

# spring-application-prometheus-sm.yml — tells the Prometheus Operator to scrape
# the Spring service's /management/prometheus endpoint.
# (Indentation restored: the pasted version was flat and invalid YAML.)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: spring-application-sm
  labels:
    team: monitoring
spec:
  selector:
    matchLabels:
      app: spring-application
  endpoints:
    # "http" must match the named port on the spring-application Service.
    - port: http
      path: /management/prometheus

Next we need to create a cluster role and bind it to the Prometheus operator. Create a new file called prometheus-crd.yml and copy and paste the following content.

# prometheus-crd.yml — ServiceAccount + ClusterRole + binding for the Prometheus
# Operator. (Indentation restored: the pasted version was flat and invalid YAML.)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus-operator-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus-operator-cr
rules:
  # NOTE(review): extensions/thirdpartyresources was removed in Kubernetes 1.8+;
  # this rule is inert on any modern EKS cluster and kept only for parity.
  - apiGroups:
      - extensions
    resources:
      - thirdpartyresources
    verbs:
      - "*"
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - "*"
  - apiGroups:
      - monitoring.coreos.com
    resources:
      - alertmanagers
      - prometheuses
      - servicemonitors
      - prometheusrules
      - thanosrulers
      - podmonitors
      - probes
    verbs:
      - "*"
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs: ["*"]
  - apiGroups: [""]
    resources:
      - configmaps
      - secrets
    verbs: ["*"]
  - apiGroups: [""]
    resources:
      - pods
    verbs: ["list", "delete"]
  - apiGroups: [""]
    resources:
      - services
      - endpoints
    verbs: ["get", "create", "update"]
  - apiGroups: [""]
    resources:
      - nodes
    verbs: ["list", "watch"]
  - apiGroups: [""]
    resources:
      - namespaces
    verbs: ["list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus-operator-rb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus-operator-cr
subjects:
  - kind: ServiceAccount
    name: prometheus-operator-sa
    namespace: default

Next, we will create Prometheus on AWS EKS. Create a new file named prometheus-cr.yml with the following content.

# prometheus-cr.yml — ServiceAccount/Role for the Prometheus server, the
# Prometheus custom resource, and a Service exposing its web UI on 9090.
# (Indentation restored: the pasted version was flat and invalid YAML.)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: prometheus-role
rules:
  - apiGroups: [""]
    resources:
      - nodes
      - services
      - endpoints
      - pods
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources:
      - configmaps
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
# RoleBinding (not ClusterRoleBinding): limits scrape permissions to this namespace.
kind: RoleBinding
metadata:
  name: prometheus-rb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus-role
subjects:
  - kind: ServiceAccount
    name: prometheus-sa
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: prometheus
spec:
  replicas: 1
  serviceAccountName: prometheus-sa
  # Empty selectors: pick up every ServiceMonitor/PodMonitor in every namespace.
  serviceMonitorNamespaceSelector: {}
  serviceMonitorSelector: {}
  podMonitorSelector: {}
  resources:
    requests:
      memory: 400Mi
  securityContext:
    runAsGroup: 65534
    runAsNonRoot: true
    runAsUser: 65534
    fsGroup: 65534
  storage:
    volumeClaimTemplate:
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 3Gi
        # Requires the EBS CSI StorageClass "ebs-sc" to exist in the cluster.
        storageClassName: ebs-sc
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
spec:
  ports:
    - name: web
      port: 9090
      protocol: TCP
      targetPort: web
  selector:
    # The operator labels Prometheus pods with prometheus=<CR name>.
    prometheus: prometheus

Now run each file from the terminal.

kubectl apply -f spring-application-prometheus-sm.yml
kubectl apply -f prometheus-crd.yml
kubectl apply -f prometheus-cr.yml

Deploy Grafana in AWS EKS cluster

Create a new YAML file named grafana.yaml.

# grafana.yaml — PVC + admin Secret + Deployment + Service + ALB Ingress for Grafana.
# (Indentation restored: the pasted version was flat and invalid YAML.)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-pv-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-sc
  resources:
    requests:
      storage: "500M"
---
apiVersion: v1
kind: Secret
metadata:
  name: grafana-credentials
data:
  # Both values are base64("admin") — change them for anything beyond a demo.
  username: YWRtaW4=
  password: YWRtaW4=
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      # Run as the "nobody" user/group so the mounted volume is writable.
      securityContext:
        runAsGroup: 65534
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
      containers:
        - name: grafana
          image: grafana/grafana:8.4.1
          ports:
            - containerPort: 3000
              name: http
              protocol: TCP
          env:
            - name: GF_SECURITY_ADMIN_USER
              valueFrom:
                secretKeyRef:
                  name: grafana-credentials
                  key: username
            - name: GF_SECURITY_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: grafana-credentials
                  key: password
            - name: GF_USERS_ALLOW_SIGN_UP
              value: "false"
          resources:
            requests:
              memory: "100Mi"
              cpu: "100m"
            limits:
              memory: "250Mi"
              cpu: "200m"
          volumeMounts:
            - name: grafana-storage
              mountPath: /var/lib/grafana
      volumes:
        - name: grafana-storage
          persistentVolumeClaim:
            claimName: grafana-pv-claim
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: grafana
  labels:
    app: grafana
spec:
  selector:
    app: grafana
  type: NodePort
  ports:
    - name: http
      port: 3000
      targetPort: 3000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana-ingress
  labels:
    app: grafana
  annotations:
    kubernetes.io/ingress.class: alb
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80}]'
spec:
  rules:
    - host: <GRAFANA-HOST>
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: grafana
                port:
                  number: 3000

Run the file in the terminal.

kubectl apply -f grafana.yaml

Link Prometheus in Grafana

Now that everything is set up in AWS EKS, we can start connecting Prometheus data scraped from Spring application to Grafana and start creating a monitoring dashboard.

Access Grafana by navigating to the Grafana host provided in the YAML file. For credentials, type admin for username and password.

Go to the Data Sources option and create a new data source for Prometheus. Everything can be left as is, the only thing that needs to be filled in is the URL field with http://prometheus:9090. Click the “Save and Test” button.

Now we can create a new Dashboard in Grafana with the Spring application information provided with Prometheus. Something like this.

--

--

Daniel Lungu
Daniel Lungu

No responses yet