Horizontal Autoscaling From Request

Sadja Leelanoi · Published in Lotus’s IT · Jan 24, 2024

There are two solutions for horizontal autoscaling based on request metrics:

  • Prometheus + Prometheus Adapter + Spring Metrics
  • Prometheus + Prometheus Adapter + NGINX Ingress Controller

Solution 1. Prometheus + Prometheus Adapter + Spring Metrics

Step 1. Configure the Spring Application and Jenkins Pipeline, and Deploy

pom.xml

<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus</artifactId>
</dependency>

application.properties

management.endpoints.web.exposure.include=metrics,prometheus

Change the Jenkins Pipeline Deploy Step to add the Prometheus scrape annotations to the pod template:

      annotations:
        prometheus.io/path: '/actuator/prometheus'
        prometheus.io/scrape: 'true'
        prometheus.io/port: '8080'

Example Jenkins File

pipeline {
agent any
environment {
APP_ENV = "dev"
IMAGE_NAME = "spring-api-common"
APP_NAME = "spring-api-common-autoscale"
SPRING_PROFILES_ACTIVE = "${APP_ENV}"
KUBE_NAMESPACE = "cma-${APP_ENV}"
SERVER_DEPLOYMENT = "dvcmaappus001th.th-tesco.org"
NEXUS_SERVER = "pvnexusho001th.dmz.th-tesco.org"
}
parameters {
string (
name: 'APP_TAG',
defaultValue: "latest",
description: 'Please fill your image TAG',
)
}
stages {
stage('Clean') {
steps {
echo 'Clean Workspace and Existing Deployment'
sh '''
rm -rf *
ls -alh
ssh svccma@${SERVER_DEPLOYMENT} "if sudo kubectl -n ${KUBE_NAMESPACE} get deployment ${APP_NAME} -n ${KUBE_NAMESPACE}; then echo exists && sudo kubectl delete deployment ${APP_NAME} -n ${KUBE_NAMESPACE} && sudo kubectl delete svc ${APP_NAME} -n ${KUBE_NAMESPACE} && sudo kubectl delete ingress ${APP_NAME} -n ${KUBE_NAMESPACE}; else echo no deployment; fi"
'''
}
}

stage('Deploy to PROD ENV') {
steps {
echo 'Deploy to PROD ENV'
sh '''
ssh svccma@${SERVER_DEPLOYMENT} "
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ${APP_NAME}
name: ${APP_NAME}
spec:
replicas: 1
selector:
matchLabels:
app: ${APP_NAME}
template:
metadata:
annotations:
prometheus.io/path: '/actuator/prometheus'
prometheus.io/scrape: 'true'
prometheus.io/port: '8080'
labels:
app: ${APP_NAME}
spec:
containers:
- image: ${NEXUS_SERVER}:5000/${IMAGE_NAME}:${APP_TAG}
imagePullPolicy: Always
name: ${IMAGE_NAME}
resources:
limits:
memory: 1024Mi
requests:
memory: 1024Mi
cpu: 1
env:
- name: SPRING_PROFILES_ACTIVE
value: "${SPRING_PROFILES_ACTIVE}"
- name: STORELINE_USERNAME
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: STORELINE_USERNAME
- name: STORELINE_PASSWORD
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: STORELINE_PASSWORD
- name: COUCHBASE_PASSWORD
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: COUCHBASE_PASSWORD
EOF"

ssh svccma@${SERVER_DEPLOYMENT} "sudo kubectl expose deployment ${APP_NAME} -n ${KUBE_NAMESPACE} --port=80 --target-port=8080 && sudo kubectl set env deployment/${APP_NAME} SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE} -n ${KUBE_NAMESPACE};
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: ${APP_NAME}
spec:
rules:
- host: ${APP_NAME}.${APP_ENV}.com
http:
paths:
- backend:
serviceName: ${APP_NAME}
servicePort: 80
path: /
EOF"

ssh svccma@${SERVER_DEPLOYMENT} "
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: ${APP_NAME}
labels:
app: ${APP_NAME}
annotations:
# metric-config.<metricType>.<metricName>.<collectorName>/<configKey>
metric-config.pods.load-per-min.json-path/json-key: "$.measurements[0].value"
metric-config.pods.load-per-min.json-path/path: /actuator/metrics/system.load.average.1m
metric-config.pods.load-per-min.json-path/port: '7070'
metric-config.pods.load-per-min.json-path/scheme: 'http'
spec:
metrics:
- type: Pods
pods:
metricName: cpu_usage
targetAverageValue: 800m
- type: Pods
pods:
metricName: memory_usage_bytes
targetAverageValue: 629145600
- type: Pods
pods:
metricName: requests_per_second
targetAverageValue: 50m
- type: Object
object:
metricName: nginx_ingress_request_per_second
target:
apiVersion: extensions/v1beta1
kind: Ingress
name: ${APP_NAME}
targetValue: 50m
maxReplicas: 10
minReplicas: 4
scaleTargetRef:
kind: Deployment
name: ${APP_NAME}
apiVersion: apps/v1
EOF"
'''

}
}
stage("Initialization") {
steps {
// use name of the patchset as the build name
buildName "#${APP_TAG}"
// buildDescription "Executed @ ${NODE_NAME}"
}
}

stage('Check App') {
steps {
echo 'Check App'
sh '''
sleep 60
STATUSCODE=$(curl --noproxy '*' -s -o /dev/null -I -w "%{http_code}" http://${APP_NAME}.${APP_ENV}.com/healthcheck)
if test $STATUSCODE -ne 200; then echo ERROR:$STATUSCODE && exit 1; else echo SUCCESS; fi;
'''
}
}
}
}

Deploy and check the metrics from URL: http://localhost/actuator/prometheus

# TYPE http_server_requests_seconds summary
http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 10941.0
http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 41.837929663
http_server_requests_seconds_count{exception="MethodArgumentTypeMismatchException",method="GET",outcome="SERVER_ERROR",status="500",uri="/stores/status",} 1.0
http_server_requests_seconds_sum{exception="MethodArgumentTypeMismatchException",method="GET",outcome="SERVER_ERROR",status="500",uri="/stores/status",} 0.278811851

Step 2. Install Prometheus (https://prometheus.io/) and Change the Config

Add a scrape job for the application pods to the Prometheus config:

- job_name: 'kubernetes-pods'

  kubernetes_sd_configs:
    - role: pod
      namespaces:
        names:
          - cma-dev

  relabel_configs:
    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
      action: keep
      regex: true
    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
      action: replace
      target_label: __metrics_path__
      regex: (.+)
    - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
      action: replace
      regex: ([^:]+)(?::\d+)?;(\d+)
      replacement: $1:$2
      target_label: __address__
    - action: labelmap
      regex: __meta_kubernetes_pod_label_(.+)
    - source_labels: [__meta_kubernetes_namespace]
      action: replace
      target_label: kubernetes_namespace
    - source_labels: [__meta_kubernetes_pod_name]
      action: replace
      target_label: kubernetes_pod_name
  metric_relabel_configs:
    - source_labels:
        - namespace
      action: replace
      regex: (.+)
      target_label: kubernetes_namespace

Check the Prometheus targets at http://prometheus.dev.com:5000/targets

Check the Prometheus metrics at http://prometheus.dev.com:5000/graph
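
It can also help to confirm from the command line that the application series are actually being scraped. A minimal sketch against the Prometheus HTTP API, assuming the prometheus.dev.com:5000 address above is reachable from the shell and jq is installed:

# Active targets picked up by the kubernetes-pods job
curl -s 'http://prometheus.dev.com:5000/api/v1/targets?state=active' | jq '.data.activeTargets[] | {job: .labels.job, health: .health}'

# Per-pod request rate, the same series the adapter rule will consume in Step 3
curl -s 'http://prometheus.dev.com:5000/api/v1/query' \
  --data-urlencode 'query=sum(rate(http_server_requests_seconds_count[2m])) by (kubernetes_pod_name)' | jq .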

Step 3. Install the Prometheus Adapter and Add Custom Metric Rules

Example Prometheus Adapter config:

apiVersion: v1
kind: ConfigMap
metadata:
name: adapter-config
namespace: lens-metrics
data:
config.yaml: |
rules:

- seriesQuery: 'http_server_requests_seconds_count'
seriesFilters: []
resources:
overrides:
kubernetes_pod_name:
resource: pod
kubernetes_namespace:
resource: namespace
name:
matches: "http_server_requests_seconds_count"
as: "requests_per_second"
metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>,uri!="/actuator/prometheus",uri!="/**/favicon.ico"}[2m])) by (<<.GroupBy>>)'
- seriesQuery:
'nginx_ingress_controller_requests{ingress!="",namespace!=""}'
resources:
overrides:
namespace: {resource: "namespace"}
ingress: {resource: "ingress"}
name:
matches: "nginx_ingress_controller_requests"
as: "nginx_ingress_request_per_second"
metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[2m])) by (<<.GroupBy>>)'
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters: []
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters:
- isNot: ^container_.*_seconds_total$
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters:
- isNot: ^container_.*_total$
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)$
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_total$
resources:
template: <<.Resource>>
name:
matches: ""
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_seconds_total
resources:
template: <<.Resource>>
name:
matches: ^(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters: []
resources:
template: <<.Resource>>
name:
matches: ^(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
resourceRules:
cpu:
containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
memory:
containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
window: 1m
externalRules:
- seriesQuery: '{__name__=~"^.*_queue_(length|size)$",namespace!=""}'
resources:
overrides:
namespace:
resource: namespace
name:
matches: ^.*_queue_(length|size)$
as: "$0"
metricsQuery: max(<<.Series>>{<<.LabelMatchers>>})
- seriesQuery: '{__name__=~"^.*_queue$",namespace!=""}'
resources:
overrides:
namespace:
resource: namespace
name:
matches: ^.*_queue$
as: "$0"
metricsQuery: max(<<.Series>>{<<.LabelMatchers>>})
- seriesQuery: 'http_server_requests_seconds_count'
seriesFilters: []
resources:
overrides:
kubernetes_pod_name:
resource: pod
kubernetes_namespace:
resource: namespace
name:
matches: "http_server_requests_seconds_count"
as: "requests_per_second"
metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>,uri!="/actuator/prometheus",uri!="/**/favicon.ico"}[2m])) by (<<.GroupBy>>)'
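
After applying the adapter ConfigMap (depending on how the adapter is deployed, its pod may need a restart to pick up the changed ConfigMap), the registration can be checked before wiring anything into an HPA. A minimal sketch, assuming jq is available as in the other examples:

# The adapter registers the custom metrics APIService
sudo kubectl get apiservices v1beta1.custom.metrics.k8s.io

# List the metric names currently exposed through the custom metrics API
sudo kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 | jq -r '.resources[].name'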

Step 4. Check the “requests_per_second” Metric

[svccma@node1 ~]$ sudo kubectl get --raw="/apis/custom.metrics.k8s.io/v1beta1/namespaces/cma-dev/pods/*/requests_per_second" | jq .
{
"kind": "MetricValueList",
"apiVersion": "custom.metrics.k8s.io/v1beta1",
"metadata": {
"selfLink": "/apis/custom.metrics.k8s.io/v1beta1/namespaces/cma-dev/pods/%2A/requests_per_second"
},
"items": [
{
"describedObject": {
"kind": "Pod",
"namespace": "cma-dev",
"name": "spring-api-common-autoscale-6b9d6668b9-5bstx",
"apiVersion": "/v1"
},
"metricName": "requests_per_second",
"timestamp": "2021-09-05T07:40:57Z",
"value": "0",
"selector": null
}
]
}

Step 5. Configure the Jenkins Pipeline and Deploy

Set resource requests and limits:

resources:
  limits:
    memory: 1024Mi
  requests:
    memory: 1024Mi
    cpu: 1

Set HorizontalPodAutoscaler

apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: spring-api-common-autoscale
  labels:
    app: spring-api-common-autoscale
  annotations:
    # metric-config.<metricType>.<metricName>.<collectorName>/<configKey>
    metric-config.pods.load-per-min.json-path/json-key: "$.measurements[0].value"
    metric-config.pods.load-per-min.json-path/path: /actuator/metrics/system.load.average.1m
    metric-config.pods.load-per-min.json-path/port: '7070'
    metric-config.pods.load-per-min.json-path/scheme: 'http'
spec:
  metrics:
    - type: Pods
      pods:
        metricName: requests_per_second
        targetAverageValue: 50m
  maxReplicas: 10
  minReplicas: 4
  scaleTargetRef:
    kind: Deployment
    name: spring-api-common-autoscale
    apiVersion: apps/v1

Example Jenkins File

pipeline {
agent any
environment {
APP_ENV = "dev"
IMAGE_NAME = "spring-api-common"
APP_NAME = "spring-api-common-autoscale"
SPRING_PROFILES_ACTIVE = "${APP_ENV}"
KUBE_NAMESPACE = "cma-${APP_ENV}"
SERVER_DEPLOYMENT = "dvcmaappus001th.th-tesco.org"
NEXUS_SERVER = "pvnexusho001th.dmz.th-tesco.org"
}
parameters {
string (
name: 'APP_TAG',
defaultValue: "latest",
description: 'Please fill your image TAG',
)
}
stages {
stage('Clean') {
steps {
echo 'Clean Workspace and Existing Deployment'
sh '''
rm -rf *
ls -alh
ssh svccma@${SERVER_DEPLOYMENT} "if sudo kubectl -n ${KUBE_NAMESPACE} get deployment ${APP_NAME} -n ${KUBE_NAMESPACE}; then echo exists && sudo kubectl delete deployment ${APP_NAME} -n ${KUBE_NAMESPACE} && sudo kubectl delete svc ${APP_NAME} -n ${KUBE_NAMESPACE} && sudo kubectl delete ingress ${APP_NAME} -n ${KUBE_NAMESPACE}; else echo no deployment; fi"
'''
}
}

stage('Deploy to PROD ENV') {
steps {
echo 'Deploy to PROD ENV'
sh '''
ssh svccma@${SERVER_DEPLOYMENT} "
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ${APP_NAME}
name: ${APP_NAME}
spec:
replicas: 1
selector:
matchLabels:
app: ${APP_NAME}
template:
metadata:
annotations:
prometheus.io/path: '/actuator/prometheus'
prometheus.io/scrape: 'true'
prometheus.io/port: '8080'
labels:
app: ${APP_NAME}
spec:
containers:
- image: ${NEXUS_SERVER}:5000/${IMAGE_NAME}:${APP_TAG}
imagePullPolicy: Always
name: ${IMAGE_NAME}
resources:
limits:
memory: 1024Mi
requests:
memory: 1024Mi
cpu: 1
env:
- name: SPRING_PROFILES_ACTIVE
value: "${SPRING_PROFILES_ACTIVE}"
- name: STORELINE_USERNAME
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: STORELINE_USERNAME
- name: STORELINE_PASSWORD
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: STORELINE_PASSWORD
- name: COUCHBASE_PASSWORD
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: COUCHBASE_PASSWORD
EOF"

ssh svccma@${SERVER_DEPLOYMENT} "sudo kubectl expose deployment ${APP_NAME} -n ${KUBE_NAMESPACE} --port=80 --target-port=8080 && sudo kubectl set env deployment/${APP_NAME} SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE} -n ${KUBE_NAMESPACE};
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: ${APP_NAME}
spec:
rules:
- host: ${APP_NAME}.${APP_ENV}.com
http:
paths:
- backend:
serviceName: ${APP_NAME}
servicePort: 80
path: /
EOF"

ssh svccma@${SERVER_DEPLOYMENT} "
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: ${APP_NAME}
labels:
app: ${APP_NAME}
annotations:
# metric-config.<metricType>.<metricName>.<collectorName>/<configKey>
metric-config.pods.load-per-min.json-path/json-key: "$.measurements[0].value"
metric-config.pods.load-per-min.json-path/path: /actuator/metrics/system.load.average.1m
metric-config.pods.load-per-min.json-path/port: '7070'
metric-config.pods.load-per-min.json-path/scheme: 'http'
spec:
metrics:
- type: Pods
pods:
metricName: cpu_usage
targetAverageValue: 800m
- type: Pods
pods:
metricName: memory_usage_bytes
targetAverageValue: 629145600
- type: Pods
pods:
metricName: requests_per_second
targetAverageValue: 50m
- type: Object
object:
metricName: nginx_ingress_request_per_second
target:
apiVersion: extensions/v1beta1
kind: Ingress
name: ${APP_NAME}
targetValue: 50m
maxReplicas: 10
minReplicas: 4
scaleTargetRef:
kind: Deployment
name: ${APP_NAME}
apiVersion: apps/v1
EOF"
'''

}
}
stage("Initialization") {
steps {
// use name of the patchset as the build name
buildName "#${APP_TAG}"
// buildDescription "Executed @ ${NODE_NAME}"
}
}

stage('Check App') {
steps {
echo 'Check App'
sh '''
sleep 60
STATUSCODE=$(curl --noproxy '*' -s -o /dev/null -I -w "%{http_code}" http://${APP_NAME}.${APP_ENV}.com/healthcheck)
if test $STATUSCODE -ne 200; then echo ERROR:$STATUSCODE && exit 1; else echo SUCCESS; fi;
'''
}
}
}
}

Step 6. Check HPA

[svccma@node1 ~]$ sudo kubectl get hpa -n cma-dev
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
spring-api-common-autoscale Deployment/spring-api-common-autoscale 0 / 50m 4 10 4 4d23h
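
When the TARGETS column looks wrong, kubectl describe shows the per-metric values, conditions, and recent scaling events. A minimal sketch:

# Inspect metric values and scaling events for the HPA
sudo kubectl describe hpa spring-api-common-autoscale -n cma-dev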

Step 7. Send Test Requests and Check Pod Scaling
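
Any load generator works for this. A minimal sketch using a plain shell loop against the ingress host from the deployment (the /healthcheck path is just the endpoint the pipeline’s Check App stage uses; substitute a real API path as needed):

# Keep requests flowing so requests_per_second rises above the 50m target
for i in $(seq 1 5000); do
  curl --noproxy '*' -s -o /dev/null http://spring-api-common-autoscale.dev.com/healthcheck
done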

[svccma@node1 ~]$ sudo kubectl get hpa -n cma-dev
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
spring-api-common-autoscale Deployment/spring-api-common-autoscale 60m / 50m 4 10 5 4d23h
[svccma@node1 ~]$ sudo kubectl get po -l app=spring-api-common-autoscale  -w -n cma-dev
NAME READY STATUS RESTARTS AGE
spring-api-common-autoscale-6b9d6668b9-5bstx 1/1 Running 0 22h
spring-api-common-autoscale-6b9d6668b9-fsvf6 1/1 Running 0 45h
spring-api-common-autoscale-6b9d6668b9-hljdk 1/1 Running 0 29h
spring-api-common-autoscale-6b9d6668b9-q9xw4 1/1 Running 0 29h
spring-api-common-autoscale-6b9d6668b9-q9xwa 1/1 Running 0 29h

Solution 2. Prometheus + Prometheus Adapter + NGINX Ingress Controller

Step 1. Install NGINX Ingress Controller

Change the config to add the Prometheus scrape annotations to the controller pods:

      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
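
To confirm the controller exposes the request counter on port 10254, one option is to port-forward to a controller pod and grep the metrics endpoint. A minimal sketch; <controller-pod> is a placeholder for the actual pod name in the ingress-nginx namespace:

# In one shell: forward the controller's metrics port locally
sudo kubectl -n ingress-nginx port-forward <controller-pod> 10254:10254

# In another shell: the per-ingress request counter should be listed
curl -s http://localhost:10254/metrics | grep nginx_ingress_controller_requests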

Step 2. Install Prometheus (https://prometheus.io/) and Change the Config

Add a scrape job for the ingress-nginx controller pods to the Prometheus config:

- job_name: 'ingress-nginx'

  kubernetes_sd_configs:
    - role: pod
      namespaces:
        names:
          - ingress-nginx

  relabel_configs:
    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
      action: keep
      regex: true
    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
      action: replace
      target_label: __metrics_path__
      regex: (.+)
    - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
      action: replace
      regex: ([^:]+)(?::\d+)?;(\d+)
      replacement: $1:$2
      target_label: __address__
    - action: labelmap
      regex: __meta_kubernetes_pod_label_(.+)
    - source_labels: [__meta_kubernetes_namespace]
      action: replace
      target_label: kubernetes_namespace
    - source_labels: [__meta_kubernetes_pod_name]
      action: replace
      target_label: kubernetes_pod_name
  metric_relabel_configs:
    - source_labels:
        - namespace
      action: replace
      regex: (.+)
      target_label: kubernetes_namespace

Check the Prometheus targets at http://prometheus.dev.com:5000/targets

Check the Prometheus metrics at http://prometheus.dev.com:5000/graph
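
The same rate expression that the adapter rule uses in Step 3 can be tried directly against Prometheus first, which makes an empty nginx_ingress_request_per_second easier to debug later. A minimal sketch, assuming the prometheus.dev.com:5000 address above is reachable and jq is installed:

# Per-ingress request rate over the same 2m window the adapter rule uses
curl -s 'http://prometheus.dev.com:5000/api/v1/query' \
  --data-urlencode 'query=sum(rate(nginx_ingress_controller_requests{ingress="spring-api-common-autoscale"}[2m])) by (ingress)' | jq .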

Step 3. Install the Prometheus Adapter and Add Custom Metric Rules

Example Prometheus Adapter config change:

apiVersion: v1
kind: ConfigMap
metadata:
name: adapter-config
namespace: lens-metrics
data:
config.yaml: |
rules:

- seriesQuery: 'http_server_requests_seconds_count'
seriesFilters: []
resources:
overrides:
kubernetes_pod_name:
resource: pod
kubernetes_namespace:
resource: namespace
name:
matches: "http_server_requests_seconds_count"
as: "requests_per_second"
metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>,uri!="/actuator/prometheus",uri!="/**/favicon.ico"}[2m])) by (<<.GroupBy>>)'
- seriesQuery:
'nginx_ingress_controller_requests{ingress!="",namespace!=""}'
resources:
overrides:
namespace: {resource: "namespace"}
ingress: {resource: "ingress"}
name:
matches: "nginx_ingress_controller_requests"
as: "nginx_ingress_request_per_second"
metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[2m])) by (<<.GroupBy>>)'
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters: []
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters:
- isNot: ^container_.*_seconds_total$
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters:
- isNot: ^container_.*_total$
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)$
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_total$
resources:
template: <<.Resource>>
name:
matches: ""
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_seconds_total
resources:
template: <<.Resource>>
name:
matches: ^(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters: []
resources:
template: <<.Resource>>
name:
matches: ^(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
resourceRules:
cpu:
containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
memory:
containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
window: 1m
externalRules:
- seriesQuery: '{__name__=~"^.*_queue_(length|size)$",namespace!=""}'
resources:
overrides:
namespace:
resource: namespace
name:
matches: ^.*_queue_(length|size)$
as: "$0"
metricsQuery: max(<<.Series>>{<<.LabelMatchers>>})
- seriesQuery: '{__name__=~"^.*_queue$",namespace!=""}'
resources:
overrides:
namespace:
resource: namespace
name:
matches: ^.*_queue$
as: "$0"
metricsQuery: max(<<.Series>>{<<.LabelMatchers>>})
    - seriesQuery:
'nginx_ingress_controller_requests{ingress!="",namespace!=""}'
resources:
overrides:
namespace: {resource: "namespace"}
ingress: {resource: "ingress"}
name:
matches: "nginx_ingress_controller_requests"
as: "nginx_ingress_request_per_second"
metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[2m])) by (<<.GroupBy>>)'

Step 4. Check the “nginx_ingress_request_per_second” Metric

[svccma@node1 ~]$ sudo kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/cma-dev/ingress/spring-api-common-autoscale/nginx_ingress_request_per_second" | jq .
{
"kind": "MetricValueList",
"apiVersion": "custom.metrics.k8s.io/v1beta1",
"metadata": {
"selfLink": "/apis/custom.metrics.k8s.io/v1beta1/namespaces/cma-dev/ingress/spring-api-common-autoscale/nginx_ingress_request_per_second"
},
"items": [
{
"describedObject": {
"kind": "Ingress",
"namespace": "cma-dev",
"name": "spring-api-common-autoscale",
"apiVersion": "extensions/v1beta1"
},
"metricName": "nginx_ingress_request_per_second",
"timestamp": "2021-09-05T08:41:41Z",
"value": "0",
"selector": null
}
]
}

Step 5. Configure the Jenkins Pipeline and Deploy

Set resource requests and limits:

resources:
  limits:
    memory: 1024Mi
  requests:
    memory: 1024Mi
    cpu: 1

Set HorizontalPodAutoscaler

apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: spring-api-common-autoscale
  labels:
    app: spring-api-common-autoscale
  annotations:
    # metric-config.<metricType>.<metricName>.<collectorName>/<configKey>
    metric-config.pods.load-per-min.json-path/json-key: "$.measurements[0].value"
    metric-config.pods.load-per-min.json-path/path: /actuator/metrics/system.load.average.1m
    metric-config.pods.load-per-min.json-path/port: '7070'
    metric-config.pods.load-per-min.json-path/scheme: 'http'
spec:
  metrics:
    - type: Object
      object:
        metricName: nginx_ingress_request_per_second
        target:
          apiVersion: extensions/v1beta1
          kind: Ingress
          name: ${APP_NAME}
        targetValue: 50m
  maxReplicas: 10
  minReplicas: 4
  scaleTargetRef:
    kind: Deployment
    name: spring-api-common-autoscale
    apiVersion: apps/v1

Example Jenkins File

pipeline {
agent any
environment {
APP_ENV = "dev"
IMAGE_NAME = "spring-api-common"
APP_NAME = "spring-api-common-autoscale"
SPRING_PROFILES_ACTIVE = "${APP_ENV}"
KUBE_NAMESPACE = "cma-${APP_ENV}"
SERVER_DEPLOYMENT = "dvcmaappus001th.th-tesco.org"
NEXUS_SERVER = "pvnexusho001th.dmz.th-tesco.org"
}
parameters {
string (
name: 'APP_TAG',
defaultValue: "latest",
description: 'Please fill your image TAG',
)
}
stages {
stage('Clean') {
steps {
echo 'Clean Workspace and Existing Deployment'
sh '''
rm -rf *
ls -alh
ssh svccma@${SERVER_DEPLOYMENT} "if sudo kubectl -n ${KUBE_NAMESPACE} get deployment ${APP_NAME} -n ${KUBE_NAMESPACE}; then echo exists && sudo kubectl delete deployment ${APP_NAME} -n ${KUBE_NAMESPACE} && sudo kubectl delete svc ${APP_NAME} -n ${KUBE_NAMESPACE} && sudo kubectl delete ingress ${APP_NAME} -n ${KUBE_NAMESPACE}; else echo no deployment; fi"
'''
}
}

stage('Deploy to PROD ENV') {
steps {
echo 'Deploy to PROD ENV'
sh '''
ssh svccma@${SERVER_DEPLOYMENT} "
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ${APP_NAME}
name: ${APP_NAME}
spec:
replicas: 1
selector:
matchLabels:
app: ${APP_NAME}
template:
metadata:
annotations:
prometheus.io/path: '/actuator/prometheus'
prometheus.io/scrape: 'true'
prometheus.io/port: '8080'
labels:
app: ${APP_NAME}
spec:
containers:
- image: ${NEXUS_SERVER}:5000/${IMAGE_NAME}:${APP_TAG}
imagePullPolicy: Always
name: ${IMAGE_NAME}
resources:
limits:
memory: 1024Mi
requests:
memory: 1024Mi
cpu: 1
env:
- name: SPRING_PROFILES_ACTIVE
value: "${SPRING_PROFILES_ACTIVE}"
- name: STORELINE_USERNAME
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: STORELINE_USERNAME
- name: STORELINE_PASSWORD
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: STORELINE_PASSWORD
- name: COUCHBASE_PASSWORD
valueFrom:
secretKeyRef:
name: ${IMAGE_NAME}
key: COUCHBASE_PASSWORD
EOF"

ssh svccma@${SERVER_DEPLOYMENT} "sudo kubectl expose deployment ${APP_NAME} -n ${KUBE_NAMESPACE} --port=80 --target-port=8080 && sudo kubectl set env deployment/${APP_NAME} SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE} -n ${KUBE_NAMESPACE};
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: ${APP_NAME}
spec:
rules:
- host: ${APP_NAME}.${APP_ENV}.com
http:
paths:
- backend:
serviceName: ${APP_NAME}
servicePort: 80
path: /
EOF"

ssh svccma@${SERVER_DEPLOYMENT} "
cat <<EOF | sudo kubectl -n ${KUBE_NAMESPACE} apply -f -
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: ${APP_NAME}
labels:
app: ${APP_NAME}
annotations:
# metric-config.<metricType>.<metricName>.<collectorName>/<configKey>
metric-config.pods.load-per-min.json-path/json-key: "$.measurements[0].value"
metric-config.pods.load-per-min.json-path/path: /actuator/metrics/system.load.average.1m
metric-config.pods.load-per-min.json-path/port: '7070'
metric-config.pods.load-per-min.json-path/scheme: 'http'
spec:
metrics:
- type: Pods
pods:
metricName: cpu_usage
targetAverageValue: 800m
- type: Pods
pods:
metricName: memory_usage_bytes
targetAverageValue: 629145600
- type: Pods
pods:
metricName: requests_per_second
targetAverageValue: 50m
- type: Object
object:
metricName: nginx_ingress_request_per_second
target:
apiVersion: extensions/v1beta1
kind: Ingress
name: ${APP_NAME}
targetValue: 50m
maxReplicas: 10
minReplicas: 4
scaleTargetRef:
kind: Deployment
name: ${APP_NAME}
apiVersion: apps/v1
EOF"
'''

}
}
stage("Initialization") {
steps {
// use name of the patchset as the build name
buildName "#${APP_TAG}"
// buildDescription "Executed @ ${NODE_NAME}"
}
}

stage('Check App') {
steps {
echo 'Check App'
sh '''
sleep 60
STATUSCODE=$(curl --noproxy '*' -s -o /dev/null -I -w "%{http_code}" http://${APP_NAME}.${APP_ENV}.com/healthcheck)
if test $STATUSCODE -ne 200; then echo ERROR:$STATUSCODE && exit 1; else echo SUCCESS; fi;
'''
}
}
}
}

Step 6. Check HPA

[svccma@node1 ~]$ sudo kubectl get hpa -n cma-dev
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
spring-api-common-autoscale Deployment/spring-api-common-autoscale 0 / 50m 4 10 4 4d23h

Step 7. Send Test Requests and Check Pod Scaling
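
As in Solution 1, any load generator will do. A minimal sketch using hey (not part of this setup, just one convenient option) against the ingress host, so the traffic is counted by nginx_ingress_controller_requests:

# 10 concurrent workers for 60 seconds through the ingress
hey -z 60s -c 10 http://spring-api-common-autoscale.dev.com/healthcheck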

[svccma@node1 ~]$ sudo kubectl get hpa -n cma-dev
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
spring-api-common-autoscale Deployment/spring-api-common-autoscale 60m / 50m 4 10 5 4d23h
[svccma@node1 ~]$ sudo kubectl get po -l app=spring-api-common-autoscale  -w -n cma-dev
NAME READY STATUS RESTARTS AGE
spring-api-common-autoscale-6b9d6668b9-5bstx 1/1 Running 0 22h
spring-api-common-autoscale-6b9d6668b9-fsvf6 1/1 Running 0 45h
spring-api-common-autoscale-6b9d6668b9-hljdk 1/1 Running 0 29h
spring-api-common-autoscale-6b9d6668b9-q9xw4 1/1 Running 0 29h
spring-api-common-autoscale-6b9d6668b9-q9xwa 1/1 Running 0 29h
