Mirror of https://gitlab.com/gitlab-org/gitlab-foss.git (synced 2025-07-29 12:00:32 +00:00)
Add latest changes from gitlab-org/gitlab@master
@@ -57,10 +57,7 @@ module Gitlab
           desc: "Kubernetes resource definition preset",
           default: Gitlab::Orchestrator::Deployment::ResourcePresets::DEFAULT,
           type: :string,
-          enum: [
-            Gitlab::Orchestrator::Deployment::ResourcePresets::DEFAULT,
-            Gitlab::Orchestrator::Deployment::ResourcePresets::HIGH
-          ]
+          enum: Gitlab::Orchestrator::Deployment::ResourcePresets::PRESETS

         super
       end
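The option above now derives its allowed values from the PRESETS constant instead of a hard-coded array. A minimal sketch of the effect, assuming the ResourcePresets class from this commit is loaded (the surrounding CLI plumbing is not shown and is only assumed here):

    presets = Gitlab::Orchestrator::Deployment::ResourcePresets::PRESETS
    # => ["default", "high", "performance"]

    # Any name added to PRESETS (such as the new "performance" preset) becomes a
    # valid value for the option without editing the enum list again.
    presets.include?("performance") # => true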
@@ -5,9 +5,18 @@ module Gitlab
     module Deployment
       # Kubernetes resource request/limit presets optimized for different usecases
       #
+      # Prefer vertical scaling over hpa for test stability
+      # as waiting for new pods to scale will lead to test flakiness
+      # Configure most pods with minReplicas: 1 to simplify debugging
+      # by having less logs files to review
+      # To scale webservice and sidekiq, concurrency parameters need to be adjusted together
+      # with cpu and memory values
       class ResourcePresets
         DEFAULT = "default"
         HIGH = "high"
+        PERFORMANCE = "performance"
+
+        PRESETS = [DEFAULT, HIGH, PERFORMANCE].freeze

         class << self
           # Kubernetes resources values for given preset
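The comment block above spells out the scaling strategy: HPAs are mostly avoided in favour of vertical scaling, and webservice/sidekiq are scaled by raising concurrency together with cpu and memory. A small illustrative comparison, with values copied from the default and high presets in the next hunk (the snippet itself is not part of the commit):

    # How the two presets move concurrency and resources together (values from this diff):
    webservice = {
      "default" => { workerProcesses: 2, cpu: "1500m", memory: "3Gi" },
      "high"    => { workerProcesses: 4, cpu: 3, memory: "5Gi requests / 7Gi limits" }
    }
    sidekiq = {
      "default" => { concurrency: 20, cpu: "900m", memory: "2Gi" },
      "high"    => { concurrency: 30, cpu: "1200m", memory: "2Gi" }
    }
    # Raising workerProcesses/concurrency without also raising the cpu request would leave
    # the extra workers starved, hence "adjusted together with cpu and memory values".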
@@ -15,147 +24,158 @@ module Gitlab
           # @param [String] preset_name
           # @return [Hash]
           def resource_values(preset_name)
-            presets.fetch(preset_name)
+            raise ArgumentError, "'#{preset_name}' is not a valid preset name" unless PRESETS.include?(preset_name)
+
+            send(preset_name) # rubocop:disable GitlabSecurity/PublicSend -- send with user input is prevented by validating PRESETS
           end

           private

-          # Different resources presets and replicas count
+          # Default resource preset for local deployments
           #
-          # Prefer vertical scaling over hpa for test stability
-          # as waiting for new pods to scale will lead to test flakiness
-          # Configure most pods with minReplicas: 1 to simplify debugging
-          # by having less logs files to review
-          # To scale webservice and sidekiq, concurrency parameters need to be adjusted together
-          # with cpu and memory values
           # @return [Hash]
-          def presets
-            @presets ||= {
-              # Default preset for local deployments
-              DEFAULT => {
-                gitlab: {
-                  webservice: {
-                    workerProcesses: 2,
-                    minReplicas: 1,
-                    resources: resources("1500m", "3Gi")
-                  },
-                  sidekiq: {
-                    concurrency: 20,
-                    minReplicas: 1,
-                    resources: resources("900m", "2Gi"),
-                    hpa: {
-                      cpu: { targetAverageValue: "800m" }
-                    }
-                  },
-                  kas: {
-                    minReplicas: 1,
-                    resources: resources("40m", "96Mi")
-                  },
-                  # TODO: if limits are defined, git operations start failing in e2e tests, investigate potential cause
-                  # https://gitlab.com/gitlab-org/quality/quality-engineering/team-tasks/-/issues/3699
-                  "gitlab-shell": {
-                    minReplicas: 1,
-                    resources: resources("30m", "16Mi", no_limits: true)
-                  },
-                  gitaly: {
-                    resources: resources("300m", "300Mi")
-                  },
-                  toolbox: {
-                    resources: resources("50m", "128Mi", no_limits: true)
-                  }
-                },
-                registry: {
-                  resources: resources("40m", "96Mi"),
-                  hpa: {
-                    minReplicas: 1,
-                    **cpu_utilization
-                  }
-                },
-                minio: {
-                  resources: resources("30m", "32Mi")
-                },
-                "nginx-ingress": {
-                  controller: {
-                    resources: resources("30m", "256Mi")
-                  }
-                },
-                postgresql: {
-                  primary: {
-                    resources: resources("400m", "1Gi")
-                  }
-                },
-                redis: {
-                  master: {
-                    resources: resources("50m", "16Mi")
-                  }
-                }
-              },
-              # This preset is optimized for running e2e tests in parallel
-              HIGH => {
-                gitlab: {
-                  webservice: {
-                    workerProcesses: 4,
-                    minReplicas: 1,
-                    # See https://docs.gitlab.com/charts/charts/gitlab/webservice/#memory-requestslimits
-                    resources: resources(3, "5Gi", 3, "7Gi"),
-                    hpa: cpu_utilization
-                  },
-                  sidekiq: {
-                    concurrency: 30,
-                    minReplicas: 1,
-                    resources: resources("1200m", "2Gi"),
-                    hpa: cpu_utilization
-                  },
-                  kas: {
-                    minReplicas: 1,
-                    resources: resources("60m", "96Mi"),
-                    hpa: cpu_utilization
-                  },
-                  # TODO: if limits are defined, git operations start failing in e2e tests, investigate potential cause
-                  # https://gitlab.com/gitlab-org/quality/quality-engineering/team-tasks/-/issues/3699
-                  "gitlab-shell": {
-                    minReplicas: 2,
-                    resources: resources("60m", "32Mi", no_limits: true),
-                    hpa: cpu_utilization
-                  },
-                  gitaly: {
-                    resources: resources("400m", "384Mi")
-                  },
-                  # Toolbox create peak load during startup but then consumes very little
-                  # Set high limit value but don't request full amount to avoid unnecessary lock
-                  toolbox: {
-                    resources: resources("50m", "128Mi", no_limits: true)
-                  }
-                },
-                registry: {
-                  resources: resources("50m", "128Mi"),
-                  hpa: {
-                    minReplicas: 1,
-                    **cpu_utilization
-                  }
-                },
-                minio: {
-                  resources: resources("50m", "32Mi")
-                },
-                "nginx-ingress": {
-                  controller: {
-                    resources: resources("30m", "256Mi")
-                  }
-                },
-                postgresql: {
-                  primary: {
-                    resources: resources("600m", "1536Mi")
-                  }
-                },
-                redis: {
-                  master: {
-                    resources: resources("100m", "16Mi")
-                  }
-                }
-              }
-            }
+          def default
+            @default ||= {
+              gitlab: {
+                webservice: {
+                  workerProcesses: 2,
+                  minReplicas: 1,
+                  resources: resources("1500m", "3Gi")
+                },
+                sidekiq: {
+                  concurrency: 20,
+                  minReplicas: 1,
+                  resources: resources("900m", "2Gi"),
+                  hpa: {
+                    cpu: { targetAverageValue: "800m" }
+                  }
+                },
+                kas: {
+                  minReplicas: 1,
+                  resources: resources("40m", "96Mi")
+                },
+                # TODO: if limits are defined, git operations start failing in e2e tests, investigate potential cause
+                # https://gitlab.com/gitlab-org/quality/quality-engineering/team-tasks/-/issues/3699
+                "gitlab-shell": {
+                  minReplicas: 1,
+                  resources: resources("30m", "16Mi", no_limits: true)
+                },
+                gitaly: {
+                  resources: resources("300m", "300Mi")
+                },
+                toolbox: {
+                  resources: resources("50m", "128Mi", no_limits: true)
+                }
+              },
+              registry: {
+                resources: resources("40m", "96Mi"),
+                hpa: {
+                  minReplicas: 1,
+                  **cpu_utilization
+                }
+              },
+              minio: {
+                resources: resources("30m", "32Mi")
+              },
+              "nginx-ingress": {
+                controller: {
+                  resources: resources("30m", "256Mi")
+                }
+              },
+              postgresql: {
+                primary: {
+                  resources: resources("400m", "1Gi")
+                }
+              },
+              redis: {
+                master: {
+                  resources: resources("50m", "16Mi")
+                }
+              }
+            }
+          end
+
+          # High resource preset optimized for running e2e tests in parallel
+          #
+          # @return [Hash]
+          def high
+            @high ||= {
+              gitlab: {
+                webservice: {
+                  workerProcesses: 4,
+                  minReplicas: 1,
+                  # See https://docs.gitlab.com/charts/charts/gitlab/webservice/#memory-requestslimits
+                  resources: resources(3, "5Gi", 3, "7Gi"),
+                  hpa: cpu_utilization
+                },
+                sidekiq: {
+                  concurrency: 30,
+                  minReplicas: 1,
+                  resources: resources("1200m", "2Gi"),
+                  hpa: cpu_utilization
+                },
+                kas: {
+                  minReplicas: 1,
+                  resources: resources("60m", "96Mi"),
+                  hpa: cpu_utilization
+                },
+                # TODO: if limits are defined, git operations start failing in e2e tests, investigate potential cause
+                # https://gitlab.com/gitlab-org/quality/quality-engineering/team-tasks/-/issues/3699
+                "gitlab-shell": {
+                  minReplicas: 2,
+                  resources: resources("60m", "32Mi", no_limits: true),
+                  hpa: cpu_utilization
+                },
+                gitaly: {
+                  resources: resources("400m", "384Mi")
+                },
+                # Toolbox create peak load during startup but then consumes very little
+                # Set high limit value but don't request full amount to avoid unnecessary lock
+                toolbox: {
+                  resources: resources("50m", "128Mi", no_limits: true)
+                }
+              },
+              registry: {
+                resources: resources("50m", "128Mi"),
+                hpa: {
+                  minReplicas: 1,
+                  **cpu_utilization
+                }
+              },
+              minio: {
+                resources: resources("50m", "32Mi")
+              },
+              "nginx-ingress": {
+                controller: {
+                  resources: resources("30m", "256Mi")
+                }
+              },
+              postgresql: {
+                primary: {
+                  resources: resources("600m", "1536Mi")
+                }
+              },
+              redis: {
+                master: {
+                  resources: resources("100m", "16Mi")
+                }
+              }
+            }
+          end
+
+          # Resource preset optimized for performance tests
+          #
+          # @return [Hash]
+          def performance
+            high.deep_merge({
+              redis: {
+                master: {
+                  resources: resources("200m", "128Mi")
+                }
+              }
+            })
           end

           # Kubernetes resources configuration
           #
           # Set limits equal to requests by default for simplicity
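A usage sketch of the refactored API above, based on the preset values in this hunk and the behaviour exercised by the spec below (illustrative only, not additional code from the commit):

    resource_presets = Gitlab::Orchestrator::Deployment::ResourcePresets

    # Valid preset names are dispatched to the matching private method via send.
    resource_presets.resource_values("default") # => hash built by #default
    resource_presets.resource_values("high")    # => hash built by #high

    # The performance preset is the high preset with a larger Redis allocation,
    # produced with deep_merge so every other value is inherited unchanged.
    resource_presets.resource_values("performance").dig(:redis, :master, :resources)
    # => { requests: { cpu: "200m", memory: "128Mi" }, limits: { cpu: "200m", memory: "128Mi" } }

    # Names outside PRESETS are rejected before send is ever called.
    resource_presets.resource_values("invalid_preset")
    # => raises ArgumentError, "'invalid_preset' is not a valid preset name"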
@@ -1,101 +1,8 @@
 # frozen_string_literal: true

 RSpec.describe Gitlab::Orchestrator::Deployment::ResourcePresets do
-  it "returns default resources values preset" do
-    expect(described_class.resource_values(described_class::DEFAULT)).to eq({
-      gitlab: {
-        webservice: {
-          workerProcesses: 2,
-          minReplicas: 1,
-          resources: {
-            requests: { cpu: "1500m", memory: "3Gi" },
-            limits: { cpu: "1500m", memory: "3Gi" }
-          }
-        },
-        sidekiq: {
-          concurrency: 20,
-          minReplicas: 1,
-          resources: {
-            requests: { cpu: "900m", memory: "2Gi" },
-            limits: { cpu: "900m", memory: "2Gi" }
-          },
-          hpa: {
-            cpu: { targetAverageValue: "800m" }
-          }
-        },
-        kas: {
-          minReplicas: 1,
-          resources: {
-            requests: { cpu: "40m", memory: "96Mi" },
-            limits: { cpu: "40m", memory: "96Mi" }
-          }
-        },
-        "gitlab-shell": {
-          minReplicas: 1,
-          resources: {
-            requests: { cpu: "30m", memory: "16Mi" }
-          }
-        },
-        gitaly: {
-          resources: {
-            requests: { cpu: "300m", memory: "300Mi" },
-            limits: { cpu: "300m", memory: "300Mi" }
-          }
-        },
-        toolbox: {
-          resources: {
-            requests: { cpu: "50m", memory: "128Mi" }
-          }
-        }
-      },
-      registry: {
-        resources: {
-          requests: { cpu: "40m", memory: "96Mi" },
-          limits: { cpu: "40m", memory: "96Mi" }
-        },
-        hpa: {
-          minReplicas: 1,
-          cpu: {
-            targetType: "Utilization",
-            targetAverageUtilization: 90
-          }
-        }
-      },
-      minio: {
-        resources: {
-          requests: { cpu: "30m", memory: "32Mi" },
-          limits: { cpu: "30m", memory: "32Mi" }
-        }
-      },
-      "nginx-ingress": {
-        controller: {
-          resources: {
-            requests: { cpu: "30m", memory: "256Mi" },
-            limits: { cpu: "30m", memory: "256Mi" }
-          }
-        }
-      },
-      postgresql: {
-        primary: {
-          resources: {
-            requests: { cpu: "400m", memory: "1Gi" },
-            limits: { cpu: "400m", memory: "1Gi" }
-          }
-        }
-      },
-      redis: {
-        master: {
-          resources: {
-            requests: { cpu: "50m", memory: "16Mi" },
-            limits: { cpu: "50m", memory: "16Mi" }
-          }
-        }
-      }
-    })
-  end
-
-  it "returns high resources values preset" do
-    expect(described_class.resource_values(described_class::HIGH)).to eq({
+  let(:high_preset) do
+    {
       gitlab: {
         webservice: {
           workerProcesses: 4,
@@ -205,6 +112,123 @@ RSpec.describe Gitlab::Orchestrator::Deployment::ResourcePresets do
           }
         }
       }
-    })
-  end
+    }
+  end
+
+  it "returns default resources values preset" do
+    expect(described_class.resource_values(described_class::DEFAULT)).to eq({
+      gitlab: {
+        webservice: {
+          workerProcesses: 2,
+          minReplicas: 1,
+          resources: {
+            requests: { cpu: "1500m", memory: "3Gi" },
+            limits: { cpu: "1500m", memory: "3Gi" }
+          }
+        },
+        sidekiq: {
+          concurrency: 20,
+          minReplicas: 1,
+          resources: {
+            requests: { cpu: "900m", memory: "2Gi" },
+            limits: { cpu: "900m", memory: "2Gi" }
+          },
+          hpa: {
+            cpu: { targetAverageValue: "800m" }
+          }
+        },
+        kas: {
+          minReplicas: 1,
+          resources: {
+            requests: { cpu: "40m", memory: "96Mi" },
+            limits: { cpu: "40m", memory: "96Mi" }
+          }
+        },
+        "gitlab-shell": {
+          minReplicas: 1,
+          resources: {
+            requests: { cpu: "30m", memory: "16Mi" }
+          }
+        },
+        gitaly: {
+          resources: {
+            requests: { cpu: "300m", memory: "300Mi" },
+            limits: { cpu: "300m", memory: "300Mi" }
+          }
+        },
+        toolbox: {
+          resources: {
+            requests: { cpu: "50m", memory: "128Mi" }
+          }
+        }
+      },
+      registry: {
+        resources: {
+          requests: { cpu: "40m", memory: "96Mi" },
+          limits: { cpu: "40m", memory: "96Mi" }
+        },
+        hpa: {
+          minReplicas: 1,
+          cpu: {
+            targetType: "Utilization",
+            targetAverageUtilization: 90
+          }
+        }
+      },
+      minio: {
+        resources: {
+          requests: { cpu: "30m", memory: "32Mi" },
+          limits: { cpu: "30m", memory: "32Mi" }
+        }
+      },
+      "nginx-ingress": {
+        controller: {
+          resources: {
+            requests: { cpu: "30m", memory: "256Mi" },
+            limits: { cpu: "30m", memory: "256Mi" }
+          }
+        }
+      },
+      postgresql: {
+        primary: {
+          resources: {
+            requests: { cpu: "400m", memory: "1Gi" },
+            limits: { cpu: "400m", memory: "1Gi" }
+          }
+        }
+      },
+      redis: {
+        master: {
+          resources: {
+            requests: { cpu: "50m", memory: "16Mi" },
+            limits: { cpu: "50m", memory: "16Mi" }
+          }
+        }
+      }
+    })
+  end
+
+  it "returns high resources values preset" do
+    expect(described_class.resource_values(described_class::HIGH)).to eq(high_preset)
+  end
+
+  it "returns performance resources values preset" do
+    expect(described_class.resource_values(described_class::PERFORMANCE)).to eq(high_preset.deep_merge({
+      redis: {
+        master: {
+          resources: {
+            requests: { cpu: "200m", memory: "128Mi" },
+            limits: { cpu: "200m", memory: "128Mi" }
+          }
+        }
+      }
+    }))
+  end
+
+  it "raises an error when an invalid preset name is provided" do
+    expect { described_class.resource_values("invalid_preset") }.to raise_error(
+      ArgumentError,
+      "'invalid_preset' is not a valid preset name"
+    )
+  end
 end
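The resources helper used throughout the presets (resources("1500m", "3Gi"), resources(3, "5Gi", 3, "7Gi"), resources("30m", "16Mi", no_limits: true)) is not shown in this diff. Based on the spec expectations above and the "Set limits equal to requests by default for simplicity" comment, a plausible sketch of such a helper could look like the following; the signature is an assumption, not the actual implementation:

    # Hypothetical sketch inferred from the spec: limits default to the request values,
    # explicit limit arguments override them, and no_limits: true omits the limits key.
    def resources(cpu_requests, memory_requests, cpu_limits = nil, memory_limits = nil, no_limits: false)
      values = { requests: { cpu: cpu_requests, memory: memory_requests } }
      return values if no_limits

      values.merge(limits: { cpu: cpu_limits || cpu_requests, memory: memory_limits || memory_requests })
    end

    resources("300m", "300Mi")
    # => { requests: { cpu: "300m", memory: "300Mi" }, limits: { cpu: "300m", memory: "300Mi" } }
    resources("30m", "16Mi", no_limits: true)
    # => { requests: { cpu: "30m", memory: "16Mi" } }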