Add latest changes from gitlab-org/gitlab@master

GitLab Bot
2023-11-16 03:11:54 +00:00
parent d22bc415db
commit a746dae4f7
17 changed files with 327 additions and 96 deletions

View File

@@ -30,7 +30,13 @@ export default {
</script>
<template>
<span class="mw-100 gl-display-flex gl-align-items-center gl-flex-grow-1">
<ci-icon :size="iconSize" :status="status" :show-tooltip="false" class="gl-line-height-0" />
<ci-icon
:size="iconSize"
:status="status"
:show-tooltip="false"
:use-link="false"
class="gl-line-height-0"
/>
<span class="gl-text-truncate mw-70p gl-pl-3 gl-display-inline-block">
{{ name }}
</span>

View File

@@ -4,5 +4,5 @@ introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/100500
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/378470
milestone: '15.6'
type: development
group: group::incubation
group: group::ai framework
default_enabled: false

View File

@@ -0,0 +1,10 @@
# frozen_string_literal: true
class AddMaxWorkspacesToRemoteDevelopmentAgentConfigs < Gitlab::Database::Migration[2.2]
milestone '16.7'
enable_lock_retries!
def change
add_column :remote_development_agent_configs, :max_workspaces, :bigint, default: -1, null: false
end
end

View File

@@ -0,0 +1,10 @@
# frozen_string_literal: true
class AddMaxWorkspacesPerUserToRemoteDevelopmentAgentConfigs < Gitlab::Database::Migration[2.2]
milestone '16.7'
enable_lock_retries!
def change
add_column :remote_development_agent_configs, :max_workspaces_per_user, :bigint, default: -1, null: false
end
end
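
Both migrations add `bigint` columns that default to `-1`. A plausible interpretation (an assumption; this commit only adds the columns and does not add enforcement) is that a negative value means "no limit", which application code might check roughly like this:

```ruby
# Hypothetical sketch, not part of this commit: treats the -1 default as "unlimited".
def workspaces_limit_reached?(agent_config, current_workspace_count)
  max = agent_config.max_workspaces
  max >= 0 && current_workspace_count >= max
end
```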

View File

@@ -0,0 +1 @@
e95eb36797f002f77df630fb500d234049e21c0eb80f348ae37ce54498ac7a1f

View File

@@ -0,0 +1 @@
67c2be8f235e41d1a871a79247832affb34570ec21db63f7c34e8b26432f066e

View File

@@ -22589,6 +22589,8 @@ CREATE TABLE remote_development_agent_configs (
network_policy_egress jsonb DEFAULT '[{"allow": "0.0.0.0/0", "except": ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]}]'::jsonb NOT NULL,
default_resources_per_workspace_container jsonb DEFAULT '{}'::jsonb NOT NULL,
max_resources_per_workspace jsonb DEFAULT '{}'::jsonb NOT NULL,
max_workspaces bigint DEFAULT '-1'::integer NOT NULL,
max_workspaces_per_user bigint DEFAULT '-1'::integer NOT NULL,
CONSTRAINT check_72947a4495 CHECK ((char_length(gitlab_workspaces_proxy_namespace) <= 63)),
CONSTRAINT check_9f5cd54d1c CHECK ((char_length(dns_zone) <= 256))
);

View File

@@ -319,6 +319,63 @@ To use Docker-in-Docker with TLS enabled in Kubernetes:
- docker run my-docker-image /script/to/run/tests
```
##### Docker-in-Docker with TLS disabled in Kubernetes
To use Docker-in-Docker with TLS disabled in Kubernetes, you must adapt the example above to:
- Remove the `[[runners.kubernetes.volumes.empty_dir]]` section from the `values.yml` file.
- Change the port from `2376` to `2375` with `DOCKER_HOST: tcp://docker:2375`.
- Instruct Docker to start with TLS disabled with `DOCKER_TLS_CERTDIR: ""`.
For example:
1. Using the
[Helm chart](https://docs.gitlab.com/runner/install/kubernetes.html), update the
[`values.yml` file](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/blob/00c1a2098f303dffb910714752e9a981e119f5b5/values.yaml#L133-137):
```yaml
runners:
  config: |
    [[runners]]
      [runners.kubernetes]
        image = "ubuntu:20.04"
        privileged = true
```
1. You can now use `docker` in the job script. You should include the
`docker:24.0.5-dind` service:
```yaml
default:
  image: docker:24.0.5
  services:
    - docker:24.0.5-dind
  before_script:
    - docker info

variables:
  # When using dind service, you must instruct Docker to talk with
  # the daemon started inside of the service. The daemon is available
  # with a network connection instead of the default
  # /var/run/docker.sock socket.
  DOCKER_HOST: tcp://docker:2375
  #
  # The 'docker' hostname is the alias of the service container as described at
  # https://docs.gitlab.com/ee/ci/services/#accessing-the-services.
  # If you're using GitLab Runner 12.7 or earlier with the Kubernetes executor and Kubernetes 1.6 or earlier,
  # the variable must be set to tcp://localhost:2375 because of how the
  # Kubernetes executor connects services to the job container
  # DOCKER_HOST: tcp://localhost:2375
  #
  # This instructs Docker not to start over TLS.
  DOCKER_TLS_CERTDIR: ""

build:
  stage: build
  script:
    - docker build -t my-docker-image .
    - docker run my-docker-image /script/to/run/tests
```
#### Known issues with Docker-in-Docker
Docker-in-Docker is the recommended configuration, but you should be aware of the following issues:

View File

@@ -42,7 +42,7 @@ This should enable everyone to see locally any change in an IDE being sent to the
When testing interactions with the Model Gateway, you might want to integrate your local GDK
with the deployed staging Model Gateway. To do this:
1. You need a [cloud staging license](../../user/project/repository/code_suggestions/self_managed.md#update-gitlab) that has the Code Suggestions add-on, because add-ons are enabled on staging. Drop a note in the `#s_fulfillment` internal Slack channel to request an add-on to your license. See this [handbook page](https://about.gitlab.com/handbook/developer-onboarding/#working-on-gitlab-ee-developer-licenses) for how to request a license for local development.
1. You need a [cloud staging license](../../user/project/repository/code_suggestions/self_managed.md#upgrade-gitlab) that has the Code Suggestions add-on, because add-ons are enabled on staging. Drop a note in the `#s_fulfillment` internal Slack channel to request an add-on to your license. See this [handbook page](https://about.gitlab.com/handbook/developer-onboarding/#working-on-gitlab-ee-developer-licenses) for how to request a license for local development.
1. Set environment variables to point customers-dot to staging, and the Model Gateway to staging:
```shell

View File

@@ -48,7 +48,7 @@ If you suspect that your GitLab instance has been compromised, consider taking t
- Review the [Credentials Inventory](../administration/credentials_inventory.md), if available to you.
- Change any sensitive credentials, variables, tokens, and secrets. For example, those located in instance configuration, database,
CI/CD pipelines, or elsewhere.
- Upgrade to the latest version of GitLab and adopt a plan to upgrade after every security patch release.
- Update to the latest version of GitLab and adopt a plan to update after every security patch release.
In addition, the suggestions below are common steps taken in incident response plans when servers are compromised by malicious actors.

View File

@@ -126,14 +126,14 @@ If your GitLab instance uses an HTTP proxy server to access the internet, ensure
the server is configured to allow outbound connections, including the
[`gitlab_workhorse` environment variable](https://docs.gitlab.com/omnibus/settings/environment-variables.html).
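For an Omnibus installation, outbound proxy settings are typically passed through environment variables in `/etc/gitlab/gitlab.rb`. The snippet below is a minimal sketch with placeholder proxy addresses; the linked Omnibus settings page remains the authoritative reference:

```ruby
# /etc/gitlab/gitlab.rb -- placeholder proxy addresses, adjust for your environment.
gitlab_workhorse['env'] = {
  "http_proxy"  => "http://proxy.example.com:8080",
  "https_proxy" => "http://proxy.example.com:8080",
  "no_proxy"    => "localhost,127.0.0.1"
}
# Apply with: gitlab-ctl reconfigure
```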
### Update GitLab
### Upgrade GitLab
In GitLab 16.3 and later, GitLab is enforcing the cloud licensing requirement for Code Suggestions:
- The Premium and Ultimate subscription tiers support cloud licensing.
- GitLab Free does not have cloud licensing support.
If you have a GitLab Free subscription and update to GitLab 16.3 or later,
If you have a GitLab Free subscription and upgrade to GitLab 16.3 or later,
to continue having early access to Code Suggestions, you must:
1. Have a [subscription that supports cloud licensing](https://about.gitlab.com/pricing/).
@@ -144,8 +144,8 @@ to continue having early access to Code Suggestions, you must:
You must [manually synchronize your subscription](../../../../subscriptions/self_managed/index.md#manually-synchronize-your-subscription-details) if either:
- You have already updated to GitLab 16.3 and have just bought a Premium or Ultimate tier subscription.
- You already have a Premium or Ultimate tier subscription and have just updated to GitLab 16.3.
- You have already upgraded to GitLab 16.3 and have just bought a Premium or Ultimate tier subscription.
- You already have a Premium or Ultimate tier subscription and have just upgraded to GitLab 16.3.
Without the manual synchronization, it might take up to 24 hours to activate Code Suggestions on your instance.

View File

@@ -36,7 +36,7 @@ module Gitlab
def mark_notified
Gitlab::Redis::SharedState.with do |redis|
redis.hset(DELETION_TRACKING_REDIS_KEY, "project:#{project_id}", Date.current)
redis.hset(DELETION_TRACKING_REDIS_KEY, "project:#{project_id}", Date.current.to_s)
end
end
@@ -47,8 +47,9 @@ module Gitlab
end
def scheduled_deletion_date
if notification_date.present?
(notification_date.to_date + grace_period_after_notification).to_s
notif_date = notification_date
if notif_date.present?
(notif_date.to_date + grace_period_after_notification).to_s
else
grace_period_after_notification.from_now.to_date.to_s
end
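
The net effect of this change is that the Redis hash field now stores an explicit date string rather than a `Date` object handed to the client for serialization. As a quick illustration (an assumption about intent, not text from the commit):

```ruby
# Illustrative only: what the hset value looks like before and after this change.
Date.current       # a Date object, previously passed to redis.hset as-is
Date.current.to_s  # an explicit date string, e.g. "2023-11-16", now written to Redis
```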

View File

@@ -0,0 +1,85 @@
# frozen_string_literal: true
module Gitlab
module Instrumentation
module RedisHelper
APDEX_EXCLUDE = %w[brpop blpop brpoplpush bzpopmin bzpopmax command xread xreadgroup].freeze
def instrument_call(commands, instrumentation_class, pipelined = false)
start = Gitlab::Metrics::System.monotonic_time # must come first so that 'start' is always defined
instrumentation_class.instance_count_request(commands.size)
instrumentation_class.instance_count_pipelined_request(commands.size) if pipelined
if !instrumentation_class.redis_cluster_validate!(commands) && ::RequestStore.active?
instrumentation_class.increment_cross_slot_request_count
end
yield
rescue ::Redis::BaseError => ex
if ex.message.start_with?('MOVED', 'ASK')
instrumentation_class.instance_count_cluster_redirection(ex)
else
instrumentation_class.instance_count_exception(ex)
end
instrumentation_class.log_exception(ex)
raise ex
ensure
duration = Gitlab::Metrics::System.monotonic_time - start
unless exclude_from_apdex?(commands)
commands.each { instrumentation_class.instance_observe_duration(duration / commands.size) }
end
if ::RequestStore.active?
# These metrics measure total Redis usage per Rails request / job.
instrumentation_class.increment_request_count(commands.size)
instrumentation_class.add_duration(duration)
instrumentation_class.add_call_details(duration, commands)
end
end
def measure_write_size(command, instrumentation_class)
size = 0
# Mimic what happens in
# https://github.com/redis/redis-rb/blob/f597f21a6b954b685cf939febbc638f6c803e3a7/lib/redis/connection/command_helper.rb#L8.
# This count is an approximation that omits the Redis protocol overhead
# of type prefixes, length prefixes and line endings.
command.each do |x|
size += if x.is_a? Array
x.inject(0) { |sum, y| sum + y.to_s.bytesize }
else
x.to_s.bytesize
end
end
instrumentation_class.increment_write_bytes(size)
end
def measure_read_size(result, instrumentation_class)
# The Connection::Ruby#read class can return one of four types of results from read:
# https://github.com/redis/redis-rb/blob/f597f21a6b954b685cf939febbc638f6c803e3a7/lib/redis/connection/ruby.rb#L406
#
# 1. Error (exception, will not reach this line)
# 2. Status (string)
# 3. Integer (will be converted to string by to_s.bytesize and thrown away)
# 4. "Binary" string (i.e. may contain zero byte)
# 5. Array of binary string
if result.is_a? Array
# Redis can return nested arrays, e.g. from XRANGE or GEOPOS, so we use recursion here.
result.each { |x| measure_read_size(x, instrumentation_class) }
else
# This count is an approximation that omits the Redis protocol overhead
# of type prefixes, length prefixes and line endings.
instrumentation_class.increment_read_bytes(result.to_s.bytesize)
end
end
def exclude_from_apdex?(commands)
commands.any? { |command| APDEX_EXCLUDE.include?(command.first.to_s.downcase) }
end
end
end
end
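
The following file shows the consumer: `RedisInterceptor` now includes this helper and passes its `instrumentation_class` through. Stripped to just that wiring (a condensed sketch of the diff below, with the client read/write plumbing elided):

```ruby
# Condensed from the RedisInterceptor diff that follows.
module Gitlab
  module Instrumentation
    module RedisInterceptor
      include RedisHelper

      def call(command)
        # Single command: the helper records counts, duration, and errors.
        instrument_call([command], instrumentation_class) { super }
      end

      def call_pipeline(pipeline)
        # Pipelines pass `true` so pipelined requests are counted separately.
        instrument_call(pipeline.commands, instrumentation_class, true) { super }
      end
    end
  end
end
```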

View File

@@ -3,115 +3,37 @@
module Gitlab
module Instrumentation
module RedisInterceptor
APDEX_EXCLUDE = %w[brpop blpop brpoplpush bzpopmin bzpopmax command xread xreadgroup].freeze
include RedisHelper
def call(command)
instrument_call([command]) do
instrument_call([command], instrumentation_class) do
super
end
end
def call_pipeline(pipeline)
instrument_call(pipeline.commands, true) do
instrument_call(pipeline.commands, instrumentation_class, true) do
super
end
end
def write(command)
measure_write_size(command) if ::RequestStore.active?
measure_write_size(command, instrumentation_class) if ::RequestStore.active?
super
end
def read
result = super
measure_read_size(result) if ::RequestStore.active?
measure_read_size(result, instrumentation_class) if ::RequestStore.active?
result
end
private
def instrument_call(commands, pipelined = false)
start = ::Gitlab::Metrics::System.monotonic_time # must come first so that 'start' is always defined
instrumentation_class.instance_count_request(commands.size)
instrumentation_class.instance_count_pipelined_request(commands.size) if pipelined
if !instrumentation_class.redis_cluster_validate!(commands) && ::RequestStore.active?
instrumentation_class.increment_cross_slot_request_count
end
yield
rescue ::Redis::BaseError => ex
if ex.message.start_with?('MOVED', 'ASK')
instrumentation_class.instance_count_cluster_redirection(ex)
else
instrumentation_class.instance_count_exception(ex)
end
instrumentation_class.log_exception(ex)
raise ex
ensure
duration = ::Gitlab::Metrics::System.monotonic_time - start
unless exclude_from_apdex?(commands)
commands.each { instrumentation_class.instance_observe_duration(duration / commands.size) }
end
if ::RequestStore.active?
# These metrics measure total Redis usage per Rails request / job.
instrumentation_class.increment_request_count(commands.size)
instrumentation_class.add_duration(duration)
instrumentation_class.add_call_details(duration, commands)
end
end
def measure_write_size(command)
size = 0
# Mimic what happens in
# https://github.com/redis/redis-rb/blob/f597f21a6b954b685cf939febbc638f6c803e3a7/lib/redis/connection/command_helper.rb#L8.
# This count is an approximation that omits the Redis protocol overhead
# of type prefixes, length prefixes and line endings.
command.each do |x|
size += if x.is_a? Array
x.inject(0) { |sum, y| sum + y.to_s.bytesize }
else
x.to_s.bytesize
end
end
instrumentation_class.increment_write_bytes(size)
end
def measure_read_size(result)
# The Connection::Ruby#read class can return one of four types of results from read:
# https://github.com/redis/redis-rb/blob/f597f21a6b954b685cf939febbc638f6c803e3a7/lib/redis/connection/ruby.rb#L406
#
# 1. Error (exception, will not reach this line)
# 2. Status (string)
# 3. Integer (will be converted to string by to_s.bytesize and thrown away)
# 4. "Binary" string (i.e. may contain zero byte)
# 5. Array of binary string
if result.is_a? Array
# Redis can return nested arrays, e.g. from XRANGE or GEOPOS, so we use recursion here.
result.each { |x| measure_read_size(x) }
else
# This count is an approximation that omits the Redis protocol overhead
# of type prefixes, length prefixes and line endings.
instrumentation_class.increment_read_bytes(result.to_s.bytesize)
end
end
# That's required so it knows which GitLab Redis instance
# it's interacting with in order to categorize accordingly.
#
def instrumentation_class
@options[:instrumentation_class] # rubocop:disable Gitlab/ModuleWithInstanceVariables
end
def exclude_from_apdex?(commands)
commands.any? { |command| APDEX_EXCLUDE.include?(command.first.to_s.downcase) }
end
end
end
end

View File

@@ -0,0 +1,136 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Instrumentation::RedisHelper, :request_store, feature_category: :scalability do
include RedisHelpers
let(:minimal_test_class) do
Class.new do
include Gitlab::Instrumentation::RedisHelper
def initialize
@instrumentation_class = Gitlab::Instrumentation::Redis::Cache
end
def check_command(commands, pipelined)
instrument_call(commands, @instrumentation_class, pipelined) { 'empty block' }
end
def test_read(result)
measure_read_size(result, @instrumentation_class)
end
def test_write(command)
measure_write_size(command, @instrumentation_class)
end
def test_exclusion(commands)
exclude_from_apdex?(commands)
end
end
end
before do
stub_const("MinimalTestClass", minimal_test_class)
end
subject(:minimal_test_class_instance) { MinimalTestClass.new }
describe '.instrument_call' do
it 'instruments request count' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:instance_count_request).with(1)
expect(Gitlab::Instrumentation::Redis::Cache).not_to receive(:instance_count_pipelined_request)
minimal_test_class_instance.check_command([[:set, 'foo', 'bar']], false)
end
it 'performs cluster validation' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:redis_cluster_validate!).once
minimal_test_class_instance.check_command([[:set, 'foo', 'bar']], false)
end
context 'when command is not valid for Redis Cluster' do
before do
allow(Gitlab::Instrumentation::Redis::Cache).to receive(:redis_cluster_validate!).and_return(false)
end
it 'reports cross slot request' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_cross_slot_request_count).once
minimal_test_class_instance.check_command([[:mget, 'foo', 'bar']], false)
end
end
context 'when an error is raised' do
# specific error behaviours are tested in spec/lib/gitlab/instrumentation/redis_client_middleware_spec.rb
# this spec tests for the generic behaviour to verify that `ensure` works for any general error types
before do
allow(Gitlab::Instrumentation::Redis::Cache).to receive(:instance_count_request)
.and_raise(StandardError)
end
it 'ensures duration is tracked' do
commands = [[:set, 'foo', 'bar']]
allow(Gitlab::Instrumentation::Redis::Cache).to receive(:instance_observe_duration).once
allow(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_request_count).with(1).once
allow(Gitlab::Instrumentation::Redis::Cache).to receive(:add_duration).once
allow(Gitlab::Instrumentation::Redis::Cache).to receive(:add_call_details).with(anything, commands).once
expect { minimal_test_class_instance.check_command(commands, false) }.to raise_error(StandardError)
end
end
context 'when pipelined' do
it 'instruments pipelined request count' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:instance_count_pipelined_request)
minimal_test_class_instance.check_command([[:get, '{user1}:bar'], [:get, '{user1}:foo']], true)
end
end
end
describe '.measure_read_size' do
it 'reads array' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_read_bytes).with(3).exactly(3).times
minimal_test_class_instance.test_read(%w[bar foo buz])
end
it 'reads Integer' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_read_bytes).with(4)
minimal_test_class_instance.test_read(1234)
end
it 'reads String' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_read_bytes).with(3)
minimal_test_class_instance.test_read('bar')
end
end
describe '.measure_write_size' do
it 'measures command size' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_write_bytes).with(9)
minimal_test_class_instance.test_write([:set, 'foo', 'bar'])
end
it 'accepts array input' do
expect(Gitlab::Instrumentation::Redis::Cache).to receive(:increment_write_bytes).with((9 + 12))
minimal_test_class_instance.test_write([[:set, 'foo', 'bar'], [:lpush, 'que', 'item']])
end
end
describe '.exclude_from_apdex?' do
it 'returns false if all commands are allowed' do
expect(minimal_test_class_instance.test_exclusion([[:set, 'foo', 'bar'], [:lpush, 'que', 'item']])).to eq(false)
end
it 'returns true if any commands are banned' do
expect(minimal_test_class_instance.test_exclusion([[:brpop, 'foo', 2], [:lpush, 'que', 'item']])).to eq(true)
end
end
end

View File

@@ -881,7 +881,7 @@ RSpec.describe Event, feature_category: :user_profile do
context 'when a project was updated more than 1 hour ago', :clean_gitlab_redis_shared_state do
before do
::Gitlab::Redis::SharedState.with do |redis|
redis.hset('inactive_projects_deletion_warning_email_notified', "project:#{project.id}", Date.current)
redis.hset('inactive_projects_deletion_warning_email_notified', "project:#{project.id}", Date.current.to_s)
end
end

View File

@@ -95,7 +95,7 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker, feature_category: :
expect(redis).to receive(:hset).with(
'inactive_projects_deletion_warning_email_notified',
"project:#{inactive_large_project.id}",
Date.current
Date.current.to_s
)
end
expect(::Projects::InactiveProjectsDeletionNotificationWorker).to receive(:perform_async).with(