Add latest changes from gitlab-org/gitlab@master

GitLab Bot
2025-05-19 18:19:33 +00:00
parent 6b456b2e03
commit 8ab4a7c3af
51 changed files with 705 additions and 61 deletions

View File

@ -2,6 +2,7 @@ include:
- local: .gitlab/ci/qa-common/rules.gitlab-ci.yml
- local: .gitlab/ci/qa-common/variables.gitlab-ci.yml
- local: .gitlab/ci/qa-common/omnibus.gitlab-ci.yml
- local: .gitlab/ci/test-on-omnibus/internal.gitlab-ci.yml
workflow:
rules:

View File

@ -1 +1 @@
074f9898f8f398c40be5146092dbc6ff36ab0ada
d5f56b1272c7abb5cf0f64b8241e141bed1253ec

View File

@ -8,7 +8,7 @@ import {
import NestedGroupsProjectsList from '~/vue_shared/components/nested_groups_projects_list/nested_groups_projects_list.vue';
import ResourceListsEmptyState from '~/vue_shared/components/resource_lists/empty_state.vue';
import { formatGroups } from './utils';
import memberGroupsQuery from './graphql/queries/member_groups.query.graphql';
import groupsQuery from './graphql/queries/groups.query.graphql';
const baseTab = {
formatter: formatGroups,
@ -20,15 +20,22 @@ const baseTab = {
'Organization|A group is a collection of several projects. If you organize your projects under a group, it works like a folder.',
),
},
query: groupsQuery,
queryPath: 'groups',
listComponent: NestedGroupsProjectsList,
};
export const MEMBER_TAB = {
...baseTab,
text: __('Member'),
value: 'member',
query: memberGroupsQuery,
queryPath: 'groups',
listComponent: NestedGroupsProjectsList,
};
export const INACTIVE_TAB = {
...baseTab,
text: __('Inactive'),
value: 'inactive',
variables: { active: false },
};
export const SORT_OPTION_NAME = {
@ -48,7 +55,7 @@ export const SORT_OPTION_UPDATED = {
export const SORT_OPTIONS = [SORT_OPTION_NAME, SORT_OPTION_CREATED, SORT_OPTION_UPDATED];
export const GROUP_DASHBOARD_TABS = [MEMBER_TAB];
export const GROUP_DASHBOARD_TABS = [MEMBER_TAB, INACTIVE_TAB];
export const BASE_ROUTE = '/dashboard/groups';

View File

@ -1,7 +1,13 @@
#import "~/graphql_shared/fragments/group.fragment.graphql"
query getMemberYourWorkGroups($search: String, $sort: String, $parentId: Int, $page: Int) {
groups(search: $search, sort: $sort, parentId: $parentId, page: $page) @client {
query yourWorkGroups(
$active: Boolean = true
$search: String
$sort: String
$parentId: Int
$page: Int
) {
groups(active: $active, search: $search, sort: $sort, parentId: $parentId, page: $page) @client {
nodes {
...Group
children

View File

@ -4,9 +4,9 @@ import { formatGroupForGraphQLResolver } from '~/groups/your_work/graphql/utils'
export const resolvers = (endpoint) => ({
Query: {
async groups(_, { search: filter, sort, parentId, page }) {
async groups(_, { active, search: filter, sort, parentId, page }) {
const { data, headers } = await axios.get(endpoint, {
params: { filter, sort, parent_id: parentId, page },
params: { active, filter, sort, parent_id: parentId, page },
});
const normalizedHeaders = normalizeHeaders(headers);

View File

@ -51,9 +51,9 @@ export default {
hoverTimeoutId: null,
showSVG: true,
targetRect: null,
cleanup: null,
};
},
cleanupFunction: undefined,
computed: {
topSVGPoints() {
const x = (this.currentMouseX / this.targetRect.width) * 100;
@ -121,13 +121,11 @@ export default {
};
});
this.$options.cleanupFunction = autoUpdate(target, flyout, updatePosition);
},
beforeUnmount() {
this.$options.cleanupFunction?.();
clearTimeout(this.hoverTimeoutId);
this.cleanup = autoUpdate(target, flyout, updatePosition);
},
beforeDestroy() {
this.cleanup();
clearTimeout(this.hoverTimeoutId);
const target = document.querySelector(`#${this.targetId}`);
target.removeEventListener('mousemove', this.onMouseMove);
},

View File

@ -8,6 +8,9 @@ module Integrations
include Gitlab::EncryptedAttribute
belongs_to :integration, inverse_of: self.table_name.to_sym, foreign_key: :integration_id, optional: true
belongs_to :project, inverse_of: self.table_name.to_sym, foreign_key: :project_id, optional: true
belongs_to :group, inverse_of: self.table_name.to_sym, foreign_key: :group_id, optional: true
belongs_to :organization, inverse_of: self.table_name.to_sym, foreign_key: :organization_id, optional: true
belongs_to :instance_integration,
inverse_of: self.table_name.to_sym,
@ -15,9 +18,12 @@ module Integrations
class_name: 'Integrations::Instance::Integration',
optional: true
before_validation :set_sharding_key
validates :integration, absence: true, if: :instance_integration
validates :instance_integration, absence: true, if: :integration
validate :validate_mutual_exclusion
validate :validate_sharding_key
end
class_methods do
@ -38,7 +44,17 @@ module Integrations
def to_database_hash
as_json(
only: self.class.column_names
).except('id', 'service_id', 'integration_id', 'created_at', 'updated_at', 'instance_integration_id')
).except(
'id',
'service_id',
'integration_id',
'created_at',
'updated_at',
'instance_integration_id',
'group_id',
'project_id',
'organization_id'
)
end
private
@ -48,5 +64,19 @@ module Integrations
errors.add(:base, :blank, message: 'one of integration or instance_integration must be present')
end
def set_sharding_key
return if project_id || group_id || organization_id || integration.nil?
self.project_id = integration.project_id if integration.project_id
self.group_id = integration.group_id if integration.group_id
self.organization_id = integration.organization_id if integration.organization_id
end
def validate_sharding_key
return if project_id.present? || group_id.present? || organization_id.present?
errors.add(:base, :blank, message: 'one of project_id, group_id or organization_id must be present')
end
end
end
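
Taken together, the new callback and validation mean a data-fields record inherits its sharding key from the parent integration unless one was assigned explicitly. A usage sketch (the class names are real GitLab models; the IDs are illustrative):

```ruby
# Sketch of the new sharding-key behavior, assuming a Jira integration
# scoped to project 42. All IDs here are illustrative.
integration = Integrations::Jira.new(project_id: 42)

data_fields = Integrations::JiraTrackerData.new(integration: integration)
data_fields.valid? # runs before_validation :set_sharding_key

data_fields.project_id      # => 42, copied from the parent integration
data_fields.group_id        # => nil
data_fields.organization_id # => nil

# An explicitly assigned key short-circuits the callback:
explicit = Integrations::JiraTrackerData.new(integration: integration, group_id: 7)
explicit.valid?
explicit.group_id   # => 7
explicit.project_id # => nil, not overwritten
```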

View File

@ -50,8 +50,11 @@ module Integrations
integration_fk_name = model.reflections['integration'].foreign_key
attributes = data_fields_hash(:create)
items_to_insert = integration_ids.map do |id|
attributes.merge(integration_fk_name => id)
items_to_insert = integration_ids.zip(batch).map do |integration_id, record|
attributes.merge(
integration_fk_name => integration_id,
"#{association}_id" => record.id
)
end
bulk_insert_new(model, items_to_insert)
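
The `zip` pairing is the crux of this fix: each newly inserted integration id is matched positionally with the record in `batch` it was created for, so every data-fields row carries its owner's sharding key. A stripped-down illustration (the `Record` struct and attribute values are stand-ins, not the service's API):

```ruby
# Standalone illustration of the pairing above. Record stands in for a
# Project or Group; the attributes hash is illustrative.
Record = Struct.new(:id)

integration_ids = [101, 102, 103]
batch = [Record.new(1), Record.new(2), Record.new(3)]
association = 'project'
attributes = { 'url' => 'https://jira.example.com' }

items_to_insert = integration_ids.zip(batch).map do |integration_id, record|
  attributes.merge(
    'integration_id'    => integration_id,
    "#{association}_id" => record.id
  )
end

items_to_insert.first
# => {"url"=>"https://jira.example.com", "integration_id"=>101, "project_id"=>1}
```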

View File

@ -0,0 +1,10 @@
---
name: stream_audit_events_remote_ip_proxy_protocol
description: Fixes remote IP in stream audit events of Git over SSH
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/378590
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/191408
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/543033
milestone: '18.1'
group: group::source code
type: beta
default_enabled: false
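
The flag ships disabled as a beta. To exercise the fix before rollout completes, it can be turned on from the Rails console with the standard feature-flag tooling (not part of this commit); the helper added further below checks the flag against a project actor, so per-project enablement also works:

```ruby
# Enable globally:
Feature.enable(:stream_audit_events_remote_ip_proxy_protocol)

# Or for a single project, since the check passes the project as actor:
project = Project.find_by_full_path('my-group/my-project')
Feature.enable(:stream_audit_events_remote_ip_proxy_protocol, project)
```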

View File

@ -0,0 +1,13 @@
---
migration_job_name: BackfillHasVulnerabilityResolutionCwe78Cwe89
description: >-
We need to support filtering vulnerabilities based on whether the "Resolve with Duo" button is enabled.
To enable this, we introduced the has_vulnerability_resolution column.
This update backfills the column to true for vulnerabilities associated with CWE-78 and CWE-89.
feature_category: vulnerability_management
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/188420
milestone: '18.0'
queued_migration_version: 20250416081025
finalized_by: # version of the migration that finalized this BBM

View File

@ -0,0 +1,50 @@
# frozen_string_literal: true
class AddForeignKeysToIssueTrackerData < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
def up
with_lock_retries do
add_column :issue_tracker_data, :project_id, :bigint, if_not_exists: true
add_column :issue_tracker_data, :group_id, :bigint, if_not_exists: true
add_column :issue_tracker_data, :organization_id, :bigint, if_not_exists: true
end
add_concurrent_foreign_key(
:issue_tracker_data,
:projects,
column: :project_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
add_concurrent_foreign_key(
:issue_tracker_data,
:namespaces,
column: :group_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
add_concurrent_foreign_key(
:issue_tracker_data,
:organizations,
column: :organization_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
end
def down
with_lock_retries do
remove_column :issue_tracker_data, :project_id, if_exists: true
remove_column :issue_tracker_data, :group_id, if_exists: true
remove_column :issue_tracker_data, :organization_id, if_exists: true
end
end
end
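
All three tracker-data migrations follow the same two-step pattern: add the columns under `with_lock_retries`, then add the foreign keys concurrently with `validate: false` so existing rows are not scanned while locks are held. A later migration would normally validate the constraints once the columns are backfilled. A hypothetical sketch of that follow-up (class name and timing are assumptions; it is not part of this commit):

```ruby
# Hypothetical follow-up migration: NOT VALID foreign keys are validated
# separately, which scans existing rows without a long exclusive lock.
class ValidateForeignKeysOnIssueTrackerData < Gitlab::Database::Migration[2.3]
  milestone '18.1'

  def up
    validate_foreign_key :issue_tracker_data, :project_id
    validate_foreign_key :issue_tracker_data, :group_id
    validate_foreign_key :issue_tracker_data, :organization_id
  end

  def down
    # No-op: constraint validation cannot be selectively reverted.
  end
end
```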

View File

@ -0,0 +1,50 @@
# frozen_string_literal: true
class AddForeignKeysToJiraTrackerData < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
def up
with_lock_retries do
add_column :jira_tracker_data, :project_id, :bigint, if_not_exists: true
add_column :jira_tracker_data, :group_id, :bigint, if_not_exists: true
add_column :jira_tracker_data, :organization_id, :bigint, if_not_exists: true
end
add_concurrent_foreign_key(
:jira_tracker_data,
:projects,
column: :project_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
add_concurrent_foreign_key(
:jira_tracker_data,
:namespaces,
column: :group_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
add_concurrent_foreign_key(
:jira_tracker_data,
:organizations,
column: :organization_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
end
def down
with_lock_retries do
remove_column :jira_tracker_data, :project_id, if_exists: true
remove_column :jira_tracker_data, :group_id, if_exists: true
remove_column :jira_tracker_data, :organization_id, if_exists: true
end
end
end

View File

@ -0,0 +1,50 @@
# frozen_string_literal: true
class AddForeignKeysToZentaoTrackerData < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
def up
with_lock_retries do
add_column :zentao_tracker_data, :project_id, :bigint, if_not_exists: true
add_column :zentao_tracker_data, :group_id, :bigint, if_not_exists: true
add_column :zentao_tracker_data, :organization_id, :bigint, if_not_exists: true
end
add_concurrent_foreign_key(
:zentao_tracker_data,
:projects,
column: :project_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
add_concurrent_foreign_key(
:zentao_tracker_data,
:namespaces,
column: :group_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
add_concurrent_foreign_key(
:zentao_tracker_data,
:organizations,
column: :organization_id,
foreign_key: true,
on_delete: :cascade,
validate: false
)
end
def down
with_lock_retries do
remove_column :zentao_tracker_data, :project_id, if_exists: true
remove_column :zentao_tracker_data, :group_id, if_exists: true
remove_column :zentao_tracker_data, :organization_id, if_exists: true
end
end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true
class AddFkIndexesToIssueTrackerData < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
TABLE_NAME = :issue_tracker_data
PROJECT_ID_INDEX_NAME = 'index_issue_tracker_data_on_project_id'
GROUP_ID_INDEX_NAME = 'index_issue_tracker_data_on_group_id'
ORGANIZATION_ID_INDEX_NAME = 'index_issue_tracker_data_on_organization_id'
def up
add_concurrent_index TABLE_NAME, :project_id, name: PROJECT_ID_INDEX_NAME
add_concurrent_index TABLE_NAME, :group_id, name: GROUP_ID_INDEX_NAME
add_concurrent_index TABLE_NAME, :organization_id, name: ORGANIZATION_ID_INDEX_NAME
end
def down
remove_concurrent_index_by_name TABLE_NAME, name: PROJECT_ID_INDEX_NAME
remove_concurrent_index_by_name TABLE_NAME, name: GROUP_ID_INDEX_NAME
remove_concurrent_index_by_name TABLE_NAME, name: ORGANIZATION_ID_INDEX_NAME
end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true
class AddFkIndexesToJiraTrackerData < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
TABLE_NAME = :jira_tracker_data
PROJECT_ID_INDEX_NAME = 'index_jira_tracker_data_on_project_id'
GROUP_ID_INDEX_NAME = 'index_jira_tracker_data_on_group_id'
ORGANIZATION_ID_INDEX_NAME = 'index_jira_tracker_data_on_organization_id'
def up
add_concurrent_index TABLE_NAME, :project_id, name: PROJECT_ID_INDEX_NAME
add_concurrent_index TABLE_NAME, :group_id, name: GROUP_ID_INDEX_NAME
add_concurrent_index TABLE_NAME, :organization_id, name: ORGANIZATION_ID_INDEX_NAME
end
def down
remove_concurrent_index_by_name TABLE_NAME, name: PROJECT_ID_INDEX_NAME
remove_concurrent_index_by_name TABLE_NAME, name: GROUP_ID_INDEX_NAME
remove_concurrent_index_by_name TABLE_NAME, name: ORGANIZATION_ID_INDEX_NAME
end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true
class AddFkIndexesToZentaoTrackerData < Gitlab::Database::Migration[2.3]
milestone '18.1'
disable_ddl_transaction!
TABLE_NAME = :zentao_tracker_data
PROJECT_ID_INDEX_NAME = 'index_zentao_tracker_data_on_project_id'
GROUP_ID_INDEX_NAME = 'index_zentao_tracker_data_on_group_id'
ORGANIZATION_ID_INDEX_NAME = 'index_zentao_tracker_data_on_organization_id'
def up
add_concurrent_index TABLE_NAME, :project_id, name: PROJECT_ID_INDEX_NAME
add_concurrent_index TABLE_NAME, :group_id, name: GROUP_ID_INDEX_NAME
add_concurrent_index TABLE_NAME, :organization_id, name: ORGANIZATION_ID_INDEX_NAME
end
def down
remove_concurrent_index_by_name TABLE_NAME, name: PROJECT_ID_INDEX_NAME
remove_concurrent_index_by_name TABLE_NAME, name: GROUP_ID_INDEX_NAME
remove_concurrent_index_by_name TABLE_NAME, name: ORGANIZATION_ID_INDEX_NAME
end
end

View File

@ -0,0 +1,27 @@
# frozen_string_literal: true
class QueueBackfillHasVulnerabilityResolutionCwe78Cwe89 < Gitlab::Database::Migration[2.2]
milestone '18.0'
restrict_gitlab_migration gitlab_schema: :gitlab_sec
MIGRATION = "BackfillHasVulnerabilityResolutionCwe78Cwe89"
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 5000
SUB_BATCH_SIZE = 500
def up
queue_batched_background_migration(
MIGRATION,
:vulnerability_reads,
:id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
delete_batched_background_migration(MIGRATION, :vulnerability_reads, :id, [])
end
end
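
Once deployed, the queued migration's progress can be checked from the Rails console with the standard batched-migration tooling (again, not part of this commit):

```ruby
# Look up the queued batched background migration and report its state.
migration = Gitlab::Database::BackgroundMigration::BatchedMigration
  .find_by(job_class_name: 'BackfillHasVulnerabilityResolutionCwe78Cwe89')

migration&.status_name # => :active, :finished, ...
```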

View File

@ -0,0 +1,28 @@
# frozen_string_literal: true
class AddAFullUniqueIndexOnIssuableResourceLinks < Gitlab::Database::Migration[2.3]
INDEX_NAME = 'index_unique_issuable_resource_links_on_issue_id_and_link'
disable_ddl_transaction!
DEPENDENT_BATCHED_BACKGROUND_MIGRATIONS = [
20240908225334,
20240911101712,
20241111055711
]
milestone '18.1'
def up
add_concurrent_index(
:issuable_resource_links,
%i[issue_id link],
name: INDEX_NAME,
unique: true
)
end
def down
remove_concurrent_index_by_name(:issuable_resource_links, INDEX_NAME)
end
end

View File

@ -0,0 +1,23 @@
# frozen_string_literal: true
class DropPartialUniqueIndexOnIssuableResourceLinks < Gitlab::Database::Migration[2.3]
INDEX_NAME = 'index_unique_issuable_resource_links_on_unique_issue_link'
disable_ddl_transaction!
milestone '18.1'
def up
remove_concurrent_index_by_name(:issuable_resource_links, INDEX_NAME)
end
def down
add_concurrent_index(
:issuable_resource_links,
%i[issue_id link],
where: "is_unique",
unique: true,
name: INDEX_NAME
)
end
end

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true
class DropDuplicateIndexOnIssuableResourceLinks < Gitlab::Database::Migration[2.3]
INDEX_NAME = 'index_issuable_resource_links_on_issue_id'
disable_ddl_transaction!
milestone '18.1'
def up
remove_concurrent_index_by_name(:issuable_resource_links, INDEX_NAME)
end
def down
add_concurrent_index(
:issuable_resource_links,
:issue_id,
name: INDEX_NAME
)
end
end

View File

@ -0,0 +1 @@
85e47f673c597107f73e9095b8a4c60073dbf66f6a7be43fd15e643638766589

View File

@ -0,0 +1 @@
63c274354ee771689289af94921e0f86d32959e63c6d3983c100bb463e9e8d82

View File

@ -0,0 +1 @@
ecc1899ae817e7a26adf6b01385c1f9958f25e0f4370361c4c3f4cc8fa9e1b64

View File

@ -0,0 +1 @@
adf2bd66d9a0ff65547b79688adfe83debc8fc3f526a28ba5b3860ba22f2e8c9

View File

@ -0,0 +1 @@
3c3579e33ef8865bca8d621aaa50b3f2823528a62dd658b4b76ad0aaf7991a48

View File

@ -0,0 +1 @@
d107eff2ff8ae2e7ab136af22b27776667467d5833aa3d9b250015b9a39d015b

View File

@ -0,0 +1 @@
053e3363047772c52be92e5406702a71e10b7d867a90a715108e6db0421c89b3

View File

@ -0,0 +1 @@
b32978e63750e27da3d9269ea2ed18ef5321ebbceae59c0f96e04410cd5378e8

View File

@ -0,0 +1 @@
ce8e5f153f17d9514002242785fe902a9e4adb800a6f0e1ea024b7da870e8b37

View File

@ -0,0 +1 @@
27a239db30053a8fa0e28f8e87244c6968dce4ef7105da3673519eeca40f43cd

View File

@ -16186,6 +16186,9 @@ CREATE TABLE issue_tracker_data (
encrypted_new_issue_url_iv character varying,
integration_id bigint,
instance_integration_id bigint,
project_id bigint,
group_id bigint,
organization_id bigint,
CONSTRAINT check_d525c6d20b CHECK ((num_nonnulls(instance_integration_id, integration_id) = 1))
);
@ -16399,6 +16402,9 @@ CREATE TABLE jira_tracker_data (
project_keys text[] DEFAULT '{}'::text[] NOT NULL,
customize_jira_issue_enabled boolean DEFAULT false,
instance_integration_id bigint,
project_id bigint,
group_id bigint,
organization_id bigint,
CONSTRAINT check_0bf84b76e9 CHECK ((char_length(vulnerabilities_issuetype) <= 255)),
CONSTRAINT check_160e0f9fe2 CHECK ((num_nonnulls(instance_integration_id, integration_id) = 1)),
CONSTRAINT check_214cf6a48b CHECK ((char_length(project_key) <= 255)),
@ -26090,6 +26096,9 @@ CREATE TABLE zentao_tracker_data (
encrypted_api_token bytea,
encrypted_api_token_iv bytea,
instance_integration_id bigint,
project_id bigint,
group_id bigint,
organization_id bigint,
CONSTRAINT check_500f588095 CHECK ((num_nonnulls(instance_integration_id, integration_id) = 1))
);
@ -35413,8 +35422,6 @@ CREATE INDEX index_issuable_metric_images_on_issue_id ON issuable_metric_images
CREATE INDEX index_issuable_metric_images_on_namespace_id ON issuable_metric_images USING btree (namespace_id);
CREATE INDEX index_issuable_resource_links_on_issue_id ON issuable_resource_links USING btree (issue_id);
CREATE INDEX index_issuable_resource_links_on_namespace_id ON issuable_resource_links USING btree (namespace_id);
CREATE UNIQUE INDEX index_issuable_severities_on_issue_id ON issuable_severities USING btree (issue_id);
@ -35465,10 +35472,16 @@ CREATE INDEX index_issue_metrics_on_namespace_id ON issue_metrics USING btree (n
CREATE INDEX index_issue_on_project_id_state_id_and_blocking_issues_count ON issues USING btree (project_id, state_id, blocking_issues_count);
CREATE INDEX index_issue_tracker_data_on_group_id ON issue_tracker_data USING btree (group_id);
CREATE INDEX index_issue_tracker_data_on_instance_integration_id ON issue_tracker_data USING btree (instance_integration_id);
CREATE INDEX index_issue_tracker_data_on_integration_id ON issue_tracker_data USING btree (integration_id);
CREATE INDEX index_issue_tracker_data_on_organization_id ON issue_tracker_data USING btree (organization_id);
CREATE INDEX index_issue_tracker_data_on_project_id ON issue_tracker_data USING btree (project_id);
CREATE INDEX index_issue_user_mentions_on_namespace_id ON issue_user_mentions USING btree (namespace_id);
CREATE UNIQUE INDEX index_issue_user_mentions_on_note_id ON issue_user_mentions USING btree (note_id) WHERE (note_id IS NOT NULL);
@ -35537,10 +35550,16 @@ CREATE INDEX index_jira_imports_on_project_id_and_jira_project_key ON jira_impor
CREATE INDEX index_jira_imports_on_user_id ON jira_imports USING btree (user_id);
CREATE INDEX index_jira_tracker_data_on_group_id ON jira_tracker_data USING btree (group_id);
CREATE INDEX index_jira_tracker_data_on_instance_integration_id ON jira_tracker_data USING btree (instance_integration_id);
CREATE INDEX index_jira_tracker_data_on_integration_id ON jira_tracker_data USING btree (integration_id);
CREATE INDEX index_jira_tracker_data_on_organization_id ON jira_tracker_data USING btree (organization_id);
CREATE INDEX index_jira_tracker_data_on_project_id ON jira_tracker_data USING btree (project_id);
CREATE INDEX index_job_artifact_states_failed_verification ON ci_job_artifact_states USING btree (verification_retry_at NULLS FIRST) WHERE (verification_state = 3);
CREATE INDEX index_job_artifact_states_needs_verification ON ci_job_artifact_states USING btree (verification_state) WHERE ((verification_state = 0) OR (verification_state = 3));
@ -37519,7 +37538,7 @@ CREATE UNIQUE INDEX index_unique_ci_runner_projects_on_runner_id_and_project_id
CREATE UNIQUE INDEX index_unique_epics_on_issue_id ON epics USING btree (issue_id);
CREATE UNIQUE INDEX index_unique_issuable_resource_links_on_unique_issue_link ON issuable_resource_links USING btree (issue_id, link) WHERE is_unique;
CREATE UNIQUE INDEX index_unique_issuable_resource_links_on_issue_id_and_link ON issuable_resource_links USING btree (issue_id, link);
CREATE UNIQUE INDEX index_unique_issue_link_id_on_related_epic_links ON related_epic_links USING btree (issue_link_id);
@ -38067,10 +38086,16 @@ CREATE UNIQUE INDEX index_xray_reports_on_project_id_and_lang ON xray_reports US
CREATE INDEX index_zens_on_last_rollout_failed_at ON zoekt_enabled_namespaces USING btree (last_rollout_failed_at);
CREATE INDEX index_zentao_tracker_data_on_group_id ON zentao_tracker_data USING btree (group_id);
CREATE INDEX index_zentao_tracker_data_on_instance_integration_id ON zentao_tracker_data USING btree (instance_integration_id);
CREATE INDEX index_zentao_tracker_data_on_integration_id ON zentao_tracker_data USING btree (integration_id);
CREATE INDEX index_zentao_tracker_data_on_organization_id ON zentao_tracker_data USING btree (organization_id);
CREATE INDEX index_zentao_tracker_data_on_project_id ON zentao_tracker_data USING btree (project_id);
CREATE INDEX index_zoekt_indices_on_id_conditional_watermark_level_state ON zoekt_indices USING btree (id) WHERE (((watermark_level = 10) AND (state = 10)) OR (watermark_level = 60));
CREATE INDEX index_zoekt_indices_on_namespace_id ON zoekt_indices USING btree (namespace_id, zoekt_enabled_namespace_id);
@ -41705,6 +41730,9 @@ ALTER TABLE ONLY project_requirement_compliance_statuses
ALTER TABLE ONLY requirements_management_test_reports
ADD CONSTRAINT fk_05094e3d87 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY jira_tracker_data
ADD CONSTRAINT fk_05895afb4c FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY analytics_dashboards_pointers
ADD CONSTRAINT fk_05d96922bd FOREIGN KEY (target_project_id) REFERENCES projects(id) ON DELETE CASCADE;
@ -41891,6 +41919,9 @@ ALTER TABLE ONLY user_achievements
ALTER TABLE ONLY internal_ids
ADD CONSTRAINT fk_162941d509 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY jira_tracker_data
ADD CONSTRAINT fk_16ddb573de FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY incident_management_timeline_events
ADD CONSTRAINT fk_17a5fafbd4 FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE;
@ -41999,6 +42030,9 @@ ALTER TABLE ONLY bulk_import_export_uploads
ALTER TABLE ONLY audit_events_streaming_http_instance_namespace_filters
ADD CONSTRAINT fk_23f3ab7df0 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY zentao_tracker_data
ADD CONSTRAINT fk_2417fd4262 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY import_failures
ADD CONSTRAINT fk_24b824da43 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
@ -42446,6 +42480,9 @@ ALTER TABLE ONLY packages_tags
ALTER TABLE ONLY security_policy_project_links
ADD CONSTRAINT fk_5a5eba6f88 FOREIGN KEY (security_policy_id) REFERENCES security_policies(id) ON DELETE CASCADE;
ALTER TABLE ONLY zentao_tracker_data
ADD CONSTRAINT fk_5a5f50a792 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY project_export_jobs
ADD CONSTRAINT fk_5ab0242530 FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
@ -43076,6 +43113,9 @@ ALTER TABLE ONLY operations_strategies
ALTER TABLE ONLY work_item_custom_lifecycle_statuses
ADD CONSTRAINT fk_a546eef539 FOREIGN KEY (lifecycle_id) REFERENCES work_item_custom_lifecycles(id) ON DELETE CASCADE;
ALTER TABLE ONLY issue_tracker_data
ADD CONSTRAINT fk_a54bddafd2 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY lfs_objects_projects
ADD CONSTRAINT fk_a56e02279c FOREIGN KEY (lfs_object_id) REFERENCES lfs_objects(id) ON DELETE RESTRICT NOT VALID;
@ -43190,6 +43230,9 @@ ALTER TABLE ONLY related_epic_links
ALTER TABLE ONLY projects_branch_rules_merge_request_approval_settings
ADD CONSTRAINT fk_b322a941f9 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY issue_tracker_data
ADD CONSTRAINT fk_b33e816ada FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY issues
ADD CONSTRAINT fk_b37be69be6 FOREIGN KEY (work_item_type_id) REFERENCES work_item_types(id);
@ -43358,6 +43401,9 @@ ALTER TABLE ONLY wiki_repository_states
ALTER TABLE ONLY issues
ADD CONSTRAINT fk_c63cbf6c25 FOREIGN KEY (closed_by_id) REFERENCES users(id) ON DELETE SET NULL;
ALTER TABLE ONLY issue_tracker_data
ADD CONSTRAINT fk_c65b54013d FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY sbom_occurrences_vulnerabilities
ADD CONSTRAINT fk_c677cb859e FOREIGN KEY (sbom_occurrence_id) REFERENCES sbom_occurrences(id) ON DELETE CASCADE;
@ -43472,6 +43518,9 @@ ALTER TABLE ONLY issue_user_mentions
ALTER TABLE ONLY user_group_member_roles
ADD CONSTRAINT fk_d222d57eec FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY jira_tracker_data
ADD CONSTRAINT fk_d24014171d FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY boards_epic_user_preferences
ADD CONSTRAINT fk_d32c3d693c FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
@ -43517,6 +43566,9 @@ ALTER TABLE ONLY system_note_metadata
ALTER TABLE ONLY sbom_occurrences
ADD CONSTRAINT fk_d857c6edc1 FOREIGN KEY (component_id) REFERENCES sbom_components(id) ON DELETE CASCADE;
ALTER TABLE ONLY zentao_tracker_data
ADD CONSTRAINT fk_d8eda829f4 FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE NOT VALID;
ALTER TABLE ONLY todos
ADD CONSTRAINT fk_d94154aa95 FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;

View File

@ -1590,10 +1590,11 @@ For manually backing up the Git repository data on disk, there are multiple poss
#### Prevent writes and copy the Git repository data
Git repositories must be copied in a consistent way. They should not be copied during concurrent write
operations, as this can lead to inconsistencies or corruption issues. For more details,
[issue #270422](https://gitlab.com/gitlab-org/gitlab/-/issues/270422 "Provide documentation on preferred method of migrating Gitaly servers")
has a longer discussion explaining the potential problems.
Git repositories must be copied in a consistent way. If repositories
are copied during concurrent write operations,
inconsistencies or corruption issues can occur. For more details,
[issue 270422](https://gitlab.com/gitlab-org/gitlab/-/issues/270422)
has a longer discussion that explains the potential problems.
To prevent writes to the Git repository data, there are two possible approaches:

View File

@ -28,7 +28,7 @@ reasons are:
removes these.
- Artifact files might be left on disk and not deleted by housekeeping. Run the
[Rake task for _orphaned_ artifact files](../raketasks/cleanup.md#remove-orphan-artifact-files)
to remove these. This script should always find work to do, as it also removes empty directories (see above).
to remove these. This script should always find work to do because it also removes empty directories (see above).
- [Artifact housekeeping was changed significantly](#housekeeping-disabled-in-gitlab-150-to-152), and you might need to enable a feature flag to use the updated system.
- The [keep latest artifacts from most recent success jobs](../../ci/jobs/job_artifacts.md#keep-artifacts-from-most-recent-successful-jobs)
feature is enabled.

View File

@ -42,7 +42,7 @@ is distributed among the Gitaly nodes in your instance based on your reference a
#### Repository storage per Gitaly node
Each Gitaly node in your instance has a specific storage capacity. This capacity affects how large individual
repositories can be, as no single repository can exceed the capacity of a single Gitaly node.
repositories can be, because no single repository can exceed the capacity of a single Gitaly node.
For example, if each Gitaly node has 100 GB of storage capacity and there are 3 Gitaly nodes, your instance
can store a total of 300 GB of repository data, but no single repository can exceed 100 GB.

View File

@ -1597,7 +1597,7 @@ sudo -u git -- /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefec
- `-virtual-storage` is the virtual storage the repository is located in.
- `-repository` is the repository's relative path in the storage.
- `-replication-factor` is the desired replication factor of the repository. The minimum value is
`1`, as the primary needs a copy of the repository. The maximum replication factor is the number of
`1` because the primary needs a copy of the repository. The maximum replication factor is the number of
storages in the virtual storage.
On success, the assigned host storages are printed. For example:

View File

@ -136,7 +136,7 @@ You should use the
## Alternative in-database storage
Enabling external diffs may reduce the performance of merge requests, as they
Enabling external diffs may reduce the performance of merge requests because they
must be retrieved in a separate operation to other data. A compromise may be
reached by only storing outdated diffs externally, while keeping current diffs
in the database.

View File

@ -811,7 +811,7 @@ archive. You can modify the cache behavior by changing the following configurati
| `zip_cache_expiration` | The cache expiration interval of ZIP archives. Must be greater than zero to avoid serving stale content. Default is `60s`. |
| `zip_cache_cleanup` | The interval at which archives are cleaned from memory if they have already expired. Default is `30s`. |
| `zip_cache_refresh` | The time interval in which an archive is extended in memory if accessed before `zip_cache_expiration`. This works together with `zip_cache_expiration` to determine if an archive is extended in memory. See the [example below](#zip-cache-refresh-example) for important details. Default is `30s`. |
| `zip_open_timeout` | The maximum time allowed to open a ZIP archive. Increase this time for big archives or slow network connections, as doing so may affect the latency of serving Pages. Default is 30 s. |
| `zip_open_timeout` | The maximum time allowed to open a ZIP archive. Increase this time for big archives or slow network connections because doing so may affect the latency of serving Pages. Default is 30 s. |
| `zip_http_client_timeout` | The maximum time for the ZIP HTTP client. Default is `30m`. |
#### ZIP cache refresh example

View File

@ -16,6 +16,8 @@ Use this guidance to help ensure you have the tools you need.
- Install documentation [linters](testing/_index.md) and configure them in your code editor:
- [markdownlint](testing/markdownlint.md)
- [Vale](testing/vale.md)
- If you're using VS Code, [install the GitLab Workflow extension](../../editor_extensions/visual_studio_code/setup.md)
to get GitLab Duo Chat and other GitLab features in your editor.
- [Set up the docs site to build locally](https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/-/blob/main/doc/setup.md).
- Optional. Install the [Conventional Comments](https://gitlab.com/conventionalcomments/conventional-comments-button) extension for Chrome.
The plugin adds **Conventional Comment** buttons to GitLab comments.

View File

@ -100,7 +100,11 @@ To lock a file:
If you do not have permission to lock the file, the menu item is disabled.
## View and remove locks
## View locked files
Prerequisites:
- You must have at least the Developer role for the project.
To view locked files:
@ -109,10 +113,13 @@ To view locked files:
The **Locked files** page displays all files locked with either Git LFS exclusive locks or the GitLab UI.
## Remove file locks
Prerequisites:
- You must be the user who created the lock.
- You must have at least the Maintainer role for the project.
- You must either:
- Be the user who created the lock.
- Have at least the Maintainer role for the project.
To remove a lock:

View File

@ -140,6 +140,12 @@ module API
false
end
def include_ip_address_in_audit_event?(ip_address)
params[:protocol] == 'ssh' && ip_address && Feature.enabled?(
:stream_audit_events_remote_ip_proxy_protocol, project
)
end
private
def gitaly_context(params)

View File

@ -113,7 +113,16 @@ module API
end
unless Feature.enabled?(:log_git_streaming_audit_events, project)
send_git_audit_streaming_event(protocol: params[:protocol], action: params[:action])
audit_message = { protocol: params[:protocol], action: params[:action] }
# If the protocol is SSH, we need to send the original IP from the PROXY
# protocol to the audit streaming event. The original IP from gitlab-shell
# is set through the `check_ip` parameter.
if include_ip_address_in_audit_event?(Gitlab::IpAddressState.current)
audit_message[:ip_address] = Gitlab::IpAddressState.current
end
send_git_audit_streaming_event(audit_message)
end
response_with_status(**payload)

View File

@ -32,6 +32,7 @@ module API
requires :action, type: String
requires :protocol, type: String
requires :gl_repository, type: String # repository identifier, such as project-7
optional :check_ip, type: String
optional :packfile_stats, type: Hash do
# wants is the number of objects the client announced it wants.
optional :wants, type: Integer
@ -57,13 +58,19 @@ module API
message: ::API::Helpers::InternalHelpers::UNKNOWN_CHECK_RESULT_ERROR)
end
msg = {
audit_message = {
protocol: params[:protocol],
action: params[:action],
verb: check_clone_or_pull_or_push_verb(params)
}
send_git_audit_streaming_event(msg)
response_with_status(message: msg)
# If the protocol is SSH, we need to send the original IP from the PROXY
# protocol to the audit streaming event. The original IP from gitlab-shell
# is set through the `check_ip` parameter.
audit_message[:ip_address] = params[:check_ip] if include_ip_address_in_audit_event?(params[:check_ip])
send_git_audit_streaming_event(audit_message)
response_with_status(message: audit_message.except(:ip_address))
end
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
module Gitlab
module BackgroundMigration
# This batched background migration is EE-only, see
# ee/lib/ee/gitlab/background_migration/backfill_has_vulnerability_resolution_cwe78_cwe89.rb
# for the actual migration code.
class BackfillHasVulnerabilityResolutionCwe78Cwe89 < BatchedMigrationJob
feature_category :vulnerability_management
def perform; end
end
end
end
Gitlab::BackgroundMigration::BackfillHasVulnerabilityResolutionCwe78Cwe89.prepend_mod
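
The CE class is a deliberate no-op; `prepend_mod` swaps in the EE module that does the real work. That override is not part of this diff, but jobs of this kind typically walk sub-batches and flag matching rows. A heavily hedged sketch, with the CWE-78/CWE-89 filter elided because it lives in the EE file:

```ruby
# Hedged sketch only. The real implementation is in
# ee/lib/ee/gitlab/background_migration/backfill_has_vulnerability_resolution_cwe78_cwe89.rb
# and additionally restricts the rows to CWE-78 / CWE-89 findings.
module EE
  module Gitlab
    module BackgroundMigration
      module BackfillHasVulnerabilityResolutionCwe78Cwe89
        extend ActiveSupport::Concern

        prepended do
          # Assumed label for sub-batch instrumentation.
          operation_name :update_has_vulnerability_resolution
        end

        def perform
          each_sub_batch do |sub_batch|
            # Assumption: sub_batch is narrowed to the matching CWE
            # identifiers before being flagged.
            sub_batch.update_all(has_vulnerability_resolution: true)
          end
        end
      end
    end
  end
end
```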

View File

@ -54784,6 +54784,9 @@ msgstr ""
msgid "SecurityInventory|Infrastructure as code scanning (IaC)"
msgstr ""
msgid "SecurityInventory|Load more"
msgstr ""
msgid "SecurityInventory|Manage security configuration"
msgstr ""

View File

@ -15,9 +15,22 @@ class SemgrepResultProcessor
UNIQUE_COMMENT_RULES_IDS = %w[builds.sast-custom-rules.appsec-pings.glappsec_ci-job-token builds.sast-custom-rules.secure-coding-guidelines.ruby.glappsec_insecure-regex].freeze
APPSEC_HANDLE = "@gitlab-com/gl-security/appsec"
MESSAGE_SCG_PING_APPSEC = "#{APPSEC_HANDLE} please review this finding, which is a potential violation of [GitLab's secure coding guidelines](https://docs.gitlab.com/development/secure_coding_guidelines/).".freeze
MESSAGE_S1_PING_APPSEC = "#{APPSEC_HANDLE} please review this finding. This MR potentially reintroduces code from a past S1 issue.".freeze
MESSAGE_PING_APPSEC = "#{APPSEC_HANDLE} please review this finding.".freeze
LABEL_INSTRUCTION = 'Apply the ~"appsec-sast-ping::resolved" label after reviewing.'
MESSAGE_SCG_PING_APPSEC =
"#{APPSEC_HANDLE} please review this finding, which is a potential " \
'violation of [GitLab\'s secure coding guidelines]' \
'(https://docs.gitlab.com/development/secure_coding_guidelines/). ' \
"#{LABEL_INSTRUCTION}".freeze
MESSAGE_S1_PING_APPSEC =
"#{APPSEC_HANDLE} please review this finding. This MR potentially " \
'reintroduces code from a past S1 issue. ' \
"#{LABEL_INSTRUCTION}".freeze
MESSAGE_PING_APPSEC =
"#{APPSEC_HANDLE} please review this finding. " \
"#{LABEL_INSTRUCTION}".freeze
MESSAGE_FOOTER = <<~FOOTER

View File

@ -2,7 +2,7 @@ import MockAdapter from 'axios-mock-adapter';
import dashboardGroupsWithChildrenResponse from 'test_fixtures/groups/dashboard/index_with_children.json';
import createMockApollo from 'helpers/mock_apollo_helper';
import { resolvers } from '~/groups/your_work/graphql/resolvers';
import memberGroupsQuery from '~/groups/your_work/graphql/queries/member_groups.query.graphql';
import groupsQuery from '~/groups/your_work/graphql/queries/groups.query.graphql';
import axios from '~/lib/utils/axios_utils';
import { TYPENAME_GROUP } from '~/graphql_shared/constants';
import { convertToGraphQLId } from '~/graphql_shared/utils';
@ -15,7 +15,7 @@ describe('your work groups resolver', () => {
const makeQuery = () => {
return mockApollo.clients.defaultClient.query({
query: memberGroupsQuery,
query: groupsQuery,
variables: { search: 'foo', sort: 'created_desc', page: 2 },
});
};
@ -42,6 +42,7 @@ describe('your work groups resolver', () => {
await makeQuery();
expect(mockAxios.history.get[0].params).toEqual({
active: true,
filter: 'foo',
sort: 'created_desc',
page: 2,

View File

@ -1,40 +1,48 @@
import { autoUpdate } from '@floating-ui/dom';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import FlyoutMenu, { FLYOUT_PADDING } from '~/super_sidebar/components/flyout_menu.vue';
import { setHTMLFixture } from 'helpers/fixtures';
jest.mock('@floating-ui/dom');
describe('FlyoutMenu', () => {
const targetId = 'section-1';
let wrapper;
let dummySection;
let autoUpdateCleanup;
const createComponent = () => {
dummySection = document.createElement('section');
dummySection.addEventListener = jest.fn();
dummySection.getBoundingClientRect = jest.fn();
dummySection.getBoundingClientRect.mockReturnValue({ top: 0, bottom: 5, width: 10 });
document.querySelector = jest.fn();
document.querySelector.mockReturnValue(dummySection);
wrapper = mountExtended(FlyoutMenu, {
attachTo: document.body,
propsData: {
targetId: 'section-1',
targetId,
items: [{ id: 1, title: 'item 1', link: 'https://example.com' }],
},
});
};
beforeEach(() => {
createComponent();
autoUpdateCleanup = autoUpdate.mockReturnValue(jest.fn());
setHTMLFixture(`
<div id="${targetId}"></div>
<div id="${targetId}-flyout"></div>
<div id="super-sidebar"></div>
`);
});
it('renders the component', () => {
createComponent();
expect(wrapper.exists()).toBe(true);
});
it('applies the correct padding', () => {
createComponent();
expect(wrapper.element.style.padding).toContain(`${FLYOUT_PADDING}px`);
expect(wrapper.element.style.paddingLeft).toContain(`${FLYOUT_PADDING * 2}px`);
});
it('cleans up', () => {
createComponent();
wrapper.destroy();
expect(autoUpdateCleanup).toHaveBeenCalled();
});
});

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
RSpec.describe QueueBackfillHasVulnerabilityResolutionCwe78Cwe89, migration: :gitlab_sec, feature_category: :vulnerability_management do
let!(:batched_migration) { described_class::MIGRATION }
it 'schedules a new batched migration' do
reversible_migration do |migration|
migration.before -> {
expect(batched_migration).not_to have_scheduled_batched_migration
}
migration.after -> {
expect(batched_migration).to have_scheduled_batched_migration(
table_name: :vulnerability_reads,
column_name: :id,
interval: described_class::DELAY_INTERVAL,
batch_size: described_class::BATCH_SIZE,
sub_batch_size: described_class::SUB_BATCH_SIZE)
}
end
end
end

View File

@ -17,7 +17,7 @@ RSpec.describe Integrations::Propagation::BulkCreateService, feature_category: :
%w[
id project_id group_id inherit_from_id instance template
created_at updated_at
encrypted_properties encrypted_properties_iv organization_id
encrypted_properties encrypted_properties_iv organization_id project_id group_id
]
end
@ -35,7 +35,12 @@ RSpec.describe Integrations::Propagation::BulkCreateService, feature_category: :
end
context 'when integration has data fields' do
let(:excluded_attributes) { %w[id service_id integration_id created_at updated_at] }
let(:excluded_attributes) do
%w[
id service_id integration_id created_at updated_at
organization_id group_id project_id
]
end
it 'updates the data fields from inherited integrations' do
execute_service
@ -60,6 +65,7 @@ RSpec.describe Integrations::Propagation::BulkCreateService, feature_category: :
id project_id group_id inherit_from_id instance template
created_at updated_at
encrypted_properties encrypted_properties_iv organization_id
group_id project_id
]
end
@ -190,6 +196,14 @@ RSpec.describe Integrations::Propagation::BulkCreateService, feature_category: :
it_behaves_like 'creates integration successfully'
it 'sets project_id in data_fields' do
execute_service
expect(created_integration.data_fields.project_id).to eq(project.id)
expect(created_integration.data_fields.group_id).to be_nil
expect(created_integration.data_fields.organization_id).to be_nil
end
context 'with different foreign key of data_fields' do
let(:integration) { create(:zentao_integration, :group, group: group) }
@ -212,6 +226,14 @@ RSpec.describe Integrations::Propagation::BulkCreateService, feature_category: :
it_behaves_like 'creates integration successfully'
it 'sets group_id in data_fields' do
execute_service
expect(created_integration.data_fields.group_id).to eq(subgroup.id)
expect(created_integration.data_fields.project_id).to be_nil
expect(created_integration.data_fields.organization_id).to be_nil
end
context 'with different foreign key of data_fields' do
let(:integration) do
create(:zentao_integration, :group, group: group, inherit_from_id: instance_integration.id)
@ -225,5 +247,32 @@ RSpec.describe Integrations::Propagation::BulkCreateService, feature_category: :
let(:expected_alias) { subgroup.full_path }
end
end
context 'when there are multiple integrations to create' do
let!(:groups) { create_list(:group, 5, parent: group) }
let!(:projects) { create_list(:project, 5, group: group) }
let!(:integration) { create(:jira_integration, :group, group: group, inherit_from_id: instance_integration.id) }
it 'sets correct foreign key to propagated integration data_fields' do
described_class.new(integration, Project.where(id: projects.map(&:id)), 'project').execute
described_class.new(integration, Group.where(id: groups.map(&:id)), 'group').execute
groups.each do |subgroup|
integration = Integration.find_by(group: subgroup)
expect(integration.data_fields.group_id).to eq(integration.group_id)
expect(integration.data_fields.project_id).to eq(integration.project_id)
expect(integration.data_fields.organization_id).to eq(integration.organization_id)
end
projects.each do |project|
integration = Integration.find_by(project: project)
expect(integration.data_fields.group_id).to eq(integration.group_id)
expect(integration.data_fields.project_id).to eq(integration.project_id)
expect(integration.data_fields.organization_id).to eq(integration.organization_id)
end
end
end
end
end

View File

@ -73,7 +73,10 @@ RSpec.describe Integrations::Propagation::BulkUpdateService, feature_category: :
context 'with integration with data fields' do
let(:excluded_attributes) do
%w[id integration_id created_at updated_at encrypted_properties encrypted_properties_iv]
%w[
id integration_id created_at updated_at encrypted_properties encrypted_properties_iv
group_id project_id
]
end
it 'updates the data fields from the integration', :aggregate_failures do

View File

@ -51,7 +51,10 @@ RSpec.shared_examples Integrations::BaseDataFields do
'integration_id',
'created_at',
'updated_at',
'instance_integration_id'
'instance_integration_id',
'group_id',
'project_id',
'organization_id'
)
end
end
@ -60,6 +63,7 @@ RSpec.shared_examples Integrations::BaseDataFields do
context 'when integration is present' do
before do
model.integration = build(:integration)
model.organization_id = 1
end
it { is_expected.to be_valid }
@ -68,6 +72,7 @@ RSpec.shared_examples Integrations::BaseDataFields do
context 'when instance integration is present' do
before do
model.instance_integration = build(:instance_integration)
model.organization_id = 1
end
it { is_expected.to be_valid }
@ -82,7 +87,8 @@ RSpec.shared_examples Integrations::BaseDataFields do
expect(model.errors.full_messages).to contain_exactly(
'Integration must be blank',
'Instance integration must be blank',
'one of integration or instance_integration must be present'
'one of integration or instance_integration must be present',
'one of project_id, group_id or organization_id must be present'
)
end
end
@ -91,9 +97,46 @@ RSpec.shared_examples Integrations::BaseDataFields do
it 'validates presence correctly' do
expect(model.valid?).to eq(false)
expect(model.errors.full_messages).to contain_exactly(
'one of integration or instance_integration must be present'
'one of integration or instance_integration must be present',
'one of project_id, group_id or organization_id must be present'
)
end
end
context 'when sharding key is not set' do
it 'validates presence correctly' do
model.integration = build(:integration)
expect(model.valid?).to eq(false)
expect(model.errors.full_messages).to contain_exactly(
'one of project_id, group_id or organization_id must be present'
)
end
end
end
describe 'set_sharding_key' do
context 'when project_id, group_id, or organization_id is already set' do
it 'does not set new sharding key' do
integration = build(:integration, project_id: 2)
model.project_id = 1
model.integration = integration
model.valid?
expect(model.project_id).to eq(1)
end
end
context 'when project_id, group_id, or organization_id are not set' do
it 'sets the sharding key based on integration' do
integration = build(:integration, project_id: 1)
model.integration = integration
model.valid?
expect(model.project_id).to eq(1)
end
end
end
end