diff --git a/app/assets/javascripts/packages_and_registries/settings/project/components/packages_protection_rule_form.vue b/app/assets/javascripts/packages_and_registries/settings/project/components/packages_protection_rule_form.vue
index 11195247696..4e1b7592cf7 100644
--- a/app/assets/javascripts/packages_and_registries/settings/project/components/packages_protection_rule_form.vue
+++ b/app/assets/javascripts/packages_and_registries/settings/project/components/packages_protection_rule_form.vue
@@ -120,12 +120,9 @@ export default {
{ value: 'MAVEN', text: s__('PackageRegistry|Maven') },
{ value: 'CONAN', text: s__('PackageRegistry|Conan') },
{ value: 'GENERIC', text: s__('PackageRegistry|Generic') },
+ { value: 'NUGET', text: s__('PackageRegistry|NuGet') },
];
- if (this.glFeatures.packagesProtectedPackagesNuget) {
- packageTypeOptions.push({ value: 'NUGET', text: s__('PackageRegistry|NuGet') });
- }
-
if (this.glFeatures.packagesProtectedPackagesHelm) {
packageTypeOptions.push({ value: 'HELM', text: s__('PackageRegistry|Helm') });
}
diff --git a/app/controllers/projects/settings/packages_and_registries_controller.rb b/app/controllers/projects/settings/packages_and_registries_controller.rb
index 3278944693d..3144a66fa02 100644
--- a/app/controllers/projects/settings/packages_and_registries_controller.rb
+++ b/app/controllers/projects/settings/packages_and_registries_controller.rb
@@ -34,7 +34,6 @@ module Projects
def set_feature_flag_packages_protected_packages
push_frontend_feature_flag(:packages_protected_packages_helm, project)
- push_frontend_feature_flag(:packages_protected_packages_nuget, project)
push_frontend_feature_flag(:packages_protected_packages_delete, project)
end
diff --git a/app/graphql/types/packages/protection/rule_package_type_enum.rb b/app/graphql/types/packages/protection/rule_package_type_enum.rb
index b88e05223eb..a8658c8e50f 100644
--- a/app/graphql/types/packages/protection/rule_package_type_enum.rb
+++ b/app/graphql/types/packages/protection/rule_package_type_enum.rb
@@ -31,9 +31,7 @@ module Types
value 'NUGET',
value: 'nuget',
- experiment: { milestone: '18.0' },
- description: 'Packages of the NuGet format. ' \
- 'Available only when feature flag `packages_protected_packages_nuget` is enabled.'
+ description: 'Packages of the NuGet format.'
value 'PYPI',
value: 'pypi',
diff --git a/app/services/packages/nuget/create_or_update_package_service.rb b/app/services/packages/nuget/create_or_update_package_service.rb
index fa35699d478..299cd162c62 100644
--- a/app/services/packages/nuget/create_or_update_package_service.rb
+++ b/app/services/packages/nuget/create_or_update_package_service.rb
@@ -24,7 +24,10 @@ module Packages
def execute
return UNAUTHORIZED_ERROR unless can?(current_user, :create_package, project)
return DUPLICATE_ERROR unless ::Namespace::PackageSetting.duplicates_allowed?(existing_package)
- return ERROR_RESPONSE_PACKAGE_PROTECTED if package_protected?
+
+ if package_protected?(package_name: metadata[:package_name], package_type: :nuget)
+ return ERROR_RESPONSE_PACKAGE_PROTECTED
+ end
package = try_obtain_lease { process_package }
@@ -37,12 +40,6 @@ module Packages
private
- def package_protected?
- return false if Feature.disabled?(:packages_protected_packages_nuget, project)
-
- super(package_name: metadata[:package_name], package_type: :nuget)
- end
-
def existing_package
::Packages::Nuget::PackageFinder
.new(
diff --git a/app/services/packages/nuget/update_package_from_metadata_service.rb b/app/services/packages/nuget/update_package_from_metadata_service.rb
index a66293456f8..5d1385914f0 100644
--- a/app/services/packages/nuget/update_package_from_metadata_service.rb
+++ b/app/services/packages/nuget/update_package_from_metadata_service.rb
@@ -78,8 +78,6 @@ module Packages
end
def package_protected?
- return false if Feature.disabled?(:packages_protected_packages_nuget, @package_file.project)
-
service_response =
::Packages::Protection::CheckRuleExistenceService.for_push(
project: @package_file.project,
diff --git a/config/feature_flags/gitlab_com_derisk/packages_protected_packages_nuget.yml b/config/feature_flags/gitlab_com_derisk/packages_protected_packages_nuget.yml
deleted file mode 100644
index e979e726c88..00000000000
--- a/config/feature_flags/gitlab_com_derisk/packages_protected_packages_nuget.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-name: packages_protected_packages_nuget
-feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/323972
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/184059
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/524145
-milestone: '18.0'
-group: group::package registry
-type: gitlab_com_derisk
-default_enabled: false
diff --git a/data/deprecations/18-2-opensearch-1x-deprecation.yml b/data/deprecations/18-2-opensearch-1x-deprecation.yml
new file mode 100644
index 00000000000..43bb58a3f6c
--- /dev/null
+++ b/data/deprecations/18-2-opensearch-1x-deprecation.yml
@@ -0,0 +1,18 @@
+- title: "Support for OpenSearch 1.x in advanced search"
+ # The milestones for the deprecation announcement, and the removal.
+ removal_milestone: "18.5"
+ announcement_milestone: "18.2"
+ # Change breaking_change to false if needed.
+ breaking_change: false
+ reporter: changzhengliu # The GitLab username of the person reporting the change
+ stage: ai-powered
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/554530
+ impact: medium
+ impact_offering: [Dedicated, self-managed]
+ scope: instance
+ resolution_role: Admin
+ manual_task: true
+ body: | # (required) Don't change this line.
+ The [maintenance window for OpenSearch 1.x](https://opensearch.org/releases/#maintenance-policy) has ended.
+ For GitLab Self-Managed and GitLab Dedicated, administrators must upgrade their OpenSearch instance
+ to use advanced search.
diff --git a/db/docs/batched_background_migrations/backfill_approval_merge_request_rules_users_project_id.yml b/db/docs/batched_background_migrations/backfill_approval_merge_request_rules_users_project_id.yml
index 32d35aba43a..7b861c19011 100644
--- a/db/docs/batched_background_migrations/backfill_approval_merge_request_rules_users_project_id.yml
+++ b/db/docs/batched_background_migrations/backfill_approval_merge_request_rules_users_project_id.yml
@@ -3,6 +3,6 @@ migration_job_name: BackfillApprovalMergeRequestRulesUsersProjectId
description: Backfills sharding key `approval_merge_request_rules_users.project_id` from `approval_merge_request_rules`.
feature_category: code_review_workflow
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/183354
-milestone: '17.10'
-queued_migration_version: 20250304132251
+milestone: '18.2'
+queued_migration_version: 20250708203741
finalized_by: # version of the migration that finalized this BBM
diff --git a/db/post_migrate/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id.rb b/db/post_migrate/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id.rb
index c6a3379d216..91f73be2387 100644
--- a/db/post_migrate/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id.rb
+++ b/db/post_migrate/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id.rb
@@ -10,31 +10,10 @@ class QueueBackfillApprovalMergeRequestRulesUsersProjectId < Gitlab::Database::M
SUB_BATCH_SIZE = 100
def up
- queue_batched_background_migration(
- MIGRATION,
- :approval_merge_request_rules_users,
- :id,
- :project_id,
- :approval_merge_request_rules,
- :project_id,
- :approval_merge_request_rule_id,
- job_interval: DELAY_INTERVAL,
- batch_size: BATCH_SIZE,
- sub_batch_size: SUB_BATCH_SIZE
- )
+ # no-op because the original migration failed (https://gitlab.com/gitlab-org/gitlab/-/merge_requests/183354#note_2425371444)
end
def down
- delete_batched_background_migration(
- MIGRATION,
- :approval_merge_request_rules_users,
- :id,
- [
- :project_id,
- :approval_merge_request_rules,
- :project_id,
- :approval_merge_request_rule_id
- ]
- )
+ # no-op because the original migration failed (https://gitlab.com/gitlab-org/gitlab/-/merge_requests/183354#note_2425371444)
end
end
diff --git a/db/post_migrate/20250623071728_queue_backfill_rolled_up_weight_for_work_items.rb b/db/post_migrate/20250623071728_queue_backfill_rolled_up_weight_for_work_items.rb
index b1e42b86e6b..5ee4760d317 100644
--- a/db/post_migrate/20250623071728_queue_backfill_rolled_up_weight_for_work_items.rb
+++ b/db/post_migrate/20250623071728_queue_backfill_rolled_up_weight_for_work_items.rb
@@ -5,22 +5,11 @@ class QueueBackfillRolledUpWeightForWorkItems < Gitlab::Database::Migration[2.3]
restrict_gitlab_migration gitlab_schema: :gitlab_main
- MIGRATION = "BackfillRolledUpWeightForWorkItems"
- DELAY_INTERVAL = 2.minutes
- BATCH_SIZE = 50000
- SUB_BATCH_SIZE = 2500
-
def up
- queue_batched_background_migration(
- MIGRATION,
- :issues,
- :id,
- batch_size: BATCH_SIZE,
- sub_batch_size: SUB_BATCH_SIZE
- )
+ # no-op due to https://gitlab.com/gitlab-com/gl-infra/production-engineering/-/issues/27076
end
def down
- delete_batched_background_migration(MIGRATION, :issues, :id, [])
+ # no-op
end
end
diff --git a/db/post_migrate/20250708203741_requeue_backfill_approval_merge_request_rules_users_project_id.rb b/db/post_migrate/20250708203741_requeue_backfill_approval_merge_request_rules_users_project_id.rb
new file mode 100644
index 00000000000..59f093dd0c9
--- /dev/null
+++ b/db/post_migrate/20250708203741_requeue_backfill_approval_merge_request_rules_users_project_id.rb
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+class RequeueBackfillApprovalMergeRequestRulesUsersProjectId < Gitlab::Database::Migration[2.3]
+ milestone '18.2'
+ restrict_gitlab_migration gitlab_schema: :gitlab_main_cell
+
+ MIGRATION = "BackfillApprovalMergeRequestRulesUsersProjectId"
+ DELAY_INTERVAL = 2.minutes
+ BATCH_SIZE = 1000
+ SUB_BATCH_SIZE = 100
+
+ def up
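+ # Clean up any record left behind by the failed original migration
+ # (20250304132251, now a no-op) before requeueing.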
+ delete_batched_background_migration(
+ MIGRATION,
+ :approval_merge_request_rules_users,
+ :id,
+ [
+ :project_id,
+ :approval_merge_request_rules,
+ :project_id,
+ :approval_merge_request_rule_id
+ ]
+ )
+
+ queue_batched_background_migration(
+ MIGRATION,
+ :approval_merge_request_rules_users,
+ :id,
+ :project_id,
+ :approval_merge_request_rules,
+ :project_id,
+ :approval_merge_request_rule_id,
+ job_interval: DELAY_INTERVAL,
+ batch_size: BATCH_SIZE,
+ sub_batch_size: SUB_BATCH_SIZE
+ )
+ end
+
+ def down
+ delete_batched_background_migration(
+ MIGRATION,
+ :approval_merge_request_rules_users,
+ :id,
+ [
+ :project_id,
+ :approval_merge_request_rules,
+ :project_id,
+ :approval_merge_request_rule_id
+ ]
+ )
+ end
+end
diff --git a/db/post_migrate/20250711065852_remove_backfill_rolled_up_weight_for_work_items.rb b/db/post_migrate/20250711065852_remove_backfill_rolled_up_weight_for_work_items.rb
new file mode 100644
index 00000000000..b80f0c5ab48
--- /dev/null
+++ b/db/post_migrate/20250711065852_remove_backfill_rolled_up_weight_for_work_items.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+class RemoveBackfillRolledUpWeightForWorkItems < Gitlab::Database::Migration[2.3]
+ milestone '18.2'
+
+ restrict_gitlab_migration gitlab_schema: :gitlab_main
+
+ MIGRATION = "BackfillRolledUpWeightForWorkItems"
+
+ def up
+ delete_batched_background_migration(MIGRATION, :issues, :id, [])
+ end
+
+ def down
+ # no-op
+ end
+end
diff --git a/db/schema_migrations/20250708203741 b/db/schema_migrations/20250708203741
new file mode 100644
index 00000000000..6314d8084d7
--- /dev/null
+++ b/db/schema_migrations/20250708203741
@@ -0,0 +1 @@
+bf420569855ae61c7c5f3baf9e9b3f25860d86a53181da4dd1a30be5000fe123
\ No newline at end of file
diff --git a/db/schema_migrations/20250711065852 b/db/schema_migrations/20250711065852
new file mode 100644
index 00000000000..7bf2eeea0ec
--- /dev/null
+++ b/db/schema_migrations/20250711065852
@@ -0,0 +1 @@
+ae5b640a6770842c326c9ce798e0b0a7984eb11f624b8f30fb0ffb7f06c49244
\ No newline at end of file
diff --git a/doc/administration/compliance/audit_event_streaming.md b/doc/administration/compliance/audit_event_streaming.md
index c7014a7e28d..a78ab1f5a6b 100644
--- a/doc/administration/compliance/audit_event_streaming.md
+++ b/doc/administration/compliance/audit_event_streaming.md
@@ -47,51 +47,6 @@ the streaming destination.
Manage streaming destinations for an entire instance.
-## Activate or deactivate streaming destinations
-
-{{< history >}}
-
-- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/537096) in GitLab 18.2.
-
-{{< /history >}}
-
-You can temporarily deactivate audit event streaming to a destination without deleting the destination configuration. When a streaming destination is deactivated:
-
-- Audit events stop streaming to that destination immediately.
-- The destination configuration is preserved.
-- You can reactivate the destination at any time.
-- Other active destinations continue to receive events.
-
-### Deactivate a streaming destination
-
-Prerequisites:
-
-- Administrator access on the instance.
-
-To deactivate a streaming destination:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Clear the **Active** checkbox.
-1. Select **Save** to deactivate the streaming destination.
-
-The destination shows as **Inactive** and stops receiving audit events.
-
-### Activate a streaming destination
-
-To reactivate a previously deactivated streaming destination:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Select the **Active** checkbox.
-1. Select **Save** to activate the streaming destination.
-
-The destination resumes receiving audit events immediately.
-
## HTTP destinations
Prerequisites:
@@ -121,19 +76,6 @@ To add a streaming destination for an instance:
20 headers per streaming destination.
1. After all headers have been filled out, select **Add** to add the new streaming destination.
-### List HTTP destinations
-
-Prerequisites:
-
-- Administrator access on the instance.
-
-To list the streaming destinations for an instance:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand it and see all the custom HTTP headers.
-
### Update an HTTP destination
Prerequisites:
@@ -162,35 +104,6 @@ To update a instance streaming destination's custom HTTP headers:
20 headers per streaming destination.
1. Select **Save** to update the streaming destination.
-### Delete an HTTP destination
-
-Delete streaming destinations for an entire instance. When the last destination is successfully deleted, streaming is
-disabled for the instance.
-
-Prerequisites:
-
-- Administrator access on the instance.
-
-To delete the streaming destinations for an instance:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Select **Delete destination**.
-1. Confirm by selecting **Delete destination** in the dialog.
-
-To delete only the custom HTTP headers for a streaming destination:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. To the right of the item, select **Edit** ({{< icon name="pencil" >}}).
-1. Locate the **Custom HTTP headers** table.
-1. Locate the header that you wish to remove.
-1. To the right of the header, select **Delete** ({{< icon name="remove" >}}).
-1. Select **Save** to update the streaming destination.
-
### Verify event authenticity
{{< history >}}
@@ -288,19 +201,6 @@ To add Google Cloud Logging streaming destinations to an instance:
1. Enter a random string to use as a log ID for the new destination. You can use this later to filter log results in Google Cloud.
1. Select **Add** to add the new streaming destination.
-### List Google Cloud Logging destinations
-
-Prerequisites:
-
-- Administrator access on the instance.
-
-To list Google Cloud Logging streaming destinations for an instance:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the Google Cloud Logging stream to expand and see all the fields.
-
### Update a Google Cloud Logging destination
Prerequisites:
@@ -319,21 +219,6 @@ To update Google Cloud Logging streaming destinations to an instance:
1. Select **Add a new private key** and enter a Google private key to update the private key.
1. Select **Save** to update the streaming destination.
-### Delete a Google Cloud Logging streaming destination
-
-Prerequisites:
-
-- Administrator access on the instance.
-
-To delete Google Cloud Logging streaming destinations to an instance:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the Google Cloud Logging stream to expand.
-1. Select **Delete destination**.
-1. Confirm by selecting **Delete destination** in the dialog.
-
## AWS S3 destinations
{{< history >}}
@@ -366,53 +251,120 @@ To add AWS S3 streaming destinations to an instance:
1. On the main area, select the **Streams** tab.
1. Select **Add streaming destination** and select **AWS S3** to show the section for adding destinations.
1. Enter a random string to use as a name for the new destination.
-1. Enter the Access Key ID, Secret Access Key, Bucket Name, and AWS Region from previously-created AWS access key and bucket to add to the new destination.
+1. Enter the **Access Key ID**, **Secret Access Key**, **Bucket Name**, and **AWS Region** from the previously-created
+ AWS access key and bucket to add to the new destination.
1. Select **Add** to add the new streaming destination.
-### List AWS S3 destinations
-
-Prerequisites:
-
-- Administrator access on the instance.
-
-To list AWS S3 streaming destinations for an instance.
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the AWS S3 stream to expand and see all the fields.
-
### Update an AWS S3 destination
Prerequisites:
- Administrator access on the instance.
-To update AWS S3 streaming destinations to an instance:
+To update an AWS S3 streaming destination for an instance:
1. On the left sidebar, at the bottom, select **Admin**.
1. Select **Monitoring > Audit events**.
1. On the main area, select the **Streams** tab.
1. Select the AWS S3 stream to expand.
1. Enter a random string to use as a name for the destination.
-1. Enter the Access Key ID, Secret Access Key, Bucket Name, and AWS Region from previously-created AWS access key and bucket to update the destination.
-1. Select **Add a new Secret Access Key** and enter a AWS Secret Access Key to update the Secret Access Key.
-1. Select **Save** to update the streaming destination.
+1. To update the destination, enter the **Access Key ID**, **Secret Access Key**, **Bucket Name**, and **AWS Region**
+ from the previously-created AWS access key and bucket.
+1. Select **Add a new Secret Access Key** and enter an AWS Secret Access Key to update the Secret Access Key.
+1. Select **Save**.
-### Delete an AWS S3 streaming destination
+## List streaming destinations
Prerequisites:
- Administrator access on the instance.
-To delete AWS S3 streaming destinations on an instance:
+To list the streaming destinations for an instance:
1. On the left sidebar, at the bottom, select **Admin**.
1. Select **Monitoring > Audit events**.
1. On the main area, select the **Streams** tab.
-1. Select the AWS S3 stream to expand.
+1. Select the stream to expand.
+
+## Activate or deactivate streaming destinations
+
+{{< history >}}
+
+- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/537096) in GitLab 18.2.
+
+{{< /history >}}
+
+You can temporarily deactivate audit event streaming to a destination without deleting the destination configuration. When a streaming destination is deactivated:
+
+- Audit events stop streaming to that destination immediately.
+- The destination configuration is preserved.
+- You can reactivate the destination at any time.
+- Other active destinations continue to receive events.
+
+### Deactivate a streaming destination
+
+Prerequisites:
+
+- Administrator access on the instance.
+
+To deactivate a streaming destination:
+
+1. On the left sidebar, at the bottom, select **Admin**.
+1. Select **Monitoring > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
+1. Clear the **Active** checkbox.
+1. Select **Save**.
+
+The destination stops receiving audit events.
+
+### Activate a streaming destination
+
+To reactivate a previously deactivated streaming destination:
+
+1. On the left sidebar, at the bottom, select **Admin**.
+1. Select **Monitoring > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
+1. Select the **Active** checkbox.
+1. Select **Save**.
+
+The destination resumes receiving audit events immediately.
+
+## Delete streaming destinations
+
+Delete streaming destinations for an entire instance. When the last destination is successfully deleted, streaming is
+disabled for the instance.
+
+Prerequisites:
+
+- Administrator access on the instance.
+
+To delete streaming destinations on an instance:
+
+1. On the left sidebar, at the bottom, select **Admin**.
+1. Select **Monitoring > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
1. Select **Delete destination**.
-1. Confirm by selecting **Delete destination** in the dialog.
+1. To confirm, select **Delete destination**.
+
+### Delete only custom HTTP headers
+
+Prerequisites:
+
+- Administrator access on the instance.
+
+To delete only the custom HTTP headers for a streaming destination:
+
+1. On the left sidebar, at the bottom, select **Admin**.
+1. Select **Monitoring > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. To the right of the item, select **Edit** ({{< icon name="pencil" >}}).
+1. Locate the **Custom HTTP headers** table.
+1. Locate the header that you want to remove.
+1. To the right of the header, select **Delete** ({{< icon name="remove" >}}).
+1. Select **Save**.
## Related topics
diff --git a/doc/api/graphql/reference/_index.md b/doc/api/graphql/reference/_index.md
index 88c12e06e05..44e6c8eb1d0 100644
--- a/doc/api/graphql/reference/_index.md
+++ b/doc/api/graphql/reference/_index.md
@@ -47174,7 +47174,7 @@ Package type of a package protection rule resource.
| `HELM` {{< icon name="warning-solid" >}} | **Introduced** in GitLab 18.1. **Status**: Experiment. Packages of the Helm format.Available only when feature flag `packages_protected_packages_helm` is enabled. |
| `MAVEN` | Packages of the Maven format. |
| `NPM` | Packages of the npm format. |
-| `NUGET` {{< icon name="warning-solid" >}} | **Introduced** in GitLab 18.0. **Status**: Experiment. Packages of the NuGet format. Available only when feature flag `packages_protected_packages_nuget` is enabled. |
+| `NUGET` | Packages of the NuGet format. |
| `PYPI` | Packages of the PyPI format. |
### `PipelineAnalyticsJobStatus`
diff --git a/doc/development/documentation/styleguide/deprecations_and_removals.md b/doc/development/documentation/styleguide/deprecations_and_removals.md
index 864ba55e56b..7a1e5576ca8 100644
--- a/doc/development/documentation/styleguide/deprecations_and_removals.md
+++ b/doc/development/documentation/styleguide/deprecations_and_removals.md
@@ -13,9 +13,11 @@ If a feature is not generally available, you can delete the content outright ins
{{< alert type="note" >}}
-REST API docs [have a separate deprecation style](../restful_api_styleguide.md#deprecations).
-The GraphQL API [has a separate deprecation process](../../../api/graphql/_index.md#deprecation-and-removal-process),
-and [style for the deprecation reason](../../api_graphql_styleguide.md#deprecation-reason-style-guide).
+In the following cases, a separate process applies:
+
+- [Documentation redirects](../redirects.md) to move, rename, or delete pages not related to feature deprecation.
+- [REST API deprecations](../restful_api_styleguide.md#deprecations).
+- [GraphQL API deprecation process](../../../api/graphql/_index.md#deprecation-and-removal-process) and [deprecation reasons](../../api_graphql_styleguide.md#deprecation-reason-style-guide).
{{< /alert >}}
diff --git a/doc/update/deprecations.md b/doc/update/deprecations.md
index a2e8f5873a9..c7fa82e7f2f 100644
--- a/doc/update/deprecations.md
+++ b/doc/update/deprecations.md
@@ -1212,6 +1212,29 @@ In GitLab 18.6, we'll replace the compliance standards adherence dashboard with
+
+
+## GitLab 18.5
+
+
+
+### Support for OpenSearch 1.x in advanced search
+
+
+
+- Announced in GitLab 18.2
+- Removal in GitLab 18.5
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/554530).
+
+
+
+The [maintenance window for OpenSearch 1.x](https://opensearch.org/releases/#maintenance-policy) has ended.
+For GitLab Self-Managed and GitLab Dedicated, administrators must upgrade their OpenSearch instance
+to use advanced search.
+
+
+
+
## GitLab 18.3
diff --git a/doc/user/compliance/audit_event_streaming.md b/doc/user/compliance/audit_event_streaming.md
index 150961acf57..7a52cb25a4e 100644
--- a/doc/user/compliance/audit_event_streaming.md
+++ b/doc/user/compliance/audit_event_streaming.md
@@ -47,51 +47,6 @@ the streaming destination.
{{< /alert >}}
-## Activate or deactivate streaming destinations
-
-{{< history >}}
-
-- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/537096) in GitLab 18.2.
-
-{{< /history >}}
-
-You can temporarily deactivate audit event streaming to a destination without deleting the destination configuration. When a streaming destination is deactivated:
-
-- Audit events stop streaming to that destination immediately.
-- The destination configuration is preserved.
-- You can reactivate the destination at any time.
-- Other active destinations continue to receive events.
-
-### Deactivate a streaming destination
-
-Prerequisites:
-
-- Owner role for a top-level group.
-
-To deactivate a streaming destination:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Clear the **Active** checkbox.
-1. Select **Save** to deactivate the streaming destination.
-
-The destination shows as **Inactive** and stops receiving audit events.
-
-### Activate a streaming destination
-
-To reactivate a previously deactivated streaming destination:
-
-1. On the left sidebar, at the bottom, select **Admin**.
-1. Select **Monitoring > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Select the **Active** checkbox.
-1. Select **Save** to activate the streaming destination.
-
-The destination resumes receiving audit events immediately.
-
## HTTP destinations
Prerequisites:
@@ -121,19 +76,6 @@ To add streaming destinations to a top-level group:
20 headers per streaming destination.
1. After all headers have been filled out, select **Add** to add the new streaming destination.
-### List HTTP destinations
-
-Prerequisites:
-
-- Owner role for a group.
-
-To list the streaming destinations for a top-level group:
-
-1. On the left sidebar, select **Search or go to** and find your group.
-1. Select **Secure > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand it and see all the custom HTTP headers.
-
### Update an HTTP destination
Prerequisites:
@@ -162,35 +104,6 @@ To update a streaming destination's custom HTTP headers:
20 headers per streaming destination.
1. Select **Save** to update the streaming destination.
-### Delete an HTTP destination
-
-Delete streaming destinations for a top-level group. When the last destination is successfully deleted, streaming is
-disabled for the top-level group.
-
-Prerequisites:
-
-- Owner role for a group.
-
-To delete a streaming destination:
-
-1. On the left sidebar, select **Search or go to** and find your group.
-1. Select **Secure > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Select **Delete destination**.
-1. Confirm by selecting **Delete destination** in the dialog.
-
-To delete only the custom HTTP headers for a streaming destination:
-
-1. On the left sidebar, select **Search or go to** and find your group.
-1. Select **Secure > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the stream to expand.
-1. Locate the **Custom HTTP headers** table.
-1. Locate the header that you wish to remove.
-1. To the right of the header, select **Delete** ({{< icon name="remove" >}}).
-1. Select **Save** to update the streaming destination.
-
### Verify event authenticity
{{< history >}}
@@ -312,19 +225,6 @@ To add Google Cloud Logging streaming destinations to a top-level group:
1. Enter a random string to use as a log ID for the new destination. You can use this later to filter log results in Google Cloud.
1. Select **Add** to add the new streaming destination.
-### List Google Cloud Logging destinations
-
-Prerequisites:
-
-- Owner role for a top-level group.
-
-To list Google Cloud Logging streaming destinations for a top-level group:
-
-1. On the left sidebar, select **Search or go to** and find your group.
-1. Select **Secure > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the Google Cloud Logging stream to expand and see all the fields.
-
### Update a Google Cloud Logging destination
{{< history >}}
@@ -349,21 +249,6 @@ To update Google Cloud Logging streaming destinations to a top-level group:
1. Select **Add a new private key** and enter a Google private key to update the private key.
1. Select **Save** to update the streaming destination.
-### Delete a Google Cloud Logging streaming destination
-
-Prerequisites:
-
-- Owner role for a top-level group.
-
-To delete Google Cloud Logging streaming destinations to a top-level group:
-
-1. On the left sidebar, select **Search or go to** and find your group.
-1. Select **Secure > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the Google Cloud Logging stream to expand.
-1. Select **Delete destination**.
-1. Confirm by selecting **Delete destination** in the dialog.
-
## AWS S3 destinations
{{< history >}}
@@ -396,53 +281,120 @@ To add AWS S3 streaming destinations to a top-level group:
1. On the main area, select the **Streams** tab.
1. Select **Add streaming destination** and select **AWS S3** to show the section for adding destinations.
1. Enter a random string to use as a name for the new destination.
-1. Enter the Access Key ID, Secret Access Key, Bucket Name, and AWS Region from previously-created AWS access key and bucket to add to the new destination.
+1. Enter the **Access Key ID**, **Secret Access Key**, **Bucket Name**, and **AWS Region** from the previously-created
+ AWS access key and bucket to add to the new destination.
1. Select **Add** to add the new streaming destination.
-### List AWS S3 destinations
+### Update an AWS S3 destination
Prerequisites:
- Owner role for a top-level group.
-To list AWS S3 streaming destinations for a top-level group:
-
-1. On the left sidebar, select **Search or go to** and find your group.
-1. Select **Secure > Audit events**.
-1. On the main area, select the **Streams** tab.
-1. Select the AWS S3 stream to expand and see all the fields.
-
-### Update a AWS S3 destination
-
-Prerequisites:
-
-- Owner role for a top-level group.
-
-To update AWS S3 streaming destinations to a top-level group:
+To update an AWS S3 streaming destination for a top-level group:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Secure > Audit events**.
1. On the main area, select the **Streams** tab.
1. Select the AWS S3 stream to expand.
1. Enter a random string to use as a name for the destination.
-1. Enter the Access Key ID, Secret Access Key, Bucket Name, and AWS Region from previously-created AWS access key and bucket to update the destination.
-1. Select **Add a new Secret Access Key** and enter a AWS Secret Access Key to update the Secret Access Key.
-1. Select **Save** to update the streaming destination.
+1. To update the destination, enter the **Access Key ID**, **Secret Access Key**, **Bucket Name**, and **AWS Region**
+ from the previously-created AWS access key and bucket.
+1. To update the Secret Access Key, select **Add a new Secret Access Key** and enter an AWS Secret Access Key.
+1. Select **Save**.
-### Delete a AWS S3 streaming destination
+## List streaming destinations
Prerequisites:
- Owner role for a top-level group.
-To delete AWS S3 streaming destinations to a top-level group:
+To list streaming destinations for a top-level group:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Secure > Audit events**.
1. On the main area, select the **Streams** tab.
-1. Select the AWS S3 stream to expand.
+1. Select the stream to expand.
+
+## Activate or deactivate streaming destinations
+
+{{< history >}}
+
+- [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/537096) in GitLab 18.2.
+
+{{< /history >}}
+
+You can temporarily deactivate audit event streaming to a destination without deleting the destination configuration. When a streaming destination is deactivated:
+
+- Audit events stop streaming to that destination immediately.
+- The destination configuration is preserved.
+- You can reactivate the destination at any time.
+- Other active destinations continue to receive events.
+
+### Deactivate a streaming destination
+
+Prerequisites:
+
+- Owner role for a top-level group.
+
+To deactivate a streaming destination:
+
+1. On the left sidebar, select **Search or go to** and find your group.
+1. Select **Secure > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
+1. Clear the **Active** checkbox.
+1. Select **Save**.
+
+The destination stops receiving audit events.
+
+### Activate a streaming destination
+
+To reactivate a previously deactivated streaming destination:
+
+1. On the left sidebar, select **Search or go to** and find your group.
+1. Select **Secure > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
+1. Select the **Active** checkbox.
+1. Select **Save**.
+
+The destination resumes receiving audit events immediately.
+
+## Delete streaming destinations
+
+Delete streaming destinations for a top-level group. When the last destination is successfully deleted, streaming is
+disabled for the top-level group.
+
+Prerequisites:
+
+- Owner role for a top-level group.
+
+To delete streaming destinations for a top-level group:
+
+1. On the left sidebar, select **Search or go to** and find your group.
+1. Select **Secure > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
1. Select **Delete destination**.
-1. Confirm by selecting **Delete destination** in the dialog.
+1. To confirm, select **Delete destination**.
+
+### Delete only custom HTTP headers
+
+Prerequisites:
+
+- Owner role for a top-level group.
+
+To delete only the custom HTTP headers for a streaming destination:
+
+1. On the left sidebar, select **Search or go to** and find your group.
+1. Select **Secure > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. Select the stream to expand.
+1. Locate the **Custom HTTP headers** table.
+1. Locate the header that you want to remove.
+1. To the right of the header, select **Delete** ({{< icon name="remove" >}}).
+1. Select **Save**.
## Related topics
diff --git a/doc/user/gitlab_duo/model_selection.md b/doc/user/gitlab_duo/model_selection.md
index 79dd7c1bc9f..9597da57efb 100644
--- a/doc/user/gitlab_duo/model_selection.md
+++ b/doc/user/gitlab_duo/model_selection.md
@@ -3,7 +3,7 @@ stage: AI-powered
group: Custom Models
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
description: Configure large language models for GitLab Duo features.
-title: GitLab Duo Model Selection
+title: GitLab Duo model selection
---
{{< details >}}
@@ -28,30 +28,36 @@ For more information, see the history.
{{< /alert >}}
-On GitLab.com, you can select specific large language models (LLMs) to use with the GitLab Duo AI-native features to meet your performance and compliance requirements.
+To help meet your performance and compliance requirements,
+on GitLab.com, you can choose to use different large language models (LLMs) with GitLab Duo.
-If you do not select a specific LLM, the AI-native features use the GitLab-selected **GitLab Default** LLM. You should use this LLM if you do not have unique requirements.
+If you do not select a specific LLM, all GitLab Duo features use the default LLMs.
+You should use the defaults if you do not have unique requirements.
-## Prerequisites
+## Select an LLM for a feature
+
+Prerequisites:
- The group that you want to select LLMs for must:
- Be a [top-level group](../group/_index.md#group-hierarchy) on GitLab.com.
- Have GitLab Duo Core, Pro, or Enterprise enabled.
- You must have the Owner role for the group.
-## Select an LLM for a feature
+To select a different LLM for a feature:
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Settings > GitLab Duo**.
- If you do not see **GitLab Duo**, ensure you have GitLab Duo Core, Pro or Enterprise enabled for the group.
+ If **GitLab Duo** is not visible, ensure you have GitLab Duo Core, Pro, or Enterprise turned on for the group.
1. Select **Configure features**.
1. For the feature you want to configure, select an LLM from the dropdown list.
-
+
## Troubleshooting
+When selecting models other than the default, you might encounter the following issues.
+
### LLM is not available
If you are using the GitLab Default LLM for a GitLab Duo AI-native feature, GitLab might change the default LLM without notifying the user to maintain optimal performance and reliability.
@@ -67,6 +73,6 @@ If you are assigned a seat in a project that has a specific LLM selected for [co
This might cause increased latency with code completion requests.
-### Agentic Chat incompactibility
+### Agentic Chat incompatibility
-When a specific LLM is selected for Duo-Chat or its sub-features, GitLab Duo Chat disables [GitLab Duo Agentic Chat](../gitlab_duo_chat/agentic_chat.md) in that namespace.
+When a specific LLM is selected for GitLab Duo Chat or its sub-features, [GitLab Duo Agentic Chat](../gitlab_duo_chat/agentic_chat.md) is not available in that namespace.
diff --git a/doc/user/packages/package_registry/package_protection_rules.md b/doc/user/packages/package_registry/package_protection_rules.md
index b2e4f303cc9..d0591095a29 100644
--- a/doc/user/packages/package_registry/package_protection_rules.md
+++ b/doc/user/packages/package_registry/package_protection_rules.md
@@ -27,6 +27,7 @@ title: Protected packages
- Protected Helm charts [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/323973) in GitLab 18.1 [with a flag](../../../administration/feature_flags/_index.md) named `packages_protected_packages_helm`. Disabled by default. This feature is an [experiment](../../../policy/development_stages_support.md).
- Generic protected packages [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/323973) in GitLab 18.1 [with a flag](../../../administration/feature_flags/_index.md) named `packages_protected_packages_generic`. Disabled by default. This feature is an [experiment](../../../policy/development_stages_support.md).
- Generic protected packages became [generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/537971) in GitLab 18.2. Feature flag `packages_protected_packages_generic` removed.
+- NuGet protected packages became [generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/524145) in GitLab 18.2. Feature flag `packages_protected_packages_nuget` removed.
{{< /history >}}
diff --git a/gems/gitlab-active-context/lib/active_context/concerns/bulk_async_process.rb b/gems/gitlab-active-context/lib/active_context/concerns/bulk_async_process.rb
index 849ba4d93df..40295d1befb 100644
--- a/gems/gitlab-active-context/lib/active_context/concerns/bulk_async_process.rb
+++ b/gems/gitlab-active-context/lib/active_context/concerns/bulk_async_process.rb
@@ -11,7 +11,7 @@ module ActiveContext
extend ActiveSupport::Concern
def perform(*args)
- return false unless ActiveContext::Config.indexing_enabled?
+ return false unless ActiveContext.indexing?
if args.empty?
enqueue_all_shards
diff --git a/gems/gitlab-active-context/lib/active_context/concerns/migration_worker.rb b/gems/gitlab-active-context/lib/active_context/concerns/migration_worker.rb
index 34baed29587..34ec7e4ed7c 100644
--- a/gems/gitlab-active-context/lib/active_context/concerns/migration_worker.rb
+++ b/gems/gitlab-active-context/lib/active_context/concerns/migration_worker.rb
@@ -28,13 +28,8 @@ module ActiveContext
private
def preflight_checks
- unless ActiveContext::Config.indexing_enabled?
- log 'indexing disabled. Execution is skipped.'
- return false
- end
-
- unless adapter
- log 'adapter not configured. Execution is skipped.'
+ unless ActiveContext.indexing?
+ log 'indexing disabled or adapter not configured. Execution is skipped.'
return false
end
diff --git a/lib/banzai/filter/task_list_filter.rb b/lib/banzai/filter/task_list_filter.rb
index b30e0536487..f2fa74fa9fe 100644
--- a/lib/banzai/filter/task_list_filter.rb
+++ b/lib/banzai/filter/task_list_filter.rb
@@ -59,8 +59,15 @@ module Banzai
override :render_item_checkbox
def render_item_checkbox(item)
+ # Build an accessible label from the task item source: strip the checkbox
+ # marker, cut the label off at any nested list, then sanitize and truncate
+ # the remaining text.
+ stripped_source = item.source.sub(ItemPattern, '').strip
+ text = stripped_source.partition(/\<(ol|ul)/)
+ source = ActionView::Base.full_sanitizer.sanitize(text[0])
+ truncated_source = source.truncate(100, separator: ' ', omission: '…')
+ aria_label = format(_('Check option: %{option}'), option: truncated_source)
+
%()
diff --git a/lib/gitlab/database/repair_index.rb b/lib/gitlab/database/repair_index.rb
new file mode 100644
index 00000000000..ef7067bab7b
--- /dev/null
+++ b/lib/gitlab/database/repair_index.rb
@@ -0,0 +1,395 @@
+# frozen_string_literal: true
+
+module Gitlab
+ module Database
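+ # Repairs known-broken database indexes listed in INDEXES_TO_REPAIR:
+ # for unique indexes, duplicate rows are removed and references to them
+ # are rewritten to point at the kept row, then the index is reindexed
+ # (or created) concurrently.
+ #
+ # Typical invocation, via the rake task added in this change:
+ #   DRY_RUN=true bundle exec rake gitlab:db:repair_index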
+ class RepairIndex
+ include Gitlab::Database::Migrations::TimeoutHelpers
+
+ BATCH_SIZE = 100
+ # SQL templates with placeholders
+ REINDEX_SQL = "REINDEX INDEX CONCURRENTLY %{index_name}"
+ CREATE_INDEX_SQL = "CREATE%{unique_clause} INDEX CONCURRENTLY %{index_name} ON %{table_name} (%{column_list})"
+ UPDATE_REFERENCES_SQL = "UPDATE %{ref_table} SET %{ref_column} = %{good_id} WHERE %{ref_column} IN (%{bad_ids})"
+ DELETE_DUPLICATES_SQL = "DELETE FROM %{table_name} WHERE id IN (%{bad_ids})"
+
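+ # One row per set of duplicates; ids are ordered ascending so that the
+ # first id can be kept as the canonical record.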
+ FIND_DUPLICATE_SETS_SQL = <<~SQL
+ SELECT ARRAY_AGG(id ORDER BY id ASC) as ids
+ FROM %{table_name}
+ GROUP BY %{column_list}
+ HAVING COUNT(*) > 1
+ SQL
+ ENTITIES_WITH_DUPLICATE_REFS_SQL = <<~SQL
+ SELECT DISTINCT %{entity_column}
+ FROM %{ref_table}
+ WHERE %{ref_column} = %{good_id}
+ AND EXISTS (
+ SELECT 1
+ FROM %{ref_table} sub
+ WHERE sub.%{entity_column} = %{ref_table}.%{entity_column}
+ AND sub.%{ref_column} = %{bad_id}
+ )
+ SQL
+ DELETE_DUPLICATE_REFS_SQL = <<~SQL
+ DELETE FROM %{ref_table}
+ WHERE %{ref_column} = %{bad_id}
+ AND %{entity_column} IN (%{entity_ids})
+ SQL
+ FIND_ARRAY_REFS_SQL = <<~SQL
+ SELECT id, %{array_column}
+ FROM %{ref_table}
+ WHERE %{array_column}::bigint[] && ARRAY[%{bad_ids}]::bigint[]
+ SQL
+ UPDATE_ARRAY_REFS_SQL = <<~SQL
+ UPDATE %{ref_table}
+ SET %{array_column} = ARRAY[%{updated_array}]
+ WHERE id = %{record_id}
+ SQL
+ INDEX_EXISTS_SQL = <<~SQL
+ SELECT 1 FROM pg_indexes
+ WHERE indexname = %{index_name}
+ AND tablename = %{table_name}
+ SQL
+
+ # Configuration for known problematic indexes which can be injected via SchemaChecker or CollationChecker
+ INDEXES_TO_REPAIR = {
+ # Implementation based on scripts to fix index on https://gitlab.com/gitlab-org/gitlab/-/issues/372150#note_1083479615
+ # and https://gitlab.com/gitlab-org/gitlab/-/issues/523146#note_2418277173.
+ 'merge_request_diff_commit_users' => {
+ 'index_merge_request_diff_commit_users_on_name_and_email' => {
+ 'columns' => %w[name email],
+ 'unique' => true,
+ 'references' => [
+ {
+ 'table' => 'merge_request_diff_commits',
+ 'column' => 'committer_id'
+ },
+ {
+ 'table' => 'merge_request_diff_commits',
+ 'column' => 'commit_author_id'
+ }
+ ]
+ },
+ 'index_merge_request_diff_commit_users_on_org_id_name_email' => {
+ 'columns' => %w[organization_id name email],
+ 'unique' => true,
+ 'references' => [
+ {
+ 'table' => 'merge_request_diff_commits',
+ 'column' => 'committer_id'
+ },
+ {
+ 'table' => 'merge_request_diff_commits',
+ 'column' => 'commit_author_id'
+ }
+ ]
+ }
+ }
+ }.freeze
+
+ def self.run(database_name: nil, indexes_to_repair: INDEXES_TO_REPAIR, logger: Gitlab::AppLogger, dry_run: false)
+ Gitlab::Database::EachDatabase.each_connection(only: database_name) do |connection, database|
+ new(connection, database, indexes_to_repair, logger, dry_run).run
+ end
+ end
+
+ attr_reader :connection, :database_name, :indexes_to_repair, :logger, :dry_run
+
+ def initialize(connection, database_name, indexes_to_repair, logger, dry_run)
+ @connection = connection
+ @database_name = database_name
+ @indexes_to_repair = indexes_to_repair
+ @logger = logger
+ @dry_run = dry_run
+ end
+
+ def run
+ logger.info("DRY RUN: Analysis only, no changes will be made.") if dry_run
+ logger.info("Running Index repair on database #{database_name}...")
+
+ indexes_to_repair.each do |table_name, indexes|
+ unless table_exists?(table_name)
+ logger.info("Table '#{table_name}' does not exist in database #{database_name}. Skipping.")
+ next
+ end
+
+ indexes.each do |index_name, index_config|
+ logger.info("Processing index '#{index_name}' on table '#{table_name}'...")
+
+ if index_config['unique']
+ logger.info("Index is unique. Checking for duplicate data...")
+ deduplicate_data(table_name, index_config['columns'], index_config['references'])
+ end
+
+ if index_exists?(table_name, index_name)
+ logger.info("Index exists. Reindexing...")
+ reindex_index(index_name)
+ else
+ logger.info("Index does not exist. Creating new index...")
+ create_index(table_name, index_name, index_config['columns'], index_config['unique'])
+ end
+ end
+ end
+
+ logger.info("Index repair completed for database #{database_name}.")
+ end
+
+ private
+
+ def execute(sql)
+ connection.execute(sql) # rubocop:disable Database/AvoidUsingConnectionExecute -- Required for TimeoutHelpers
+ end
+
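+ # Logs every statement. In dry-run mode only read-only statements are
+ # executed; write statements are logged and skipped.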
+ def execute_local(sql, read_only: false)
+ logger.info("SQL: #{sql}")
+
+ return if dry_run && !read_only
+
+ disable_statement_timeout do
+ yield
+ end
+ end
+
+ def table_exists?(table_name)
+ connection.table_exists?(table_name)
+ end
+
+ def index_exists?(table_name, index_name)
+ sql = format(
+ INDEX_EXISTS_SQL,
+ table_name: connection.quote(table_name),
+ index_name: connection.quote(index_name)
+ )
+
+ execute_local(sql, read_only: true) do
+ connection.select_value(sql)
+ end.present?
+ end
+
+ def deduplicate_data(table_name, columns, references)
+ duplicate_sets = find_duplicate_sets(table_name, columns)
+
+ unless duplicate_sets&.any?
+ logger.info("No duplicates found in '#{table_name}' for columns: #{columns.join(',')}.")
+ return
+ end
+
+ logger.warn("Found #{duplicate_sets.count} duplicates in '#{table_name}' for columns: #{columns.join(',')}")
+
+ bad_id_to_good_id_mapping = generate_id_mapping(duplicate_sets)
+ process_references(references, bad_id_to_good_id_mapping)
+ delete_duplicates(table_name, bad_id_to_good_id_mapping)
+ end
+
+ def generate_id_mapping(duplicate_sets)
+ id_mapping = {}
+
+ duplicate_sets.each do |set|
+ ids = parse_pg_array(set['ids'])
+ good_id = ids.first
+ bad_ids = ids[1..]
+
+ bad_ids.each do |bad_id|
+ id_mapping[bad_id] = good_id
+ end
+ end
+
+ id_mapping
+ end
+
+ def process_references(references, id_mapping)
+ return if id_mapping.empty?
+
+ Array(references).each do |ref|
+ ref_table = ref['table']
+ ref_column = ref['column']
+ column_type = ref['type']
+ entity_column = ref['entity_column']
+
+ if column_type == 'array'
+ handle_array_references(ref_table, ref_column, id_mapping)
+ elsif entity_column.present?
+ handle_duplicate_references(ref_table, ref_column, entity_column, id_mapping)
+ else
+ update_references(ref_table, ref_column, id_mapping)
+ end
+ end
+ end
+
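+ # Rewrites array columns (for example arrays of tag ids) that reference
+ # removed duplicate rows, replacing each bad id with its canonical id.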
+ def handle_array_references(ref_table, ref_column, id_mapping)
+ logger.info("Processing array references in '#{ref_table}.#{ref_column}'...")
+
+ id_mapping.keys.each_slice(BATCH_SIZE) do |bad_ids_batch|
+ bad_ids_quoted = bad_ids_batch.map { |id| connection.quote(id) }.join(',')
+
+ sql = format(
+ FIND_ARRAY_REFS_SQL,
+ ref_table: connection.quote_table_name(ref_table),
+ array_column: connection.quote_column_name(ref_column),
+ bad_ids: bad_ids_quoted
+ )
+
+ records = execute_local(sql, read_only: true) do
+ connection.select_all(sql)
+ end
+
+ next unless records&.any?
+
+ logger.info("Found #{records.count} records with array references to update for this batch")
+
+ records.each do |record|
+ record_id = record['id']
+ tag_ids = parse_pg_array(record[ref_column])
+
+ updated_tag_ids = tag_ids.map { |tag_id| id_mapping.fetch(tag_id, tag_id) }
+
+ sql = format(
+ UPDATE_ARRAY_REFS_SQL,
+ ref_table: connection.quote_table_name(ref_table),
+ array_column: connection.quote_column_name(ref_column),
+ updated_array: updated_tag_ids.join(','),
+ record_id: connection.quote(record_id)
+ )
+
+ execute_local(sql) do
+ connection.update(sql)
+ end
+
+ logger.info("Updated array references for record id=#{record_id} in '#{ref_table}'")
+ end
+ end
+ end
+
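+ # When an entity references both the kept id and a duplicate id, the
+ # duplicate reference is deleted first so that rewriting the remaining
+ # references cannot produce identical rows.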
+ def handle_duplicate_references(ref_table, ref_column, entity_column, id_mapping)
+ logger.info("Processing references in '#{ref_table}' with duplicate detection...")
+
+ id_mapping.each do |bad_id, good_id|
+ # Find all entities that have both good and bad references
+ sql = format(
+ ENTITIES_WITH_DUPLICATE_REFS_SQL,
+ entity_column: connection.quote_column_name(entity_column),
+ ref_table: connection.quote_table_name(ref_table),
+ ref_column: connection.quote_column_name(ref_column),
+ good_id: connection.quote(good_id),
+ bad_id: connection.quote(bad_id)
+ )
+
+ entities_with_both = execute_local(sql, read_only: true) do
+ connection.select_values(sql)
+ end
+
+ next unless entities_with_both&.any?
+
+ entities_with_both.each_slice(BATCH_SIZE) do |entity_ids_batch|
+ # Delete the references with bad_id for these entities
+ sql = format(
+ DELETE_DUPLICATE_REFS_SQL,
+ ref_table: connection.quote_table_name(ref_table),
+ ref_column: connection.quote_column_name(ref_column),
+ bad_id: connection.quote(bad_id),
+ entity_column: connection.quote_column_name(entity_column),
+ entity_ids: entity_ids_batch.map { |e| connection.quote(e) }.join(',')
+ )
+
+ execute_local(sql) do
+ deleted_count = connection.delete(sql)
+ logger.info("Deleted #{deleted_count} duplicate references in '#{ref_table}' for this batch")
+ end
+ end
+ end
+
+ # update any remaining references
+ update_references(ref_table, ref_column, id_mapping)
+ end
+
+ def update_references(ref_table, ref_column, id_mapping)
+ logger.info("Updating references in '#{ref_table}'...")
+
+ id_mapping.each do |bad_id, good_id|
+ sql = format(
+ UPDATE_REFERENCES_SQL,
+ ref_table: connection.quote_table_name(ref_table),
+ ref_column: connection.quote_column_name(ref_column),
+ good_id: connection.quote(good_id),
+ bad_ids: connection.quote(bad_id)
+ )
+
+ execute_local(sql) do
+ affected_rows = connection.update(sql)
+ logger.info("Updated #{affected_rows} references in '#{ref_table}' from #{bad_id} to #{good_id}")
+ end
+ end
+ end
+
+ def find_duplicate_sets(table_name, columns)
+ logger.info("Checking for duplicates in '#{table_name}' for columns: #{columns.join(',')}...")
+
+ sql = format(
+ FIND_DUPLICATE_SETS_SQL,
+ table_name: connection.quote_table_name(table_name),
+ column_list: columns.map { |col| connection.quote_column_name(col) }.join(', ')
+ )
+
+ execute_local(sql, read_only: true) do
+ connection.select_all(sql)
+ end
+ end
+
+ def delete_duplicates(table_name, id_mapping)
+ return if id_mapping.empty?
+
+ logger.info("Deleting duplicate records from #{table_name}...")
+
+ id_mapping.keys.each_slice(BATCH_SIZE) do |batch|
+ sql = format(
+ DELETE_DUPLICATES_SQL,
+ table_name: connection.quote_table_name(table_name),
+ bad_ids: batch.map { |id| connection.quote(id) }.join(',')
+ )
+
+ execute_local(sql) do
+ affected_rows = connection.delete(sql)
+ logger.info("Deleted #{affected_rows} duplicate records from #{table_name}")
+ end
+ end
+ end
+
+ def reindex_index(index_name)
+ logger.info("Reindexing index '#{index_name}'...")
+
+ sql = format(REINDEX_SQL, index_name: connection.quote_table_name(index_name))
+
+ execute_local(sql) do
+ execute(sql)
+ end
+
+ logger.info("Index reindexed successfully.")
+ end
+
+ def create_index(table_name, index_name, columns, unique = false)
+ unique_clause = unique ? " UNIQUE" : ""
+
+ sql = format(
+ CREATE_INDEX_SQL,
+ unique_clause: unique_clause,
+ index_name: connection.quote_table_name(index_name),
+ table_name: connection.quote_table_name(table_name),
+ column_list: columns.map { |col| connection.quote_column_name(col) }.join(', ')
+ )
+
+ logger.info("Creating#{unique ? ' unique' : ''} index #{index_name}...")
+
+ execute_local(sql) do
+ execute(sql)
+ end
+
+ logger.info("Index created successfully.")
+ end
+
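+ # Converts a PostgreSQL array literal such as '{1,2,3}' into [1, 2, 3].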
+ def parse_pg_array(pg_array_string)
+ return [] if pg_array_string.nil?
+
+ pg_array_string.tr('{}', '').split(',').map(&:to_i)
+ end
+ end
+ end
+end
diff --git a/lib/tasks/gitlab/db.rake b/lib/tasks/gitlab/db.rake
index 7ea1dbdcb33..d4910716ce9 100644
--- a/lib/tasks/gitlab/db.rake
+++ b/lib/tasks/gitlab/db.rake
@@ -628,6 +628,27 @@ namespace :gitlab do
end
end
+ desc 'GitLab | DB | Repair database indexes according to fixed configuration'
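+ # Set DRY_RUN=true to log the repair SQL without executing any writes.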
+ task repair_index: :environment do
+ Gitlab::Database::RepairIndex.run(
+ logger: Logger.new($stdout),
+ dry_run: ENV['DRY_RUN'] == 'true'
+ )
+ end
+
+ namespace :repair_index do
+ each_database(databases) do |database_name|
+ desc "GitLab | DB | Repair database indexes on the #{database_name} database"
+ task database_name => :environment do
+ Gitlab::Database::RepairIndex.run(
+ database_name: database_name,
+ logger: Logger.new($stdout),
+ dry_run: ENV['DRY_RUN'] == 'true'
+ )
+ end
+ end
+ end
+
namespace :dictionary do
desc 'Generate database docs yaml'
task generate: :environment do
diff --git a/spec/frontend/packages_and_registries/settings/project/settings/components/packages_protection_rule_form_spec.js b/spec/frontend/packages_and_registries/settings/project/settings/components/packages_protection_rule_form_spec.js
index f129bd52839..1b2eada6d6f 100644
--- a/spec/frontend/packages_and_registries/settings/project/settings/components/packages_protection_rule_form_spec.js
+++ b/spec/frontend/packages_and_registries/settings/project/settings/components/packages_protection_rule_form_spec.js
@@ -24,7 +24,6 @@ describe('Packages Protection Rule Form', () => {
const defaultProvidedValues = {
projectPath: 'path',
glFeatures: {
- packagesProtectedPackagesNuget: true,
packagesProtectedPackagesDelete: true,
packagesProtectedPackagesHelm: true,
},
@@ -106,30 +105,6 @@ describe('Packages Protection Rule Form', () => {
]);
});
- describe('when feature flag packagesProtectedPackagesNuget is disabled', () => {
- it('contains available options without option "NUGET"', () => {
- mountComponent({
- provide: {
- ...defaultProvidedValues,
- glFeatures: {
- ...defaultProvidedValues.glFeatures,
- packagesProtectedPackagesNuget: false,
- },
- },
- });
-
- expect(findPackageTypeSelect().exists()).toBe(true);
- expect(packageTypeSelectOptions()).toEqual([
- 'CONAN',
- 'GENERIC',
- 'HELM',
- 'MAVEN',
- 'NPM',
- 'PYPI',
- ]);
- });
- });
-
describe('when feature flag packagesProtectedPackagesHelm is disabled', () => {
it('contains available options without option "HELM"', () => {
mountComponent({
diff --git a/spec/lib/banzai/filter/task_list_filter_spec.rb b/spec/lib/banzai/filter/task_list_filter_spec.rb
index 0dd5ba9d6a6..3e16c92046b 100644
--- a/spec/lib/banzai/filter/task_list_filter_spec.rb
+++ b/spec/lib/banzai/filter/task_list_filter_spec.rb
@@ -11,6 +11,24 @@ RSpec.describe Banzai::Filter::TaskListFilter, feature_category: :markdown do
expect(doc.xpath('.//li//task-button').count).to eq(2)
end
+ it 'adds `aria-label` to every checkbox in the list' do
+ doc = filter("
\n
[ ] testing item 1
\n
[x] testing item 2
\n
[~] testing item 3
\n
[~] testing item 4 this is a very long label that should be truncated at some point but where does it truncate?
\n
[ ]
suspicious item
\n
[ ]
suspicious item 2
\n
[~]
\n
[~]
\n
[~]
\n
[~] " hijacking quotes \" a \' b ' c
")
+
+ aria_labels = doc.xpath('.//li//input/@aria-label')
+
+ expect(aria_labels.count).to eq(10)
+ expect(aria_labels[0].value).to eq('Check option: testing item 1')
+ expect(aria_labels[1].value).to eq('Check option: testing item 2')
+ expect(aria_labels[2].value).to eq('Check option: testing item 3')
+ expect(aria_labels[3].value).to eq('Check option: testing item 4 this is a very long label that should be truncated at some point but where does it…')
+ expect(aria_labels[4].value).to eq('Check option: suspicious item')
+ expect(aria_labels[5].value).to eq('Check option: suspicious item 2')
+ expect(aria_labels[6].value).to eq('Check option: ')
+ expect(aria_labels[7].value).to eq('Check option: ')
+ expect(aria_labels[8].value).to eq('Check option: ')
+ expect(aria_labels[9].value).to eq("Check option: \" hijacking quotes \" a ' b ' c")
+ end
+
it 'ignores checkbox on following line' do
doc = filter(
<<~HTML
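The expectations above pin down three behaviours: each checkbox gains an `aria-label` prefixed with `Check option: `, overlong labels are truncated with a `…` ellipsis, and quote characters survive attribute escaping. A rough sketch of label construction consistent with those expectations (the helper name and the exact truncation limit are assumptions, not GitLab's implementation):

```ruby
# Hypothetical helper; the ~100-character limit is inferred from the spec, not confirmed.
def checkbox_aria_label(item_text)
  label = item_text.to_s.gsub(/\s+/, ' ').strip   # collapse newlines inside the list item
  label = "#{label[0, 99].rstrip}…" if label.length > 99
  "Check option: #{label}"
end

checkbox_aria_label("testing item 1")     # => "Check option: testing item 1"
checkbox_aria_label("suspicious item\n")  # => "Check option: suspicious item"
```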
diff --git a/spec/lib/gitlab/database/repair_index_spec.rb b/spec/lib/gitlab/database/repair_index_spec.rb
new file mode 100644
index 00000000000..105ae0bd8c5
--- /dev/null
+++ b/spec/lib/gitlab/database/repair_index_spec.rb
@@ -0,0 +1,312 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::RepairIndex, feature_category: :database do
+ describe '.run' do
+ let(:connection) { instance_double(ActiveRecord::ConnectionAdapters::PostgreSQLAdapter) }
+ let(:database_name) { 'main' }
+ let(:logger) { instance_double(Gitlab::AppLogger, info: nil, warn: nil, error: nil) }
+
+ it 'instantiates the class and calls run' do
+ instance = instance_double(described_class)
+
+ expect(Gitlab::Database::EachDatabase).to receive(:each_connection)
+ .with(only: database_name)
+ .and_yield(connection, database_name)
+
+ expect(described_class).to receive(:new)
+ .with(connection, database_name, described_class::INDEXES_TO_REPAIR, logger, false)
+ .and_return(instance)
+ expect(instance).to receive(:run)
+
+ described_class.run(database_name: database_name, logger: logger)
+ end
+ end
+
+ describe '#run' do
+ let(:connection) { ActiveRecord::Base.connection }
+ let(:database_name) { connection.current_database }
+ let(:logger) { instance_double(Gitlab::AppLogger, info: nil, warn: nil, error: nil) }
+ let(:dry_run) { false }
+
+ let(:test_table) { '_test_repair_index_table' }
+ let(:test_unique_index) { '_test_repair_index_unique_idx' }
+ let(:test_ref_table) { '_test_repair_index_ref_table' }
+ let(:test_entity_ref_table) { '_test_repair_index_entity_ref_table' }
+ let(:test_array_ref_table) { '_test_repair_index_array_ref_table' }
+ let(:test_regular_index) { '_test_repair_regular_idx' }
+
+ let(:indexes_to_repair) do
+ {
+ test_table => {
+ test_unique_index => {
+ 'columns' => %w[name email],
+ 'unique' => true,
+ 'references' => [
+ {
+ 'table' => test_ref_table,
+ 'column' => 'user_id'
+ },
+ {
+ 'table' => test_entity_ref_table,
+ 'column' => 'user_id',
+ 'entity_column' => 'entity_id'
+ },
+ {
+ 'table' => test_array_ref_table,
+ 'column' => 'user_ids',
+ 'type' => 'array'
+ }
+ ]
+ },
+ test_regular_index => {
+ 'columns' => %w[name],
+ 'unique' => false
+ }
+ }
+ }
+ end
+
+ let(:repairer) { described_class.new(connection, database_name, indexes_to_repair, logger, dry_run) }
+
+ before do
+ connection.execute(<<~SQL)
+ CREATE TABLE #{test_table} (
+ id serial PRIMARY KEY,
+ name varchar(255) NOT NULL,
+ email varchar(255) NOT NULL
+ );
+ SQL
+
+ connection.execute(<<~SQL)
+ CREATE TABLE #{test_ref_table} (
+ id serial PRIMARY KEY,
+ user_id integer NOT NULL,
+ data varchar(255) NOT NULL
+ );
+ SQL
+
+ connection.execute(<<~SQL)
+ CREATE TABLE #{test_entity_ref_table} (
+ id serial PRIMARY KEY,
+ user_id integer NOT NULL,
+ entity_id integer NOT NULL,
+ data varchar(255) NOT NULL
+ );
+ SQL
+
+ connection.execute(<<~SQL)
+ CREATE TABLE #{test_array_ref_table} (
+ id serial PRIMARY KEY,
+ user_ids bigint[] NOT NULL,
+ data varchar(255) NOT NULL
+ );
+ SQL
+
+ # Replace the SQL constants for tests to not use CONCURRENTLY
+ stub_const(
+ "#{described_class}::REINDEX_SQL",
+ "REINDEX INDEX %{index_name}"
+ )
+ stub_const(
+ "#{described_class}::CREATE_INDEX_SQL",
+ "CREATE%{unique_clause} INDEX %{index_name} ON %{table_name} (%{column_list})"
+ )
+ end
+
+ after do
+ connection.execute("DROP TABLE IF EXISTS #{test_array_ref_table} CASCADE")
+ connection.execute("DROP TABLE IF EXISTS #{test_entity_ref_table} CASCADE")
+ connection.execute("DROP TABLE IF EXISTS #{test_ref_table} CASCADE")
+ connection.execute("DROP TABLE IF EXISTS #{test_table} CASCADE")
+ end
+
+ context 'when table does not exist' do
+ let(:indexes_to_repair) { { '_non_existing_table_' => {} } }
+
+ it 'logs that the table does not exist and skips processing' do
+ expect(logger).to receive(:info).with(/Table '_non_existing_table_' does not exist/)
+
+ repairer.run
+ end
+ end
+
+ context 'when indexes do not exist' do
+ it 'creates the indexes correctly' do
+ repairer.run
+
+ is_unique = connection.select_value(<<~SQL)
+ SELECT indisunique
+ FROM pg_index i
+ JOIN pg_class c ON i.indexrelid = c.oid
+ JOIN pg_class t ON i.indrelid = t.oid
+ WHERE c.relname = '#{test_unique_index}'
+ AND t.relname = '#{test_table}'
+ SQL
+ expect(is_unique).to be true
+
+ regular_index_exists = connection.select_value(<<~SQL).present?
+ SELECT 1
+ FROM pg_indexes
+ WHERE tablename = '#{test_table}'
+ AND indexname = '#{test_regular_index}'
+ SQL
+ expect(regular_index_exists).to be true
+ end
+ end
+
+ context 'when indexes already exist' do
+ before do
+ connection.execute(<<~SQL)
+ CREATE INDEX #{test_regular_index} ON #{test_table} (name);
+ SQL
+ connection.execute(<<~SQL)
+ CREATE UNIQUE INDEX #{test_unique_index} ON #{test_table} (name, email);
+ SQL
+ end
+
+ it 'reindexes the existing indexes' do
+ expect(logger).to receive(:info).with(/Index reindexed successfully/).twice
+
+ repairer.run
+ end
+ end
+
+ context 'with duplicate data and various reference types' do
+ before do
+ connection.execute(<<~SQL)
+ CREATE INDEX #{test_regular_index} ON #{test_table} (name);
+ SQL
+
+ # Insert duplicate data
+ connection.execute(<<~SQL)
+ INSERT INTO #{test_table} (name, email) VALUES
+ ('test_user', 'test@example.com'), -- ID 1
+ ('test_user', 'test@example.com'), -- ID 2 (duplicate)
+ ('other_user', 'other@example.com'); -- ID 3
+ SQL
+
+ # Create standard references (no entity column)
+ connection.execute(<<~SQL)
+ INSERT INTO #{test_ref_table} (user_id, data) VALUES
+ (1, 'ref to good ID'),
+ (2, 'ref to bad ID - will be updated');
+ SQL
+
+ # Create a unique index on the reference table to check that reference updates do not violate uniqueness
+ connection.execute(<<~SQL)
+ CREATE UNIQUE INDEX unique_test_index_reference ON #{test_entity_ref_table} (user_id, entity_id);
+ SQL
+ # Create entity-based references
+ connection.execute(<<~SQL)
+ INSERT INTO #{test_entity_ref_table} (user_id, entity_id, data) VALUES
+ (1, 100, 'entity ref to good ID'),
+ (2, 100, 'entity ref to bad ID - will be deleted'),
+ (2, 200, 'entity ref to bad ID - will be updated');
+ SQL
+
+ # Create array references
+ connection.execute(<<~SQL)
+ INSERT INTO #{test_array_ref_table} (user_ids, data) VALUES
+ ('{1,3}', 'array without bad IDs'),
+ ('{2,3}', 'array with bad ID');
+ SQL
+ end
+
+ it 'handles all reference types correctly' do
+ # before: 3 users, various references
+ user_count_before = connection.select_value("SELECT COUNT(*) FROM #{test_table}")
+ expect(user_count_before).to eq(3)
+
+ # unique index doesn't exist yet
+ index_exists_before = connection.select_value(<<~SQL).present?
+ SELECT 1
+ FROM pg_indexes
+ WHERE tablename = '#{test_table}'
+ AND indexname = '#{test_unique_index}'
+ SQL
+ expect(index_exists_before).to be false
+
+ repairer.run
+
+ # after: 2 users (duplicate removed)
+ user_count_after = connection.select_value("SELECT COUNT(*) FROM #{test_table}")
+ expect(user_count_after).to eq(2)
+
+ # standard reference updated to good ID
+ standard_ref = connection.select_value(
+ "SELECT user_id FROM #{test_ref_table} WHERE data = 'ref to bad ID - will be updated'"
+ )
+ expect(standard_ref).to eq(1) # Updated from 2 to 1
+
+ # entity-based reference: duplicate deleted
+ entity_100_refs = connection.select_all("SELECT * FROM #{test_entity_ref_table} WHERE entity_id = 100").to_a
+ expect(entity_100_refs.size).to eq(1)
+ expect(entity_100_refs.first['user_id']).to eq(1) # the duplicate row with user_id 2 was deleted
+
+ # entity-based reference: non-duplicate updated
+ entity_200_ref = connection.select_value("SELECT user_id FROM #{test_entity_ref_table} WHERE entity_id = 200")
+ expect(entity_200_ref).to eq(1) # Updated from 2 to 1
+
+ # array reference updated
+ array_after = connection.select_value(
+ "SELECT user_ids FROM #{test_array_ref_table} WHERE data = 'array with bad ID'"
+ )
+ expect(array_after).to eq("{1,3}") # Updated from {2,3} to {1,3}
+
+ # unique index is created correctly
+ is_unique = connection.select_value(<<~SQL)
+ SELECT indisunique
+ FROM pg_index i
+ JOIN pg_class c ON i.indexrelid = c.oid
+ JOIN pg_class t ON i.indrelid = t.oid
+ WHERE c.relname = '#{test_unique_index}'
+ AND t.relname = '#{test_table}'
+ SQL
+ expect(is_unique).to be true
+ end
+
+ context 'with dry run' do
+ let(:dry_run) { true }
+
+ it 'analyzes data but does not make changes' do
+ expect(logger).to receive(:info).with(/Analysis only, no changes will be made/).at_least(:once)
+
+ user_count_before = connection.select_value("SELECT COUNT(*) FROM #{test_table}")
+ standard_ref_before = connection.select_value(
+ "SELECT user_id FROM #{test_ref_table} WHERE data = 'ref to bad ID - will be updated'"
+ )
+ entity_refs_before = connection.select_all("SELECT * FROM #{test_entity_ref_table}").to_a
+ array_ref_before = connection.select_value(
+ "SELECT user_ids FROM #{test_array_ref_table} WHERE data = 'array with bad ID'"
+ )
+
+ repairer.run
+
+ user_count_after = connection.select_value("SELECT COUNT(*) FROM #{test_table}")
+ standard_ref_after = connection.select_value(
+ "SELECT user_id FROM #{test_ref_table} WHERE data = 'ref to bad ID - will be updated'"
+ )
+ entity_refs_after = connection.select_all("SELECT * FROM #{test_entity_ref_table}").to_a
+ array_ref_after = connection.select_value(
+ "SELECT user_ids FROM #{test_array_ref_table} WHERE data = 'array with bad ID'"
+ )
+
+ expect(user_count_after).to eq(user_count_before)
+ expect(standard_ref_after).to eq(standard_ref_before)
+ expect(entity_refs_after).to match_array(entity_refs_before)
+ expect(array_ref_after).to eq(array_ref_before)
+
+ unique_index_exists = connection.select_value(<<~SQL).present?
+ SELECT 1
+ FROM pg_indexes
+ WHERE tablename = '#{test_table}'
+ AND indexname = '#{test_unique_index}'
+ SQL
+ expect(unique_index_exists).to be false
+ end
+ end
+ end
+ end
+end
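As the spec encodes, duplicate resolution keeps the row with the smallest id, rewrites plain references to it, rewrites entity-scoped references unless the rewrite would collide with an existing row (in which case the duplicate reference is deleted), and remaps ids inside array columns. A condensed sketch of the id-remapping step under those assumptions (plain Ruby, not the production SQL):

```ruby
# Build a bad_id => good_id map from rows sharing the unique-index columns.
rows = [
  { 'id' => 1, 'name' => 'test_user',  'email' => 'test@example.com' },
  { 'id' => 2, 'name' => 'test_user',  'email' => 'test@example.com' },
  { 'id' => 3, 'name' => 'other_user', 'email' => 'other@example.com' }
]

bad_to_good = rows
  .group_by { |r| r.values_at('name', 'email') }
  .values
  .select { |dupes| dupes.size > 1 }
  .flat_map do |dupes|
    good, *bad = dupes.map { |r| r['id'] }.sort
    bad.map { |bad_id| [bad_id, good] }
  end
  .to_h

bad_to_good                                   # => {2 => 1}
[2, 3].map { |id| bad_to_good.fetch(id, id) } # => [1, 3], as in the array-column expectation
```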
diff --git a/spec/migrations/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id_spec.rb b/spec/migrations/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id_spec.rb
index 4992f155e15..5fb5321ca54 100644
--- a/spec/migrations/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id_spec.rb
+++ b/spec/migrations/20250304132251_queue_backfill_approval_merge_request_rules_users_project_id_spec.rb
@@ -13,20 +13,7 @@ RSpec.describe QueueBackfillApprovalMergeRequestRulesUsersProjectId, feature_cat
}
migration.after -> {
- expect(batched_migration).to have_scheduled_batched_migration(
- table_name: :approval_merge_request_rules_users,
- column_name: :id,
- interval: described_class::DELAY_INTERVAL,
- batch_size: described_class::BATCH_SIZE,
- sub_batch_size: described_class::SUB_BATCH_SIZE,
- gitlab_schema: :gitlab_main_cell,
- job_arguments: [
- :project_id,
- :approval_merge_request_rules,
- :project_id,
- :approval_merge_request_rule_id
- ]
- )
+ expect(batched_migration).not_to have_scheduled_batched_migration
}
end
end
diff --git a/spec/migrations/20250623071728_queue_backfill_rolled_up_weight_for_work_items_spec.rb b/spec/migrations/20250708203741_requeue_backfill_approval_merge_request_rules_users_project_id_spec.rb
similarity index 51%
rename from spec/migrations/20250623071728_queue_backfill_rolled_up_weight_for_work_items_spec.rb
rename to spec/migrations/20250708203741_requeue_backfill_approval_merge_request_rules_users_project_id_spec.rb
index ea60feed9c1..57d3617ffde 100644
--- a/spec/migrations/20250623071728_queue_backfill_rolled_up_weight_for_work_items_spec.rb
+++ b/spec/migrations/20250708203741_requeue_backfill_approval_merge_request_rules_users_project_id_spec.rb
@@ -3,7 +3,7 @@
require 'spec_helper'
require_migration!
-RSpec.describe QueueBackfillRolledUpWeightForWorkItems, migration: :gitlab_main, feature_category: :team_planning do
+RSpec.describe RequeueBackfillApprovalMergeRequestRulesUsersProjectId, feature_category: :code_review_workflow do
let!(:batched_migration) { described_class::MIGRATION }
it 'schedules a new batched migration' do
@@ -14,10 +14,18 @@ RSpec.describe QueueBackfillRolledUpWeightForWorkItems, migration: :gitlab_main,
migration.after -> {
expect(batched_migration).to have_scheduled_batched_migration(
- table_name: :issues,
+ table_name: :approval_merge_request_rules_users,
column_name: :id,
+ interval: described_class::DELAY_INTERVAL,
batch_size: described_class::BATCH_SIZE,
- sub_batch_size: described_class::SUB_BATCH_SIZE
+ sub_batch_size: described_class::SUB_BATCH_SIZE,
+ gitlab_schema: :gitlab_main_cell,
+ job_arguments: [
+ :project_id,
+ :approval_merge_request_rules,
+ :project_id,
+ :approval_merge_request_rule_id
+ ]
)
}
end
diff --git a/spec/requests/projects/settings/packages_and_registries_controller_spec.rb b/spec/requests/projects/settings/packages_and_registries_controller_spec.rb
index 6ce2e4ac975..9e4c2c2100a 100644
--- a/spec/requests/projects/settings/packages_and_registries_controller_spec.rb
+++ b/spec/requests/projects/settings/packages_and_registries_controller_spec.rb
@@ -32,7 +32,6 @@ RSpec.describe Projects::Settings::PackagesAndRegistriesController, feature_cate
end
it_behaves_like 'pushed feature flag', :packages_protected_packages_helm
- it_behaves_like 'pushed feature flag', :packages_protected_packages_nuget
it_behaves_like 'pushed feature flag', :packages_protected_packages_delete
it_behaves_like 'pushed feature flag', :container_registry_protected_containers_delete
end
diff --git a/spec/services/packages/nuget/create_or_update_package_service_spec.rb b/spec/services/packages/nuget/create_or_update_package_service_spec.rb
index b144162b8df..d7a3530d91d 100644
--- a/spec/services/packages/nuget/create_or_update_package_service_spec.rb
+++ b/spec/services/packages/nuget/create_or_update_package_service_spec.rb
@@ -206,14 +206,6 @@ RSpec.describe Packages::Nuget::CreateOrUpdatePackageService, feature_category:
.and not_change { ::Packages::DependencyLink.count }
.and not_change { ::Packages::Nuget::DependencyLinkMetadatum.count }
end
-
- context 'when feature flag :packages_protected_packages_nuget is disabled' do
- before do
- stub_feature_flags(packages_protected_packages_nuget: false)
- end
-
- it_behaves_like 'valid package'
- end
end
shared_examples 'protected package from deploy token' do
diff --git a/spec/services/packages/nuget/update_package_from_metadata_service_spec.rb b/spec/services/packages/nuget/update_package_from_metadata_service_spec.rb
index b79c3b84d63..8c87f5b52f2 100644
--- a/spec/services/packages/nuget/update_package_from_metadata_service_spec.rb
+++ b/spec/services/packages/nuget/update_package_from_metadata_service_spec.rb
@@ -385,14 +385,6 @@ RSpec.describe Packages::Nuget::UpdatePackageFromMetadataService, :clean_gitlab_
shared_examples 'protected package' do
it_behaves_like 'raising an', described_class::ProtectedPackageError, with_message: "Package 'DummyProject.DummyPackage' with version '1.0.0' is protected"
-
- context 'when feature flag :packages_protected_packages_nuget is disabled' do
- before do
- stub_feature_flags(packages_protected_packages_nuget: false)
- end
-
- it_behaves_like 'updates package and package file and creates metadatum'
- end
end
where(:package_name_pattern, :minimum_access_level_for_push, :package_creator, :package_publishing_actor, :shared_examples_name) do
diff --git a/spec/support/shared_examples/requests/api/nuget_packages_shared_examples.rb b/spec/support/shared_examples/requests/api/nuget_packages_shared_examples.rb
index 3692b8208d1..1bde2f7985a 100644
--- a/spec/support/shared_examples/requests/api/nuget_packages_shared_examples.rb
+++ b/spec/support/shared_examples/requests/api/nuget_packages_shared_examples.rb
@@ -899,14 +899,6 @@ RSpec.shared_examples 'nuget upload endpoint' do |symbol_package: false|
expect(json_response).to include 'message' => '403 Forbidden - Package protected.'
end
-
- context 'when feature flag :packages_protected_packages_nuget is disabled' do
- before do
- stub_feature_flags(packages_protected_packages_nuget: false)
- end
-
- it_behaves_like 'successful nuget upload'
- end
end
context 'for personal access token' do
diff --git a/spec/tasks/gitlab/db_rake_spec.rb b/spec/tasks/gitlab/db_rake_spec.rb
index 04a51557783..1e906f89e1f 100644
--- a/spec/tasks/gitlab/db_rake_spec.rb
+++ b/spec/tasks/gitlab/db_rake_spec.rb
@@ -698,6 +698,83 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout, feature_categor
end
end
+ describe 'repair_index' do
+ context 'with a single database' do
+ before do
+ skip_if_multiple_databases_are_setup
+ end
+
+ it 'calls Gitlab::Database::RepairIndex with correct arguments' do
+ logger_double = instance_double(Logger, level: nil, info: nil, warn: nil, error: nil)
+ allow(Logger).to receive(:new).with($stdout).and_return(logger_double)
+
+ expect(Gitlab::Database::RepairIndex).to receive(:run)
+ .with(logger: logger_double, dry_run: false)
+
+ run_rake_task('gitlab:db:repair_index')
+ end
+
+ it 'respects DRY_RUN environment variable' do
+ stub_env('DRY_RUN', true)
+ logger_double = instance_double(Logger, level: nil, info: nil, warn: nil, error: nil)
+ allow(Logger).to receive(:new).with($stdout).and_return(logger_double)
+
+ expect(Gitlab::Database::RepairIndex).to receive(:run)
+ .with(logger: logger_double, dry_run: true)
+
+ run_rake_task('gitlab:db:repair_index')
+ end
+ end
+
+ context 'with multiple databases' do
+ let(:logger_double) { instance_double(Logger, level: nil, info: nil, warn: nil, error: nil) }
+
+ before do
+ skip_if_multiple_databases_not_setup(:ci)
+
+ allow(Logger).to receive(:new).with($stdout).and_return(logger_double)
+ end
+
+ it 'calls Gitlab::Database::RepairIndex with correct arguments' do
+ expect(Gitlab::Database::RepairIndex).to receive(:run)
+ .with(logger: logger_double, dry_run: false)
+
+ run_rake_task('gitlab:db:repair_index')
+ end
+
+ context 'when the single database task is used' do
+ before do
+ skip_if_shared_database(:ci)
+ end
+
+ it 'calls Gitlab::Database::RepairIndex with the main database' do
+ expect(Gitlab::Database::RepairIndex).to receive(:run)
+ .with(database_name: 'main', logger: logger_double, dry_run: false)
+
+ run_rake_task('gitlab:db:repair_index:main')
+ end
+
+ it 'calls Gitlab::Database::RepairIndex with the ci database' do
+ expect(Gitlab::Database::RepairIndex).to receive(:run)
+ .with(database_name: 'ci', logger: logger_double, dry_run: false)
+
+ run_rake_task('gitlab:db:repair_index:ci')
+ end
+ end
+
+ context 'with geo configured' do
+ before do
+ skip_unless_geo_configured
+ end
+
+ it 'does not create a task for the geo database' do
+ expect { run_rake_task('gitlab:db:repair_index:geo') }
+ .to raise_error(/Don't know how to build task 'gitlab:db:repair_index:geo'/)
+ end
+ end
+ end
+ end
+
describe 'dictionary generate' do
let(:db_config) { instance_double(ActiveRecord::DatabaseConfigurations::HashConfig, name: 'fake_db') }
diff --git a/spec/workers/packages/nuget/extraction_worker_spec.rb b/spec/workers/packages/nuget/extraction_worker_spec.rb
index 950dbb61897..98bb7230344 100644
--- a/spec/workers/packages/nuget/extraction_worker_spec.rb
+++ b/spec/workers/packages/nuget/extraction_worker_spec.rb
@@ -95,14 +95,6 @@ RSpec.describe Packages::Nuget::ExtractionWorker, type: :worker, feature_categor
it_behaves_like 'handling error',
error_class: ::Packages::Nuget::UpdatePackageFromMetadataService::ProtectedPackageError,
error_message: "Package 'DummyProject.DummyPackage' with version '1.0.0' is protected"
-
- context 'when feature flag :packages_protected_packages_nuget is disabled' do
- before do
- stub_feature_flags(packages_protected_packages_nuget: false)
- end
-
- it_behaves_like 'updates package and package file'
- end
end
where(:package_name_pattern, :minimum_access_level_for_push, :package_creator, :params, :shared_examples_name) do