diff --git a/CHANGELOG.md b/CHANGELOG.md index f40100aa2650e..ede53510733ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix Bug - Handle unsigned long in sorting order assertion of LongHashSet ([#17207](https://github.com/opensearch-project/OpenSearch/pull/17207)) - Implemented computation of segment replication stats at shard level ([#17055](https://github.com/opensearch-project/OpenSearch/pull/17055)) - [Rule Based Auto-tagging] Add in-memory attribute value store ([#17342](https://github.com/opensearch-project/OpenSearch/pull/17342)) +- Rename the search node role to warm ([#17573](https://github.com/opensearch-project/OpenSearch/pull/17573)) ### Dependencies - Bump `org.awaitility:awaitility` from 4.2.0 to 4.3.0 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230), [#17439](https://github.com/opensearch-project/OpenSearch/pull/17439)) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index 9058dc2f5b147..4dd5e7b74c96d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -896,7 +896,7 @@ private Map getExpectedCounts( expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), clusterManagerRoleCount); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), ingestRoleCount); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), remoteClusterClientRoleCount); - expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), searchRoleCount); + expectedCounts.put(DiscoveryNodeRole.WARM_ROLE.roleName(), searchRoleCount); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, coordinatingOnlyCount); return expectedCounts; } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index 35b8bdf3dafe5..3ba885812bcfc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -195,7 +195,7 @@ public void testClusterInfoServiceCollectsInformation() { final Map nodeFileCacheStats = info.nodeFileCacheStats; assertNotNull(nodeFileCacheStats); - assertThat("file cache is empty on non search nodes", nodeFileCacheStats.size(), Matchers.equalTo(0)); + assertThat("file cache is empty on non warm nodes", nodeFileCacheStats.size(), Matchers.equalTo(0)); ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getClusterManagerName()); ClusterState state = clusterService.state(); @@ -216,7 +216,7 @@ public void testClusterInfoServiceCollectsInformation() { public void testClusterInfoServiceCollectsFileCacheInformation() { internalCluster().startNodes(1); - internalCluster().ensureAtLeastNumSearchAndDataNodes(2); + internalCluster().ensureAtLeastNumWarmAndDataNodes(2); InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the cluster-manager node @@ -229,7 +229,7 @@ public void testClusterInfoServiceCollectsFileCacheInformation() { assertNotNull("info should not be null", info);
final Map nodeFileCacheStats = info.nodeFileCacheStats; assertNotNull(nodeFileCacheStats); - assertThat("file cache is enabled on both search nodes", nodeFileCacheStats.size(), Matchers.equalTo(2)); + assertThat("file cache is enabled on both warm nodes", nodeFileCacheStats.size(), Matchers.equalTo(2)); for (FileCacheStats fileCacheStats : nodeFileCacheStats.values()) { assertThat("file cache is non empty", fileCacheStats.getTotal().getBytes(), greaterThan(0L)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java index d7f1c2209f798..e7933989d4302 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java @@ -143,7 +143,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) - .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + .put(Node.NODE_WARM_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) .build(); } @@ -177,7 +177,7 @@ public void teardown() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17526") public void testRestartPrimary_NoReplicas() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); @@ -197,10 +197,10 @@ public void testRestartPrimary_NoReplicas() throws Exception { } public void testPrimaryStopped_ReplicaPromoted() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -229,7 +229,7 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); // start another node, index another doc and replicate. 
- String nodeC = internalCluster().startDataAndSearchNodes(1).get(0); + String nodeC = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME).setId("4").setSource("baz", "baz").get(); refresh(INDEX_NAME); @@ -239,10 +239,10 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { } public void testRestartPrimary() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); assertEquals(getNodeContainingPrimaryShard().getName(), primary); @@ -266,10 +266,10 @@ public void testRestartPrimary() throws Exception { public void testCancelPrimaryAllocation() throws Exception { // this test cancels allocation on the primary - promoting the new replica and recreating the former primary as a replica. - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final int initialDocCount = 1; @@ -296,8 +296,8 @@ public void testCancelPrimaryAllocation() throws Exception { } public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); final Settings settings = Settings.builder() .put(indexSettings()) .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(new ArrayList<>(CODECS) { @@ -340,8 +340,8 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { } public void testIndexReopenClose() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -374,7 +374,7 @@ public void testIndexReopenClose() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17526") public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); ensureGreen(INDEX_NAME); @@ -397,7 +397,7 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { .prepareUpdateSettings(INDEX_NAME) .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) ); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String 
replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); @@ -420,9 +420,9 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17527") public void testReplicationPostDeleteAndForceMerge() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final int initialDocCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < initialDocCount; i++) { @@ -480,8 +480,8 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { } public void testScrollWithConcurrentIndexAndSearch() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); final List> pendingIndexResponses = new ArrayList<>(); @@ -539,8 +539,8 @@ public void testMultipleShards() throws Exception { .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, indexSettings); ensureGreen(INDEX_NAME); @@ -585,8 +585,8 @@ public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception } private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -638,11 +638,11 @@ private void performReplicationAfterForceMerge(boolean primaryOnly, int expected public void testClosedIndices() { List nodes = new ArrayList<>(); // start 1st node so that it contains the primary - nodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + nodes.add(internalCluster().startDataAndWarmNodes(1).get(0)); createIndex(INDEX_NAME, super.indexSettings()); ensureYellowAndNoInitializingShards(INDEX_NAME); // start 2nd node so that it contains the replica - nodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + nodes.add(internalCluster().startDataAndWarmNodes(1).get(0)); ensureGreen(INDEX_NAME); logger.info("--> Close index"); @@ -657,7 +657,7 @@ public void testClosedIndices() { * @throws Exception when issue is encountered */ public void testNodeDropWithOngoingReplication() throws Exception { - final String primaryNode = 
internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex( INDEX_NAME, Settings.builder() @@ -668,7 +668,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { .build() ); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); // Get replica allocation id @@ -724,11 +724,11 @@ public void testNodeDropWithOngoingReplication() throws Exception { } public void testCancellation() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance( SegmentReplicationSourceService.class, @@ -776,8 +776,8 @@ public void testCancellation() throws Exception { @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testDeleteOperations() throws Exception { - final String nodeA = internalCluster().startDataAndSearchNodes(1).get(0); - final String nodeB = internalCluster().startDataAndSearchNodes(1).get(0); + final String nodeA = internalCluster().startDataAndWarmNodes(1).get(0); + final String nodeB = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -818,10 +818,10 @@ public void testDeleteOperations() throws Exception { } public void testUpdateOperations() throws Exception { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final int initialDocCount = scaledRandomIntBetween(1, 5); @@ -870,9 +870,9 @@ public void testDropPrimaryDuringReplication() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replica_count) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, settings); - final List dataNodes = internalCluster().startDataAndSearchNodes(6); + final List dataNodes = internalCluster().startDataAndWarmNodes(6); ensureGreen(INDEX_NAME); int initialDocCount = scaledRandomIntBetween(5, 10); @@ -896,7 +896,7 @@ public void testDropPrimaryDuringReplication() throws Exception { ensureYellow(INDEX_NAME); // start another replica. 
- dataNodes.add(internalCluster().startDataAndSearchNodes(1).get(0)); + dataNodes.add(internalCluster().startDataAndWarmNodes(1).get(0)); ensureGreen(INDEX_NAME); waitForSearchableDocs(initialDocCount, dataNodes); @@ -913,10 +913,10 @@ public void testDropPrimaryDuringReplication() throws Exception { @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testReplicaHasDiffFilesThanPrimary() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); @@ -970,10 +970,10 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17527") public void testPressureServiceStats() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); int initialDocCount = scaledRandomIntBetween(10, 20); @@ -1036,7 +1036,7 @@ public void testPressureServiceStats() throws Exception { assertTrue(replicaStats.isEmpty()); // start another replica. - String replicaNode_2 = internalCluster().startDataAndSearchNodes(1).get(0); + String replicaNode_2 = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final IndexShard secondReplicaShard = getIndexShard(replicaNode_2, INDEX_NAME); final String second_replica_aid = secondReplicaShard.routingEntry().allocationId().getId(); @@ -1078,7 +1078,7 @@ private void assertAllocationIdsInReplicaShardStats(Set expected, Set nodes = new ArrayList<>(); - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(primaryNode); final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); // start a replica node, initially will be empty with no shard assignment. - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(replicaNode); // index a doc. 
@@ -1215,7 +1215,7 @@ public void testPrimaryReceivesDocsDuringReplicaRecovery() throws Exception { } public void testIndexWhileRecoveringReplica() throws Exception { - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( prepareCreate(INDEX_NAME).setMapping( jsonBuilder().startObject() @@ -1239,7 +1239,7 @@ public void testIndexWhileRecoveringReplica() throws Exception { ) ); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); client().prepareIndex(INDEX_NAME) .setId("1") @@ -1290,13 +1290,13 @@ public void testIndexWhileRecoveringReplica() throws Exception { * Tests whether segment replication supports realtime get requests and reads and parses source from the translog to serve strong reads. */ public void testRealtimeGetRequestsSuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime get assertAcked( prepareCreate(INDEX_NAME).setSettings(Settings.builder().put("index.refresh_interval", -1).put(indexSettings())) .addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1328,13 +1328,13 @@ public void testRealtimeGetRequestsSuccessful() { } public void testRealtimeGetRequestsUnsuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( prepareCreate(INDEX_NAME).setSettings( Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ).addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1362,14 +1362,14 @@ public void testRealtimeGetRequestsUnsuccessful() { * Tests whether segment replication supports realtime MultiGet requests and reads and parses source from the translog to serve strong reads. 
*/ public void testRealtimeMultiGetRequestsSuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime multi get assertAcked( prepareCreate(INDEX_NAME).setSettings( Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ).addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1414,13 +1414,13 @@ public void testRealtimeMultiGetRequestsSuccessful() { } public void testRealtimeMultiGetRequestsUnsuccessful() { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( prepareCreate(INDEX_NAME).setSettings( Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ).addAlias(new Alias("alias")) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1460,7 +1460,7 @@ public void testRealtimeMultiGetRequestsUnsuccessful() { * Tests whether segment replication supports realtime termvector requests and reads and parses source from the translog to serve strong reads. */ public void testRealtimeTermVectorRequestsSuccessful() throws IOException { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); XContentBuilder mapping = jsonBuilder().startObject() .startObject("properties") .startObject("field") @@ -1482,7 +1482,7 @@ public void testRealtimeTermVectorRequestsSuccessful() throws IOException { .putList("index.analysis.analyzer.tv_test.filter", "lowercase") ) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); @@ -1537,7 +1537,7 @@ public void testRealtimeTermVectorRequestsSuccessful() throws IOException { } public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { - final String primary = internalCluster().startDataAndSearchNodes(1).get(0); + final String primary = internalCluster().startDataAndWarmNodes(1).get(0); XContentBuilder mapping = jsonBuilder().startObject() .startObject("properties") .startObject("field") @@ -1561,7 +1561,7 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) ) ); - final String replica = internalCluster().startDataAndSearchNodes(1).get(0); + final String replica = internalCluster().startDataAndWarmNodes(1).get(0); ensureGreen(INDEX_NAME); final String id = routingKeyForShard(INDEX_NAME, 0); final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); @@ -1607,15 +1607,15 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { public void testReplicaAlreadyAtCheckpoint() throws Exception { final List nodes = new 
ArrayList<>(); - final String primaryNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String primaryNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(primaryNode); final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); // start a replica node, initially will be empty with no shard assignment. - final String replicaNode = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode = internalCluster().startDataAndWarmNodes(1).get(0); nodes.add(replicaNode); - final String replicaNode2 = internalCluster().startDataAndSearchNodes(1).get(0); + final String replicaNode2 = internalCluster().startDataAndWarmNodes(1).get(0); assertAcked( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index 88c9ae436e85f..5ba24d8512b61 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -72,7 +72,7 @@ protected Settings nodeSettings(int nodeOrdinal) { ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + .put(Node.NODE_WARM_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) .build(); } @@ -80,7 +80,7 @@ public void testWritableWarmFeatureFlagDisabled() { Settings clusterSettings = Settings.builder().put(super.nodeSettings(0)).put(FeatureFlags.TIERED_REMOTE_INDEX, false).build(); InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(clusterSettings); - internalTestCluster.startDataAndSearchNodes(1); + internalTestCluster.startDataAndWarmNodes(1); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -105,7 +105,7 @@ public void testWritableWarmFeatureFlagDisabled() { public void testWritableWarmBasic() throws Exception { InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(); - internalTestCluster.startDataAndSearchNodes(1); + internalTestCluster.startDataAndWarmNodes(1); Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 278328d61f544..5cc4d0706888e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -107,7 +107,7 @@ private Settings.Builder chunkedRepositorySettings(long chunkSize) { /** * Tests a happy path scenario for searchable snapshots by creating 2 indices, * taking a snapshot, restoring them as searchable snapshots. - * Ensures availability of sufficient data nodes and search capable nodes. + * Ensures availability of sufficient data nodes and warm capable nodes. 
*/ public void testCreateSearchableSnapshot() throws Exception { final String snapshotName = "test-snap"; @@ -128,7 +128,7 @@ public void testCreateSearchableSnapshot() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); deleteIndicesAndEnsureGreen(client, indexName1, indexName2); - internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + internalCluster().ensureAtLeastNumWarmNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName1, restoredIndexName2); @@ -153,7 +153,7 @@ public void testSnapshottingSearchableSnapshots() throws Exception { deleteIndicesAndEnsureGreen(client, indexName); // restore the index as a searchable snapshot - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); client.admin() .cluster() .prepareRestoreSnapshot(repoName, initSnapName) @@ -235,7 +235,7 @@ public void testCreateSearchableSnapshotWithDefaultChunks() throws Exception { Settings.Builder repositorySettings = chunkedRepositorySettings(2 << 23); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); createRepositoryWithSettings(repositorySettings, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -261,7 +261,7 @@ public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { Settings.Builder repositorySettings = chunkedRepositorySettings(1000); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); createRepositoryWithSettings(repositorySettings, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -276,7 +276,7 @@ public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { /** * Tests the functionality of remote shard allocation to * ensure it can assign remote shards to a node with local shards given it has the - * search role capabilities. + * warm role capabilities. 
*/ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() throws Exception { final int numReplicasIndex = randomIntBetween(1, 4); @@ -286,7 +286,7 @@ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() final String snapshotName = "test-snap"; final Client client = client(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -306,21 +306,21 @@ public void testSearchableSnapshotAllocationFilterSettings() throws Exception { final String snapshotName = "test-snap"; final Client client = client(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numShardsIndex); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numShardsIndex); createIndexWithDocsAndEnsureGreen(numShardsIndex, 1, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); - final Set searchNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) - .filter(DiscoveryNode::isSearchNode) + final Set warmNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) + .filter(DiscoveryNode::isWarmNode) .map(DiscoveryNode::getId) .collect(Collectors.toSet()); - for (int i = searchNodes.size(); i > 2; --i) { - String pickedNode = randomFrom(searchNodes); - searchNodes.remove(pickedNode); + for (int i = warmNodes.size(); i > 2; --i) { + String pickedNode = randomFrom(warmNodes); + warmNodes.remove(pickedNode); assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, true); assertTrue( client.admin() @@ -357,7 +357,7 @@ private void assertIndexAssignedToNodeOrNot(String index, String node, boolean a /** * Tests the functionality of remote shard allocation to * ensure it can handle node drops for failover scenarios and the cluster gets back to a healthy state when - * nodes with search capabilities are added back to the cluster. + * nodes with warm capabilities are added back to the cluster. 
*/ public void testSearchableSnapshotAllocationForFailoverAndRecovery() throws Exception { final int numReplicasIndex = 1; @@ -374,27 +374,27 @@ public void testSearchableSnapshotAllocationForFailoverAndRecovery() throws Exce takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); assertDocCount(restoredIndexName, 100L); - logger.info("--> stop a random search node"); - internalCluster().stopRandomSearchNode(); + logger.info("--> stop a random warm node"); + internalCluster().stopRandomWarmNode(); ensureYellow(restoredIndexName); assertDocCount(restoredIndexName, 100L); - logger.info("--> stop the last search node"); - internalCluster().stopRandomSearchNode(); + logger.info("--> stop the last warm node"); + internalCluster().stopRandomWarmNode(); ensureRed(restoredIndexName); - logger.info("--> add 3 new search nodes"); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 2); + logger.info("--> add 3 new warm nodes"); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex + 2); ensureGreen(restoredIndexName); assertDocCount(restoredIndexName, 100); - logger.info("--> stop a random search node"); - internalCluster().stopRandomSearchNode(); + logger.info("--> stop a random warm node"); + internalCluster().stopRandomWarmNode(); ensureGreen(restoredIndexName); assertDocCount(restoredIndexName, 100); } @@ -414,7 +414,7 @@ public void testSearchableSnapshotIndexIsReadOnly() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); @@ -435,7 +435,7 @@ public void testDeleteSearchableSnapshotBackingIndexThrowsException() throws Exc createRepositoryWithSettings(null, repoName); createIndexWithDocsAndEnsureGreen(0, 100, indexName); takeSnapshot(client, snapshotName, repoName, indexName); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertThrows( SnapshotInUseDeletionException.class, @@ -455,7 +455,7 @@ public void testDeleteSearchableSnapshotBackingIndex() throws Exception { createIndexWithDocsAndEnsureGreen(0, 100, indexName2); takeSnapshot(client, snapshotName1, repoName, indexName1); takeSnapshot(client, snapshotName2, repoName, indexName2); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName2, repoName); client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName1)).actionGet(); } @@ -561,7 +561,7 @@ public void testUpdateIndexSettings() throws InterruptedException { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(1); + internalCluster().ensureAtLeastNumWarmNodes(1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); @@ -618,7 +618,7 @@ public void 
testFileCacheStats() throws Exception { deleteIndicesAndEnsureGreen(client, indexName1); assertAllNodesFileCacheEmpty(); - internalCluster().ensureAtLeastNumSearchNodes(numNodes); + internalCluster().ensureAtLeastNumWarmNodes(numNodes); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertNodesFileCacheNonEmpty(numNodes); } @@ -644,7 +644,7 @@ public void testFileCacheRestore() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 1); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); @@ -653,7 +653,7 @@ public void testFileCacheRestore() throws Exception { NodesStatsResponse preRestoreStats = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet(); for (NodeStats nodeStats : preRestoreStats.getNodes()) { - if (nodeStats.getNode().isSearchNode()) { + if (nodeStats.getNode().isWarmNode()) { internalCluster().restartNode(nodeStats.getNode().getName()); } } @@ -664,7 +664,7 @@ public void testFileCacheRestore() throws Exception { for (String node : postRestoreStatsMap.keySet()) { NodeStats preRestoreStat = preRestoreStatsMap.get(node); NodeStats postRestoreStat = postRestoreStatsMap.get(node); - if (preRestoreStat.getNode().isSearchNode()) { + if (preRestoreStat.getNode().isWarmNode()) { assertEquals(preRestoreStat.getFileCacheStats().getUsed(), postRestoreStat.getFileCacheStats().getUsed()); } } @@ -723,7 +723,7 @@ private void assertNodesFileCacheNonEmpty(int numNodes) { int nonEmptyFileCacheNodes = 0; for (NodeStats stats : response.getNodes()) { FileCacheStats fcStats = stats.getFileCacheStats(); - if (stats.getNode().isSearchNode()) { + if (stats.getNode().isWarmNode()) { if (!isFileCacheEmpty(fcStats)) { nonEmptyFileCacheNodes++; } @@ -747,7 +747,7 @@ public void testPruneFileCacheOnIndexDeletion() throws Exception { final Client client = client(); final int numNodes = 2; - internalCluster().ensureAtLeastNumSearchAndDataNodes(numNodes); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numNodes); createIndexWithDocsAndEnsureGreen(1, 100, indexName1); createRepositoryWithSettings(null, repoName); @@ -763,7 +763,7 @@ public void testPruneFileCacheOnIndexDeletion() throws Exception { } /** - * Test scenario that checks the cache folder location on search nodes for the restored index on snapshot restoration + * Test scenario that checks the cache folder location on warm nodes for the restored index on snapshot restoration * and ensures the index folder is cleared on all nodes post index deletion */ public void testCacheIndexFilesClearedOnDelete() throws Exception { @@ -775,7 +775,7 @@ public void testCacheIndexFilesClearedOnDelete() throws Exception { final String snapshotName = "test-snap"; final Client client = client(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(numShards); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numShards); createIndexWithDocsAndEnsureGreen(numReplicas, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -796,7 +796,7 @@ public void testCacheIndexFilesClearedOnDelete() throws Exception { } /** - * Test scenario that validates that the default search preference for searchable snapshot + * Test scenario that validates that the default warm preference for 
searchable snapshot * is primary shards */ public void testDefaultShardPreference() throws Exception { @@ -808,7 +808,7 @@ public void testDefaultShardPreference() throws Exception { final Client client = client(); // Create an index, snapshot and restore as a searchable snapshot index - internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicas + 1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(numReplicas + 1); createIndexWithDocsAndEnsureGreen(numReplicas, 100, indexName); createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName); @@ -861,7 +861,7 @@ public void testRestoreSearchableSnapshotWithIndexStoreTypeThrowsException() thr takeSnapshot(client, snapshotName, repoName, indexName1); deleteIndicesAndEnsureGreen(client, indexName1); - internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex1 + 1); + internalCluster().ensureAtLeastNumWarmNodes(numReplicasIndex1 + 1); // set "index.store.type" to "remote_snapshot" in index settings of restore API and assert appropriate exception with error message // is thrown. @@ -898,8 +898,8 @@ private void assertCacheDirectoryReplicaAndIndexCount(int numCacheFolderCount, i // Get the available NodeEnvironment instances Iterable nodes = internalCluster().getInstances(Node.class); - // Filter out search NodeEnvironment(s) since FileCache is initialized only on search nodes and - // collect the path for all the cache locations on search nodes. + // Filter out warm NodeEnvironment(s) since FileCache is initialized only on warm nodes and + // collect the path for all the cache locations on warm nodes. List searchNodeFileCachePaths = StreamSupport.stream(nodes.spliterator(), false) .filter(node -> node.fileCache() != null) .map(node -> node.getNodeEnvironment().fileCacheNodePath().fileCachePath) @@ -931,12 +931,12 @@ public void testRelocateSearchableSnapshotIndex() throws Exception { takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - String searchNode1 = internalCluster().startSearchOnlyNodes(1).get(0); + String searchNode1 = internalCluster().startWarmOnlyNodes(1).get(0); internalCluster().validateClusterFormed(); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertRemoteSnapshotIndexSettings(client, restoredIndexName); - String searchNode2 = internalCluster().startSearchOnlyNodes(1).get(0); + String searchNode2 = internalCluster().startWarmOnlyNodes(1).get(0); internalCluster().validateClusterFormed(); final Index index = resolveIndex(restoredIndexName); @@ -995,7 +995,7 @@ public void testCreateSearchableSnapshotWithSpecifiedRemoteDataRatio() throws Ex createRepositoryWithSettings(null, repoName); takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); - internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + internalCluster().ensureAtLeastNumWarmNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertDocCount(restoredIndexName1, 100L); @@ -1014,22 +1014,22 @@ public void cleanup() throws Exception { } public void testStartSearchNode() throws Exception { - // test start dedicated search node - internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.SEARCH_ROLE))); - // test start node without search role + // test start dedicated warm node + internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.WARM_ROLE))); + // test start node without warm 
role internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.DATA_ROLE))); - // test start non-dedicated search node, if the user doesn't configure the cache size, it fails + // test start non-dedicated warm node, if the user doesn't configure the cache size, it fails assertThrows( SettingsException.class, () -> internalCluster().startNode( - Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) + Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.WARM_ROLE, DiscoveryNodeRole.DATA_ROLE))) ) ); - // test start non-dedicated search node + // test start non-dedicated warm node assertThrows( SettingsException.class, () -> internalCluster().startNode( - Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) + Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.WARM_ROLE, DiscoveryNodeRole.DATA_ROLE))) ) ); } diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index 0e0b4e9be261a..8285f361ee220 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -758,15 +758,15 @@ public BootstrapCheckResult check(BootstrapContext context) { } /** - * Bootstrap check that if a search node contains multiple data paths + * Bootstrap check that if a warm node contains multiple data paths */ static class MultipleDataPathCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (NodeRoleSettings.NODE_ROLES_SETTING.get(context.settings()).contains(DiscoveryNodeRole.SEARCH_ROLE) + if (NodeRoleSettings.NODE_ROLES_SETTING.get(context.settings()).contains(DiscoveryNodeRole.WARM_ROLE) && Environment.PATH_DATA_SETTING.get(context.settings()).size() > 1) { - return BootstrapCheckResult.failure("Multiple data paths are not allowed for search nodes"); + return BootstrapCheckResult.failure("Multiple data paths are not allowed for warm nodes"); } return BootstrapCheckResult.success(); } diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 4121e56fae0f5..804325dc1f565 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -274,7 +274,7 @@ public void onResponse(NodesStatsResponse nodesStatsResponse) { nodeFileCacheStats = Collections.unmodifiableMap( nodesStatsResponse.getNodes() .stream() - .filter(nodeStats -> nodeStats.getNode().isSearchNode()) + .filter(nodeStats -> nodeStats.getNode().isWarmNode()) .collect(Collectors.toMap(nodeStats -> nodeStats.getNode().getId(), NodeStats::getFileCacheStats)) ); } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 12cdafdcdbf1b..d8233d38bc246 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -122,12 +122,12 @@ public static boolean isRemoteClusterClient(final Settings settings) { return hasRole(settings, DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); } - public static boolean isSearchNode(Settings settings) { - return hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE); 
+ public static boolean isWarmNode(Settings settings) { + return hasRole(settings, DiscoveryNodeRole.WARM_ROLE); } - public static boolean isDedicatedSearchNode(Settings settings) { - return getRolesFromSettings(settings).stream().allMatch(DiscoveryNodeRole.SEARCH_ROLE::equals); + public static boolean isDedicatedWarmNode(Settings settings) { + return getRolesFromSettings(settings).stream().allMatch(DiscoveryNodeRole.WARM_ROLE::equals); } private final String nodeName; @@ -480,12 +480,12 @@ public boolean isRemoteClusterClient() { } /** - * Returns whether the node is dedicated to provide search capability. + * Returns whether the node is dedicated to providing searchable snapshots. * - * @return true if the node contains search role, false otherwise + * @return true if the node contains the warm role, false otherwise */ - public boolean isSearchNode() { - return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + public boolean isWarmNode() { + return roles.contains(DiscoveryNodeRole.WARM_ROLE); } /** diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 0d2b08656c38d..04bffd6b41761 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -298,13 +298,13 @@ public Setting legacySetting() { }; /** - * Represents the role for a search node, which is dedicated to provide search capability. + * Represents the role for a warm node, which is dedicated to providing searchable snapshots. */ - public static final DiscoveryNodeRole SEARCH_ROLE = new DiscoveryNodeRole("search", "s", true) { + public static final DiscoveryNodeRole WARM_ROLE = new DiscoveryNodeRole("warm", "w", true) { @Override public Setting legacySetting() { - // search role is added in 2.4 so doesn't need to configure legacy setting + // warm role is added in 2.4 so doesn't need to configure legacy setting return null; } @@ -314,7 +314,7 @@ public Setting legacySetting() { * The built-in node roles.
*/ public static SortedSet BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, SEARCH_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, WARM_ROLE)) ); /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java index 647e993339476..2241d030bd3a4 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java @@ -37,7 +37,7 @@ public static RoutingPool getNodePool(RoutingNode node) { * Helps to determine the appropriate {@link RoutingPool} for a given node from the {@link DiscoveryNode} */ public static RoutingPool getNodePool(DiscoveryNode node) { - if (node.isSearchNode()) { + if (node.isWarmNode()) { return REMOTE_CAPABLE; } return LOCAL_ONLY; diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c1ce90aaa8efa..8323578fe6f7c 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -709,7 +709,7 @@ public void apply(Settings value, Settings current, Settings previous) { ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING, // Settings related to Searchable Snapshots - Node.NODE_SEARCH_CACHE_SIZE_SETTING, + Node.NODE_WARM_CACHE_SIZE_SETTING, FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, // Settings related to Remote Refresh Segment Pressure diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 5c6e44d063dd7..9560bfc547480 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -388,7 +388,7 @@ public NodeEnvironment(Settings settings, Environment environment, IndexStoreLis ensureNoShardData(nodePaths); } - if (DiscoveryNode.isSearchNode(settings) == false) { + if (DiscoveryNode.isWarmNode(settings) == false) { ensureNoFileCacheData(fileCacheNodePath); } @@ -1202,15 +1202,15 @@ private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { } /** - * Throws an exception if cache exists on a non-search node. + * Throws an exception if cache exists on a non-warm node. */ private void ensureNoFileCacheData(final NodePath fileCacheNodePath) throws IOException { List cacheDataPaths = collectFileCacheDataPath(fileCacheNodePath); if (cacheDataPaths.isEmpty() == false) { final String message = String.format( Locale.ROOT, - "node does not have the %s role but has data within node search cache: %s. Use 'opensearch-node repurpose' tool to clean up", - DiscoveryNodeRole.SEARCH_ROLE.roleName(), + "node does not have the %s role but has data within node warm cache: %s. 
Use 'opensearch-node repurpose' tool to clean up", + DiscoveryNodeRole.WARM_ROLE.roleName(), cacheDataPaths ); throw new IllegalStateException(message); diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index 3a8996afed34e..1d68829ececfd 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -69,14 +69,14 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand { static final String ABORTED_BY_USER_MSG = OpenSearchNodeCommand.ABORTED_BY_USER_MSG; static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = OpenSearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG; - static final String NO_CLEANUP = "Node has node.data=true and node.search=true -> no clean up necessary"; + static final String NO_CLEANUP = "Node has node.data=true and node.warm=true -> no clean up necessary"; static final String NO_DATA_TO_CLEAN_UP_FOUND = "No data to clean-up found"; static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found"; static final String NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND = "No file cache to clean-up found"; private static final int FILE_CACHE_NODE_PATH_LOCATION = 0; public NodeRepurposeCommand() { - super("Repurpose this node to another cluster-manager/data/search role, cleaning up any excess persisted data"); + super("Repurpose this node to another cluster-manager/data/warm role, cleaning up any excess persisted data"); } void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception { @@ -86,7 +86,7 @@ void testExecute(Terminal terminal, OptionSet options, Environment env) throws E @Override protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); - if (DiscoveryNode.isDataNode(settings) && DiscoveryNode.isSearchNode(settings)) { + if (DiscoveryNode.isDataNode(settings) && DiscoveryNode.isWarmNode(settings)) { terminal.println(Terminal.Verbosity.NORMAL, NO_CLEANUP); return false; } @@ -97,15 +97,15 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { @Override protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLockId, OptionSet options, Environment env) throws IOException { - assert DiscoveryNode.isDataNode(env.settings()) == false || DiscoveryNode.isSearchNode(env.settings()) == false; + assert DiscoveryNode.isDataNode(env.settings()) == false || DiscoveryNode.isWarmNode(env.settings()) == false; boolean repurposeData = DiscoveryNode.isDataNode(env.settings()) == false; - boolean repurposeSearch = DiscoveryNode.isSearchNode(env.settings()) == false; + boolean repurposeWarm = DiscoveryNode.isWarmNode(env.settings()) == false; if (DiscoveryNode.isClusterManagerNode(env.settings()) == false) { - processNoClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeSearch); + processNoClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeWarm); } else { - processClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeSearch); + processClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeWarm); } } @@ -170,13 +170,13 @@ private void processNoClusterManagerRepurposeNode( if (repurposeData && repurposeSearch) { terminal.println( - "Node is being re-purposed as no-cluster-manager, no-data and no-search. Clean-up of index data and file cache will be performed." 
+ "Node is being re-purposed as no-cluster-manager, no-data and no-warm. Clean-up of index data and file cache will be performed." ); } else if (repurposeData) { terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed."); } else if (repurposeSearch) { terminal.println( - "Node is being re-purposed as no-cluster-manager and no-search. Clean-up of file cache and corresponding index metadata will be performed." + "Node is being re-purposed as no-cluster-manager and no-warm. Clean-up of file cache and corresponding index metadata will be performed." ); } confirm(terminal, "Do you want to proceed?"); @@ -194,11 +194,11 @@ private void processNoClusterManagerRepurposeNode( } if (repurposeData && repurposeSearch) { - terminal.println("Node successfully repurposed to no-cluster-manager, no-data and no-search."); + terminal.println("Node successfully repurposed to no-cluster-manager, no-data and no-warm."); } else if (repurposeData) { terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); } else if (repurposeSearch) { - terminal.println("Node successfully repurposed to no-cluster-manager and no-search."); + terminal.println("Node successfully repurposed to no-cluster-manager and no-warm."); } } @@ -252,12 +252,12 @@ private void processClusterManagerRepurposeNode( if (repurposeData && repurposeSearch) { terminal.println( - "Node is being re-purposed as cluster-manager, no-data and no-search. Clean-up of shard data and file cache data will be performed." + "Node is being re-purposed as cluster-manager, no-data and no-warm. Clean-up of shard data and file cache data will be performed." ); } else if (repurposeData) { terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed."); } else if (repurposeSearch) { - terminal.println("Node is being re-purposed as cluster-manager and no-search. Clean-up of file cache data will be performed."); + terminal.println("Node is being re-purposed as cluster-manager and no-warm. 
Clean-up of file cache data will be performed."); } confirm(terminal, "Do you want to proceed?"); @@ -271,11 +271,11 @@ private void processClusterManagerRepurposeNode( } if (repurposeData && repurposeSearch) { - terminal.println("Node successfully repurposed to cluster-manager, no-data and no-search."); + terminal.println("Node successfully repurposed to cluster-manager, no-data and no-warm."); } else if (repurposeData) { terminal.println("Node successfully repurposed to cluster-manager and no-data."); } else if (repurposeSearch) { - terminal.println("Node successfully repurposed to cluster-manager and no-search."); + terminal.println("Node successfully repurposed to cluster-manager and no-warm."); } } diff --git a/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java b/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java index 2de50f4d4295d..6501ef6e588bd 100644 --- a/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java +++ b/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java @@ -56,7 +56,7 @@ public static TieringValidationResult validateHotToWarm( final DiskThresholdSettings diskThresholdSettings ) { final String indexNames = concreteIndices.stream().map(Index::getName).collect(Collectors.joining(", ")); - validateSearchNodes(currentState, indexNames); + validateWarmNodes(currentState, indexNames); validateDiskThresholdWaterMarkNotBreached(currentState, clusterInfo, diskThresholdSettings, indexNames); final TieringValidationResult tieringValidationResult = new TieringValidationResult(concreteIndices); @@ -91,18 +91,18 @@ public static TieringValidationResult validateHotToWarm( } /** - * Validates that there are eligible nodes with the search role in the current cluster state. + * Validates that there are eligible nodes with the warm role in the current cluster state. * (only for the dedicated case - to be removed later) * * @param currentState the current cluster state * @param indexNames the names of the indices being validated - * @throws IllegalArgumentException if there are no eligible search nodes in the cluster + * @throws IllegalArgumentException if there are no eligible warm nodes in the cluster */ - static void validateSearchNodes(final ClusterState currentState, final String indexNames) { + static void validateWarmNodes(final ClusterState currentState, final String indexNames) { if (getEligibleNodes(currentState).isEmpty()) { final String errorMsg = "Rejecting tiering request for indices [" + indexNames - + "] because there are no nodes found with the search role"; + + "] because there are no nodes found with the warm role"; logger.warn(errorMsg); throw new IllegalArgumentException(errorMsg); } @@ -183,7 +183,7 @@ static void validateDiskThresholdWaterMarkNotBreached( } } throw new IllegalArgumentException( - "Disk threshold low watermark is breached on all the search nodes, rejecting tiering request for indices: " + indexNames + "Disk threshold low watermark is breached on all the warm nodes, rejecting tiering request for indices: " + indexNames ); } @@ -265,13 +265,13 @@ static long getTotalAvailableBytesInWarmTier(final Map usages } /** - * Retrieves the set of eligible(search) nodes from the current cluster state. + * Retrieves the set of eligible(warm) nodes from the current cluster state. 
* * @param currentState the current cluster state * @return the set of eligible nodes */ static Set getEligibleNodes(final ClusterState currentState) { final Map nodes = currentState.getNodes().getDataNodes(); - return nodes.values().stream().filter(DiscoveryNode::isSearchNode).collect(Collectors.toSet()); + return nodes.values().stream().filter(DiscoveryNode::isWarmNode).collect(Collectors.toSet()); } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 222c6e8ba36c4..aaf50681dd636 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -399,9 +399,9 @@ public class Node implements Closeable { private static final String ZERO = "0"; - public static final Setting NODE_SEARCH_CACHE_SIZE_SETTING = new Setting<>( + public static final Setting NODE_WARM_CACHE_SIZE_SETTING = new Setting<>( "node.search.cache.size", - s -> (DiscoveryNode.isDedicatedSearchNode(s)) ? "80%" : ZERO, + s -> (DiscoveryNode.isDedicatedWarmNode(s)) ? "80%" : ZERO, Node::validateFileCacheSize, Property.NodeScope ); @@ -556,8 +556,8 @@ protected Node( .map(IndexStorePlugin::getIndexStoreListener) .filter(Optional::isPresent) .map(Optional::get); - // FileCache is only initialized on search nodes, so we only create FileCacheCleaner on search nodes as well - if (DiscoveryNode.isSearchNode(settings) == false) { + // FileCache is only initialized on warm nodes, so we only create FileCacheCleaner on warm nodes as well + if (DiscoveryNode.isWarmNode(settings) == false) { nodeEnvironment = new NodeEnvironment( settings, environment, @@ -2164,26 +2164,26 @@ DiscoveryNode getNode() { } /** - * Initializes the search cache with a defined capacity. - * The capacity of the cache is based on user configuration for {@link Node#NODE_SEARCH_CACHE_SIZE_SETTING}. - * If the user doesn't configure the cache size, it fails if the node is a data + search node. - * Else it configures the size to 80% of total capacity for a dedicated search node, if not explicitly defined. + * Initializes the warm cache with a defined capacity. + * The capacity of the cache is based on user configuration for {@link Node#NODE_WARM_CACHE_SIZE_SETTING}. + * If the user doesn't configure the cache size, it fails if the node is a data + warm node. + * Else it configures the size to 80% of total capacity for a dedicated warm node, if not explicitly defined. 
*/ private void initializeFileCache(Settings settings, CircuitBreaker circuitBreaker) throws IOException { - if (DiscoveryNode.isSearchNode(settings) == false) { + if (DiscoveryNode.isWarmNode(settings) == false) { return; } - String capacityRaw = NODE_SEARCH_CACHE_SIZE_SETTING.get(settings); + String capacityRaw = NODE_WARM_CACHE_SIZE_SETTING.get(settings); logger.info("cache size [{}]", capacityRaw); if (capacityRaw.equals(ZERO)) { throw new SettingsException( "Unable to initialize the " - + DiscoveryNodeRole.SEARCH_ROLE.roleName() + + DiscoveryNodeRole.WARM_ROLE.roleName() + "-" + DiscoveryNodeRole.DATA_ROLE.roleName() + " node: Missing value for configuration " - + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() + + NODE_WARM_CACHE_SIZE_SETTING.getKey() ); } @@ -2206,7 +2206,7 @@ private static long calculateFileCacheSize(String capacityRaw, long totalSpace) return Math.round(totalSpace * ratioValue.getAsRatio()); } catch (OpenSearchParseException e) { try { - return ByteSizeValue.parseBytesSizeValue(capacityRaw, NODE_SEARCH_CACHE_SIZE_SETTING.getKey()).getBytes(); + return ByteSizeValue.parseBytesSizeValue(capacityRaw, NODE_WARM_CACHE_SIZE_SETTING.getKey()).getBytes(); } catch (OpenSearchParseException ex) { ex.addSuppressed(e); throw ex; @@ -2220,7 +2220,7 @@ private static String validateFileCacheSize(String capacityRaw) { } /** - * Returns the {@link FileCache} instance for remote search node + * Returns the {@link FileCache} instance for remote warm node * Note: Visible for testing */ public FileCache fileCache() { diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 89403b15f6aca..6833aa975ba2a 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -131,7 +131,7 @@ import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; -import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; +import static org.opensearch.node.Node.NODE_WARM_CACHE_SIZE_SETTING; /** * Service responsible for restoring snapshots @@ -919,7 +919,7 @@ private void validateSearchableSnapshotRestorable(long totalRestorableRemoteInde throw new SnapshotRestoreException( snapshot, "Size of the indexes to be restored exceeds the file cache bounds. Increase the file cache capacity on the cluster nodes using " - + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() + + NODE_WARM_CACHE_SIZE_SETTING.getKey() + " setting." 
); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java index 10273366af804..6d23ddaa4b8d0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java @@ -56,7 +56,7 @@ protected Collection> nodePlugins() { @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); - internalCluster().ensureAtLeastNumSearchAndDataNodes(1); + internalCluster().ensureAtLeastNumWarmAndDataNodes(1); long bytes = new ByteSizeValue(1000, ByteSizeUnit.KB).getBytes(); final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); clusterInfoService.setDiskUsageFunctionAndRefresh((discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, bytes, bytes - 1)); diff --git a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java index 69102d2e76bef..b3e912ebd6e9c 100644 --- a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java @@ -786,9 +786,9 @@ public void testMultipleDataPathsForSearchNodeCheck() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> performDataPathsCheck(paths, DiscoveryNodeRole.SEARCH_ROLE.roleName()) + () -> performDataPathsCheck(paths, DiscoveryNodeRole.WARM_ROLE.roleName()) ); - assertThat(e.getMessage(), containsString("Multiple data paths are not allowed for search nodes")); + assertThat(e.getMessage(), containsString("Multiple data paths are not allowed for warm nodes")); } public void testMultipleDataPathsForDataNodeCheck() throws NodeValidationException { @@ -802,7 +802,7 @@ public void testSingleDataPathForSearchNodeCheck() throws NodeValidationExceptio Path path = PathUtils.get(createTempDir().toString()); String[] paths = new String[] { path.resolve("a").toString() }; - performDataPathsCheck(paths, DiscoveryNodeRole.SEARCH_ROLE.roleName()); + performDataPathsCheck(paths, DiscoveryNodeRole.WARM_ROLE.roleName()); } private void performDataPathsCheck(String[] paths, String roleName) throws NodeValidationException { diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 6550ed39e8042..40fcb648bea7a 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -56,9 +56,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.opensearch.test.NodeRoles.nonRemoteClusterClientNode; -import static org.opensearch.test.NodeRoles.nonSearchNode; +import static org.opensearch.test.NodeRoles.nonWarmNode; import static org.opensearch.test.NodeRoles.remoteClusterClientNode; -import static org.opensearch.test.NodeRoles.searchNode; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; @@ -235,12 +234,12 @@ public void testDiscoveryNodeIsRemoteClusterClientUnset() { 
runTestDiscoveryNodeIsRemoteClusterClient(nonRemoteClusterClientNode(), false); } - public void testDiscoveryNodeIsSearchSet() { - runTestDiscoveryNodeIsSearch(searchNode(), true); + public void testDiscoveryNodeIsWarmSet() { + runTestDiscoveryNodeIsWarm(NodeRoles.warmNode(), true); } - public void testDiscoveryNodeIsSearchUnset() { - runTestDiscoveryNodeIsSearch(nonSearchNode(), false); + public void testDiscoveryNodeIsWarmUnset() { + runTestDiscoveryNodeIsWarm(nonWarmNode(), false); } // Added in 2.0 temporarily, validate the MASTER_ROLE is in the list of known roles. @@ -262,13 +261,13 @@ private void runTestDiscoveryNodeIsRemoteClusterClient(final Settings settings, } } - private void runTestDiscoveryNodeIsSearch(final Settings settings, final boolean expected) { + private void runTestDiscoveryNodeIsWarm(final Settings settings, final boolean expected) { final DiscoveryNode node = DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node"); - assertThat(node.isSearchNode(), equalTo(expected)); + assertThat(node.isWarmNode(), equalTo(expected)); if (expected) { - assertThat(node.getRoles(), hasItem(DiscoveryNodeRole.SEARCH_ROLE)); + assertThat(node.getRoles(), hasItem(DiscoveryNodeRole.WARM_ROLE)); } else { - assertThat(node.getRoles(), not(hasItem(DiscoveryNodeRole.SEARCH_ROLE))); + assertThat(node.getRoles(), not(hasItem(DiscoveryNodeRole.WARM_ROLE))); } } @@ -283,9 +282,9 @@ public void testGetRoleFromRoleNameIsCaseInsensitive() { assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleNameAbbreviation()); } - public void testDiscoveryNodeIsSearchNode() { - final Settings settingWithSearchRole = NodeRoles.onlyRole(DiscoveryNodeRole.SEARCH_ROLE); - final DiscoveryNode node = DiscoveryNode.createLocal(settingWithSearchRole, buildNewFakeTransportAddress(), "node"); - assertThat(node.isSearchNode(), equalTo(true)); + public void testDiscoveryNodeIsWarmNode() { + final Settings settingWithWarmRole = NodeRoles.onlyRole(DiscoveryNodeRole.WARM_ROLE); + final DiscoveryNode node = DiscoveryNode.createLocal(settingWithWarmRole, buildNewFakeTransportAddress(), "node"); + assertThat(node.isWarmNode(), equalTo(true)); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index a7f18aabf8436..523a5f59a81c5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -55,12 +55,12 @@ public abstract class RemoteShardsBalancerBaseTestCase extends OpenSearchAllocat DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE ); - protected static final Set SEARCH_DATA_ROLES = Set.of( + protected static final Set WARM_DATA_ROLES = Set.of( DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE, - DiscoveryNodeRole.SEARCH_ROLE + DiscoveryNodeRole.WARM_ROLE ); - protected static final Set SEARCH_ONLY_ROLE = Set.of(DiscoveryNodeRole.SEARCH_ROLE); + protected static final Set WARM_ONLY_ROLE = Set.of(DiscoveryNodeRole.WARM_ROLE); protected static final int PRIMARIES = 5; protected static final int REPLICAS = 1; @@ -146,12 +146,12 @@ public ClusterState createInitialCluster(int localOnlyNodes, int remoteNodes, bo if (remoteOnly) { for (int i = 0; i < remoteNodes; i++) { String name = getNodeId(i, true); - 
nb.add(newNode(name, name, SEARCH_ONLY_ROLE)); + nb.add(newNode(name, name, WARM_ONLY_ROLE)); } } else { for (int i = 0; i < remoteNodes; i++) { String name = getNodeId(i, true); - nb.add(newNode(name, name, SEARCH_DATA_ROLES)); + nb.add(newNode(name, name, WARM_DATA_ROLES)); } } DiscoveryNodes nodes = nb.build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 94e91c3f7c3c1..2f20c7c09a42b 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -335,12 +335,12 @@ public void testDiskThresholdForRemoteShards() { .routingTable(initialRoutingTable) .build(); - Set defaultWithSearchRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); - defaultWithSearchRole.add(DiscoveryNodeRole.SEARCH_ROLE); + Set defaultWithWarmRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); + defaultWithWarmRole.add(DiscoveryNodeRole.WARM_ROLE); logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1", defaultWithSearchRole)).add(newNode("node2", defaultWithSearchRole))) + .nodes(DiscoveryNodes.builder().add(newNode("node1", defaultWithWarmRole)).add(newNode("node2", defaultWithWarmRole))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); logShardStates(clusterState); @@ -401,8 +401,8 @@ public void testFileCacheRemoteShardsDecisions() { final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); - Set defaultWithSearchRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); - defaultWithSearchRole.add(DiscoveryNodeRole.SEARCH_ROLE); + Set defaultWithWarmRole = new HashSet<>(CLUSTER_MANAGER_DATA_ROLES); + defaultWithWarmRole.add(DiscoveryNodeRole.WARM_ROLE); DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() @@ -415,14 +415,14 @@ public void testFileCacheRemoteShardsDecisions() { "node1", buildNewFakeTransportAddress(), emptyMap(), - defaultWithSearchRole, + defaultWithWarmRole, Version.CURRENT ); DiscoveryNode discoveryNode2 = new DiscoveryNode( "node2", buildNewFakeTransportAddress(), emptyMap(), - defaultWithSearchRole, + defaultWithWarmRole, Version.CURRENT ); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java index 052c7877404a8..9e25e86ec0797 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java @@ -175,7 +175,7 @@ public void testTargetPoolDedicatedSearchNodeAllocationDecisions() { assertEquals(Decision.NO.type(), deciders.canAllocate(remoteShard, localOnlyNode, globalAllocation).type()); assertEquals(Decision.NO.type(), deciders.canAllocate(remoteIdx, localOnlyNode, globalAllocation).type()); assertEquals(Decision.NO.type(), deciders.canForceAllocatePrimary(unassignedRemoteShard, localOnlyNode, globalAllocation).type()); 
- // A dedicated search node should not accept local shards and indices. + // A dedicated warm node should not accept local shards and indices. assertEquals(Decision.NO.type(), deciders.canAllocate(localShard, remoteCapableNode, globalAllocation).type()); assertEquals(Decision.NO.type(), deciders.canAllocate(localIdx, remoteCapableNode, globalAllocation).type()); assertEquals( diff --git a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java index 190ad3283dcfc..36213d07a3681 100644 --- a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java @@ -598,8 +598,8 @@ public void testSearchableSnapshotPreference() { .nodes( DiscoveryNodes.builder() .add(newNode("node1", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) - .add(newNode("node2", Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE))) - .add(newNode("node3", Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE))) + .add(newNode("node2", Collections.singleton(DiscoveryNodeRole.WARM_ROLE))) + .add(newNode("node3", Collections.singleton(DiscoveryNodeRole.WARM_ROLE))) .localNodeId("node1") ) .build(); diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index d2d6fdc387dfe..e8a690c8fe744 100644 --- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -66,7 +66,7 @@ import static org.opensearch.env.NodeRepurposeCommand.NO_DATA_TO_CLEAN_UP_FOUND; import static org.opensearch.env.NodeRepurposeCommand.NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND; import static org.opensearch.env.NodeRepurposeCommand.NO_SHARD_DATA_TO_CLEAN_UP_FOUND; -import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; +import static org.opensearch.node.Node.NODE_WARM_CACHE_SIZE_SETTING; import static org.opensearch.test.NodeRoles.addRoles; import static org.opensearch.test.NodeRoles.clusterManagerNode; import static org.opensearch.test.NodeRoles.nonClusterManagerNode; @@ -81,29 +81,29 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase { private static final Index INDEX = new Index("testIndex", "testUUID"); private Settings dataClusterManagerSettings; - private Settings dataSearchClusterManagerSettings; + private Settings dataWarmClusterManagerSettings; private Environment environment; private Path[] nodePaths; - private Settings dataSearchNoClusterManagerSettings; + private Settings dataWarmNoClusterManagerSettings; private Settings noDataNoClusterManagerSettings; private Settings noDataClusterManagerSettings; - private Settings searchNoDataNoClusterManagerSettings; - private Settings noSearchNoClusterManagerSettings; + private Settings warmNoDataNoClusterManagerSettings; + private Settings noWarmNoClusterManagerSettings; @Before public void createNodePaths() throws IOException { dataClusterManagerSettings = buildEnvSettings(Settings.EMPTY); - Settings defaultSearchSettings = Settings.builder() + Settings defaultWarmSettings = Settings.builder() .put(dataClusterManagerSettings) - .put(NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), new ByteSizeValue(16, ByteSizeUnit.GB).toString()) + .put(NODE_WARM_CACHE_SIZE_SETTING.getKey(), new ByteSizeValue(16, ByteSizeUnit.GB).toString()) .build(); - searchNoDataNoClusterManagerSettings 
= onlyRole(dataClusterManagerSettings, DiscoveryNodeRole.SEARCH_ROLE); - dataSearchClusterManagerSettings = addRoles(defaultSearchSettings, Set.of(DiscoveryNodeRole.SEARCH_ROLE)); + warmNoDataNoClusterManagerSettings = onlyRole(dataClusterManagerSettings, DiscoveryNodeRole.WARM_ROLE); + dataWarmClusterManagerSettings = addRoles(defaultWarmSettings, Set.of(DiscoveryNodeRole.WARM_ROLE)); noDataClusterManagerSettings = clusterManagerNode(nonDataNode(dataClusterManagerSettings)); - dataSearchNoClusterManagerSettings = nonClusterManagerNode(dataSearchClusterManagerSettings); - noSearchNoClusterManagerSettings = nonClusterManagerNode(defaultSearchSettings); + dataWarmNoClusterManagerSettings = nonClusterManagerNode(dataWarmClusterManagerSettings); + noWarmNoClusterManagerSettings = nonClusterManagerNode(defaultWarmSettings); noDataNoClusterManagerSettings = removeRoles( dataClusterManagerSettings, @@ -132,8 +132,8 @@ public void createNodePaths() throws IOException { public void testEarlyExitNoCleanup() throws Exception { createIndexDataFiles(dataClusterManagerSettings, randomInt(10), randomBoolean()); - verifyNoQuestions(dataSearchClusterManagerSettings, containsString(NO_CLEANUP)); - verifyNoQuestions(dataSearchNoClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataWarmClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataWarmNoClusterManagerSettings, containsString(NO_CLEANUP)); } public void testNothingToCleanup() throws Exception { @@ -156,7 +156,7 @@ public void testNothingToCleanup() throws Exception { verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - verifyNoQuestions(noSearchNoClusterManagerSettings, containsString(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noWarmNoClusterManagerSettings, containsString(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND)); createIndexDataFiles(dataClusterManagerSettings, 0, randomBoolean()); @@ -227,11 +227,11 @@ public void testCleanupShardData() throws Exception { new NodeEnvironment(noDataClusterManagerSettings, environment).close(); } - public void testCleanupSearchNode() throws Exception { + public void testCleanupWarmNode() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(searchNoDataNoClusterManagerSettings, shardCount, hasClusterState, true); + createIndexDataFiles(warmNoDataNoClusterManagerSettings, shardCount, hasClusterState, true); Matcher matcher = allOf( containsString(NodeRepurposeCommand.shardMessage(shardCount, 1)), @@ -251,11 +251,11 @@ public void testCleanupSearchNode() throws Exception { new NodeEnvironment(dataClusterManagerSettings, environment).close(); } - public void testCleanupSearchClusterManager() throws Exception { + public void testCleanupWarmClusterManager() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, true); + createIndexDataFiles(dataWarmClusterManagerSettings, shardCount, hasClusterState, true); String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, shardCount, 0); @@ -266,23 +266,23 @@ public void testCleanupSearchClusterManager() throws Exception { conditionalNot(containsString("no name for uuid: testUUID"), verbose == false 
|| hasClusterState) ); - verifyUnchangedOnAbort(noSearchNoClusterManagerSettings, matcher, verbose); + verifyUnchangedOnAbort(noWarmNoClusterManagerSettings, matcher, verbose); // verify test setup - expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noSearchNoClusterManagerSettings, environment).close()); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noWarmNoClusterManagerSettings, environment).close()); - verifySuccess(noSearchNoClusterManagerSettings, matcher, verbose); + verifySuccess(noWarmNoClusterManagerSettings, matcher, verbose); // verify clean. - new NodeEnvironment(noSearchNoClusterManagerSettings, environment).close(); + new NodeEnvironment(noWarmNoClusterManagerSettings, environment).close(); } public void testCleanupAll() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, false); - createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, true); + createIndexDataFiles(dataWarmClusterManagerSettings, shardCount, hasClusterState, false); + createIndexDataFiles(dataWarmClusterManagerSettings, shardCount, hasClusterState, true); // environment.dataFiles().length * shardCount will account for the local shard files // + shardCount will account for the additional file cache shard files. diff --git a/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java b/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java index 6b6f74353812b..a20d4debb3959 100644 --- a/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java @@ -46,27 +46,27 @@ import static org.opensearch.indices.tiering.TieringRequestValidator.validateIndexHealth; import static org.opensearch.indices.tiering.TieringRequestValidator.validateOpenIndex; import static org.opensearch.indices.tiering.TieringRequestValidator.validateRemoteStoreIndex; -import static org.opensearch.indices.tiering.TieringRequestValidator.validateSearchNodes; +import static org.opensearch.indices.tiering.TieringRequestValidator.validateWarmNodes; public class TieringRequestValidatorTests extends OpenSearchTestCase { - public void testValidateSearchNodes() { - ClusterState clusterStateWithSearchNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + public void testValidateWarmNodes() { + ClusterState clusterStateWithWarmNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(createNodes(2, 0, 0)) .build(); // throws no errors - validateSearchNodes(clusterStateWithSearchNodes, "test_index"); + validateWarmNodes(clusterStateWithWarmNodes, "test_index"); } - public void testWithNoSearchNodesInCluster() { - ClusterState clusterStateWithNoSearchNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + public void testWithNoWarmNodesInCluster() { + ClusterState clusterStateWithNoWarmNodes = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(createNodes(0, 1, 1)) .build(); // throws error IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateSearchNodes(clusterStateWithNoSearchNodes, "test") + () -> validateWarmNodes(clusterStateWithNoWarmNodes, "test") ); } @@ 
-212,7 +212,7 @@ public void testValidateEligibleNodesCapacityWithAllRejected() { public void testGetTotalAvailableBytesInWarmTier() { Map diskUsages = diskUsages(2, 500, 100); - assertEquals(200, getTotalAvailableBytesInWarmTier(diskUsages, Set.of("node-s0", "node-s1"))); + assertEquals(200, getTotalAvailableBytesInWarmTier(diskUsages, Set.of("node-w0", "node-w1"))); } public void testEligibleNodes() { @@ -254,15 +254,15 @@ private static Settings createDefaultIndexSettings(String indexUuid) { .build(); } - private DiscoveryNodes createNodes(int numOfSearchNodes, int numOfDataNodes, int numOfIngestNodes) { + private DiscoveryNodes createNodes(int numOfWarmNodes, int numOfDataNodes, int numOfIngestNodes) { DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); - for (int i = 0; i < numOfSearchNodes; i++) { + for (int i = 0; i < numOfWarmNodes; i++) { discoveryNodesBuilder.add( new DiscoveryNode( - "node-s" + i, + "node-w" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), - Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE), + Collections.singleton(DiscoveryNodeRole.WARM_ROLE), Version.CURRENT ) ); @@ -308,10 +308,10 @@ private static ClusterInfo clusterInfo(int noOfNodes, long totalBytes, long free return new ClusterInfo(diskUsages, null, null, null, Map.of(), Map.of()); } - private static Map diskUsages(int noOfSearchNodes, long totalBytes, long freeBytes) { + private static Map diskUsages(int noOfWarmNodes, long totalBytes, long freeBytes) { final Map diskUsages = new HashMap<>(); - for (int i = 0; i < noOfSearchNodes; i++) { - diskUsages.put("node-s" + i, new DiskUsage("node-s" + i, "node-s" + i, "/foo/bar", totalBytes, freeBytes)); + for (int i = 0; i < noOfWarmNodes; i++) { + diskUsages.put("node-w" + i, new DiskUsage("node-w" + i, "node-w" + i, "/foo/bar", totalBytes, freeBytes)); } return diskUsages; } diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index 489abd4bbca04..c259a326c95e3 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -376,17 +376,17 @@ public void testCreateWithCircuitBreakerPlugins() throws IOException { } public void testCreateWithFileCache() throws Exception { - Settings searchRoleSettings = addRoles(baseSettings().build(), Set.of(DiscoveryNodeRole.SEARCH_ROLE)); + Settings warmRoleSettings = addRoles(baseSettings().build(), Set.of(DiscoveryNodeRole.WARM_ROLE)); List> plugins = basePlugins(); ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); - Settings searchRoleSettingsWithConfig = baseSettings().put(searchRoleSettings) - .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + Settings warmRoleSettingsWithConfig = baseSettings().put(warmRoleSettings) + .put(Node.NODE_WARM_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) .build(); - Settings onlySearchRoleSettings = Settings.builder() - .put(searchRoleSettingsWithConfig) + Settings onlyWarmRoleSettings = Settings.builder() + .put(warmRoleSettingsWithConfig) .put( NodeRoles.removeRoles( - searchRoleSettingsWithConfig, + warmRoleSettingsWithConfig, Set.of( DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, @@ -398,22 +398,22 @@ public void testCreateWithFileCache() throws Exception { .build(); // Test exception thrown with configuration missing - assertThrows(SettingsException.class, () -> new MockNode(searchRoleSettings, plugins)); + assertThrows(SettingsException.class, 
() -> new MockNode(warmRoleSettings, plugins)); // Test file cache is initialized - try (MockNode mockNode = new MockNode(searchRoleSettingsWithConfig, plugins)) { + try (MockNode mockNode = new MockNode(warmRoleSettingsWithConfig, plugins)) { NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); } - // Test data + search node with defined cache size - try (MockNode mockNode = new MockNode(searchRoleSettingsWithConfig, plugins)) { + // Test data + warm node with defined cache size + try (MockNode mockNode = new MockNode(warmRoleSettingsWithConfig, plugins)) { NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); } - // Test dedicated search node with no configuration - try (MockNode mockNode = new MockNode(onlySearchRoleSettings, plugins)) { + // Test dedicated warm node with no configuration + try (MockNode mockNode = new MockNode(onlyWarmRoleSettings, plugins)) { NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); assertTrue(fileCacheNodePath.fileCacheReservedSize.getBytes() > 0); FsProbe fsProbe = new FsProbe(mockNode.getNodeEnvironment(), mockNode.fileCache()); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4f0600588daef..40ba066097507 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -277,7 +277,7 @@ import static org.opensearch.env.Environment.PATH_HOME_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.node.Node.NODE_NAME_SETTING; -import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; +import static org.opensearch.node.Node.NODE_WARM_CACHE_SIZE_SETTING; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; @@ -520,7 +520,7 @@ public void testSearchableSnapshotOverSubscription() { throwable.getMessage() .contains( "Size of the indexes to be restored exceeds the file cache bounds. Increase the file cache capacity on the cluster nodes using " - + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() + + NODE_WARM_CACHE_SIZE_SETTING.getKey() + " setting." 
) ); @@ -1654,8 +1654,8 @@ private void setupTestCluster(int clusterManagerNodes, int dataNodes) { startCluster(); } - private void setupTestCluster(int clusterManagerNodes, int dataNodes, int searchNodes) { - testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes, searchNodes); + private void setupTestCluster(int clusterManagerNodes, int dataNodes, int warmNodes) { + testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes, warmNodes); startCluster(); } @@ -1735,7 +1735,7 @@ private final class TestClusterNodes { this(clusterManagerNodes, dataNodes, 0); } - TestClusterNodes(int clusterManagerNodes, int dataNodes, int searchNodes) { + TestClusterNodes(int clusterManagerNodes, int dataNodes, int warmNodes) { for (int i = 0; i < clusterManagerNodes; ++i) { nodes.computeIfAbsent("node" + i, nodeName -> { try { @@ -1754,10 +1754,10 @@ private final class TestClusterNodes { } }); } - for (int i = 0; i < searchNodes; ++i) { - nodes.computeIfAbsent("search-node" + i, nodeName -> { + for (int i = 0; i < warmNodes; ++i) { + nodes.computeIfAbsent("warm-node" + i, nodeName -> { try { - return newSearchNode(nodeName); + return newWarmNode(nodeName); } catch (IOException e) { throw new AssertionError(e); } @@ -1781,8 +1781,8 @@ private TestClusterNode newDataNode(String nodeName) throws IOException { return newNode(nodeName, DiscoveryNodeRole.DATA_ROLE); } - private TestClusterNode newSearchNode(String nodeName) throws IOException { - return newNode(nodeName, DiscoveryNodeRole.SEARCH_ROLE); + private TestClusterNode newWarmNode(String nodeName) throws IOException { + return newNode(nodeName, DiscoveryNodeRole.WARM_ROLE); } private TestClusterNode newNode(String nodeName, DiscoveryNodeRole role) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index f9a09c088095b..2d1d6aa957ddb 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -201,14 +201,14 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - private static final Predicate SEARCH_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( + private static final Predicate WARM_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( nodeAndClient.node.settings(), - DiscoveryNodeRole.SEARCH_ROLE + DiscoveryNodeRole.WARM_ROLE ); - private static final Predicate SEARCH_AND_DATA_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( + private static final Predicate WARM_AND_DATA_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.hasRole( nodeAndClient.node.settings(), - DiscoveryNodeRole.SEARCH_ROLE + DiscoveryNodeRole.WARM_ROLE ) && DiscoveryNode.isDataNode(nodeAndClient.node.settings()); private static final Predicate NO_DATA_NO_CLUSTER_MANAGER_PREDICATE = nodeAndClient -> DiscoveryNode @@ -219,8 +219,8 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - private static final String DEFAULT_SEARCH_CACHE_SIZE_BYTES = "2gb"; - private static final String DEFAULT_SEARCH_CACHE_SIZE_PERCENT = "5%"; + private static final String DEFAULT_WARM_CACHE_SIZE_BYTES = "2gb"; + private static final String DEFAULT_WARM_CACHE_SIZE_PERCENT = "5%"; public static final int DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES = 1; public static final int DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES = 3; @@ -673,36 +673,36 @@ 
public synchronized void ensureAtLeastNumDataNodes(int n) { } /** - * Ensures that at least n search nodes are present in the cluster. + * Ensures that at least n warm nodes are present in the cluster. * if more nodes than n are present this method will not * stop any of the running nodes. */ - public synchronized void ensureAtLeastNumSearchNodes(int n) { - int size = numSearchNodes(); + public synchronized void ensureAtLeastNumWarmNodes(int n) { + int size = numWarmNodes(); if (size < n) { logger.info("increasing cluster size from {} to {}", size, n); - startNodes(n - size, Settings.builder().put(onlyRole(Settings.EMPTY, DiscoveryNodeRole.SEARCH_ROLE)).build()); + startNodes(n - size, Settings.builder().put(onlyRole(Settings.EMPTY, DiscoveryNodeRole.WARM_ROLE)).build()); validateClusterFormed(); } } /** - * Ensures that at least n data-search nodes are present in the cluster. + * Ensures that at least n data-warm nodes are present in the cluster. * if more nodes than n are present this method will not * stop any of the running nodes. */ - public synchronized void ensureAtLeastNumSearchAndDataNodes(int n) { - int size = numSearchAndDataNodes(); + public synchronized void ensureAtLeastNumWarmAndDataNodes(int n) { + int size = numWarmAndDataNodes(); if (size < n) { logger.info("increasing cluster size from {} to {}", size, n); - Set searchAndDataRoles = Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.SEARCH_ROLE); + Set warmAndDataRoles = Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.WARM_ROLE); Settings settings = Settings.builder() .put( - Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), - randomBoolean() ? DEFAULT_SEARCH_CACHE_SIZE_PERCENT : DEFAULT_SEARCH_CACHE_SIZE_BYTES + Node.NODE_WARM_CACHE_SIZE_SETTING.getKey(), + randomBoolean() ? DEFAULT_WARM_CACHE_SIZE_PERCENT : DEFAULT_WARM_CACHE_SIZE_BYTES ) .build(); - startNodes(n - size, Settings.builder().put(onlyRoles(settings, searchAndDataRoles)).build()); + startNodes(n - size, Settings.builder().put(onlyRoles(settings, warmAndDataRoles)).build()); validateClusterFormed(); } } @@ -1697,11 +1697,11 @@ public InetSocketAddress[] httpAddresses() { } /** - * Stops a random search node in the cluster. Returns true if a node was found to stop, false otherwise. + * Stops a random warm node in the cluster. Returns true if a node was found to stop, false otherwise. 
*/ - public synchronized boolean stopRandomSearchNode() throws IOException { + public synchronized boolean stopRandomWarmNode() throws IOException { ensureOpen(); - NodeAndClient nodeAndClient = getRandomNodeAndClient(SEARCH_NODE_PREDICATE); + NodeAndClient nodeAndClient = getRandomNodeAndClient(WARM_NODE_PREDICATE); if (nodeAndClient != null) { logger.info("Closing random node [{}] ", nodeAndClient.name); stopNodesAndClient(nodeAndClient); @@ -2307,15 +2307,15 @@ public List startClusterManagerOnlyNodes(int numNodes, Settings settings return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build()); } - public List startDataAndSearchNodes(int numNodes) { - return startDataAndSearchNodes(numNodes, Settings.EMPTY); + public List startDataAndWarmNodes(int numNodes) { + return startDataAndWarmNodes(numNodes, Settings.EMPTY); } - public List startDataAndSearchNodes(int numNodes, Settings settings) { - Set searchAndDataRoles = new HashSet<>(); - searchAndDataRoles.add(DiscoveryNodeRole.DATA_ROLE); - searchAndDataRoles.add(DiscoveryNodeRole.SEARCH_ROLE); - return startNodes(numNodes, Settings.builder().put(onlyRoles(settings, searchAndDataRoles)).build()); + public List startDataAndWarmNodes(int numNodes, Settings settings) { + Set warmAndDataRoles = new HashSet<>(); + warmAndDataRoles.add(DiscoveryNodeRole.DATA_ROLE); + warmAndDataRoles.add(DiscoveryNodeRole.WARM_ROLE); + return startNodes(numNodes, Settings.builder().put(onlyRoles(settings, warmAndDataRoles)).build()); } public List startDataOnlyNodes(int numNodes) { @@ -2330,12 +2330,12 @@ public List startDataOnlyNodes(int numNodes, Settings settings, Boolean return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build(), ignoreNodeJoin); } - public List startSearchOnlyNodes(int numNodes) { - return startSearchOnlyNodes(numNodes, Settings.EMPTY); + public List startWarmOnlyNodes(int numNodes) { + return startWarmOnlyNodes(numNodes, Settings.EMPTY); } - public List startSearchOnlyNodes(int numNodes, Settings settings) { - return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.SEARCH_ROLE)).build()); + public List startWarmOnlyNodes(int numNodes, Settings settings) { + return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.WARM_ROLE)).build()); } /** calculates a min cluster-manager nodes value based on the given number of cluster-manager nodes */ @@ -2382,12 +2382,12 @@ public int numDataNodes() { return dataNodeAndClients().size(); } - public int numSearchNodes() { - return searchNodeAndClients().size(); + public int numWarmNodes() { + return warmNodeAndClients().size(); } - public int numSearchAndDataNodes() { - return searchDataNodeAndClients().size(); + public int numWarmAndDataNodes() { + return warmDataNodeAndClients().size(); } @Override @@ -2443,12 +2443,12 @@ private Collection dataNodeAndClients() { return filterNodes(nodes, DATA_NODE_PREDICATE); } - private Collection searchNodeAndClients() { - return filterNodes(nodes, SEARCH_NODE_PREDICATE); + private Collection warmNodeAndClients() { + return filterNodes(nodes, WARM_NODE_PREDICATE); } - private Collection searchDataNodeAndClients() { - return filterNodes(nodes, SEARCH_AND_DATA_NODE_PREDICATE); + private Collection warmDataNodeAndClients() { + return filterNodes(nodes, WARM_AND_DATA_NODE_PREDICATE); } private static Collection filterNodes( diff --git a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java 
b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java index a3b4431c5aeb8..9c944e4aee62e 100644 --- a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java +++ b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java @@ -208,20 +208,20 @@ public static Settings nonRemoteClusterClientNode(final Settings settings) { return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); } - public static Settings searchNode() { - return searchNode(Settings.EMPTY); + public static Settings warmNode() { + return warmNode(Settings.EMPTY); } - public static Settings searchNode(final Settings settings) { - return addRoles(settings, Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE)); + public static Settings warmNode(final Settings settings) { + return addRoles(settings, Collections.singleton(DiscoveryNodeRole.WARM_ROLE)); } - public static Settings nonSearchNode() { - return nonSearchNode(Settings.EMPTY); + public static Settings nonWarmNode() { + return nonWarmNode(Settings.EMPTY); } - public static Settings nonSearchNode(final Settings settings) { - return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.SEARCH_ROLE)); + public static Settings nonWarmNode(final Settings settings) { + return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.WARM_ROLE)); } }
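Reviewer note (not part of the diff above): a minimal, hypothetical smoke-test sketch of how the renamed pieces fit together on this branch. It assumes an OpenSearchIntegTestCase-style test class (which is where internalCluster() comes from); the test name, the "2gb" cache value, and the assertions are illustrative only. Also worth calling out: only the Java constant is renamed to NODE_WARM_CACHE_SIZE_SETTING in this change; the underlying setting key string "node.search.cache.size" is left untouched by the diff.

    // Hypothetical usage sketch exercising the renamed helpers introduced by this change.
    public void testWarmRoleRenameSmoke() throws Exception {
        Settings cacheSettings = Settings.builder()
            // Renamed constant; the key it resolves to is still "node.search.cache.size" in this diff.
            .put(Node.NODE_WARM_CACHE_SIZE_SETTING.getKey(), "2gb")
            .build();

        // Start a node carrying both the data and warm roles via the renamed test-cluster helper.
        internalCluster().startDataAndWarmNodes(1, cacheSettings);
        internalCluster().ensureAtLeastNumWarmAndDataNodes(1);

        // Role wiring now goes through the warm-flavored accessors and role constant.
        Settings warmOnly = NodeRoles.onlyRole(DiscoveryNodeRole.WARM_ROLE);
        assertTrue(DiscoveryNode.isWarmNode(warmOnly));
        assertTrue(DiscoveryNode.isDedicatedWarmNode(warmOnly));
    }

The sketch mirrors the renames the patch applies to InternalTestCluster (startDataAndWarmNodes, ensureAtLeastNumWarmAndDataNodes), NodeRoles, DiscoveryNode, and Node, so any caller still compiled against the old search-flavored names would need the same mechanical rename.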