Skip to content

Commit cc24120

Browse files
committed
Merge branch 'main' into shard_routing_fix
2 parents 037aa62 + 9ba8b4f commit cc24120

File tree

99 files changed

+3186
-486
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

99 files changed

+3186
-486
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
name: "Detect Breaking Changes"
2+
on:
3+
pull_request
4+
5+
jobs:
6+
detect-breaking-change:
7+
runs-on: ubuntu-latest
8+
steps:
9+
- uses: actions/checkout@v4
10+
- uses: actions/setup-java@v4
11+
with:
12+
distribution: temurin # Temurin is a distribution of Adoptium
13+
java-version: 21
14+
- uses: gradle/gradle-build-action@v3
15+
with:
16+
cache-disabled: true
17+
arguments: japicmp
18+
gradle-version: 8.7
19+
build-root-directory: server
20+
- if: failure()
21+
run: cat server/build/reports/java-compatibility/report.txt
22+
- if: failure()
23+
uses: actions/upload-artifact@v4
24+
with:
25+
name: java-compatibility-report.html
26+
path: ${{ github.workspace }}/server/build/reports/java-compatibility/report.html
27+

CHANGELOG.md

+5
Original file line numberDiff line numberDiff line change
@@ -103,17 +103,21 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
103103

104104
## [Unreleased 2.x]
105105
### Added
106+
- Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285))
106107
- Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818))
107108
- Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768))
108109
- Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865))
109110
- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
110111
- [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954))
112+
- Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044))
113+
- Add cluster primary balance constraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656))
111114

112115
### Dependencies
113116
- Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
114117
- Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908))
115118
- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893))
116119
- Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924))
120+
- Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996))
117121

118122
### Changed
119123
- [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872))
@@ -125,6 +129,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
125129

126130
### Fixed
127131
- Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849))
132+
- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048))
128133

129134
### Security
130135

modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java

+27
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
import org.apache.lucene.search.DisjunctionMaxQuery;
4848
import org.apache.lucene.search.MatchNoDocsQuery;
4949
import org.apache.lucene.search.MultiPhraseQuery;
50+
import org.apache.lucene.search.NormsFieldExistsQuery;
5051
import org.apache.lucene.search.Query;
5152
import org.apache.lucene.search.SynonymQuery;
5253
import org.apache.lucene.search.TermQuery;
@@ -68,6 +69,7 @@
6869
import org.opensearch.index.query.MatchPhraseQueryBuilder;
6970
import org.opensearch.index.query.MultiMatchQueryBuilder;
7071
import org.opensearch.index.query.QueryShardContext;
72+
import org.opensearch.index.query.QueryStringQueryBuilder;
7173
import org.opensearch.plugins.Plugin;
7274

7375
import java.io.IOException;
@@ -541,6 +543,31 @@ public void testMatchPhrase() throws IOException {
541543
}
542544
}
543545

546+
public void testNestedExistsQuery() throws IOException {
547+
MapperService mapperService = createMapperService(mapping(b -> {
548+
b.startObject("field");
549+
{
550+
b.field("type", "object");
551+
b.startObject("properties");
552+
{
553+
b.startObject("nested_field");
554+
{
555+
b.field("type", "search_as_you_type");
556+
}
557+
b.endObject();
558+
}
559+
b.endObject();
560+
}
561+
b.endObject();
562+
}));
563+
QueryShardContext queryShardContext = createQueryShardContext(mapperService);
564+
Query actual = new QueryStringQueryBuilder("field:*").toQuery(queryShardContext);
565+
Query expected = new ConstantScoreQuery(
566+
new BooleanQuery.Builder().add(new NormsFieldExistsQuery("field.nested_field"), BooleanClause.Occur.SHOULD).build()
567+
);
568+
assertEquals(expected, actual);
569+
}
570+
544571
private static BooleanQuery buildBoolPrefixQuery(String shingleFieldName, String prefixFieldName, List<String> terms) {
545572
final BooleanQuery.Builder builder = new BooleanQuery.Builder();
546573
for (int i = 0; i < terms.size() - 1; i++) {

plugins/repository-hdfs/build.gradle

+1-1
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ dependencies {
7575
api 'commons-collections:commons-collections:3.2.2'
7676
api "org.apache.commons:commons-compress:${versions.commonscompress}"
7777
api 'org.apache.commons:commons-configuration2:2.10.1'
78-
api 'commons-io:commons-io:2.15.1'
78+
api 'commons-io:commons-io:2.16.0'
7979
api 'org.apache.commons:commons-lang3:3.14.0'
8080
implementation 'com.google.re2j:re2j:1.7'
8181
api 'javax.servlet:servlet-api:2.5'

plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1

-1
This file was deleted.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
27875a7935f1ddcc13267eb6fae1f719e0409572

server/build.gradle

+79
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ plugins {
3636
id('opensearch.publish')
3737
id('opensearch.internal-cluster-test')
3838
id('opensearch.optional-dependencies')
39+
id('me.champeau.gradle.japicmp') version '0.4.2'
3940
}
4041

4142
publishing {
@@ -378,3 +379,81 @@ tasks.named("sourcesJar").configure {
378379
duplicatesStrategy = DuplicatesStrategy.EXCLUDE
379380
}
380381
}
382+
383+
/** Compares the current build against a snapshot build */
384+
tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) {
385+
oldClasspath.from(files("${buildDir}/snapshot/opensearch-${version}.jar"))
386+
newClasspath.from(tasks.named('jar'))
387+
onlyModified = true
388+
failOnModification = true
389+
ignoreMissingClasses = true
390+
annotationIncludes = ['@org.opensearch.common.annotation.PublicApi']
391+
txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt")
392+
htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html")
393+
dependsOn downloadSnapshot
394+
}
395+
396+
/** If the Java API Comparison task failed, print a hint if the change should be merged from its target branch */
397+
gradle.taskGraph.afterTask { Task task, TaskState state ->
398+
if (task.name == 'japicmp' && state.failure != null) {
399+
def sha = getGitShaFromJar("${buildDir}/snapshot/opensearch-${version}.jar")
400+
logger.info("Incompatiable java api from snapshot jar built off of commit ${sha}")
401+
402+
if (!inHistory(sha)) {
403+
logger.warn('\u001B[33mPlease merge from the target branch and run this task again.\u001B[0m')
404+
}
405+
}
406+
}
407+
408+
/** Downloads latest snapshot from maven repository */
409+
tasks.register("downloadSnapshot", Copy) {
410+
def mavenSnapshotRepoUrl = "https://aws.oss.sonatype.org/content/repositories/snapshots/"
411+
def groupId = "org.opensearch"
412+
def artifactId = "opensearch"
413+
414+
repositories {
415+
maven {
416+
url mavenSnapshotRepoUrl
417+
}
418+
}
419+
420+
configurations {
421+
snapshotArtifact
422+
}
423+
424+
dependencies {
425+
snapshotArtifact("${groupId}:${artifactId}:${version}:")
426+
}
427+
428+
from configurations.snapshotArtifact
429+
into "$buildDir/snapshot"
430+
}
431+
432+
/** Check if the sha is in the current history */
433+
def inHistory(String sha) {
434+
try {
435+
def commandCheckSha = "git merge-base --is-ancestor ${sha} HEAD"
436+
commandCheckSha.execute()
437+
return true
438+
} catch (Exception) {
439+
return false
440+
}
441+
}
442+
443+
/** Extracts the Git SHA used to build a jar from its manifest */
444+
def getGitShaFromJar(String jarPath) {
445+
def sha = ''
446+
try {
447+
// Open the JAR file
448+
def jarFile = new java.util.jar.JarFile(jarPath)
449+
// Get the manifest from the JAR file
450+
def manifest = jarFile.manifest
451+
def attributes = manifest.mainAttributes
452+
// The Git SHA is stored under the manifest attribute named 'Change'
453+
sha = attributes.getValue('Change')
454+
jarFile.close()
455+
} catch (IOException e) {
456+
println "Failed to read the JAR file: $e.message"
457+
}
458+
return sha
459+
}

server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -715,7 +715,8 @@ public static final IndexShard newIndexShard(
715715
nodeId,
716716
null,
717717
DefaultRemoteStoreSettings.INSTANCE,
718-
false
718+
false,
719+
IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting)
719720
);
720721
}
721722

server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java

+84-3
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,9 @@
3131
import java.util.stream.Collectors;
3232

3333
import static org.opensearch.cluster.routing.ShardRoutingState.STARTED;
34+
import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE;
35+
import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE;
36+
import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER;
3437
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
3538

3639
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
@@ -58,6 +61,20 @@ public void enablePreferPrimaryBalance() {
5861
);
5962
}
6063

64+
public void setAllocationRelocationStrategy(boolean preferPrimaryBalance, boolean preferPrimaryRebalance, float buffer) {
65+
assertAcked(
66+
client().admin()
67+
.cluster()
68+
.prepareUpdateSettings()
69+
.setPersistentSettings(
70+
Settings.builder()
71+
.put(PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance)
72+
.put(PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance)
73+
.put(PRIMARY_SHARD_REBALANCE_BUFFER.getKey(), buffer)
74+
)
75+
);
76+
}
77+
6178
/**
6279
* This test verifies that the overall primary balance is attained during allocation. This test verifies primary
6380
* balance per index and across all indices is maintained.
@@ -87,7 +104,7 @@ public void testGlobalPrimaryAllocation() throws Exception {
87104
state = client().admin().cluster().prepareState().execute().actionGet().getState();
88105
logger.info(ShardAllocations.printShardDistribution(state));
89106
verifyPerIndexPrimaryBalance();
90-
verifyPrimaryBalance();
107+
verifyPrimaryBalance(0.0f);
91108
}
92109

93110
/**
@@ -224,6 +241,70 @@ public void testAllocationWithDisruption() throws Exception {
224241
verifyPerIndexPrimaryBalance();
225242
}
226243

244+
/**
245+
* Similar to testSingleIndexShardAllocation test but creates multiple indices, multiple nodes adding in and getting
246+
* removed. The test asserts post each such event that primary shard distribution is balanced for each index as well as across the nodes
247+
* when the PREFER_PRIMARY_SHARD_REBALANCE is set to true
248+
*/
249+
public void testAllocationAndRebalanceWithDisruption() throws Exception {
250+
internalCluster().startClusterManagerOnlyNode();
251+
final int maxReplicaCount = 2;
252+
final int maxShardCount = 2;
253+
// Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in
254+
// and preventing primary relocations
255+
final int nodeCount = randomIntBetween(5, 10);
256+
final int numberOfIndices = randomIntBetween(1, 10);
257+
final float buffer = randomIntBetween(1, 4) * 0.10f;
258+
259+
logger.info("--> Creating {} nodes", nodeCount);
260+
final List<String> nodeNames = new ArrayList<>();
261+
for (int i = 0; i < nodeCount; i++) {
262+
nodeNames.add(internalCluster().startNode());
263+
}
264+
setAllocationRelocationStrategy(true, true, buffer);
265+
266+
int shardCount, replicaCount;
267+
ClusterState state;
268+
for (int i = 0; i < numberOfIndices; i++) {
269+
shardCount = randomIntBetween(1, maxShardCount);
270+
replicaCount = randomIntBetween(1, maxReplicaCount);
271+
logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount);
272+
createIndex("test" + i, shardCount, replicaCount, i % 2 == 0);
273+
ensureGreen(TimeValue.timeValueSeconds(60));
274+
if (logger.isTraceEnabled()) {
275+
state = client().admin().cluster().prepareState().execute().actionGet().getState();
276+
logger.info(ShardAllocations.printShardDistribution(state));
277+
}
278+
}
279+
state = client().admin().cluster().prepareState().execute().actionGet().getState();
280+
logger.info(ShardAllocations.printShardDistribution(state));
281+
verifyPerIndexPrimaryBalance();
282+
verifyPrimaryBalance(buffer);
283+
284+
final int additionalNodeCount = randomIntBetween(1, 5);
285+
logger.info("--> Adding {} nodes", additionalNodeCount);
286+
287+
internalCluster().startNodes(additionalNodeCount);
288+
ensureGreen(TimeValue.timeValueSeconds(60));
289+
state = client().admin().cluster().prepareState().execute().actionGet().getState();
290+
logger.info(ShardAllocations.printShardDistribution(state));
291+
verifyPerIndexPrimaryBalance();
292+
verifyPrimaryBalance(buffer);
293+
294+
int nodeCountToStop = additionalNodeCount;
295+
while (nodeCountToStop > 0) {
296+
internalCluster().stopRandomDataNode();
297+
// give replica a chance to promote as primary before terminating node containing the replica
298+
ensureGreen(TimeValue.timeValueSeconds(60));
299+
nodeCountToStop--;
300+
}
301+
state = client().admin().cluster().prepareState().execute().actionGet().getState();
302+
logger.info("--> Cluster state post nodes stop {}", state);
303+
logger.info(ShardAllocations.printShardDistribution(state));
304+
verifyPerIndexPrimaryBalance();
305+
verifyPrimaryBalance(buffer);
306+
}
307+
227308
/**
228309
* Utility method which ensures cluster has balanced primary shard distribution across a single index.
229310
* @throws Exception exception
@@ -263,7 +344,7 @@ private void verifyPerIndexPrimaryBalance() throws Exception {
263344
}, 60, TimeUnit.SECONDS);
264345
}
265346

266-
private void verifyPrimaryBalance() throws Exception {
347+
private void verifyPrimaryBalance(float buffer) throws Exception {
267348
assertBusy(() -> {
268349
final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState();
269350
RoutingNodes nodes = currentState.getRoutingNodes();
@@ -278,7 +359,7 @@ private void verifyPrimaryBalance() throws Exception {
278359
.filter(ShardRouting::primary)
279360
.collect(Collectors.toList())
280361
.size();
281-
assertTrue(primaryCount <= avgPrimaryShardsPerNode);
362+
assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer)));
282363
}
283364
}, 60, TimeUnit.SECONDS);
284365
}

0 commit comments

Comments
 (0)