Commit 3f65d58

Merge branch '2.x' into backport/backport-14454-to-2.x
Signed-off-by: Pranshu Shukla <55992439+Pranshu-S@users.noreply.github.com>
2 parents: 7694036 + b408ef8

File tree: 297 files changed (+11809 / -3104 lines)


CHANGELOG.md

+8
@@ -27,16 +27,24 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895))
 - Star tree mapping changes ([#14605](https://github.com/opensearch-project/OpenSearch/pull/14605))
 - Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336))
+- Support cancellation for cat shards and node stats API.([#13966](https://github.com/opensearch-project/OpenSearch/pull/13966))
 - [Streaming Indexing] Introduce bulk HTTP API streaming flavor ([#15381](https://github.com/opensearch-project/OpenSearch/pull/15381))
+- Add support for centralize snapshot creation with pinned timestamp ([#15124](https://github.com/opensearch-project/OpenSearch/pull/15124))
 - Add concurrent search support for Derived Fields ([#15326](https://github.com/opensearch-project/OpenSearch/pull/15326))
 - [Workload Management] Add query group stats constructs ([#15343](https://github.com/opensearch-project/OpenSearch/pull/15343)))
+- Add limit on number of processors for Ingest pipeline([#15460](https://github.com/opensearch-project/OpenSearch/pull/15465)).
 - Add runAs to Subject interface and introduce IdentityAwarePlugin extension point ([#14630](https://github.com/opensearch-project/OpenSearch/pull/14630))
 - [Workload Management] Add rejection logic for co-ordinator and shard level requests ([#15428](https://github.com/opensearch-project/OpenSearch/pull/15428)))
 - Adding translog durability validation in index templates ([#15494](https://github.com/opensearch-project/OpenSearch/pull/15494))
 - [Workload Management] Add query group level failure tracking ([#15227](https://github.com/opensearch-project/OpenSearch/pull/15527))
 - [Reader Writer Separation] Add searchOnly replica routing configuration ([#15410](https://github.com/opensearch-project/OpenSearch/pull/15410))
 - Add index creation using the context field ([#15290](https://github.com/opensearch-project/OpenSearch/pull/15290))
 - [Remote Publication] Add remote download stats ([#15291](https://github.com/opensearch-project/OpenSearch/pull/15291)))
+- Add support to upload snapshot shard blobs with hashed prefix ([#15426](https://github.com/opensearch-project/OpenSearch/pull/15426))
+- Add canRemain method to TargetPoolAllocationDecider to move shards from local to remote pool for hot to warm tiering ([#15010](https://github.com/opensearch-project/OpenSearch/pull/15010))
+- Add support for pluggable deciders for concurrent search ([#15363](https://github.com/opensearch-project/OpenSearch/pull/15363))
+- Add support for comma-separated list of index names to be used with Snapshot Status API ([#15409](https://github.com/opensearch-project/OpenSearch/pull/15409))
+- [Remote Publication] Added checksum validation for cluster state behind a cluster setting ([#15218](https://github.com/opensearch-project/OpenSearch/pull/15218))
 - Optimize NodeIndicesStats output behind flag ([#14454](https://github.com/opensearch-project/OpenSearch/pull/14454))

 ### Dependencies

client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java

+3-1
@@ -230,15 +230,17 @@ public void testSnapshotsStatus() {
         Map<String, String> expectedParams = new HashMap<>();
         String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0];
         String[] snapshots = RequestConvertersTests.randomIndicesNames(1, 5);
+        String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);
         StringBuilder snapshotNames = new StringBuilder(snapshots[0]);
         for (int idx = 1; idx < snapshots.length; idx++) {
             snapshotNames.append(",").append(snapshots[idx]);
         }
         boolean ignoreUnavailable = randomBoolean();
         String endpoint = "/_snapshot/" + repository + "/" + snapshotNames.toString() + "/_status";

-        SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository, snapshots);
+        SnapshotsStatusRequest snapshotsStatusRequest = (new SnapshotsStatusRequest(repository, snapshots)).indices(indices);
         RequestConvertersTests.setRandomMasterTimeout(snapshotsStatusRequest, expectedParams);
+
         snapshotsStatusRequest.ignoreUnavailable(ignoreUnavailable);
         expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable));
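
A hedged usage sketch of the request shape this test now exercises: the high-level REST client's SnapshotsStatusRequest can carry an index filter alongside the repository and snapshot names. The repository, snapshot, and index names below are placeholders, and only the methods already visible in the diff above are assumed to exist.

    // Status of selected snapshots in a repository, narrowed to specific indices.
    SnapshotsStatusRequest request = new SnapshotsStatusRequest("my-repo", new String[] { "snapshot-1" })
        .indices(new String[] { "index-1", "index-2" });    // new in this change
    request.ignoreUnavailable(true);                        // tolerate missing snapshots instead of failing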

libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java

+6-2
@@ -633,7 +633,7 @@ public final <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer
      * @param keyWriter The key writer
      * @param valueWriter The value writer
      */
-    public final <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter) throws IOException {
+    public <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter) throws IOException {
         writeVInt(map.size());
         for (final Map.Entry<K, V> entry : map.entrySet()) {
             keyWriter.write(this, entry.getKey());
@@ -969,9 +969,13 @@ public <T extends Writeable> void writeOptionalArray(@Nullable T[] array) throws
     }

     public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException {
+        writeOptionalWriteable((out, writable) -> writable.writeTo(out), writeable);
+    }
+
+    public <T extends Writeable> void writeOptionalWriteable(final Writer<T> writer, @Nullable T writeable) throws IOException {
         if (writeable != null) {
             writeBoolean(true);
-            writeable.writeTo(this);
+            writer.write(this, writeable);
         } else {
             writeBoolean(false);
         }
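
A hedged sketch of how the new overload might be used: the existing no-argument writeOptionalWriteable now delegates to it with (out, writable) -> writable.writeTo(out), and a caller that needs a non-default wire format can supply its own Writer while StreamOutput keeps handling the present/absent boolean framing. NodeLevelStats and writeCompactTo below are hypothetical names, purely for illustration.

    // Hypothetical Writeable with an alternative compact serialization (not a real OpenSearch type).
    NodeLevelStats stats = maybeComputeStats();                        // may be null when stats are disabled
    out.writeOptionalWriteable((o, s) -> s.writeCompactTo(o), stats);  // custom writer, same optional framing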

modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java

+21-2
@@ -49,6 +49,7 @@
 import org.opensearch.common.xcontent.support.XContentMapValues;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.core.xcontent.XContentParser.Token;
+import org.opensearch.index.compositeindex.datacube.DimensionType;
 import org.opensearch.index.fielddata.FieldData;
 import org.opensearch.index.fielddata.IndexFieldData;
 import org.opensearch.index.fielddata.IndexNumericFieldData;
@@ -71,10 +72,12 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.Supplier;

 /** A {@link FieldMapper} for scaled floats. Values are internally multiplied
- * by a scaling factor and rounded to the closest long. */
+ * by a scaling factor and rounded to the closest long.
+ */
 public class ScaledFloatFieldMapper extends ParametrizedFieldMapper {

     public static final String CONTENT_TYPE = "scaled_float";
@@ -162,11 +165,21 @@ public ScaledFloatFieldMapper build(BuilderContext context) {
             );
             return new ScaledFloatFieldMapper(name, type, multiFieldsBuilder.build(this, context), copyTo.build(), this);
         }
+
+        @Override
+        public Optional<DimensionType> getSupportedDataCubeDimensionType() {
+            return Optional.of(DimensionType.NUMERIC);
+        }
+
+        @Override
+        public boolean isDataCubeMetricSupported() {
+            return true;
+        }
     }

     public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.getSettings()));

-    public static final class ScaledFloatFieldType extends SimpleMappedFieldType implements NumericPointEncoder {
+    public static final class ScaledFloatFieldType extends SimpleMappedFieldType implements NumericPointEncoder, FieldValueConverter {

         private final double scalingFactor;
         private final Double nullValue;
@@ -340,6 +353,12 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) {
         private double scale(Object input) {
             return new BigDecimal(Double.toString(parse(input))).multiply(BigDecimal.valueOf(scalingFactor)).doubleValue();
         }
+
+        @Override
+        public double toDoubleValue(long value) {
+            double inverseScalingFactor = 1d / scalingFactor;
+            return value * inverseScalingFactor;
+        }
     }

     private final Explicit<Boolean> ignoreMalformed;
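
To make the new FieldValueConverter hook concrete, a minimal round-trip sketch (plain Java, not OpenSearch code, assuming a scaling factor of 10.0 as in the tests below): scaled_float indexes the value multiplied by the scaling factor and rounded to the closest long, and toDoubleValue multiplies the stored long by the inverse factor to recover an approximate double for star-tree metrics.

    double scalingFactor = 10.0;
    double original = 123.0;
    long indexed = Math.round(original * scalingFactor);   // 1230, the long written to points and doc values
    double decoded = indexed * (1d / scalingFactor);       // 123.0, what toDoubleValue(indexed) returns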

modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java

+101-7
@@ -34,18 +34,24 @@

 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexableField;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings;
 import org.opensearch.plugins.Plugin;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;

 import static java.util.Collections.singletonList;
+import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX;
 import static org.hamcrest.Matchers.containsString;

 public class ScaledFloatFieldMapperTests extends MapperTestCase {
@@ -91,24 +97,112 @@ public void testExistsQueryDocValuesDisabled() throws IOException {
         assertParseMinimalWarnings();
     }

-    public void testDefaults() throws Exception {
-        XContentBuilder mapping = fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0));
+    @BeforeClass
+    public static void createMapper() {
+        FeatureFlags.initializeFeatureFlags(Settings.builder().put(STAR_TREE_INDEX, "true").build());
+    }
+
+    @AfterClass
+    public static void clearMapper() {
+        FeatureFlags.initializeFeatureFlags(Settings.EMPTY);
+    }
+
+    public void testScaledFloatWithStarTree() throws Exception {
+
+        double scalingFactorField1 = randomDouble() * 100;
+        double scalingFactorField2 = randomDouble() * 100;
+        double scalingFactorField3 = randomDouble() * 100;
+
+        XContentBuilder mapping = getStarTreeMappingWithScaledFloat(scalingFactorField1, scalingFactorField2, scalingFactorField3);
         DocumentMapper mapper = createDocumentMapper(mapping);
-        assertEquals(mapping.toString(), mapper.mappingSource().toString());
+        assertTrue(mapping.toString().contains("startree"));

-        ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
-        IndexableField[] fields = doc.rootDoc().getFields("field");
+        long randomLongField1 = randomLong();
+        long randomLongField2 = randomLong();
+        long randomLongField3 = randomLong();
+        ParsedDocument doc = mapper.parse(
+            source(b -> b.field("field1", randomLongField1).field("field2", randomLongField2).field("field3", randomLongField3))
+        );
+        validateScaledFloatFields(doc, "field1", randomLongField1, scalingFactorField1);
+        validateScaledFloatFields(doc, "field2", randomLongField2, scalingFactorField2);
+        validateScaledFloatFields(doc, "field3", randomLongField3, scalingFactorField3);
+    }
+
+    @Override
+    protected Settings getIndexSettings() {
+        return Settings.builder()
+            .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true)
+            .put(super.getIndexSettings())
+            .build();
+    }
+
+    private static void validateScaledFloatFields(ParsedDocument doc, String field, long value, double scalingFactor) {
+        IndexableField[] fields = doc.rootDoc().getFields(field);
         assertEquals(2, fields.length);
         IndexableField pointField = fields[0];
         assertEquals(1, pointField.fieldType().pointDimensionCount());
         assertFalse(pointField.fieldType().stored());
-        assertEquals(1230, pointField.numericValue().longValue());
+        assertEquals((long) (value * scalingFactor), pointField.numericValue().longValue());
         IndexableField dvField = fields[1];
         assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
-        assertEquals(1230, dvField.numericValue().longValue());
+        assertEquals((long) (value * scalingFactor), dvField.numericValue().longValue());
         assertFalse(dvField.fieldType().stored());
     }

+    private XContentBuilder getStarTreeMappingWithScaledFloat(
+        double scalingFactorField1,
+        double scalingFactorField2,
+        double scalingFactorField3
+    ) throws IOException {
+        return topMapping(b -> {
+            b.startObject("composite");
+            b.startObject("startree");
+            b.field("type", "star_tree");
+            b.startObject("config");
+            b.field("max_leaf_docs", 100);
+            b.startArray("ordered_dimensions");
+            b.startObject();
+            b.field("name", "field1");
+            b.endObject();
+            b.startObject();
+            b.field("name", "field2");
+            b.endObject();
+            b.endArray();
+            b.startArray("metrics");
+            b.startObject();
+            b.field("name", "field3");
+            b.startArray("stats");
+            b.value("sum");
+            b.value("value_count");
+            b.endArray();
+            b.endObject();
+            b.endArray();
+            b.endObject();
+            b.endObject();
+            b.endObject();
+            b.startObject("properties");
+            b.startObject("field1");
+            b.field("type", "scaled_float").field("scaling_factor", scalingFactorField1);
+            b.endObject();
+            b.startObject("field2");
+            b.field("type", "scaled_float").field("scaling_factor", scalingFactorField2);
+            b.endObject();
+            b.startObject("field3");
+            b.field("type", "scaled_float").field("scaling_factor", scalingFactorField3);
+            b.endObject();
+            b.endObject();
+        });
+    }
+
+    public void testDefaults() throws Exception {
+        XContentBuilder mapping = fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0));
+        DocumentMapper mapper = createDocumentMapper(mapping);
+        assertEquals(mapping.toString(), mapper.mappingSource().toString());
+
+        ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
+        validateScaledFloatFields(doc, "field", 123, 10.0);
+    }
+
     public void testMissingScalingFactor() {
         Exception e = expectThrows(
             MapperParsingException.class,

modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java

+9-24
@@ -67,19 +67,11 @@ public void testUrlRepository() throws Exception {

         logger.info("--> creating repository");
         Path repositoryLocation = randomRepoPath();
-        assertAcked(
-            client.admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType(FsRepository.TYPE)
-                .setSettings(
-                    Settings.builder()
-                        .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
-                        .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
-                        .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
-                )
-        );
-
+        Settings.Builder settings = Settings.builder()
+            .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
+            .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+            .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
+        createRepository("test-repo", FsRepository.TYPE, settings);
         createIndex("test-idx");
         ensureGreen();

@@ -115,17 +107,10 @@
         cluster().wipeIndices("test-idx");

         logger.info("--> create read-only URL repository");
-        assertAcked(
-            client.admin()
-                .cluster()
-                .preparePutRepository("url-repo")
-                .setType(URLRepository.TYPE)
-                .setSettings(
-                    Settings.builder()
-                        .put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString())
-                        .put("list_directories", randomBoolean())
-                )
-        );
+        Settings.Builder settingsBuilder = Settings.builder()
+            .put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString())
+            .put("list_directories", randomBoolean());
+        createRepository("url-repo", URLRepository.TYPE, settingsBuilder);
         logger.info("--> restore index after deletion");
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
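
The same refactor pattern appears in the Azure third-party test below: instead of asserting on the raw put-repository response, a test builds a Settings.Builder and hands it to a shared helper. A hedged sketch of the call shape, with the repository name and settings as placeholders and the helper signature inferred only from its usage in this diff:

    Settings.Builder settings = Settings.builder()
        .put(FsRepository.LOCATION_SETTING.getKey(), randomRepoPath())
        .put(FsRepository.COMPRESS_SETTING.getKey(), true);
    createRepository("my-test-repo", FsRepository.TYPE, settings);   // acknowledgement is assumed to be checked inside the helper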

plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java

+6-13
@@ -38,7 +38,6 @@
 import com.azure.storage.blob.models.BlobStorageException;
 import org.opensearch.action.ActionRunnable;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.settings.MockSecureSettings;
 import org.opensearch.common.settings.SecureSettings;
@@ -47,6 +46,7 @@
 import org.opensearch.plugins.Plugin;
 import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.junit.AfterClass;

 import java.net.HttpURLConnection;
@@ -56,7 +56,6 @@
 import reactor.core.scheduler.Schedulers;

 import static org.hamcrest.Matchers.blankOrNullString;
-import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;

 public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
@@ -103,17 +102,11 @@ protected SecureSettings credentials() {

     @Override
     protected void createRepository(String repoName) {
-        AcknowledgedResponse putRepositoryResponse = client().admin()
-            .cluster()
-            .preparePutRepository(repoName)
-            .setType("azure")
-            .setSettings(
-                Settings.builder()
-                    .put("container", System.getProperty("test.azure.container"))
-                    .put("base_path", System.getProperty("test.azure.base"))
-            )
-            .get();
-        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        Settings.Builder settings = Settings.builder()
+            .put("container", System.getProperty("test.azure.container"))
+            .put("base_path", System.getProperty("test.azure.base"));
+
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "azure", settings);
         if (Strings.hasText(System.getProperty("test.azure.sas_token"))) {
             ensureSasTokenPermissions();
         }
