Commit 98e0f7e

jed326 (Jay Deng) authored and Jay Deng committed
Perform buildAggregation concurrently and support Composite Aggregations
Signed-off-by: Jay Deng <jayd0104@gmail.com>
1 parent 5b4b4aa commit 98e0f7e

10 files changed: +132 additions, -41 deletions

CHANGELOG.md (+1)

@@ -102,6 +102,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 
 ## [Unreleased 2.x]
 ### Added
+- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
 
 ### Dependencies
 
server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java (+16, -13)

@@ -26,6 +26,7 @@
 import java.util.Collection;
 import java.util.List;
 
+import static org.opensearch.indices.IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING;
 import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
@@ -50,23 +51,25 @@ public void setupSuiteScopeCluster() throws Exception {
         assertAcked(
             prepareCreate(
                 "idx",
-                Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                Settings.builder()
+                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                    .put(INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), false)
             ).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer")
         );
         waitForRelocation(ClusterHealthStatus.GREEN);
 
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get();
-        refresh("idx");
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get();
-        refresh("idx");
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get();
-        refresh("idx");
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get();
-        refresh("idx");
+        indexRandom(
+            true,
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100")
+        );
 
         waitForRelocation(ClusterHealthStatus.GREEN);
         refresh();

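Note: the sketch below shows the kind of request this integration test exercises once the index above exists: a composite aggregation over the `type` keyword field of `idx`, with concurrent segment search enabled at the cluster level. The test method name and the aggregation/source names are invented for illustration; only the setting key, index name, and fields come from the diff above, and the usual imports (SearchResponse, Settings, CompositeAggregationBuilder, TermsValuesSourceBuilder) are assumed.

```java
// Hypothetical test method, not part of this commit.
public void testCompositeAggWithConcurrentSegmentSearch() {
    // One way to enable concurrent segment search cluster-wide (key from the static import above).
    client().admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build())
        .get();

    // A single composite source over the "type" keyword field created in setupSuiteScopeCluster().
    List<CompositeValuesSourceBuilder<?>> sources = List.of(new TermsValuesSourceBuilder("type").field("type"));
    SearchResponse response = client().prepareSearch("idx")
        .setSize(0)
        .addAggregation(new CompositeAggregationBuilder("composite_types", sources))
        .get();
    assertSearchResponse(response);
}
```
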
server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java (+3, -11)

@@ -15,9 +15,9 @@
 import org.opensearch.search.query.ReduceableSearchResult;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Objects;
 
 /**
  * Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global
@@ -56,17 +56,9 @@ public String getCollectorReason() {
 
     @Override
     public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
-        final List<Aggregator> aggregators = context.bucketCollectorProcessor().toAggregators(collectors);
-        final List<InternalAggregation> internals = new ArrayList<>(aggregators.size());
+        final List<InternalAggregation> internals = context.bucketCollectorProcessor().toInternalAggregations(collectors);
+        assert internals.stream().noneMatch(Objects::isNull);
         context.aggregations().resetBucketMultiConsumer();
-        for (Aggregator aggregator : aggregators) {
-            try {
-                // post collection is called in ContextIndexSearcher after search on leaves are completed
-                internals.add(aggregator.buildTopLevel());
-            } catch (IOException e) {
-                throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e);
-            }
-        }
 
         final InternalAggregations internalAggregations = InternalAggregations.from(internals);
         return buildAggregationResult(internalAggregations);

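Note: the shape of this change, reduced to plain Java: result building now happens during post collection (which concurrent segment search runs once per slice, potentially on several threads), so the single-threaded reduce only gathers values that already exist. The sketch below uses invented `SliceCollector`/`SliceResult`/`SliceReducer` types rather than the OpenSearch classes.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

class SliceResult {}

class SliceCollector {
    private SliceResult result;

    // Runs at the end of collection for this slice (concurrently across slices).
    void postCollection() {
        result = new SliceResult();   // stands in for Aggregator#buildTopLevel()
    }

    SliceResult prebuiltResult() {    // stands in for Aggregator#getPostCollectionAggregation()
        return result;
    }
}

class SliceReducer {
    // Single-threaded reduce: before this commit it built each result here, serially, in a loop.
    List<SliceResult> reduce(Collection<SliceCollector> collectors) {
        List<SliceResult> results = new ArrayList<>();
        for (SliceCollector collector : collectors) {
            results.add(collector.prebuiltResult());
        }
        return results;
    }
}
```
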
server/src/main/java/org/opensearch/search/aggregations/Aggregator.java (+16, -4)

@@ -33,6 +33,7 @@
 package org.opensearch.search.aggregations;
 
 import org.opensearch.OpenSearchParseException;
+import org.opensearch.common.SetOnce;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.lease.Releasable;
 import org.opensearch.core.ParseField;
@@ -61,6 +62,8 @@
 @PublicApi(since = "1.0.0")
 public abstract class Aggregator extends BucketCollector implements Releasable {
 
+    private final SetOnce<InternalAggregation> internalAggregation = new SetOnce<>();
+
     /**
      * Parses the aggregation request and creates the appropriate aggregator factory for it.
      *
@@ -83,6 +86,13 @@ public interface Parser {
         AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException;
     }
 
+    /**
+     * Returns the InternalAggregation stored during post collection
+     */
+    public InternalAggregation getPostCollectionAggregation() {
+        return internalAggregation.get();
+    }
+
     /**
      * Return the name of this aggregator.
      */
@@ -185,13 +195,15 @@ public interface BucketComparator {
 
     /**
      * Build the result of this aggregation if it is at the "top level"
-     * of the aggregation tree. If, instead, it is a sub-aggregation of
-     * another aggregation then the aggregation that contains it will call
-     * {@link #buildAggregations(long[])}.
+     * of the aggregation tree and save it. This should get called
+     * during post collection. If, instead, it is a sub-aggregation
+     * of another aggregation then the aggregation that contains
+     * it will call {@link #buildAggregations(long[])}.
      */
     public final InternalAggregation buildTopLevel() throws IOException {
         assert parent() == null;
-        return buildAggregations(new long[] { 0 })[0];
+        this.internalAggregation.set(buildAggregations(new long[] { 0 })[0]);
+        return internalAggregation.get();
     }
 
     /**

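Note: the `SetOnce` field gives `buildTopLevel()` build-once/read-later semantics: the result is stored the first time it is built and handed back unchanged to whoever asks later during reduce. As a rough illustration of those semantics only (not the `org.opensearch.common.SetOnce` implementation), an equivalent could be written with an `AtomicReference`; `MyAggregator` and the `String` result type are hypothetical stand-ins for an aggregator and its `InternalAggregation`.

```java
import java.util.concurrent.atomic.AtomicReference;

class BuildOnce<T> {
    // Same idea as the SetOnce used in the commit: a single successful set, many reads.
    private final AtomicReference<T> value = new AtomicReference<>();

    void set(T v) {
        if (!value.compareAndSet(null, v)) {
            throw new IllegalStateException("already set");
        }
    }

    T get() {
        return value.get();
    }
}

class MyAggregator {
    private final BuildOnce<String> topLevelResult = new BuildOnce<>();

    // Called once during post collection, potentially on a slice thread.
    String buildTopLevel() {
        topLevelResult.set("built aggregation");
        return topLevelResult.get();
    }

    // Called later during reduce; returns null if buildTopLevel() never ran (e.g. no leaves).
    String getPostCollectionAggregation() {
        return topLevelResult.get();
    }
}
```
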
server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java (+57)

@@ -22,6 +22,7 @@
 import java.util.Deque;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Queue;
 
 /**
@@ -72,6 +73,15 @@ public void processPostCollection(Collector collectorTree) throws IOException {
                 }
             } else if (currentCollector instanceof BucketCollector) {
                 ((BucketCollector) currentCollector).postCollection();
+
+                // Perform build aggregation during post collection
+                if (currentCollector instanceof Aggregator) {
+                    ((Aggregator) currentCollector).buildTopLevel();
+                } else if (currentCollector instanceof MultiBucketCollector) {
+                    for (Collector innerCollector : ((MultiBucketCollector) currentCollector).getCollectors()) {
+                        collectors.offer(innerCollector);
+                    }
+                }
             }
         }
     }
@@ -106,4 +116,51 @@ public List<Aggregator> toAggregators(Collection<Collector> collectors) {
         }
         return aggregators;
     }
+
+    /**
+     * Unwraps the input collection of {@link Collector} to get the list of the {@link InternalAggregation}. The
+     * input is expected to contain the collectors related to Aggregations only as that is passed to {@link AggregationCollectorManager}
+     * during the reduce phase. This list of {@link InternalAggregation} is used to optionally perform reduce at shard level before
+     * returning response to coordinator
+     * @param collectors collection of aggregation collectors to reduce
+     * @return list of unwrapped {@link InternalAggregation}
+     */
+    public List<InternalAggregation> toInternalAggregations(Collection<Collector> collectors) throws IOException {
+        List<InternalAggregation> internalAggregations = new ArrayList<>();
+
+        final Deque<Collector> allCollectors = new LinkedList<>(collectors);
+        while (!allCollectors.isEmpty()) {
+            final Collector currentCollector = allCollectors.pop();
+            // This can just be Aggregator
+            if (currentCollector instanceof Aggregator) {
+                internalAggregations.add(((Aggregator) currentCollector).getPostCollectionAggregation());
+            } else if (currentCollector instanceof InternalProfileCollector) {
+                if (((InternalProfileCollector) currentCollector).getCollector() instanceof Aggregator) {
+                    internalAggregations.add(
+                        ((Aggregator) ((InternalProfileCollector) currentCollector).getCollector()).getPostCollectionAggregation()
+                    );
+                } else if (((InternalProfileCollector) currentCollector).getCollector() instanceof MultiBucketCollector) {
+                    allCollectors.addAll(
+                        Arrays.asList(((MultiBucketCollector) ((InternalProfileCollector) currentCollector).getCollector()).getCollectors())
+                    );
+                }
+            } else if (currentCollector instanceof MultiBucketCollector) {
+                allCollectors.addAll(Arrays.asList(((MultiBucketCollector) currentCollector).getCollectors()));
+            }
+        }
+
+        // Check that internalAggregations does not contain any null objects. If so that means postCollection, and subsequently
+        // buildAggregation, was not called for a given collector. This can happen as collect will not get called whenever there are no
+        // leaves on a shard. Since we build the InternalAggregation in postCollection that will not get called in such cases either.
+        // Therefore we need to manually call it again here to build empty InternalAggregation objects for this collector tree.
+        if (internalAggregations.stream().anyMatch(Objects::isNull)) {
+            for (Collector c : collectors) {
+                processPostCollection(c);
+            }
+            // Iterate through collector tree again to get InternalAggregations object
+            return toInternalAggregations(collectors);
+        } else {
+            return internalAggregations;
+        }
+    }
 }

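Note: the traversal in `toInternalAggregations` is an iterative walk over the collector tree. Below is a stripped-down sketch of the same pattern, using invented node types in place of `Collector`, `Aggregator`, `MultiBucketCollector`, and `InternalProfileCollector`; it is an illustration of the technique, not the OpenSearch implementation.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.List;

class CollectorTreeSketch {
    interface Node {}

    static class LeafAggregator implements Node {        // plays the role of Aggregator
        final Object prebuiltResult;
        LeafAggregator(Object prebuiltResult) { this.prebuiltResult = prebuiltResult; }
    }

    static class WrapperCollector implements Node {      // plays the role of a multi/profile wrapper
        final List<Node> children;
        WrapperCollector(List<Node> children) { this.children = children; }
    }

    static List<Object> unwrap(Collection<Node> roots) {
        List<Object> results = new ArrayList<>();
        Deque<Node> pending = new ArrayDeque<>(roots);
        while (!pending.isEmpty()) {
            Node current = pending.pop();
            if (current instanceof LeafAggregator) {
                // May be null if post collection never ran (e.g. a shard with no leaves);
                // the real method detects that and re-runs processPostCollection before retrying.
                results.add(((LeafAggregator) current).prebuiltResult);
            } else if (current instanceof WrapperCollector) {
                pending.addAll(((WrapperCollector) current).children);  // keep descending
            }
        }
        return results;
    }
}
```
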
server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java (+3, -5)

@@ -131,12 +131,10 @@ public static class MultiBucketConsumer implements IntConsumer {
         private final int limit;
         private final CircuitBreaker breaker;
 
-        // aggregations execute in a single thread for both sequential
-        // and concurrent search, so no atomic here
+        // count is currently only updated in final reduce phase which is executed in single thread for both concurrent and non-concurrent
+        // search
         private int count;
-
-        // will be updated by multiple threads in concurrent search
-        // hence making it as LongAdder
+        // will be updated by multiple threads in concurrent search hence making it as LongAdder
         private final LongAdder callCount;
         private volatile boolean circuitBreakerTripped;
         private final int availProcessors;

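Note: the comment change reflects the threading model: `count` is only touched by the single-threaded final reduce, while `callCount` is bumped from multiple slice threads, which is what `LongAdder` is for. A small self-contained demonstration of that distinction (thread and iteration counts are arbitrary, not taken from the commit):

```java
import java.util.concurrent.atomic.LongAdder;
import java.util.stream.IntStream;

public class CallCountDemo {
    public static void main(String[] args) {
        LongAdder callCount = new LongAdder();

        // Several threads incrementing concurrently, as slice collectors do per bucket callback.
        IntStream.range(0, 8).parallel().forEach(thread -> {
            for (int i = 0; i < 100_000; i++) {
                callCount.increment();
            }
        });

        // sum() is read after collection completes, e.g. when checking the circuit breaker.
        System.out.println(callCount.sum()); // always 800000; a bare int would race and undercount
    }
}
```
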
server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java (+3, -2)

@@ -40,6 +40,7 @@
 import org.opensearch.search.internal.SearchContext;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Map;
 
 /**
@@ -80,7 +81,7 @@ protected Aggregator createInternal(
 
     @Override
     protected boolean supportsConcurrentSegmentSearch() {
-        // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details
-        return false;
+        // Disable concurrent search if any scripting is used. See https://github.com/opensearch-project/OpenSearch/issues/12331 for details
+        return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript);
     }
 }

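Note: composite aggregations now opt into concurrent segment search unless any of their value sources uses a script. A minimal standalone sketch of that gating predicate, with a hypothetical `SourceConfig` standing in for `CompositeValuesSourceConfig`:

```java
import java.util.Arrays;

public class ConcurrentSearchGate {
    static class SourceConfig {
        private final boolean script;
        SourceConfig(boolean script) { this.script = script; }
        boolean hasScript() { return script; }
    }

    static boolean supportsConcurrentSegmentSearch(SourceConfig[] sources) {
        // true only if every source is script-free
        return Arrays.stream(sources).noneMatch(SourceConfig::hasScript);
    }

    public static void main(String[] args) {
        SourceConfig[] plain = { new SourceConfig(false), new SourceConfig(false) };
        SourceConfig[] scripted = { new SourceConfig(false), new SourceConfig(true) };
        System.out.println(supportsConcurrentSegmentSearch(plain));    // true
        System.out.println(supportsConcurrentSegmentSearch(scripted)); // false
    }
}
```
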
server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java (+21, -5)

@@ -94,10 +94,10 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
     private final long valueCount;
     private final String fieldName;
     private Weight weight;
-    private final GlobalOrdLookupFunction lookupGlobalOrd;
     protected final CollectionStrategy collectionStrategy;
     protected int segmentsWithSingleValuedOrds = 0;
     protected int segmentsWithMultiValuedOrds = 0;
+    private SortedSetDocValues dvs;
 
     /**
      * Lookup global ordinals
@@ -129,11 +129,10 @@ public GlobalOrdinalsStringTermsAggregator(
         this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
         this.valuesSource = valuesSource;
         final IndexReader reader = context.searcher().getIndexReader();
-        final SortedSetDocValues values = reader.leaves().size() > 0
+        final SortedSetDocValues values = !reader.leaves().isEmpty()
             ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
             : DocValues.emptySortedSet();
         this.valueCount = values.getValueCount();
-        this.lookupGlobalOrd = values::lookupOrd;
         this.acceptedGlobalOrdinals = includeExclude == null ? ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get;
         if (remapGlobalOrds) {
             this.collectionStrategy = new RemapGlobalOrds(cardinality);
@@ -885,7 +884,10 @@ PriorityQueue<OrdBucket> buildPriorityQueue(int size) {
         }
 
         StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException {
-            BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));
+            // Recreate DocValues as needed for concurrent segment search
+            SortedSetDocValues values = getDocValues();
+            BytesRef term = BytesRef.deepCopyOf(values.lookupOrd(temp.globalOrd));
+
             StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
             result.bucketOrd = temp.bucketOrd;
             result.docCountError = 0;
@@ -1001,7 +1003,9 @@ BucketUpdater<SignificantStringTerms.Bucket> bucketUpdater(long owningBucketOrd)
             long subsetSize = subsetSize(owningBucketOrd);
             return (spare, globalOrd, bucketOrd, docCount) -> {
                 spare.bucketOrd = bucketOrd;
-                oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
+                // Recreate DocValues as needed for concurrent segment search
+                SortedSetDocValues values = getDocValues();
+                oversizedCopy(values.lookupOrd(globalOrd), spare.termBytes);
                 spare.subsetDf = docCount;
                 spare.subsetSize = subsetSize;
                 spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
@@ -1086,4 +1090,16 @@ private void oversizedCopy(BytesRef from, BytesRef to) {
      * Predicate used for {@link #acceptedGlobalOrdinals} if there is no filter.
      */
     private static final LongPredicate ALWAYS_TRUE = l -> true;
+
+    /**
+     * If DocValues have not been initialized yet for reduce phase, create and set them.
+     */
+    private SortedSetDocValues getDocValues() throws IOException {
+        if (dvs == null) {
+            dvs = !context.searcher().getIndexReader().leaves().isEmpty()
+                ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
+                : DocValues.emptySortedSet();
+        }
+        return dvs;
+    }
 }

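Note: the `getDocValues()` helper replaces the `lookupGlobalOrd` function that used to be captured in the constructor: the `SortedSetDocValues` lookup is now (re)created lazily when buckets are materialized instead of being bound once up front. A generic sketch of that lazy re-initialization pattern follows; `LazyOrdinalLookup`, `OrdinalLookup`, and the `Supplier` loader are invented names, standing in for the field `dvs` and `valuesSource.globalOrdinalsValues(...)` / `DocValues.emptySortedSet()`.

```java
import java.util.function.Supplier;

class LazyOrdinalLookup {
    interface OrdinalLookup {
        String lookupOrd(long ord);
    }

    private OrdinalLookup lookup;                    // like the new `dvs` field
    private final Supplier<OrdinalLookup> loader;    // like globalOrdinalsValues(...) on leaf 0

    LazyOrdinalLookup(Supplier<OrdinalLookup> loader) {
        this.loader = loader;
    }

    // Mirrors getDocValues(): create on first use during bucket building, then reuse.
    OrdinalLookup getDocValues() {
        if (lookup == null) {
            lookup = loader.get();
        }
        return lookup;
    }
}
```
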
server/src/main/java/org/opensearch/search/internal/SearchContext.java (+6)

@@ -113,6 +113,12 @@ public List<Aggregator> toAggregators(Collection<Collector> collectors) {
             // should not be called when there is no aggregation collector
             throw new IllegalStateException("Unexpected toAggregators call on NO_OP_BUCKET_COLLECTOR_PROCESSOR");
         }
+
+        @Override
+        public List<InternalAggregation> toInternalAggregations(Collection<Collector> collectors) {
+            // should not be called when there is no aggregation collector
+            throw new IllegalStateException("Unexpected toInternalAggregations call on NO_OP_BUCKET_COLLECTOR_PROCESSOR");
+        }
     };
 
     private final List<Releasable> releasables = new CopyOnWriteArrayList<>();

test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java (+6, -1)

@@ -211,7 +211,12 @@
         "LuceneFixedGap",
         "LuceneVarGapFixedInterval",
         "LuceneVarGapDocFreqInterval",
-        "Lucene50" })
+        "Lucene50",
+        "Lucene90",
+        "Lucene94",
+        "Lucene90",
+        "Lucene95",
+        "Lucene99" })
 @LuceneTestCase.SuppressReproduceLine
 public abstract class OpenSearchTestCase extends LuceneTestCase {
 