Skip to content

Commit 97656bb

Browse files
jed326 (Jay Deng)
authored and
Jay Deng
committed
Perform buildAggregation concurrently and support Composite Aggregations
Signed-off-by: Jay Deng <jayd0104@gmail.com>
1 parent d4e1ab1 commit 97656bb

File tree

11 files changed

+128
-44
lines changed

11 files changed

+128
-44
lines changed

CHANGELOG.md

+1
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
118118
- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625))
119119
- [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709))
120120
- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583))
121+
- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
121122

122123
### Dependencies
123124
- Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288))

server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java

+16-13
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
import java.util.Collection;
2727
import java.util.List;
2828

29+
import static org.opensearch.indices.IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING;
2930
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
3031
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
3132
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
@@ -50,23 +51,25 @@ public void setupSuiteScopeCluster() throws Exception {
5051
assertAcked(
5152
prepareCreate(
5253
"idx",
53-
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
54+
Settings.builder()
55+
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
56+
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
57+
.put(INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), false)
5458
).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer")
5559
);
5660
waitForRelocation(ClusterHealthStatus.GREEN);
5761

58-
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get();
59-
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get();
60-
refresh("idx");
61-
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get();
62-
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get();
63-
refresh("idx");
64-
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get();
65-
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get();
66-
refresh("idx");
67-
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get();
68-
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get();
69-
refresh("idx");
62+
indexRandom(
63+
true,
64+
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5"),
65+
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50"),
66+
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2"),
67+
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20"),
68+
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10"),
69+
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15"),
70+
client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1"),
71+
client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100")
72+
);
7073

7174
waitForRelocation(ClusterHealthStatus.GREEN);
7275
refresh();

server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java

+3-11
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@
1515
import org.opensearch.search.query.ReduceableSearchResult;
1616

1717
import java.io.IOException;
18-
import java.util.ArrayList;
1918
import java.util.Collection;
2019
import java.util.List;
20+
import java.util.Objects;
2121

2222
/**
2323
* Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global
@@ -56,17 +56,9 @@ public String getCollectorReason() {
5656

5757
@Override
5858
public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
59-
final List<Aggregator> aggregators = context.bucketCollectorProcessor().toAggregators(collectors);
60-
final List<InternalAggregation> internals = new ArrayList<>(aggregators.size());
59+
final List<InternalAggregation> internals = context.bucketCollectorProcessor().toInternalAggregations(collectors);
60+
assert internals.stream().noneMatch(Objects::isNull);
6161
context.aggregations().resetBucketMultiConsumer();
62-
for (Aggregator aggregator : aggregators) {
63-
try {
64-
// post collection is called in ContextIndexSearcher after search on leaves are completed
65-
internals.add(aggregator.buildTopLevel());
66-
} catch (IOException e) {
67-
throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e);
68-
}
69-
}
7062

7163
final InternalAggregations internalAggregations = InternalAggregations.from(internals);
7264
return buildAggregationResult(internalAggregations);

server/src/main/java/org/opensearch/search/aggregations/Aggregator.java

+16-4
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
package org.opensearch.search.aggregations;
3434

3535
import org.opensearch.OpenSearchParseException;
36+
import org.opensearch.common.SetOnce;
3637
import org.opensearch.common.annotation.PublicApi;
3738
import org.opensearch.common.lease.Releasable;
3839
import org.opensearch.core.ParseField;
@@ -61,6 +62,8 @@
6162
@PublicApi(since = "1.0.0")
6263
public abstract class Aggregator extends BucketCollector implements Releasable {
6364

65+
private final SetOnce<InternalAggregation> internalAggregation = new SetOnce<>();
66+
6467
/**
6568
* Parses the aggregation request and creates the appropriate aggregator factory for it.
6669
*
@@ -83,6 +86,13 @@ public interface Parser {
8386
AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException;
8487
}
8588

89+
/**
90+
* Returns the InternalAggregation stored during post collection
91+
*/
92+
public InternalAggregation getPostCollectionAggregation() {
93+
return internalAggregation.get();
94+
}
95+
8696
/**
8797
* Return the name of this aggregator.
8898
*/
@@ -185,13 +195,15 @@ public interface BucketComparator {
185195

186196
/**
187197
* Build the result of this aggregation if it is at the "top level"
188-
* of the aggregation tree. If, instead, it is a sub-aggregation of
189-
* another aggregation then the aggregation that contains it will call
190-
* {@link #buildAggregations(long[])}.
198+
* of the aggregation tree and save it. This should get called
199+
* during post collection. If, instead, it is a sub-aggregation
200+
* of another aggregation then the aggregation that contains
201+
* it will call {@link #buildAggregations(long[])}.
191202
*/
192203
public final InternalAggregation buildTopLevel() throws IOException {
193204
assert parent() == null;
194-
return buildAggregations(new long[] { 0 })[0];
205+
this.internalAggregation.set(buildAggregations(new long[] { 0 })[0]);
206+
return internalAggregation.get();
195207
}
196208

197209
/**

server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java

+57
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
import java.util.Deque;
2323
import java.util.LinkedList;
2424
import java.util.List;
25+
import java.util.Objects;
2526
import java.util.Queue;
2627

2728
/**
@@ -72,6 +73,15 @@ public void processPostCollection(Collector collectorTree) throws IOException {
7273
}
7374
} else if (currentCollector instanceof BucketCollector) {
7475
((BucketCollector) currentCollector).postCollection();
76+
77+
// Perform build aggregation during post collection
78+
if (currentCollector instanceof Aggregator) {
79+
((Aggregator) currentCollector).buildTopLevel();
80+
} else if (currentCollector instanceof MultiBucketCollector) {
81+
for (Collector innerCollector : ((MultiBucketCollector) currentCollector).getCollectors()) {
82+
collectors.offer(innerCollector);
83+
}
84+
}
7585
}
7686
}
7787
}
@@ -106,4 +116,51 @@ public List<Aggregator> toAggregators(Collection<Collector> collectors) {
106116
}
107117
return aggregators;
108118
}
119+
120+
/**
121+
* Unwraps the input collection of {@link Collector} to get the list of the {@link InternalAggregation}. The
122+
* input is expected to contain the collectors related to Aggregations only as that is passed to {@link AggregationCollectorManager}
123+
* during the reduce phase. This list of {@link InternalAggregation} is used to optionally perform reduce at shard level before
124+
* returning response to coordinator
125+
* @param collectors collection of aggregation collectors to reduce
126+
* @return list of unwrapped {@link InternalAggregation}
127+
*/
128+
public List<InternalAggregation> toInternalAggregations(Collection<Collector> collectors) throws IOException {
129+
List<InternalAggregation> internalAggregations = new ArrayList<>();
130+
131+
final Deque<Collector> allCollectors = new LinkedList<>(collectors);
132+
while (!allCollectors.isEmpty()) {
133+
final Collector currentCollector = allCollectors.pop();
134+
// This can just be Aggregator
135+
if (currentCollector instanceof Aggregator) {
136+
internalAggregations.add(((Aggregator) currentCollector).getPostCollectionAggregation());
137+
} else if (currentCollector instanceof InternalProfileCollector) {
138+
if (((InternalProfileCollector) currentCollector).getCollector() instanceof Aggregator) {
139+
internalAggregations.add(
140+
((Aggregator) ((InternalProfileCollector) currentCollector).getCollector()).getPostCollectionAggregation()
141+
);
142+
} else if (((InternalProfileCollector) currentCollector).getCollector() instanceof MultiBucketCollector) {
143+
allCollectors.addAll(
144+
Arrays.asList(((MultiBucketCollector) ((InternalProfileCollector) currentCollector).getCollector()).getCollectors())
145+
);
146+
}
147+
} else if (currentCollector instanceof MultiBucketCollector) {
148+
allCollectors.addAll(Arrays.asList(((MultiBucketCollector) currentCollector).getCollectors()));
149+
}
150+
}
151+
152+
// Check that internalAggregations does not contain any null objects. If so that means postCollection, and subsequently
153+
// buildAggregation, was not called for a given collector. This can happen as collect will not get called whenever there are no
154+
// leaves on a shard. Since we build the InternalAggregation in postCollection that will not get called in such cases either.
155+
// Therefore we need to manually call it again here to build empty InternalAggregation objects for this collector tree.
156+
if (internalAggregations.stream().anyMatch(Objects::isNull)) {
157+
for (Collector c : collectors) {
158+
processPostCollection(c);
159+
}
160+
// Iterate through collector tree again to get InternalAggregations object
161+
return toInternalAggregations(collectors);
162+
} else {
163+
return internalAggregations;
164+
}
165+
}
109166
}

server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java

+5-5
Original file line numberDiff line numberDiff line change
@@ -131,12 +131,10 @@ public static class MultiBucketConsumer implements IntConsumer {
131131
private final int limit;
132132
private final CircuitBreaker breaker;
133133

134-
// aggregations execute in a single thread for both sequential
135-
// and concurrent search, so no atomic here
134+
// count is currently only updated in final reduce phase which is executed in single thread for both concurrent and non-concurrent
135+
// search
136136
private int count;
137-
138-
// will be updated by multiple threads in concurrent search
139-
// hence making it as LongAdder
137+
// will be updated by multiple threads in concurrent search hence making it as LongAdder
140138
private final LongAdder callCount;
141139
private volatile boolean circuitBreakerTripped;
142140
private final int availProcessors;
@@ -166,7 +164,9 @@ protected MultiBucketConsumer(
166164
@Override
167165
public void accept(int value) {
168166
if (value != 0) {
167+
// double check where count is actually used
169168
count += value;
169+
// count will not get reset until reduce, so if one thread hits this condition all other threads subsequently will as well.
170170
if (count > limit) {
171171
throw new TooManyBucketsException(
172172
"Trying to create too many buckets. Must be less than or equal to: ["

server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@
4040
import org.opensearch.search.internal.SearchContext;
4141

4242
import java.io.IOException;
43+
import java.util.Arrays;
4344
import java.util.Map;
4445

4546
/**
@@ -80,7 +81,7 @@ protected Aggregator createInternal(
8081

8182
@Override
8283
protected boolean supportsConcurrentSegmentSearch() {
83-
// See https://github.com/opensearch-project/OpenSearch/issues/12331 for details
84-
return false;
84+
// Disable concurrent search if any scripting is used. See https://github.com/opensearch-project/OpenSearch/issues/12331 for details
85+
return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript);
8586
}
8687
}

server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java

+12-5
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,6 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
9494
private final long valueCount;
9595
private final String fieldName;
9696
private Weight weight;
97-
private final GlobalOrdLookupFunction lookupGlobalOrd;
9897
protected final CollectionStrategy collectionStrategy;
9998
protected int segmentsWithSingleValuedOrds = 0;
10099
protected int segmentsWithMultiValuedOrds = 0;
@@ -129,11 +128,10 @@ public GlobalOrdinalsStringTermsAggregator(
129128
this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
130129
this.valuesSource = valuesSource;
131130
final IndexReader reader = context.searcher().getIndexReader();
132-
final SortedSetDocValues values = reader.leaves().size() > 0
131+
final SortedSetDocValues values = !reader.leaves().isEmpty()
133132
? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
134133
: DocValues.emptySortedSet();
135134
this.valueCount = values.getValueCount();
136-
this.lookupGlobalOrd = values::lookupOrd;
137135
this.acceptedGlobalOrdinals = includeExclude == null ? ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get;
138136
if (remapGlobalOrds) {
139137
this.collectionStrategy = new RemapGlobalOrds(cardinality);
@@ -885,7 +883,12 @@ PriorityQueue<OrdBucket> buildPriorityQueue(int size) {
885883
}
886884

887885
StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException {
888-
BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));
886+
// Recreate DocValues as needed for concurrent segment search
887+
SortedSetDocValues values = !context.searcher().getIndexReader().leaves().isEmpty()
888+
? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
889+
: DocValues.emptySortedSet();
890+
BytesRef term = BytesRef.deepCopyOf(values.lookupOrd(temp.globalOrd));
891+
889892
StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
890893
result.bucketOrd = temp.bucketOrd;
891894
result.docCountError = 0;
@@ -1001,7 +1004,11 @@ BucketUpdater<SignificantStringTerms.Bucket> bucketUpdater(long owningBucketOrd)
10011004
long subsetSize = subsetSize(owningBucketOrd);
10021005
return (spare, globalOrd, bucketOrd, docCount) -> {
10031006
spare.bucketOrd = bucketOrd;
1004-
oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
1007+
// Recreate DocValues as needed for concurrent segment search
1008+
SortedSetDocValues values = !context.searcher().getIndexReader().leaves().isEmpty()
1009+
? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
1010+
: DocValues.emptySortedSet();
1011+
oversizedCopy(values.lookupOrd(globalOrd), spare.termBytes);
10051012
spare.subsetDf = docCount;
10061013
spare.subsetSize = subsetSize;
10071014
spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);

server/src/main/java/org/opensearch/search/internal/SearchContext.java

+6
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,12 @@ public List<Aggregator> toAggregators(Collection<Collector> collectors) {
113113
// should not be called when there is no aggregation collector
114114
throw new IllegalStateException("Unexpected toAggregators call on NO_OP_BUCKET_COLLECTOR_PROCESSOR");
115115
}
116+
117+
@Override
118+
public List<InternalAggregation> toInternalAggregations(Collection<Collector> collectors) {
119+
// should not be called when there is no aggregation collector
120+
throw new IllegalStateException("Unexpected toInternalAggregations call on NO_OP_BUCKET_COLLECTOR_PROCESSOR");
121+
}
116122
};
117123

118124
private final List<Releasable> releasables = new CopyOnWriteArrayList<>();

server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfiler.java

+3-3
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,8 @@
3636
import org.opensearch.search.aggregations.Aggregator;
3737
import org.opensearch.search.profile.AbstractProfiler;
3838

39-
import java.util.HashMap;
4039
import java.util.Map;
40+
import java.util.concurrent.ConcurrentHashMap;
4141

4242
/**
4343
* Main class to profile aggregations
@@ -47,14 +47,14 @@
4747
@PublicApi(since = "1.0.0")
4848
public class AggregationProfiler extends AbstractProfiler<AggregationProfileBreakdown, Aggregator> {
4949

50-
private final Map<Aggregator, AggregationProfileBreakdown> profileBreakdownLookup = new HashMap<>();
50+
// Use ConcurrentHashMap as buildAggregation will be performed concurrently and thus require concurrent access
51+
private final Map<Aggregator, AggregationProfileBreakdown> profileBreakdownLookup = new ConcurrentHashMap<>();
5152

5253
public AggregationProfiler() {
5354
super(new InternalAggregationProfileTree());
5455
}
5556

5657
/**
57-
* This method does not need to be thread safe for concurrent search use case as well.
5858
* The {@link AggregationProfileBreakdown} for each Aggregation operator is created in sync path when
5959
* {@link org.opensearch.search.aggregations.BucketCollector#preCollection()} is called
6060
* on the Aggregation collector instances during construction.

test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java

+6-1
Original file line numberDiff line numberDiff line change
@@ -211,7 +211,12 @@
211211
"LuceneFixedGap",
212212
"LuceneVarGapFixedInterval",
213213
"LuceneVarGapDocFreqInterval",
214-
"Lucene50" })
214+
"Lucene50",
215+
"Lucene90",
216+
"Lucene94",
217+
"Lucene90",
218+
"Lucene95",
219+
"Lucene99" })
215220
@LuceneTestCase.SuppressReproduceLine
216221
public abstract class OpenSearchTestCase extends LuceneTestCase {
217222

0 commit comments

Comments (0)