
Commit 6b18e9c

jed326 (Jay Deng) authored and Jay Deng committed

Perform buildAggregation concurrently and support Composite Aggregations

Signed-off-by: Jay Deng <jayd0104@gmail.com>

1 parent 5b4b4aa commit 6b18e9c

File tree: 12 files changed, +141 -41 lines changed

CHANGELOG.md (+1)

@@ -102,6 +102,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 
 ## [Unreleased 2.x]
 ### Added
+- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
 
 ### Dependencies
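For context, concurrent segment search is controlled by a cluster setting (CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, statically imported by the integration test below). The following sketch of toggling it from an integration test is illustrative only and not part of this commit.

// Illustrative only: enable cluster-level concurrent segment search from an
// OpenSearchIntegTestCase subclass before running aggregation tests.
// Uses the static import of CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING shown in CompositeAggIT below.
void enableConcurrentSegmentSearch() {
    client().admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true))
        .get();
}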

server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java (+16 -13)

@@ -26,6 +26,7 @@
 import java.util.Collection;
 import java.util.List;
 
+import static org.opensearch.indices.IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING;
 import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
@@ -50,23 +51,25 @@ public void setupSuiteScopeCluster() throws Exception {
         assertAcked(
             prepareCreate(
                 "idx",
-                Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                Settings.builder()
+                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                    .put(INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), false)
             ).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer")
         );
         waitForRelocation(ClusterHealthStatus.GREEN);
 
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get();
-        refresh("idx");
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get();
-        refresh("idx");
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get();
-        refresh("idx");
-        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get();
-        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get();
-        refresh("idx");
+        indexRandom(
+            true,
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1"),
+            client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100")
+        );
 
         waitForRelocation(ClusterHealthStatus.GREEN);
         refresh();
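The hunk above only covers the suite setup; the kind of request this test class exercises is a composite aggregation over the indexed fields. A hedged sketch follows; the aggregation and source names are illustrative, not copied from the test.

// Illustrative composite aggregation over the "type" and "num" fields set up above;
// "composite_buckets", "type_source" and "num_source" are hypothetical names.
SearchResponse response = client().prepareSearch("idx")
    .setSize(0)
    .addAggregation(
        new CompositeAggregationBuilder(
            "composite_buckets",
            List.of(
                new TermsValuesSourceBuilder("type_source").field("type"),
                new TermsValuesSourceBuilder("num_source").field("num")
            )
        )
    )
    .get();
assertSearchResponse(response);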

server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java (+3 -11)

@@ -15,9 +15,9 @@
 import org.opensearch.search.query.ReduceableSearchResult;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Objects;
 
 /**
  * Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global
@@ -56,17 +56,9 @@ public String getCollectorReason() {
 
     @Override
     public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
-        final List<Aggregator> aggregators = context.bucketCollectorProcessor().toAggregators(collectors);
-        final List<InternalAggregation> internals = new ArrayList<>(aggregators.size());
+        final List<InternalAggregation> internals = context.bucketCollectorProcessor().toInternalAggregations(collectors);
+        assert internals.stream().noneMatch(Objects::isNull);
         context.aggregations().resetBucketMultiConsumer();
-        for (Aggregator aggregator : aggregators) {
-            try {
-                // post collection is called in ContextIndexSearcher after search on leaves are completed
-                internals.add(aggregator.buildTopLevel());
-            } catch (IOException e) {
-                throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e);
-            }
-        }
 
         final InternalAggregations internalAggregations = InternalAggregations.from(internals);
         return buildAggregationResult(internalAggregations);

server/src/main/java/org/opensearch/search/aggregations/Aggregator.java (+16 -4)

@@ -33,6 +33,7 @@
 package org.opensearch.search.aggregations;
 
 import org.opensearch.OpenSearchParseException;
+import org.opensearch.common.SetOnce;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.lease.Releasable;
 import org.opensearch.core.ParseField;
@@ -61,6 +62,8 @@
 @PublicApi(since = "1.0.0")
 public abstract class Aggregator extends BucketCollector implements Releasable {
 
+    private final SetOnce<InternalAggregation> internalAggregation = new SetOnce<>();
+
     /**
      * Parses the aggregation request and creates the appropriate aggregator factory for it.
      *
@@ -83,6 +86,13 @@ public interface Parser {
         AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException;
     }
 
+    /**
+     * Returns the InternalAggregation stored during post collection
+     */
+    public InternalAggregation getPostCollectionAggregation() {
+        return internalAggregation.get();
+    }
+
     /**
      * Return the name of this aggregator.
      */
@@ -185,13 +195,15 @@ public interface BucketComparator {
 
     /**
      * Build the result of this aggregation if it is at the "top level"
-     * of the aggregation tree. If, instead, it is a sub-aggregation of
-     * another aggregation then the aggregation that contains it will call
-     * {@link #buildAggregations(long[])}.
+     * of the aggregation tree and save it. This should get called
+     * during post collection. If, instead, it is a sub-aggregation
+     * of another aggregation then the aggregation that contains
+     * it will call {@link #buildAggregations(long[])}.
      */
     public final InternalAggregation buildTopLevel() throws IOException {
         assert parent() == null;
-        return buildAggregations(new long[] { 0 })[0];
+        this.internalAggregation.set(buildAggregations(new long[] { 0 })[0]);
+        return internalAggregation.get();
     }
 
     /**
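Combined with the BucketCollectorProcessor change below, the intended lifecycle is that buildTopLevel() now runs during post collection and caches its result in the SetOnce, and the reduce phase reads that cached value instead of rebuilding it. A minimal sketch of that contract; collectThenReduce is a hypothetical driver, not an OpenSearch API.

// Hypothetical driver illustrating the new contract for a top-level aggregator.
InternalAggregation collectThenReduce(Aggregator topLevel) throws IOException {
    // Leaf collection happens elsewhere (per slice under concurrent segment search).

    // Post collection: build the top-level result once and cache it in the SetOnce.
    topLevel.postCollection();
    topLevel.buildTopLevel();

    // Reduce phase, possibly on a different thread: read the cached result.
    return topLevel.getPostCollectionAggregation();
}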

server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java (+36)

@@ -72,6 +72,15 @@ public void processPostCollection(Collector collectorTree) throws IOException {
                 }
             } else if (currentCollector instanceof BucketCollector) {
                 ((BucketCollector) currentCollector).postCollection();
+
+                // Perform build aggregation during post collection
+                if (currentCollector instanceof Aggregator) {
+                    ((Aggregator) currentCollector).buildTopLevel();
+                } else if (currentCollector instanceof MultiBucketCollector) {
+                    for (Collector innerCollector : ((MultiBucketCollector) currentCollector).getCollectors()) {
+                        collectors.offer(innerCollector);
+                    }
+                }
             }
         }
     }
@@ -106,4 +115,31 @@ public List<Aggregator> toAggregators(Collection<Collector> collectors) {
         }
         return aggregators;
     }
+
+    /**
+     * Unwraps the input collection of {@link Collector} to get the list of the {@link InternalAggregation}. The
+     * input is expected to contain the collectors related to Aggregations only as that is passed to {@link AggregationCollectorManager}
+     * during the reduce phase. This list of {@link InternalAggregation} is used to optionally perform reduce at shard level before
+     * returning response to coordinator
+     * @param collectors collection of aggregation collectors to reduce
+     * @return list of unwrapped {@link InternalAggregation}
+     */
+    public List<InternalAggregation> toInternalAggregations(Collection<Collector> collectors) throws IOException {
+        List<InternalAggregation> internalAggregations = new ArrayList<>();
+
+        final Deque<Collector> allCollectors = new LinkedList<>(collectors);
+        while (!allCollectors.isEmpty()) {
+            Collector currentCollector = allCollectors.pop();
+            if (currentCollector instanceof InternalProfileCollector) {
+                currentCollector = ((InternalProfileCollector) currentCollector).getCollector();
+            }
+
+            if (currentCollector instanceof Aggregator) {
+                internalAggregations.add(((Aggregator) currentCollector).getPostCollectionAggregation());
+            } else if (currentCollector instanceof MultiBucketCollector) {
+                allCollectors.addAll(Arrays.asList(((MultiBucketCollector) currentCollector).getCollectors()));
+            }
+        }
+        return internalAggregations;
+    }
 }
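toInternalAggregations is the reduce-side counterpart of the buildTopLevel call added to processPostCollection above: it unwraps profiling wrappers, flattens MultiBucketCollector children, and returns the aggregation each Aggregator cached during post collection. A rough usage sketch from a collector manager's reduce; variable names are illustrative.

// Illustrative reduce-side usage; a null entry here would mean buildTopLevel never ran
// for that slice, which the assert in AggregationCollectorManager guards against.
List<InternalAggregation> perSliceResults =
    searchContext.bucketCollectorProcessor().toInternalAggregations(collectors);
InternalAggregations combined = InternalAggregations.from(perSliceResults);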

server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java (+15)

@@ -12,8 +12,10 @@
 import org.apache.lucene.search.CollectorManager;
 import org.opensearch.search.internal.SearchContext;
 import org.opensearch.search.profile.query.CollectorResult;
+import org.opensearch.search.query.ReduceableSearchResult;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Objects;
 
@@ -42,6 +44,19 @@ public Collector newCollector() throws IOException {
         }
     }
 
+    @Override
+    public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
+        // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in
+        // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore
+        // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree.
+        if (context.searcher().getLeafContexts().isEmpty()) {
+            for (Collector c : collectors) {
+                context.bucketCollectorProcessor().processPostCollection(c);
+            }
+        }
+        return super.reduce(collectors);
+    }
+
     @Override
     protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) {
         // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices
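The zero-leaf guard covers shards without any segments: the searcher never visits a leaf, so postCollection (and therefore buildTopLevel) is never invoked on the search path, and reduce would otherwise see missing aggregations. An illustrative test-style scenario, not taken from the commit:

// Illustrative only: an index with no documents must still return an empty
// aggregation result rather than nothing at all.
createIndex("empty_idx");
ensureGreen("empty_idx");

SearchResponse response = client().prepareSearch("empty_idx")
    .setSize(0)
    .addAggregation(AggregationBuilders.terms("types").field("type"))
    .get();

Terms types = response.getAggregations().get("types");
assertEquals(0, types.getBuckets().size());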

server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java (+3 -5)

@@ -131,12 +131,10 @@ public static class MultiBucketConsumer implements IntConsumer {
         private final int limit;
         private final CircuitBreaker breaker;
 
-        // aggregations execute in a single thread for both sequential
-        // and concurrent search, so no atomic here
+        // count is currently only updated in final reduce phase which is executed in single thread for both concurrent and non-concurrent
+        // search
         private int count;
-
-        // will be updated by multiple threads in concurrent search
-        // hence making it as LongAdder
+        // will be updated by multiple threads in concurrent search hence making it as LongAdder
         private final LongAdder callCount;
         private volatile boolean circuitBreakerTripped;
         private final int availProcessors;
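The reworded comments pin down the threading model: count is only mutated during the single-threaded final reduce, while callCount is incremented from every slice thread under concurrent segment search, which is why it is a LongAdder. A minimal, standalone illustration of that write-mostly pattern (not OpenSearch code):

import java.util.concurrent.atomic.LongAdder;

// Many threads increment, one thread reads the total afterwards.
class SliceCallCounter {
    private final LongAdder callCount = new LongAdder();

    void onCall() {
        callCount.increment();  // safe to call concurrently from slice threads
    }

    long total() {
        return callCount.sum(); // read once, e.g. during the single-threaded reduce
    }
}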

server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java (+15)

@@ -12,8 +12,10 @@
 import org.apache.lucene.search.CollectorManager;
 import org.opensearch.search.internal.SearchContext;
 import org.opensearch.search.profile.query.CollectorResult;
+import org.opensearch.search.query.ReduceableSearchResult;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Objects;
 
@@ -42,6 +44,19 @@ public Collector newCollector() throws IOException {
         }
     }
 
+    @Override
+    public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
+        // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in
+        // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore
+        // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree.
+        if (context.searcher().getLeafContexts().isEmpty()) {
+            for (Collector c : collectors) {
+                context.bucketCollectorProcessor().processPostCollection(c);
+            }
+        }
+        return super.reduce(collectors);
+    }
+
     @Override
     protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) {
         // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices

server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java (+3 -2)

@@ -80,7 +80,8 @@ protected Aggregator createInternal(
 
     @Override
     protected boolean supportsConcurrentSegmentSearch() {
-        // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details
-        return false;
+        // Disable concurrent search if any scripting is used. See https://github.com/opensearch-project/OpenSearch/issues/12331 for details
+        // return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript);
+        return true;
     }
 }
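The commented-out line keeps a fallback in view: if the scripting problem tracked in issue #12331 resurfaces, concurrent search could be limited to composite sources without scripts. That alternative would look like the sketch below; it is the diff's commented-out code made explicit, not what this commit enables.

// Alternative gating suggested by the commented-out line above (requires java.util.Arrays).
@Override
protected boolean supportsConcurrentSegmentSearch() {
    return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript);
}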

server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java (+21 -5)

@@ -94,10 +94,10 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
     private final long valueCount;
     private final String fieldName;
     private Weight weight;
-    private final GlobalOrdLookupFunction lookupGlobalOrd;
     protected final CollectionStrategy collectionStrategy;
     protected int segmentsWithSingleValuedOrds = 0;
     protected int segmentsWithMultiValuedOrds = 0;
+    private SortedSetDocValues dvs;
 
     /**
      * Lookup global ordinals
@@ -129,11 +129,10 @@ public GlobalOrdinalsStringTermsAggregator(
         this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
         this.valuesSource = valuesSource;
         final IndexReader reader = context.searcher().getIndexReader();
-        final SortedSetDocValues values = reader.leaves().size() > 0
+        final SortedSetDocValues values = !reader.leaves().isEmpty()
            ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
            : DocValues.emptySortedSet();
         this.valueCount = values.getValueCount();
-        this.lookupGlobalOrd = values::lookupOrd;
         this.acceptedGlobalOrdinals = includeExclude == null ? ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get;
         if (remapGlobalOrds) {
             this.collectionStrategy = new RemapGlobalOrds(cardinality);
@@ -885,7 +884,10 @@ PriorityQueue<OrdBucket> buildPriorityQueue(int size) {
         }
 
         StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException {
-            BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));
+            // Recreate DocValues as needed for concurrent segment search
+            SortedSetDocValues values = getDocValues();
+            BytesRef term = BytesRef.deepCopyOf(values.lookupOrd(temp.globalOrd));
+
             StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
             result.bucketOrd = temp.bucketOrd;
             result.docCountError = 0;
@@ -1001,7 +1003,9 @@ BucketUpdater<SignificantStringTerms.Bucket> bucketUpdater(long owningBucketOrd)
             long subsetSize = subsetSize(owningBucketOrd);
             return (spare, globalOrd, bucketOrd, docCount) -> {
                 spare.bucketOrd = bucketOrd;
-                oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
+                // Recreate DocValues as needed for concurrent segment search
+                SortedSetDocValues values = getDocValues();
+                oversizedCopy(values.lookupOrd(globalOrd), spare.termBytes);
                 spare.subsetDf = docCount;
                 spare.subsetSize = subsetSize;
                 spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
@@ -1086,4 +1090,16 @@ private void oversizedCopy(BytesRef from, BytesRef to) {
      * Predicate used for {@link #acceptedGlobalOrdinals} if there is no filter.
      */
     private static final LongPredicate ALWAYS_TRUE = l -> true;
+
+    /**
+     * If DocValues have not been initialized yet for reduce phase, create and set them.
+     */
+    private SortedSetDocValues getDocValues() throws IOException {
+        if (dvs == null) {
+            dvs = !context.searcher().getIndexReader().leaves().isEmpty()
+                ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
+                : DocValues.emptySortedSet();
+        }
+        return dvs;
+    }
 }
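For context on why lookupGlobalOrd was removed: the old field bound the lookup to one SortedSetDocValues instance created on the construction thread, roughly as paraphrased below, while the diff's comments note that the doc values are now recreated as needed (via getDocValues()) for concurrent segment search, where buildAggregation runs during post collection. The paraphrase is illustrative, not part of the commit.

// Paraphrase of the removed pattern: a lookup function tied to a single
// SortedSetDocValues instance obtained at construction time.
final SortedSetDocValues constructionTimeValues = !reader.leaves().isEmpty()
    ? valuesSource.globalOrdinalsValues(reader.leaves().get(0))
    : DocValues.emptySortedSet();
final GlobalOrdLookupFunction lookupGlobalOrd = constructionTimeValues::lookupOrd;
// Later, possibly on a different thread during buildAggregation:
// BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));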

server/src/main/java/org/opensearch/search/internal/SearchContext.java (+6)

@@ -113,6 +113,12 @@ public List<Aggregator> toAggregators(Collection<Collector> collectors) {
             // should not be called when there is no aggregation collector
             throw new IllegalStateException("Unexpected toAggregators call on NO_OP_BUCKET_COLLECTOR_PROCESSOR");
         }
+
+        @Override
+        public List<InternalAggregation> toInternalAggregations(Collection<Collector> collectors) {
+            // should not be called when there is no aggregation collector
+            throw new IllegalStateException("Unexpected toInternalAggregations call on NO_OP_BUCKET_COLLECTOR_PROCESSOR");
+        }
     };
 
     private final List<Releasable> releasables = new CopyOnWriteArrayList<>();

test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java (+6 -1)

@@ -211,7 +211,12 @@
     "LuceneFixedGap",
     "LuceneVarGapFixedInterval",
     "LuceneVarGapDocFreqInterval",
-    "Lucene50" })
+    "Lucene50",
+    "Lucene90",
+    "Lucene94",
+    "Lucene90",
+    "Lucene95",
+    "Lucene99" })
 @LuceneTestCase.SuppressReproduceLine
 public abstract class OpenSearchTestCase extends LuceneTestCase {
