 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.search.internal.InternalSearchResponse;
+import org.opensearch.tasks.CancellableTask;
 import org.opensearch.tasks.Task;
+import org.opensearch.tasks.TaskListener;
 import org.opensearch.tasks.TaskManager;
 import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;

 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;

@@ -289,4 +293,118 @@ public void testDefaultMaxConcurrentSearches() {
         assertThat(result, equalTo(1));
     }
 
+    public void testCancellation() {
+        // Initialize dependencies of TransportMultiSearchAction
+        Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build();
+        ActionFilters actionFilters = mock(ActionFilters.class);
+        when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
+        ThreadPool threadPool = new ThreadPool(settings);
+        TransportService transportService = new TransportService(
+            Settings.EMPTY,
+            mock(Transport.class),
+            threadPool,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()),
+            null,
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
+        ) {
+            @Override
+            public TaskManager getTaskManager() {
+                return taskManager;
+            }
+        };
+        ClusterService clusterService = mock(ClusterService.class);
+        when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build());
+
+        // Allow only one concurrent search at a time so that most requests are still pending when the parent task is cancelled.
+        int maxAllowedConcurrentSearches = 1;
+        AtomicInteger counter = new AtomicInteger();
+        AtomicReference<AssertionError> errorHolder = new AtomicReference<>();
+        // Complete each search asynchronously on the generic thread pool.
+        ExecutorService executorService = threadPool.executor(ThreadPool.Names.GENERIC);
+        final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CancellableTask[] parentTask = new CancellableTask[1];
+        NodeClient client = new NodeClient(settings, threadPool) {
+            @Override
+            public void search(final SearchRequest request, final ActionListener<SearchResponse> listener) {
+                if (parentTask[0] != null && parentTask[0].isCancelled()) {
+                    fail("Should not execute search after parent task is cancelled");
+                }
+                // Hold each search briefly (up to 10 ms) so the parent task can be cancelled while requests are still in flight.
+                try {
+                    countDownLatch.await(10, TimeUnit.MILLISECONDS);
+                } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                }
+
+                requests.add(request);
+                executorService.execute(() -> {
+                    counter.decrementAndGet();
+                    listener.onResponse(
+                        new SearchResponse(
+                            InternalSearchResponse.empty(),
+                            null,
+                            0,
+                            0,
+                            0,
+                            0L,
+                            ShardSearchFailure.EMPTY_ARRAY,
+                            SearchResponse.Clusters.EMPTY
+                        )
+                    );
+                });
+            }
+
+            @Override
+            public String getLocalNodeId() {
+                return "local_node_id";
+            }
+        };
+
+        TransportMultiSearchAction action = new TransportMultiSearchAction(
+            threadPool,
+            actionFilters,
+            transportService,
+            clusterService,
+            10,
+            System::nanoTime,
+            client
+        );
+
+        // Execute the multi search api, cancel the parent task, and verify that neither a response nor a failure is delivered:
+        try {
+            /*
+             * Allow for a large number of search requests in a single batch as previous implementations could stack overflow if the
+             * number of requests in a single batch was large.
+             */
+            int numSearchRequests = scaledRandomIntBetween(1024, 8192);
+            MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+            multiSearchRequest.maxConcurrentSearchRequests(maxAllowedConcurrentSearches);
+            for (int i = 0; i < numSearchRequests; i++) {
+                multiSearchRequest.add(new SearchRequest());
+            }
+            MultiSearchResponse[] responses = new MultiSearchResponse[1];
+            Exception[] exceptions = new Exception[1];
+            parentTask[0] = (CancellableTask) action.execute(multiSearchRequest, new TaskListener<>() {
+                @Override
+                public void onResponse(Task task, MultiSearchResponse items) {
+                    responses[0] = items;
+                }
+
+                @Override
+                public void onFailure(Task task, Exception e) {
+                    exceptions[0] = e;
+                }
+            });
+            parentTask[0].cancel("Giving up");
+            countDownLatch.countDown();
+
+            assertNull(responses[0]);
+            assertNull(exceptions[0]);
+        } finally {
+            assertTrue(OpenSearchTestCase.terminate(threadPool));
+        }
+    }
 }