@@ -30,7 +30,7 @@
 
 import org.apache.lucene.store.IndexInput;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
-import org.opensearch.common.CheckedTriFunction;
+import org.opensearch.common.CheckedConsumer;
 import org.opensearch.common.StreamContext;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.stream.write.StreamContextSupplier;
@@ -49,6 +49,7 @@
 import org.opensearch.repositories.s3.async.AsyncTransferManager;
 import org.opensearch.test.OpenSearchTestCase;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -466,24 +467,30 @@ private void testWriteBlobByStreams(boolean expectException, boolean throwExcept
             exceptionRef.set(ex);
             countDownLatch.countDown();
         });
-        blobContainer.asyncBlobUpload(new WriteContext("write_blob_by_streams_max_retries", new StreamContextSupplier() {
-            @Override
-            public StreamContext supplyStreamContext(long partSize) {
-                return new StreamContext(new CheckedTriFunction<Integer, Long, Long, InputStreamContainer, IOException>() {
-                    @Override
-                    public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException {
-                        InputStream inputStream = new OffsetRangeIndexInputStream(new ByteArrayIndexInput("desc", bytes), size, position);
-                        openInputStreams.add(inputStream);
-                        return new InputStreamContainer(inputStream, size, position);
-                    }
-                }, partSize, calculateLastPartSize(bytes.length, partSize), calculateNumberOfParts(bytes.length, partSize));
-            }
-        }, bytes.length, false, WritePriority.NORMAL, uploadSuccess -> {
+
+        StreamContextSupplier streamContextSupplier = partSize -> new StreamContext((partNo, size, position) -> {
+            InputStream inputStream = new OffsetRangeIndexInputStream(new ByteArrayIndexInput("desc", bytes), size, position);
+            openInputStreams.add(inputStream);
+            return new InputStreamContainer(inputStream, size, position);
+        }, partSize, calculateLastPartSize(bytes.length, partSize), calculateNumberOfParts(bytes.length, partSize));
+
+        CheckedConsumer<Boolean, IOException> uploadFinalizer = uploadSuccess -> {
             assertTrue(uploadSuccess);
             if (throwExceptionOnFinalizeUpload) {
                 throw new RuntimeException();
             }
-        }, false, null), completionListener);
+        };
+
+        WriteContext writeContext = new WriteContext.Builder().fileName("write_blob_by_streams_max_retries")
+            .streamContextSupplier(streamContextSupplier)
+            .fileSize(bytes.length)
+            .failIfAlreadyExists(false)
+            .writePriority(WritePriority.NORMAL)
+            .uploadFinalizer(uploadFinalizer)
+            .doRemoteDataIntegrityCheck(false)
+            .build();
+
+        blobContainer.asyncBlobUpload(writeContext, completionListener);
 
         assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS));
         // wait for completableFuture to finish
@@ -516,24 +523,30 @@ private void testWriteBlobByStreamsLargeBlob(boolean expectException, boolean th
             countDownLatch.countDown();
         });
         List<InputStream> openInputStreams = new ArrayList<>();
-        blobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() {
-            @Override
-            public StreamContext supplyStreamContext(long partSize) {
-                return new StreamContext(new CheckedTriFunction<Integer, Long, Long, InputStreamContainer, IOException>() {
-                    @Override
-                    public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException {
-                        InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position);
-                        openInputStreams.add(inputStream);
-                        return new InputStreamContainer(inputStream, size, position);
-                    }
-                }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize));
-            }
-        }, blobSize, false, WritePriority.NORMAL, uploadSuccess -> {
+
+        StreamContextSupplier streamContextSupplier = partSize1 -> new StreamContext((partNo, size, position) -> {
+            InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position);
+            openInputStreams.add(inputStream);
+            return new InputStreamContainer(inputStream, size, position);
+        }, partSize1, calculateLastPartSize(blobSize, partSize1), calculateNumberOfParts(blobSize, partSize1));
+
+        CheckedConsumer<Boolean, IOException> uploadFinalizer = uploadSuccess -> {
             assertTrue(uploadSuccess);
             if (throwExceptionOnFinalizeUpload) {
                 throw new RuntimeException();
             }
-        }, false, null), completionListener);
+        };
+
+        WriteContext writeContext = new WriteContext.Builder().fileName("write_large_blob")
+            .streamContextSupplier(streamContextSupplier)
+            .fileSize(blobSize)
+            .failIfAlreadyExists(false)
+            .writePriority(WritePriority.NORMAL)
+            .uploadFinalizer(uploadFinalizer)
+            .doRemoteDataIntegrityCheck(false)
+            .build();
+
+        blobContainer.asyncBlobUpload(writeContext, completionListener);
 
         assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS));
         if (expectException || throwExceptionOnFinalizeUpload) {
@@ -632,20 +645,23 @@ private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException) t
 
         List<InputStream> openInputStreams = new ArrayList<>();
         final S3BlobContainer s3BlobContainer = Mockito.spy(new S3BlobContainer(blobPath, blobStore));
-        s3BlobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() {
-            @Override
-            public StreamContext supplyStreamContext(long partSize) {
-                return new StreamContext(new CheckedTriFunction<Integer, Long, Long, InputStreamContainer, IOException>() {
-                    @Override
-                    public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException {
-                        InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position);
-                        openInputStreams.add(inputStream);
-                        return new InputStreamContainer(inputStream, size, position);
-                    }
-                }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize));
-            }
-        }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { assertTrue(uploadSuccess); }, false, null), completionListener);
 
+        StreamContextSupplier streamContextSupplier = partSize1 -> new StreamContext((partNo, size, position) -> {
+            InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position);
+            openInputStreams.add(inputStream);
+            return new InputStreamContainer(inputStream, size, position);
+        }, partSize1, calculateLastPartSize(blobSize, partSize1), calculateNumberOfParts(blobSize, partSize1));
+
+        WriteContext writeContext = new WriteContext.Builder().fileName("write_large_blob")
+            .streamContextSupplier(streamContextSupplier)
+            .fileSize(blobSize)
+            .failIfAlreadyExists(false)
+            .writePriority(WritePriority.HIGH)
+            .uploadFinalizer(Assert::assertTrue)
+            .doRemoteDataIntegrityCheck(false)
+            .build();
+
+        s3BlobContainer.asyncBlobUpload(writeContext, completionListener);
         assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS));
         if (expectException) {
             assertNotNull(exceptionRef.get());